
Rule engine implementation

lijian 4 years ago
Commit
69637ab487

+ 12 - 0
pkg/actor/actor.go

@@ -0,0 +1,12 @@
+package actor
+
+import "sparrow/pkg/protocol"
+
+// Actor is the interface implemented by all actors
+type Actor interface {
+	GetActorRef() Ref
+	Init(ctx Ctx) error
+	Process(msg protocol.ActorMsg) error
+	Destroy() error
+	OnProcessFailure(err error) *ProcessFailureStrategy
+}
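For reference, a minimal implementation of this interface inside pkg/actor could look like the sketch below (noopActor is hypothetical and not part of this commit; Ref, Ctx, and ProcessFailureStrategy are defined in the files that follow):

// noopActor: a do-nothing Actor, useful as a template
type noopActor struct {
	ctx Ctx
}

func (n *noopActor) GetActorRef() Ref   { return n.ctx }
func (n *noopActor) Init(ctx Ctx) error { n.ctx = ctx; return nil }
func (n *noopActor) Process(msg protocol.ActorMsg) error {
	return nil // accept and drop every message
}
func (n *noopActor) Destroy() error { return nil }
func (n *noopActor) OnProcessFailure(err error) *ProcessFailureStrategy {
	return Resume() // never stop on failure
}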

+ 0 - 47
pkg/actor/actor_msg.go

@@ -1,47 +0,0 @@
-package actor
-
-import "sparrow/pkg/ruleEngine"
-
-type Msg interface {
-	GetMessageType() MsgType
-}
-
-// actor message type def
-type MsgType string
-
-const (
-	QUEUE_TO_RULE_ENGINE_MSG         MsgType = "QUEUE_TO_RULE_ENGINE_MSG"
-	RULE_CHAIN_TO_RULE_MSG           MsgType = "RULE_CHAIN_TO_RULE_MSG"
-	RULE_TO_RULE_CHAIN_TELL_NEXT_MSG MsgType = "RULE_TO_RULE_CHAIN_TELL_NEXT_MSG"
-	RULE_TO_SELF_MSG                 MsgType = "RULE_TO_SELF_MSG"
-)
-
-type QueueToRuleEngineMsg struct {
-	TenantId string
-	Message *ruleEngine.Message
-	RelationTypes  []ruleEngine.RelationType
-	FailureMessage error
-}
-
-func (q *QueueToRuleEngineMsg) GetMessageType() MsgType {
-	return QUEUE_TO_RULE_ENGINE_MSG
-}
-
-type RuleNodeToRuleChanTellNextMsg struct {
-	RuleNodeId     string
-	RelationTypes  []ruleEngine.RelationType
-	Message        *ruleEngine.Message
-	FailureMessage error
-}
-
-func (r *RuleNodeToRuleChanTellNextMsg) GetMessageType() MsgType {
-	return RULE_TO_RULE_CHAIN_TELL_NEXT_MSG
-}
-
-type RuleToSelfMsg struct {
-	Message *ruleEngine.Message
-}
-
-func (r *RuleToSelfMsg) GetMessageType() MsgType {
-	return RULE_TO_SELF_MSG
-}

+ 14 - 2
pkg/actor/actor_ref.go

@@ -1,12 +1,24 @@
 package actor
 
+import "sparrow/pkg/protocol"
 
 // Ref an actor reference
 type Ref interface {
 	// get actor id
 	GetActorId() string
 	// tell actor a message
-	Tell(msg Msg)
+	Tell(msg protocol.ActorMsg)
 	// tell actor a message with high priority
-	TellWithHighPriority(msg Msg)
+	TellWithHighPriority(msg protocol.ActorMsg)
+}
+
+// Ctx an actor context
+type Ctx interface {
+	Ref
+	GetSelf() string
+	GetParentRef() Ref
+	TellActor(actorId string, msg protocol.ActorMsg)
+	Stop(actorId string) error
+	GetOrCreateChildActor(actorId string, dispatcherName string, create Creator) (Ref, error)
+	BroadcastChildren(msg protocol.ActorMsg) error
 }

+ 202 - 0
pkg/actor/actor_system.go

@@ -0,0 +1,202 @@
+package actor
+
+import (
+	"fmt"
+	"github.com/gogf/gf/os/grpool"
+	"sparrow/pkg/protocol"
+	"sparrow/pkg/queue"
+	"sparrow/pkg/server"
+	"sync"
+)
+
+// SystemContext actor system context with shared service references
+type SystemContext struct {
+	ActorSystem    System
+	AppActor       Ref
+	ClusterService queue.ClusterService
+}
+
+func NewSystemContext(sys System) *SystemContext {
+	return &SystemContext{
+		ActorSystem: sys,
+	}
+}
+
+func (s *SystemContext) tell(msg protocol.ActorMsg) {
+	s.AppActor.Tell(msg)
+}
+
+func (s *SystemContext) tellWithHighPriority(msg protocol.ActorMsg) {
+	s.AppActor.TellWithHighPriority(msg)
+}
+
+// System actor system interface
+type System interface {
+	// create a dispatcher
+	CreateDispatcher(name string, dispatcher IDispatcher) error
+	// destroy a dispatcher
+	DestroyDispatcher(name string) error
+	// get an actor ref by id
+	GetActor(actorId string) Ref
+	// create root actor
+	CreateRootActor(dispatcherName string, creator Creator) (Ref, error)
+	// create child actor by parent actor
+	CreateChildActor(dispatcherName string, creator Creator, parentId string) (Ref, error)
+	// tell actor a message
+	Tell(actorId string, msg protocol.ActorMsg) error
+	// tell actor message with high priority
+	TellWithHighPriority(actorId string, msg protocol.ActorMsg) error
+	// stop actor by actor id
+	StopActorById(actorId string) error
+	// stop actor by actor ref
+	StopActorByRef(ref Ref) error
+	// broadcast message to children
+	BroadcastToChildren(parentActorId string, msg protocol.ActorMsg) error
+}
+
+// DefaultActorSystem the default implementation of the System interface
+type DefaultActorSystem struct {
+	dispatchers  map[string]IDispatcher
+	actors       map[string]*MailBox
+	parentActors map[string][]string
+	scheduler    *grpool.Pool
+	config       *DefaultActorSystemConfig
+	mu           sync.Mutex
+}
+
+// DefaultActorSystemConfig system config
+type DefaultActorSystemConfig struct {
+	SchedulerPoolSize            int // system scheduler pool size
+	AppDispatcherPoolSize        int // app dispatcher pool size
+	TenantDispatcherPoolSize     int // tenant dispatcher pool size
+	RuleEngineDispatcherPoolSize int // rule engine dispatcher pool size
+}
+
+func NewDefaultActorSystem(config *DefaultActorSystemConfig) *DefaultActorSystem {
+	return &DefaultActorSystem{
+		dispatchers:  make(map[string]IDispatcher),
+		parentActors: make(map[string][]string),
+		scheduler:    grpool.New(config.SchedulerPoolSize),
+		config:       config,
+		actors:       make(map[string]*MailBox),
+	}
+}
+
+func (d *DefaultActorSystem) CreateDispatcher(name string, dispatcher IDispatcher) error {
+	if _, ok := d.dispatchers[name]; ok {
+		return errors.New(fmt.Sprintf("dispatcher name :%s is already registered!", name))
+	}
+	d.dispatchers[name] = dispatcher
+	return nil
+}
+
+func (d *DefaultActorSystem) DestroyDispatcher(name string) error {
+	if _, ok := d.dispatchers[name]; !ok {
+		return errors.New(fmt.Sprintf("dispatcher %s is not registered!", name))
+	}
+	if err := d.dispatchers[name].Destroy(); err != nil {
+		return err
+	}
+	delete(d.dispatchers, name)
+	return nil
+}
+
+func (d *DefaultActorSystem) GetActor(actorId string) Ref {
+	return d.actors[actorId]
+}
+
+func (d *DefaultActorSystem) CreateRootActor(dispatcherName string, creator Creator) (Ref, error) {
+	return d.creator(dispatcherName, creator, "")
+}
+
+func (d *DefaultActorSystem) Tell(actorId string, msg protocol.ActorMsg) error {
+	return d.tell(actorId, msg, false)
+}
+
+func (d *DefaultActorSystem) TellWithHighPriority(actorId string, msg protocol.ActorMsg) error {
+	return d.tell(actorId, msg, true)
+}
+
+func (d *DefaultActorSystem) tell(actorId string, msg protocol.ActorMsg, isHighPriority bool) error {
+	if mailBox, ok := d.actors[actorId]; ok {
+		if isHighPriority {
+			mailBox.TellWithHighPriority(msg)
+		} else {
+			mailBox.Tell(msg)
+		}
+	} else {
+		return errors.New(fmt.Sprintf("actor with id %s is not registered!", actorId))
+	}
+	return nil
+}
+
+func (d *DefaultActorSystem) stop(actorId string) error {
+	children := d.parentActors[actorId]
+	if len(children) > 0 {
+		for _, child := range children {
+			_ = d.stop(child)
+		}
+	}
+	if m, found := d.actors[actorId]; found {
+		return m.destroy()
+	}
+	return nil
+}
+
+func (d *DefaultActorSystem) StopActorById(actorId string) error {
+	return d.stop(actorId)
+}
+
+func (d *DefaultActorSystem) StopActorByRef(ref Ref) error {
+	return d.stop(ref.GetActorId())
+}
+
+func (d *DefaultActorSystem) BroadcastToChildren(parentActorId string, msg protocol.ActorMsg) error {
+	children := d.parentActors[parentActorId]
+	for _, item := range children {
+		if err := d.Tell(item, msg); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+func (d *DefaultActorSystem) CreateChildActor(dispatcherName string, creator Creator, parentId string) (Ref, error) {
+	return d.creator(dispatcherName, creator, parentId)
+}
+func (d *DefaultActorSystem) creator(name string, creator Creator, parentId string) (Ref, error) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	dispatcher := d.dispatchers[name]
+	if dispatcher == nil {
+		return nil, errors.New(fmt.Sprintf("dispatcher %s not found!", name))
+	}
+	actorId := creator.CreateActorId()
+	actorMailBox := d.actors[actorId]
+	if actorMailBox != nil {
+		server.Log.Debugf("actor with id :%s is already registered!", actorId)
+	} else {
+		server.Log.Debugf("creating actor with id :%s", actorId)
+		actor := creator.CreateActor()
+		var parentActor Ref
+		if parentId != "" {
+			parentActor = d.GetActor(parentId)
+			if parentActor == nil {
+				return nil, fmt.Errorf("parent actor with id %s is not registered", parentId)
+			}
+		}
+		mailBox := NewMailBox(d, actorId, parentActor, actor, dispatcher)
+		// register the new mailbox before initializing it
+		d.actors[actorId] = mailBox
+		if err := mailBox.Init(); err != nil {
+			server.Log.Error(err)
+			return nil, err
+		}
+		actorMailBox = mailBox
+		if parentActor != nil {
+			d.parentActors[parentId] = append(d.parentActors[parentId], actorId)
+		}
+	}
+
+	return actorMailBox, nil
+}
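Putting the pieces together, bootstrapping the system might look like the following sketch (assumptions: pool sizes are arbitrary, and the root actor is built from AppActorCreator, defined in pkg/actors later in this commit):

package main

import (
	"sparrow/pkg/actor"
	"sparrow/pkg/actors"
	"sparrow/pkg/server"
)

func main() {
	_ = server.InitLog("demo", "debug") // InitLog is exported by this commit
	sys := actor.NewDefaultActorSystem(&actor.DefaultActorSystemConfig{
		SchedulerPoolSize:            2,
		AppDispatcherPoolSize:        4,
		TenantDispatcherPoolSize:     4,
		RuleEngineDispatcherPoolSize: 8,
	})
	// each actor family gets its own pool-backed dispatcher
	_ = sys.CreateDispatcher(actor.APP_DISPATCHER_NAME, actor.NewPoolDispatcher(4))
	sysCtx := actor.NewSystemContext(sys)
	appRef, err := sys.CreateRootActor(actor.APP_DISPATCHER_NAME, actors.NewAppActorCreator(sysCtx))
	if err != nil {
		server.Log.Error(err)
		return
	}
	sysCtx.AppActor = appRef
	appRef.Tell(&actor.AppInitMsg{}) // AppActor logs the init message
}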

+ 0 - 9
pkg/actor/app_actor.go

@@ -1,9 +0,0 @@
-package actor
-
-type AppActor struct {
-
-}
-
-func (a *AppActor) Receive(ctx actor.Context) {
-
-}

+ 15 - 0
pkg/actor/creator.go

@@ -0,0 +1,15 @@
+package actor
+
+// Creator interface
+type Creator interface {
+	// make actor id
+	CreateActorId() string
+	// make an actor
+	CreateActor() Actor
+}
+
+// ContextBasedCreator a creator with system context and actor context
+type ContextBasedCreator struct {
+	SystemCtx *SystemContext
+	Ctx       Ctx
+}

+ 42 - 0
pkg/actor/dispatcher.go

@@ -0,0 +1,42 @@
+package actor
+
+import (
+	"github.com/gogf/gf/os/grpool"
+	"runtime"
+)
+
+const (
+	APP_DISPATCHER_NAME    = "app-dispatcher"
+	TENANT_DISPATCHER_NAME = "tenant-dispatcher"
+	DEVICE_DISPATCHER_NAME = "device-dispatcher"
+	RULE_DISPATCHER_NAME   = "rule-dispatcher"
+)
+
+// IDispatcher dispatcher interface
+type IDispatcher interface {
+	// submit a task (a func) for execution
+	Submit(func()) error
+	Destroy() error
+}
+
+// PoolDispatcher gr pool implements IDispatcher
+type PoolDispatcher struct {
+	pool *grpool.Pool
+}
+
+func (p *PoolDispatcher) Destroy() error {
+	return nil
+}
+
+func (p *PoolDispatcher) Submit(fn func()) error {
+	return p.pool.Add(fn)
+}
+
+// NewPoolDispatcher creates an instance with the given worker limit
+func NewPoolDispatcher(limit int) *PoolDispatcher {
+	if limit <= 0 {
+		limit = runtime.GOMAXPROCS(-1)
+	}
+	pl := grpool.New(limit)
+	return &PoolDispatcher{pool: pl}
+}

+ 214 - 0
pkg/actor/mailbox.go

@@ -0,0 +1,214 @@
+package actor
+
+import (
+	"github.com/gogf/gf/container/gqueue"
+	"sparrow/pkg/protocol"
+	"sparrow/pkg/server"
+	"sync/atomic"
+)
+
+type MailBox struct {
+	system                 System
+	id                     string
+	parentRef              Ref
+	actor                  Actor
+	dispatcher             IDispatcher
+	highPriorityMessages   *gqueue.Queue
+	normalPriorityMessages *gqueue.Queue
+	busyState              int32
+	readyState             int32
+	throughPut             int // max messages processed per scheduling round
+}
+
+const (
+	_ int32 = iota
+	FREE
+	BUSY
+	NOTREADY
+	READY
+)
+
+func NewMailBox(
+	system System,
+	selfId string, parentRef Ref, actor Actor,
+	dispatcher IDispatcher,
+) *MailBox {
+	return &MailBox{
+		highPriorityMessages:   gqueue.New(),
+		normalPriorityMessages: gqueue.New(),
+		system:                 system,
+		id:                     selfId,
+		parentRef:              parentRef,
+		actor:                  actor,
+		dispatcher:             dispatcher,
+		throughPut:             10,
+	}
+}
+
+// set mailbox ready status atomically
+func (m *MailBox) setReadyStat(stat int32) {
+	atomic.StoreInt32(&m.readyState, stat)
+}
+
+func (m *MailBox) getReadyStat() int32 {
+	return atomic.LoadInt32(&m.readyState)
+}
+
+func (m *MailBox) getBusyStat() int32 {
+	return atomic.LoadInt32(&m.busyState)
+}
+
+// set mailbox busy status atomically
+func (m *MailBox) setBusyStat(stat int32) {
+	atomic.StoreInt32(&m.busyState, stat)
+}
+
+func (m *MailBox) tryInit(attempt int) {
+	server.Log.Debugf("Try to init actor, attempt %d", attempt)
+	err := m.actor.Init(m)
+	if err != nil {
+		server.Log.Errorf("failed to init actor, err :%s, attempt %d", err.Error(), attempt)
+		attempt += 1
+		if attempt > 10 {
+			// give up after ten attempts and stop the actor
+			_ = m.system.StopActorById(m.id)
+			return
+		}
+		_ = m.dispatcher.Submit(func() {
+			m.tryInit(attempt)
+		})
+		return
+	}
+	m.setReadyStat(READY)
+	m.setBusyStat(FREE)
+	m.tryProcessQueue(false)
+}
+
+func (m *MailBox) tryProcessQueue(newMsg bool) {
+	if m.getReadyStat() != READY {
+		server.Log.Debugf("MessageBox is not ready")
+		return
+	}
+
+	if newMsg || m.normalPriorityMessages.Size() > 0 || m.highPriorityMessages.Size() > 0 {
+		if m.getBusyStat() == FREE {
+			m.setBusyStat(BUSY)
+			err := m.dispatcher.Submit(func() {
+				m.processMailbox()
+			})
+			if err != nil {
+				server.Log.Error(err)
+			}
+		}
+	} else {
+		server.Log.Debugf("MessageBox is empty")
+	}
+}
+
+func (m *MailBox) processMailbox() {
+	var noMoreElement bool
+	for i := 0; i < m.throughPut; i++ {
+		var getQueue *gqueue.Queue
+		if m.highPriorityMessages.Size() > 0 {
+			getQueue = m.highPriorityMessages
+		} else if m.normalPriorityMessages.Size() > 0 {
+			getQueue = m.normalPriorityMessages
+		}
+		if getQueue == nil {
+			// both queues are drained
+			noMoreElement = true
+			break
+		}
+		item := getQueue.Pop()
+		if item == nil {
+			// Pop returns nil once the queue has been closed
+			noMoreElement = true
+			break
+		}
+		msg := item.(protocol.ActorMsg)
+		server.Log.Debugf("Going to process message:%s, %v", m.id, msg)
+		if err := m.actor.Process(msg); err != nil {
+			strategy := m.actor.OnProcessFailure(err)
+			if strategy._stop {
+				_ = m.system.StopActorById(m.id)
+			}
+		}
+	}
+	if noMoreElement {
+		m.setBusyStat(FREE)
+		_ = m.dispatcher.Submit(func() {
+			m.tryProcessQueue(false)
+		})
+	} else {
+		_ = m.dispatcher.Submit(func() {
+			m.processMailbox()
+		})
+	}
+}
+
+// Init schedules mailbox initialization on its dispatcher
+func (m *MailBox) Init() error {
+	return m.dispatcher.Submit(func() {
+		m.tryInit(1)
+	})
+}
+
+func (m *MailBox) GetActorId() string {
+	return m.id
+}
+
+func (m *MailBox) Tell(msg protocol.ActorMsg) {
+	m.enqueue(msg, false)
+}
+
+// push a message to queue
+func (m *MailBox) enqueue(msg protocol.ActorMsg, isHighPriority bool) {
+	if isHighPriority {
+		m.highPriorityMessages.Push(msg)
+	} else {
+		m.normalPriorityMessages.Push(msg)
+	}
+	m.tryProcessQueue(true)
+}
+
+func (m *MailBox) TellWithHighPriority(msg protocol.ActorMsg) {
+	m.enqueue(msg, true)
+}
+
+func (m *MailBox) GetSelf() string {
+	return m.id
+}
+
+func (m *MailBox) GetParentRef() Ref {
+	return m.parentRef
+}
+
+func (m *MailBox) TellActor(actorId string, msg protocol.ActorMsg) {
+	m.system.Tell(actorId, msg)
+}
+
+func (m *MailBox) Stop(actorId string) error {
+	return m.system.StopActorById(actorId)
+}
+
+func (m *MailBox) GetOrCreateChildActor(actorId string, dispatcherName string, creator Creator) (Ref, error) {
+	actorRef := m.system.GetActor(actorId)
+	if actorRef == nil {
+		return m.system.CreateChildActor(dispatcherName, creator, m.id)
+	}
+	return actorRef, nil
+}
+
+func (m *MailBox) BroadcastChildren(msg protocol.ActorMsg) error {
+	return m.system.BroadcastToChildren(m.id, msg)
+}
+
+func (m *MailBox) destroy() error {
+	return m.dispatcher.Submit(func() {
+		m.highPriorityMessages.Close()
+		m.normalPriorityMessages.Close()
+		m.setReadyStat(NOTREADY)
+		if err := m.actor.Destroy(); err != nil {
+			server.Log.Warnf("Failed to destroy actor :%s, err :%s", m.id, err.Error())
+		}
+	})
+}

+ 77 - 0
pkg/actor/msg.go

@@ -0,0 +1,77 @@
+package actor
+
+import (
+	"sparrow/pkg/protocol"
+	"sparrow/pkg/ruleEngine"
+)
+
+// RuleChainToRuleNodeMsg message from a rule chain to a rule node
+type RuleChainToRuleNodeMsg struct {
+	Message          *protocol.Message
+	Ctx              ruleEngine.Context
+	FromRelationType string
+}
+
+func (r *RuleChainToRuleNodeMsg) GetMessageType() protocol.MsgType {
+	return protocol.RULE_CHAIN_TO_RULE_MSG
+}
+
+// RuleChainToRuleChainMsg message from one rule chain to another
+type RuleChainToRuleChainMsg struct {
+	TargetId         string
+	SourceId         string
+	FromRelationType string
+	Message          *protocol.Message
+}
+
+func (t *RuleChainToRuleChainMsg) GetMessageType() protocol.MsgType {
+	return protocol.RULE_CHAIN_TO_RULE_CHAIN_MSG
+}
+
+// TransportToDeviceActorMsg message from the transport layer to a device actor
+type TransportToDeviceActorMsg struct {
+	Message *protocol.Message
+}
+
+func (t *TransportToDeviceActorMsg) GetMessageType() protocol.MsgType {
+	return protocol.TRANSPORT_TO_DEVICE_ACTOR_MSG
+}
+
+// QueueToRuleEngineMsg message from the message queue to the rule engine
+type QueueToRuleEngineMsg struct {
+	TenantId       string
+	Message        *protocol.Message
+	RelationTypes  []string
+	FailureMessage error
+}
+
+func (q *QueueToRuleEngineMsg) GetMessageType() protocol.MsgType {
+	return protocol.QUEUE_TO_RULE_ENGINE_MSG
+}
+
+type RuleNodeToRuleChanTellNextMsg struct {
+	RuleNodeId     string
+	RelationTypes  []protocol.RelationType
+	Message        *protocol.Message
+	FailureMessage error
+}
+
+func (r *RuleNodeToRuleChanTellNextMsg) GetMessageType() protocol.MsgType {
+	return protocol.RULE_TO_RULE_CHAIN_TELL_NEXT_MSG
+}
+
+type RuleToSelfMsg struct {
+	Message *protocol.Message
+}
+
+func (r *RuleToSelfMsg) GetMessageType() protocol.MsgType {
+	return protocol.RULE_TO_SELF_MSG
+}
+
+// AppInitMsg app initialization message
+type AppInitMsg struct {
+}
+
+func (a *AppInitMsg) GetMessageType() protocol.MsgType {
+	return protocol.APP_INIT_MSG
+}

+ 17 - 0
pkg/actor/procee_failure_strategy.go

@@ -0,0 +1,17 @@
+package actor
+
+type ProcessFailureStrategy struct {
+	_stop bool
+}
+
+func newProcessFailureStrategy(stop bool) *ProcessFailureStrategy {
+	return &ProcessFailureStrategy{_stop: stop}
+}
+
+// Stop tells the mailbox to stop the failing actor
+func Stop() *ProcessFailureStrategy {
+	return newProcessFailureStrategy(true)
+}
+
+// Resume tells the mailbox to keep the actor running
+func Resume() *ProcessFailureStrategy {
+	return newProcessFailureStrategy(false)
+}

+ 73 - 0
pkg/actors/app_actor.go

@@ -0,0 +1,73 @@
+package actors
+
+import (
+	"github.com/gogf/gf/util/guid"
+	"sparrow/pkg/actor"
+	"sparrow/pkg/protocol"
+	"sparrow/pkg/server"
+)
+
+// AppActor the application-level (root) actor
+type AppActor struct {
+	actor.ContextBasedCreator
+}
+
+// CreateActorId is an unused placeholder; AppActorCreator below implements Creator
+func (a *AppActor) CreateActorId() string {
+	panic("implement me")
+}
+
+func (a *AppActor) CreateActor() actor.Actor {
+	panic("implement me")
+}
+
+func (a *AppActor) GetActorRef() actor.Ref {
+	return a.Ctx
+}
+
+func (a *AppActor) Init(ctx actor.Ctx) error {
+	a.Ctx = ctx
+	return nil
+}
+
+func (a *AppActor) Process(msg protocol.ActorMsg) error {
+	switch msg.GetMessageType() {
+	case protocol.APP_INIT_MSG:
+		server.Log.Debugf("收到应用初始化消息")
+	default:
+		server.Log.Debugf("未知的消息类型:%s", msg.GetMessageType())
+	}
+	return nil
+}
+
+func (a *AppActor) Destroy() error {
+	return nil
+}
+
+func (a *AppActor) OnProcessFailure(err error) *actor.ProcessFailureStrategy {
+	if err != nil {
+		return actor.Stop()
+	} else {
+		return actor.Resume()
+	}
+}
+
+// AppActorCreator implements the Creator interface for AppActor
+type AppActorCreator struct {
+	actor.ContextBasedCreator
+}
+
+func NewAppActorCreator(systemCtx *actor.SystemContext) *AppActorCreator {
+	ins := new(AppActorCreator)
+	ins.SystemCtx = systemCtx
+	return ins
+}
+
+func (a *AppActorCreator) CreateActorId() string {
+	return guid.S()
+}
+
+func (a *AppActorCreator) CreateActor() actor.Actor {
+	appC := new(AppActor)
+	appC.SystemCtx = a.SystemCtx
+	return appC
+}

+ 247 - 0
pkg/actors/rule_chain_actor.go

@@ -0,0 +1,247 @@
+package actors
+
+import (
+	"errors"
+	"fmt"
+	"sparrow/pkg/actor"
+	"sparrow/pkg/entities"
+	"sparrow/pkg/protocol"
+	"sparrow/pkg/queue"
+	"sparrow/pkg/ruleEngine"
+	"sparrow/pkg/server"
+	"strings"
+)
+
+// TODO: test data for now
+var ruleNodes = map[string]*ruleEngine.RuleNode{
+	"1": {
+		RuleNodeId:  "1",
+		RuleChainId: "11",
+		Type:        "MsgTypeFilterNode",
+		Name:        "simple node",
+		IsDebug:     true,
+		Config:      "",
+	},
+}
+
+type RuleChainActor struct {
+	actor.ContextBasedCreator
+	ruleChain   *ruleEngine.RuleChain
+	tenantId    string
+	firstId     string
+	firstNode   *ruleEngine.RuleNodeCtx
+	started     bool
+	ruleChainId string
+	parent      actor.Ref
+	nodeActors  map[string]*ruleEngine.RuleNodeCtx
+	nodeRoutes  map[string][]*ruleEngine.RuleNodeRelation
+
+	ruleChainName string
+
+	clusterService queue.ClusterService
+}
+
+func newRuleChainActor(
+	sysCtx *actor.SystemContext,
+	ruleChain *ruleEngine.RuleChain,
+	tenantId string,
+	parent actor.Ref,
+) *RuleChainActor {
+	item := &RuleChainActor{
+		ruleChainId:    ruleChain.ChainId,
+		ruleChain:      ruleChain,
+		tenantId:       tenantId,
+		parent:         parent,
+		nodeActors:     make(map[string]*ruleEngine.RuleNodeCtx),
+		nodeRoutes:     make(map[string][]*ruleEngine.RuleNodeRelation),
+		clusterService: sysCtx.ClusterService,
+	}
+	item.SystemCtx = sysCtx
+	return item
+}
+
+func (r *RuleChainActor) GetActorRef() actor.Ref {
+	return r.Ctx
+}
+
+func (r *RuleChainActor) Init(ctx actor.Ctx) error {
+	if r.ruleChain != nil {
+		r.ruleChainName = r.ruleChain.Name
+	}
+	r.Ctx = ctx
+	return nil
+}
+
+func (r *RuleChainActor) Process(msg protocol.ActorMsg) error {
+	switch msg.GetMessageType() {
+	case protocol.QUEUE_TO_RULE_ENGINE_MSG:
+		return r.onQueueToRuleEngineMsg(msg.(*actor.QueueToRuleEngineMsg))
+
+	}
+	return nil
+}
+
+func (r *RuleChainActor) onQueueToRuleEngineMsg(msg *actor.QueueToRuleEngineMsg) error {
+	actorMsg := msg.Message
+	server.Log.Debugf("Processing message")
+	if len(msg.RelationTypes) == 0 {
+		ruleNodeId := actorMsg.RuleNodeId
+		var targetCtx *ruleEngine.RuleNodeCtx
+		if ruleNodeId == "" {
+			targetCtx = r.firstNode
+		} else {
+			targetCtx = r.nodeActors[ruleNodeId]
+		}
+		if targetCtx != nil {
+			server.Log.Debugf("pushing message to target rule node,%s, %s", r.ruleChainId, ruleNodeId)
+			r.pushMsgToNode(targetCtx, actorMsg, "")
+		} else {
+			server.Log.Debugf("Rule node dose not exist. probably old message,%s, %s", r.ruleChainId, ruleNodeId)
+			actorMsg.GetCallBack().OnSuccess()
+		}
+	} else {
+		r.onTellNext(actorMsg, actorMsg.RuleNodeId, msg.RelationTypes, msg.FailureMessage.Error())
+	}
+	return nil
+}
+
+// route the message to the next actors based on relation types
+func (r *RuleChainActor) onTellNext(msg *protocol.Message, originatorNodeId string,
+	relationTypes []string, errMsg string) {
+	originatorId := msg.Originator
+	tpi := queue.ResolvePartition(queue.RULE_ENGINE, msg.GetQueueName(), r.tenantId, originatorId)
+	var relations []*ruleEngine.RuleNodeRelation
+	if rs, ok := r.nodeRoutes[originatorNodeId]; ok {
+		for _, item := range rs {
+			if contains(relationTypes, item.Type) {
+				relations = append(relations, item)
+			}
+		}
+	}
+	if len(relations) == 0 {
+		server.Log.Debugf("No outbound relations to process,%s, %s", originatorId, r.tenantId)
+		if contains(relationTypes, string(protocol.Failure)) {
+			if ruleNodeCtx, ok := r.nodeActors[originatorNodeId]; ok {
+				msg.GetCallBack().OnFailure(fmt.Errorf(
+					"[%s], ruleChainName:%s, ruleNodeId:%s", errMsg, r.ruleChainName, ruleNodeCtx.Self.RuleNodeId))
+			} else {
+				msg.GetCallBack().OnFailure(errors.New("failure during message processing by Rule Node"))
+			}
+		} else {
+			msg.GetCallBack().OnSuccess()
+		}
+	} else if len(relations) == 1 {
+		for _, rl := range relations {
+			server.Log.Debugf("push message to single target,%s, %s, %s, %s", r.tenantId, originatorId, msg.Id, rl.Out)
+			r.pushMsgToTarget(tpi, msg, rl.Out, rl.Type)
+		}
+	} else {
+		for _, rl := range relations {
+			target := rl.Out
+			r.putToQueue(tpi, msg, queue.NewMultipleMsgCallbackWrapper(int32(len(relations)), msg.GetCallBack()), target)
+		}
+	}
+}
+
+// push a message to target ctx
+func (r *RuleChainActor) pushMsgToTarget(tpi *queue.TopicPartitionInfo, msg *protocol.Message, entityId entities.EntityId, fromRelationType string) {
+	if tpi.MyPartition {
+		switch entityId.GetEntityType() {
+		case entities.RULE_NODE:
+			targetCtx := r.nodeActors[entityId.GetId()]
+			r.pushMsgToNode(targetCtx, msg, fromRelationType)
+		case entities.RULE_CHAIN:
+			r.parent.Tell(&actor.RuleChainToRuleChainMsg{
+				TargetId:         entityId.GetId(),
+				SourceId:         r.ruleChainId,
+				Message:          msg,
+				FromRelationType: fromRelationType,
+			})
+		}
+	} else {
+		r.putToQueue(tpi, msg, queue.NewMsgCallbackWrapper(msg.GetCallBack()), entityId)
+	}
+}
+
+// put the message onto the queue
+func (r *RuleChainActor) putToQueue(tpi *queue.TopicPartitionInfo, msg *protocol.Message, queueCallback queue.Callback, targetEntity entities.EntityId) {
+	switch targetEntity.GetEntityType() {
+	case entities.RULE_NODE:
+		r.putMessageToQueue(tpi, msg.CopyWithRuleNodeId(targetEntity.GetId()), queueCallback)
+	case entities.RULE_CHAIN:
+		r.putMessageToQueue(tpi, msg.CopyWithRuleChainId(targetEntity.GetId()), queueCallback)
+	}
+}
+
+func (r *RuleChainActor) putMessageToQueue(tpi *queue.TopicPartitionInfo, msg *protocol.Message, queueCallback queue.Callback) {
+	msgBytes, err := msg.Encode()
+	if err != nil {
+		server.Log.Error(err)
+		return
+	}
+	r.clusterService.PushMessageToRuleEngine(tpi, msg.Id, msgBytes, queueCallback)
+}
+
+func contains(relations []string, relation string) bool {
+	if len(relations) == 0 {
+		return true
+	}
+	for _, item := range relations {
+		if strings.EqualFold(item, relation) {
+			return true
+		}
+	}
+	return false
+}
+
+// push a message to node actor
+func (r *RuleChainActor) pushMsgToNode(targetCtx *ruleEngine.RuleNodeCtx, msg *protocol.Message, relationType string) {
+	if targetCtx != nil {
+		targetCtx.SelfActor.Tell(&actor.RuleChainToRuleNodeMsg{
+			Message:          msg,
+			Ctx:              ruleEngine.NewDefaultContext(targetCtx, r.SystemCtx),
+			FromRelationType: relationType,
+		})
+	} else {
+		server.Log.Error("targetCtx is empty, %s, %s", r.ruleChainId, r.ruleChainName)
+	}
+}
+
+func (r *RuleChainActor) Destroy() error {
+	return nil
+}
+
+func (r *RuleChainActor) OnProcessFailure(err error) *actor.ProcessFailureStrategy {
+	if err != nil {
+		return actor.Stop()
+	} else {
+		return actor.Resume()
+	}
+}
+
+// RuleChainCreator creates rule chain actors
+type RuleChainCreator struct {
+	RuleChainActor
+}
+
+// NewRuleChainCreator creates an instance
+func NewRuleChainCreator(
+	sysCtx *actor.SystemContext,
+	tenantId string,
+	ruleChan *ruleEngine.RuleChain,
+	parent actor.Ref,
+) *RuleChainCreator {
+	item := &RuleChainCreator{}
+	item.tenantId = tenantId
+	item.ruleChain = ruleChan
+	item.parent = parent
+	item.SystemCtx = sysCtx
+	return item
+}
+
+func (r *RuleChainCreator) CreateActorId() string {
+	return r.ruleChain.ChainId
+}
+
+func (r *RuleChainCreator) CreateActor() actor.Actor {
+	return newRuleChainActor(r.SystemCtx, r.ruleChain, r.tenantId, r.parent)
+}

+ 1 - 0
pkg/actors/rule_node_actor.go

@@ -0,0 +1 @@
+package actors

+ 165 - 0
pkg/actors/tenant_actor.go

@@ -0,0 +1,165 @@
+package actors
+
+import (
+	"errors"
+	"sparrow/pkg/actor"
+	"sparrow/pkg/protocol"
+	"sparrow/pkg/ruleEngine"
+	"sparrow/pkg/server"
+)
+
+// TODO: test data for now
+var ruleChains = map[string]*ruleEngine.RuleChain{
+	"11": {
+		TenantId:    "1",
+		Name:        "Chain1",
+		FirstNodeId: "1",
+		IsRoot:      true,
+		IsDebug:     false,
+		Config:      "",
+		ChainId:     "11",
+	},
+	"22": {
+		TenantId:    "2",
+		Name:        "Chain2",
+		FirstNodeId: "1",
+		IsRoot:      false,
+		IsDebug:     false,
+		Config:      "",
+		ChainId:     "22",
+	},
+}
+
+// TenantActor tenant-level actor
+type TenantActor struct {
+	actor.ContextBasedCreator
+	tenantId       string
+	rootChain      *ruleEngine.RuleChain
+	rootChainActor actor.Ref
+	cantFindTenant bool
+}
+
+func (t *TenantActor) initRuleChains() {
+	for _, ruleChain := range ruleChains {
+		server.Log.Debugf("Creating rule chain actor:%s", ruleChain.ChainId)
+		actorRef, err := t.getOrCreateActor(ruleChain.ChainId, ruleChain)
+		if err != nil {
+			server.Log.Errorf("Creating rule chain actor:%s err:%s", ruleChain.ChainId, err.Error())
+			continue
+		}
+		if ruleChain.IsRoot {
+			t.rootChain = ruleChain
+			t.rootChainActor = actorRef
+		}
+		server.Log.Debugf("Rule chain actor created:%s", ruleChain.ChainId)
+	}
+}
+
+func (t *TenantActor) destroyRuleChains() {
+	for _, ruleChain := range ruleChains {
+		_ = t.Ctx.Stop(ruleChain.ChainId)
+	}
+}
+
+func (t *TenantActor) getOrCreateActor(ruleChainId string, ruleChain *ruleEngine.RuleChain) (actor.Ref, error) {
+	return t.Ctx.GetOrCreateChildActor(ruleChainId,
+		actor.RULE_DISPATCHER_NAME,
+		NewRuleChainCreator(t.SystemCtx, t.tenantId, ruleChain, t.Ctx.GetParentRef()))
+}
+
+func (t *TenantActor) GetActorRef() actor.Ref {
+	return t.Ctx
+}
+
+func (t *TenantActor) Init(ctx actor.Ctx) error {
+	t.Ctx = ctx
+	server.Log.Debugf("Starting tenant actor:%s", t.tenantId)
+	t.initRuleChains()
+	return nil
+}
+
+func (t *TenantActor) Process(msg protocol.ActorMsg) error {
+	if t.cantFindTenant {
+		server.Log.Debugf("Processing missing Tenant msg")
+		if msg.GetMessageType() == protocol.QUEUE_TO_RULE_ENGINE_MSG {
+			qMsg := msg.(*actor.QueueToRuleEngineMsg)
+			qMsg.Message.GetCallBack().OnSuccess()
+		} else if msg.GetMessageType() == protocol.TRANSPORT_TO_DEVICE_ACTOR_MSG {
+			tMsg := msg.(*actor.TransportToDeviceActorMsg)
+			tMsg.Message.GetCallBack().OnSuccess()
+		}
+		return nil
+	}
+	switch msg.GetMessageType() {
+	case protocol.QUEUE_TO_RULE_ENGINE_MSG:
+		return t.onQueueToRuleEngineMsg(msg.(*actor.QueueToRuleEngineMsg))
+	case protocol.RULE_CHAIN_TO_RULE_CHAIN_MSG:
+		return t.onRuleChainToRuleChainMsg(msg.(*actor.RuleChainToRuleChainMsg))
+	case protocol.TRANSPORT_TO_DEVICE_ACTOR_MSG:
+		// TODO: implement device-bound message handling
+	}
+	return nil
+}
+
+// TODO: look up the rule chain via services
+func (t *TenantActor) onRuleChainToRuleChainMsg(msg *actor.RuleChainToRuleChainMsg) error {
+	ruleChainId := msg.Message.RuleChanId
+	ref, err := t.getOrCreateActor(ruleChainId, ruleChains[ruleChainId])
+	if err != nil {
+		return err
+	}
+	ref.Tell(msg)
+	return nil
+}
+
+func (t *TenantActor) onQueueToRuleEngineMsg(msg *actor.QueueToRuleEngineMsg) error {
+	actorMsg := msg.Message
+	if actorMsg.RuleChanId == "" {
+		if t.rootChainActor != nil {
+			t.rootChainActor.Tell(msg)
+		} else {
+			actorMsg.GetCallBack().OnFailure(errors.New("no Root Rule Chain available"))
+			server.Log.Errorf("no root chain:%s", t.tenantId)
+			// do not also ack the message after reporting failure
+			return nil
+		}
+	} else {
+		t.Ctx.TellActor(actorMsg.RuleChanId, msg)
+	}
+	actorMsg.GetCallBack().OnSuccess()
+	return nil
+}
+
+func (t *TenantActor) Destroy() error {
+	return nil
+}
+
+func (t *TenantActor) OnProcessFailure(err error) *actor.ProcessFailureStrategy {
+	if err != nil {
+		return actor.Stop()
+	} else {
+		return actor.Resume()
+	}
+}
+
+// TenantActorCreator creates tenant actors
+type TenantActorCreator struct {
+	actor.ContextBasedCreator
+	tenantId string
+}
+
+func NewTenantActorCreator(sysCtx *actor.SystemContext, tenantId string) *TenantActorCreator {
+	t := new(TenantActorCreator)
+	t.SystemCtx = sysCtx
+	t.tenantId = tenantId
+	return t
+}
+
+func (t *TenantActorCreator) CreateActorId() string {
+	return t.tenantId
+}
+
+func (t *TenantActorCreator) CreateActor() actor.Actor {
+	ins := new(TenantActor)
+	ins.tenantId = t.tenantId
+	ins.SystemCtx = t.SystemCtx
+	return ins
+}

+ 40 - 0
pkg/entities/entities.go

@@ -0,0 +1,40 @@
+package entities
+
+type EntityId interface {
+	GetId() string
+	GetEntityType() EntityType
+}
+
+type EntityType int
+
+const (
+	TENANT EntityType = iota
+	DEVICE
+	ALARM
+	RULE_CHAIN
+	RULE_NODE
+)
+
+type RuleNodeId struct {
+	id string
+}
+
+func (r *RuleNodeId) GetId() string {
+	return r.id
+}
+
+func (r *RuleNodeId) GetEntityType() EntityType {
+	return RULE_NODE
+}
+
+type RuleChainId struct {
+	id string
+}
+
+func (r *RuleChainId) GetId() string {
+	return r.id
+}
+
+func (r *RuleChainId) GetEntityType() EntityType {
+	return RULE_CHAIN
+}
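Note that the id fields are unexported, so code outside pkg/entities cannot construct these ids as committed. Hypothetical constructors (not included in this commit) would close that gap:

func NewRuleNodeId(id string) *RuleNodeId   { return &RuleNodeId{id: id} }
func NewRuleChainId(id string) *RuleChainId { return &RuleChainId{id: id} }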

+ 18 - 0
pkg/protocol/actor_msg.go

@@ -0,0 +1,18 @@
+package protocol
+
+type ActorMsg interface {
+	GetMessageType() MsgType
+}
+
+// MsgType actor message type definition
+type MsgType string
+
+const (
+	QUEUE_TO_RULE_ENGINE_MSG         MsgType = "QUEUE_TO_RULE_ENGINE_MSG"
+	RULE_CHAIN_TO_RULE_MSG           MsgType = "RULE_CHAIN_TO_RULE_MSG"
+	RULE_TO_RULE_CHAIN_TELL_NEXT_MSG MsgType = "RULE_TO_RULE_CHAIN_TELL_NEXT_MSG"
+	RULE_TO_SELF_MSG                 MsgType = "RULE_TO_SELF_MSG"
+	APP_INIT_MSG                     MsgType = "APP_INIT_MSG"
+	TRANSPORT_TO_DEVICE_ACTOR_MSG    MsgType = "TRANSPORT_TO_DEVICE_ACTOR_MSG"
+	RULE_CHAIN_TO_RULE_CHAIN_MSG     MsgType = "RULE_CHAIN_TO_RULE_CHAIN_MSG"
+)

+ 125 - 0
pkg/protocol/message.go

@@ -0,0 +1,125 @@
+package protocol
+
+import (
+	"bytes"
+	"encoding/gob"
+	"time"
+)
+
+type MessageSerializer interface {
+	Encode() ([]byte, error)
+	Decode(data []byte) error
+}
+
+type Message struct {
+	QueueName  string
+	Id         string
+	Ts         *time.Time
+	Type       string
+	Data       string
+	RuleChanId string
+	RuleNodeId string
+	Callback   IMessageCallBack
+	MetaData   map[string]interface{}
+	Originator string
+}
+
+func (a *Message) Encode() ([]byte, error) {
+	var network bytes.Buffer
+	enc := gob.NewEncoder(&network)
+	err := enc.Encode(a)
+	if err != nil {
+		return nil, err
+	}
+	return network.Bytes(), nil
+}
+
+func (a *Message) Decode(data []byte) error {
+	var network bytes.Buffer
+	network.Write(data)
+	dec := gob.NewDecoder(&network)
+	return dec.Decode(a)
+}
+
+// IMessageCallBack message call back
+type IMessageCallBack interface {
+	// on success do sth.
+	OnSuccess()
+	// on failure do sth.
+	OnFailure(err error)
+	// on process start do sth.
+	OnProcessingStart(ruleNodeInfo *RuleNodeInfo)
+	// on process end do sth.
+	OnProcessingEnd(ruleNodeId string)
+}
+
+type emptyCallBack struct {
+}
+
+func (e emptyCallBack) OnProcessingStart(ruleNodeInfo *RuleNodeInfo) {
+
+}
+
+func (e emptyCallBack) OnProcessingEnd(ruleNodeId string) {
+
+}
+
+var EmptyCallBack = emptyCallBack{}
+
+func (e emptyCallBack) OnSuccess() {
+
+}
+
+func (e emptyCallBack) OnFailure(err error) {
+
+}
+
+// SetCallBack sets the message callback
+func (a *Message) SetCallBack(callback IMessageCallBack) {
+	a.Callback = callback
+}
+
+// GetCallBack get message call back
+func (a *Message) GetCallBack() IMessageCallBack {
+	if a.Callback == nil {
+		return EmptyCallBack
+	} else {
+		return a.Callback
+	}
+}
+
+func (a *Message) GetQueueName() string {
+	if a.QueueName == "" {
+		return "MAIN"
+	}
+	return a.QueueName
+}
+
+func (a *Message) CopyWithRuleChainId(ruleChainId string) *Message {
+	return &Message{
+		QueueName:  a.QueueName,
+		Id:         a.Id,
+		Ts:         a.Ts,
+		Type:       a.Type,
+		Data:       a.Data,
+		RuleChanId: ruleChainId,
+		RuleNodeId: a.RuleNodeId,
+		Callback:   a.Callback,
+		MetaData:   a.MetaData,
+		Originator: a.Originator,
+	}
+}
+func (a *Message) CopyWithRuleNodeId(ruleNodeId string) *Message {
+	return &Message{
+		QueueName:  a.QueueName,
+		Id:         a.Id,
+		Ts:         a.Ts,
+		Type:       a.Type,
+		Data:       a.Data,
+		RuleChanId: a.RuleChanId,
+		RuleNodeId: ruleNodeId,
+		Callback:   a.Callback,
+		MetaData:   a.MetaData,
+		Originator: a.Originator,
+	}
+}

+ 34 - 0
pkg/protocol/message_test.go

@@ -0,0 +1,34 @@
+package protocol
+
+import (
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func TestMessage_Decode(t *testing.T) {
+	msg := &Message{
+		QueueName:  "123",
+		Id:         "123",
+		Ts:         nil,
+		Type:       "",
+		Data:       "",
+		RuleChanId: "",
+		RuleNodeId: "",
+		Callback:   nil,
+		MetaData: map[string]interface{}{
+			"name": 123,
+		},
+		Originator: "456",
+	}
+	result, err := msg.Encode()
+	if err != nil {
+		t.Error(err)
+	}
+
+	newMsg := new(Message)
+	err = newMsg.Decode(result)
+	if err != nil {
+		t.Error(err)
+	}
+	assert.Equal(t, newMsg, msg, "not equal")
+}

+ 1 - 1
pkg/protocol/protocol_test.go

@@ -1,8 +1,8 @@
 package protocol
 
 import (
-	"sparrow/pkg/tlv"
 	"reflect"
+	"sparrow/pkg/tlv"
 	"testing"
 	"time"
 )

+ 1 - 1
pkg/ruleEngine/schema.go → pkg/protocol/schema.go

@@ -1,4 +1,4 @@
-package ruleEngine
+package protocol
 
 import "fmt"
 

+ 5 - 0
pkg/queue/cluster_service.go

@@ -0,0 +1,5 @@
+package queue
+
+type ClusterService interface {
+	PushMessageToRuleEngine(info *TopicPartitionInfo, msgId string, msg []byte, callback Callback)
+}

+ 50 - 0
pkg/queue/queue_callback.go

@@ -0,0 +1,50 @@
+package queue
+
+import (
+	"sparrow/pkg/protocol"
+	"sync/atomic"
+)
+
+type Callback interface {
+	OnSuccess()
+	OnFailure(err error)
+}
+
+type MsgCallbackWrapper struct {
+	msgCallback protocol.IMessageCallBack
+}
+
+func NewMsgCallbackWrapper(cb protocol.IMessageCallBack) *MsgCallbackWrapper {
+	return &MsgCallbackWrapper{msgCallback: cb}
+}
+
+func (m *MsgCallbackWrapper) OnSuccess() {
+	m.msgCallback.OnSuccess()
+}
+
+func (m *MsgCallbackWrapper) OnFailure(err error) {
+	m.msgCallback.OnFailure(err)
+}
+
+type MultipleMsgCallbackWrapper struct {
+	callbackCount int32
+	msgCallBack   protocol.IMessageCallBack
+}
+
+func NewMultipleMsgCallbackWrapper(count int32, callback protocol.IMessageCallBack) *MultipleMsgCallbackWrapper {
+	return &MultipleMsgCallbackWrapper{
+		callbackCount: count,
+		msgCallBack:   callback,
+	}
+}
+
+func (m *MultipleMsgCallbackWrapper) OnSuccess() {
+	v := atomic.AddInt32(&m.callbackCount, -1)
+	if v <= 0 {
+		m.msgCallBack.OnSuccess()
+	}
+}
+
+func (m *MultipleMsgCallbackWrapper) OnFailure(err error) {
+	m.msgCallBack.OnFailure(err)
+}
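RuleChainActor above uses MultipleMsgCallbackWrapper when one message fans out to several relations: the wrapped OnSuccess fires only after every branch has succeeded, while the first failing branch triggers OnFailure immediately. A usage sketch, where targets and push are hypothetical:

// ack the original message once all fan-out branches succeed
wrapper := queue.NewMultipleMsgCallbackWrapper(int32(len(targets)), msg.GetCallBack())
for _, target := range targets {
	push(target, wrapper) // hypothetical enqueue helper
}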

+ 7 - 0
pkg/queue/service_type.go

@@ -0,0 +1,7 @@
+package queue
+
+const (
+	SP_CORE     = "sp_core"
+	RULE_ENGINE = "rule_engine"
+	TRANSPORT   = "transport"
+)

+ 27 - 0
pkg/queue/topic_partition_info.go

@@ -0,0 +1,27 @@
+package queue
+
+import (
+	"fmt"
+	"sparrow/pkg/utils"
+)
+
+type TopicPartitionInfo struct {
+	Topic       string
+	TenantId    string
+	Partition   string
+	MyPartition bool
+}
+
+func (a *TopicPartitionInfo) String() string {
+	return fmt.Sprintf("%s.%s.%s", a.Topic, a.TenantId, a.Partition)
+}
+
+func (a *TopicPartitionInfo) HashCode() string {
+	return utils.Md5(a.String())
+}
+
+// ResolvePartition builds a TopicPartitionInfo for the given entity
+func ResolvePartition(serviceType, queueName, tenantId, entityId string) *TopicPartitionInfo {
+	// TODO: topic resolution logic
+	return &TopicPartitionInfo{}
+}

+ 33 - 22
pkg/ruleEngine/context.go

@@ -2,67 +2,78 @@ package ruleEngine
 
 import (
 	"sparrow/pkg/actor"
+	"sparrow/pkg/protocol"
 	"time"
 )
 
 type Context interface {
 	// tell all nodes related via the Success relation type
-	TellSuccess(msg *Message)
+	TellSuccess(msg *protocol.Message)
 	// tell along a specific relation
-	TellNext(msg *Message, relationType RelationType)
+	TellNext(msg *protocol.Message, relationType protocol.RelationType)
 	// tell the current node itself; duration is the delay
-	TellSelf(msg *Message, duration time.Duration)
+	TellSelf(msg *protocol.Message, duration time.Duration)
 	// send an error message
-	TellError(msg *Message, err error)
+	TellError(msg *protocol.Message, err error)
+	// message ack
+	Ack(msg *protocol.Message)
 }
 
 // DefaultContext the default context implementation
 type DefaultContext struct {
 	nodeCtx *RuleNodeCtx
+	mainCtx *actor.SystemContext
 }
 
-func New(nodeCtx *RuleNodeCtx) *DefaultContext {
-	return &DefaultContext{nodeCtx: nodeCtx}
+func NewDefaultContext(nodeCtx *RuleNodeCtx, mainCtx *actor.SystemContext) *DefaultContext {
+	return &DefaultContext{nodeCtx: nodeCtx, mainCtx: mainCtx}
 }
 
-func (d *DefaultContext) TellSuccess(msg *Message) {
-	d.tellNext(msg, []RelationType{Success}, nil)
+func (d *DefaultContext) TellSuccess(msg *protocol.Message) {
+	d.tellNext(msg, []protocol.RelationType{protocol.Success}, nil)
 }
 
-func (d *DefaultContext) TellNext(msg *Message, relationType RelationType) {
-	d.tellNext(msg, []RelationType{relationType}, nil)
+func (d *DefaultContext) TellNext(msg *protocol.Message, relationType protocol.RelationType) {
+	d.tellNext(msg, []protocol.RelationType{relationType}, nil)
 }
 
-func (d *DefaultContext) tellNext(msg *Message, relationTypes []RelationType, err error) {
-	if d.nodeCtx.self.IsDebug {
+func (d *DefaultContext) tellNext(msg *protocol.Message, relationTypes []protocol.RelationType, err error) {
+	if d.nodeCtx.Self.IsDebug {
 		// TODO: emit debug output
 	}
-	msg.GetCallBack().onProcessingEnd(d.nodeCtx.self.RuleNodeId)
-	d.nodeCtx.chainActor.Tell(
+	msg.GetCallBack().OnProcessingEnd(d.nodeCtx.Self.RuleNodeId)
+	d.nodeCtx.ChainActor.Tell(
 		&actor.RuleNodeToRuleChanTellNextMsg{
-			RuleNodeId:     d.nodeCtx.self.RuleNodeId,
+			RuleNodeId:     d.nodeCtx.Self.RuleNodeId,
 			RelationTypes:  relationTypes,
 			Message:        msg,
 			FailureMessage: err,
 		})
 }
 
-func (d *DefaultContext) TellSelf(msg *Message, duration time.Duration) {
+func (d *DefaultContext) TellSelf(msg *protocol.Message, duration time.Duration) {
 	if duration > 0 {
 		time.AfterFunc(duration, func() {
-			d.nodeCtx.selfActor.Tell(&actor.RuleToSelfMsg{Message: msg})
+			d.nodeCtx.SelfActor.Tell(&actor.RuleToSelfMsg{Message: msg})
 		})
 	}
 }
 
-func (d *DefaultContext) TellError(msg *Message, err error) {
-	if d.nodeCtx.self.IsDebug {
+func (d *DefaultContext) TellError(msg *protocol.Message, err error) {
+	if d.nodeCtx.Self.IsDebug {
 		// TODO: handle debug
 	}
-	d.nodeCtx.chainActor.Tell(&actor.RuleNodeToRuleChanTellNextMsg{
-		RuleNodeId:     d.nodeCtx.self.RuleNodeId,
-		RelationTypes:  []RelationType{Failure},
+	d.nodeCtx.ChainActor.Tell(&actor.RuleNodeToRuleChanTellNextMsg{
+		RuleNodeId:     d.nodeCtx.Self.RuleNodeId,
+		RelationTypes:  []protocol.RelationType{protocol.Failure},
 		Message:        msg,
 		FailureMessage: err,
 	})
 }
+func (d *DefaultContext) Ack(msg *protocol.Message) {
+	if d.nodeCtx.Self.IsDebug {
+		// TODO: handle debug
+	}
+	msg.GetCallBack().OnProcessingEnd(d.nodeCtx.Self.RuleNodeId)
+	msg.GetCallBack().OnSuccess()
+}

+ 0 - 55
pkg/ruleEngine/message.go

@@ -1,55 +0,0 @@
-package ruleEngine
-
-import "time"
-
-type Message struct {
-	QueueName  string
-	Id         string
-	Ts         time.Time
-	Type       string
-	Data       string
-	RuleChanId string
-	RuleNodeId string
-	Callback IMessageCallBack
-}
-
-// IMessageCallBack message call back
-type IMessageCallBack interface {
-	// on success do sth.
-	OnSuccess()
-	// on failure do sth.
-	OnFailure(err error)
-	// on process start do sth.
-	onProcessingStart(ruleNodeInfo *RuleNodeInfo)
-	// on process end do sth.
-	onProcessingEnd(ruleNodeId string)
-}
-
-type emptyCallBack struct {
-}
-
-func (e emptyCallBack) onProcessingStart(ruleNodeInfo *RuleNodeInfo) {
-
-}
-
-func (e emptyCallBack) onProcessingEnd(ruleNodeId string) {
-
-}
-
-var EmptyCallBack = emptyCallBack{}
-
-func (e emptyCallBack) OnSuccess() {
-
-}
-
-func (e emptyCallBack) OnFailure(err error) {
-
-}
-// GetCallBack get message call back
-func (a *Message) GetCallBack() IMessageCallBack {
-	if a.Callback == nil {
-		return EmptyCallBack
-	} else {
-		return a.Callback
-	}
-}

+ 17 - 5
pkg/ruleEngine/node.go

@@ -1,15 +1,27 @@
 package ruleEngine
 
-import "sparrow/pkg/actor"
+import (
+	"sparrow/pkg/actor"
+	"sparrow/pkg/entities"
+	"sparrow/pkg/protocol"
+)
 
 type Node interface {
 	Init(ctx Context, config string) error
-	OnMessage(ctx Context, message *Message) error
+	OnMessage(ctx Context, message *protocol.Message) error
 }
+
 // RuleNodeCtx 节点上下文
 type RuleNodeCtx struct {
 	TenantId   string    // 租户Id
-	chainActor actor.Ref // 规则链 actor
-	selfActor  actor.Ref // 当前节点的 actor
-	self       *RuleNode // 当前节点
+	ChainActor actor.Ref // 规则链 actor
+	SelfActor  actor.Ref // 当前节点的 actor
+	Self       *RuleNode // 当前节点
+}
+
+// RuleNodeRelation 节点关系
+type RuleNodeRelation struct {
+	In   entities.EntityId
+	Out  entities.EntityId
+	Type string
 }

+ 7 - 6
pkg/ruleEngine/nodes/msg_type_filter_node.go

@@ -2,6 +2,7 @@ package nodes
 
 import (
 	"encoding/json"
+	"sparrow/pkg/protocol"
 	"sparrow/pkg/ruleEngine"
 )
 
@@ -16,8 +17,8 @@ type MsgTypeFilterNodeConfig struct {
 func (m *MsgTypeFilterNode) Init(ctx ruleEngine.Context, config string) error {
 	if config == "" {
 		m.config = &MsgTypeFilterNodeConfig{MessageTypes: []string{
-			ruleEngine.POST_ATTRIBUTES_REQUEST,
-			ruleEngine.POST_EVENT_REQUEST,
+			protocol.POST_ATTRIBUTES_REQUEST,
+			protocol.POST_EVENT_REQUEST,
 		}}
 		return nil
 	}
@@ -30,13 +31,13 @@ func (m *MsgTypeFilterNode) Init(ctx ruleEngine.Context, config string) error {
 	return nil
 }
 
-func (m *MsgTypeFilterNode) OnMessage(ctx ruleEngine.Context, message *ruleEngine.Message) error {
-	var relation ruleEngine.RelationType
+func (m *MsgTypeFilterNode) OnMessage(ctx ruleEngine.Context, message *protocol.Message) error {
+	var relation protocol.RelationType
 	for _, msgType := range m.config.MessageTypes {
 		if message.Type == msgType {
-			relation = ruleEngine.True
+			relation = protocol.True
 		} else {
-			relation = ruleEngine.False
+			relation = protocol.False
 		}
 	}
 	ctx.TellNext(message, relation)

+ 12 - 11
pkg/ruleEngine/nodes/msg_type_switch_node.go

@@ -1,6 +1,7 @@
 package nodes
 
 import (
+	"sparrow/pkg/protocol"
 	"sparrow/pkg/ruleEngine"
 )
 
@@ -11,19 +12,19 @@ func (m *MsgTypeSwitchNode) Init(ctx ruleEngine.Context, config string) error {
 	return nil
 }
 
-func (m *MsgTypeSwitchNode) OnMessage(ctx ruleEngine.Context, message *ruleEngine.Message) error {
-	var relationType ruleEngine.RelationType
+func (m *MsgTypeSwitchNode) OnMessage(ctx ruleEngine.Context, message *protocol.Message) error {
+	var relationType protocol.RelationType
 	switch message.Type {
-	case ruleEngine.POST_ATTRIBUTES_REQUEST:
-		relationType = ruleEngine.PostAttributes
-	case ruleEngine.POST_EVENT_REQUEST:
-		relationType = ruleEngine.PostAttributes
-	case ruleEngine.CONNECT_EVENT:
-		relationType = ruleEngine.ConnectEvent
-	case ruleEngine.DISCONNECT_EVENT:
-		relationType = ruleEngine.DisconnectEvent
+	case protocol.POST_ATTRIBUTES_REQUEST:
+		relationType = protocol.PostAttributes
+	case protocol.POST_EVENT_REQUEST:
+		relationType = protocol.PostAttributes
+	case protocol.CONNECT_EVENT:
+		relationType = protocol.ConnectEvent
+	case protocol.DISCONNECT_EVENT:
+		relationType = protocol.DisconnectEvent
 	default:
-		relationType = ruleEngine.Other
+		relationType = protocol.Other
 	}
 	ctx.TellNext(message, relationType)
 	return nil

+ 1 - 1
pkg/server/log.go

@@ -8,7 +8,7 @@ import (
 
 var Log *logrus.Entry
 
-func initLog(name string, level string) error {
+func InitLog(name string, level string) error {
 	if Log == nil {
 		// Log as JSON instead of the default ASCII formatter.
 		logrus.SetFormatter(&logrus.JSONFormatter{})

+ 8 - 9
pkg/server/server.go

@@ -46,12 +46,11 @@ func Init(name string) error {
 		// read network info
 		readNetInterfaces()
 		// log
-		err := initLog(name, *confLogLevel)
+		err := InitLog(name, *confLogLevel)
 		if err != nil {
 			return err
 		}
 
-
 		// server instance
 		serverInstance = &Server{
 			name: name,
@@ -166,13 +165,13 @@ func RegisterRPCHandler(rcvr interface{}) error {
 
 		serverInstance.rpcsvr = &RPCServer{
 			TCPServer{
-addr:    addr,
-	handler: &handler,
-		useTls:  false, // rpc service do not use tls because it's in internal network
-},
-}
-}
-return nil
+				addr:    addr,
+				handler: &handler,
+				useTls:  false, // rpc service do not use tls because it's in internal network
+			},
+		}
+	}
+	return nil
 }
 
 // RegisterTimerTask register timer task