Browse Source

场景服务更新

liuxiulin 13 hours ago
parent
commit
4c232c18eb
100 changed files with 13210 additions and 26 deletions
  1. 10 1
      go.mod
  2. 2 0
      go.sum
  3. 9 0
      pkg/deviceStatus/deviceStatus.go
  4. 6 0
      pkg/rpcs/access.go
  5. 1 0
      pkg/rpcs/common.go
  6. 21 0
      pkg/rpcs/devicemanager.go
  7. 11 0
      pkg/rpcs/task.go
  8. 106 0
      pkg/scene/scene.go
  9. 1 1
      pkg/server/config.go
  10. 1 1
      pkg/server/log.go
  11. 1 1
      pkg/server/promethues.go
  12. 9 9
      pkg/server/server.go
  13. 1 1
      pkg/server/server_manager.go
  14. 63 0
      pkg/utils/util.go
  15. 21 0
      services/apiprovider/actions.go
  16. 2 0
      services/apiprovider/router.go
  17. 2 2
      services/devicemanager/main.go
  18. 50 0
      services/devicemanager/manager.go
  19. 5 0
      services/emqx-agent/agent.go
  20. 23 0
      services/knowoapi/controllers/device.go
  21. 1 1
      services/knowoapi/services/application.go
  22. 19 1
      services/knowoapi/services/device.go
  23. 2 2
      services/knowoapi/services/ota.go
  24. 1 1
      services/knowoapi/services/protocal.go
  25. 2 2
      services/knowoapi/services/role.go
  26. 1 1
      services/knowoapi/services/rule_chain.go
  27. 1 1
      services/knowoapi/services/sensor.go
  28. 1 1
      services/knowoapi/services/sub_device.go
  29. 103 0
      services/scene-service/internal/service/executer.go
  30. 170 0
      services/scene-service/internal/service/manager/device_status.go
  31. 130 0
      services/scene-service/internal/service/manager/timer.go
  32. 173 0
      services/scene-service/internal/service/manager/weather.go
  33. 161 0
      services/scene-service/internal/service/scene.go
  34. 28 0
      services/scene-service/main.go
  35. 16 0
      services/timer-service/internal/scheduler.go
  36. 9 0
      services/timer-service/internal/timer_service.go
  37. 4 0
      vendor/github.com/clbanning/mxj/v2/.travis.yml
  38. 22 0
      vendor/github.com/clbanning/mxj/v2/LICENSE
  39. 201 0
      vendor/github.com/clbanning/mxj/v2/anyxml.go
  40. 54 0
      vendor/github.com/clbanning/mxj/v2/atomFeedString.xml
  41. 143 0
      vendor/github.com/clbanning/mxj/v2/doc.go
  42. 93 0
      vendor/github.com/clbanning/mxj/v2/escapechars.go
  43. 9 0
      vendor/github.com/clbanning/mxj/v2/exists.go
  44. 287 0
      vendor/github.com/clbanning/mxj/v2/files.go
  45. 2 0
      vendor/github.com/clbanning/mxj/v2/files_test.badjson
  46. 9 0
      vendor/github.com/clbanning/mxj/v2/files_test.badxml
  47. 2 0
      vendor/github.com/clbanning/mxj/v2/files_test.json
  48. 9 0
      vendor/github.com/clbanning/mxj/v2/files_test.xml
  49. 1 0
      vendor/github.com/clbanning/mxj/v2/files_test_dup.json
  50. 1 0
      vendor/github.com/clbanning/mxj/v2/files_test_dup.xml
  51. 12 0
      vendor/github.com/clbanning/mxj/v2/files_test_indent.json
  52. 8 0
      vendor/github.com/clbanning/mxj/v2/files_test_indent.xml
  53. 35 0
      vendor/github.com/clbanning/mxj/v2/gob.go
  54. 323 0
      vendor/github.com/clbanning/mxj/v2/json.go
  55. 668 0
      vendor/github.com/clbanning/mxj/v2/keyvalues.go
  56. 112 0
      vendor/github.com/clbanning/mxj/v2/leafnode.go
  57. 86 0
      vendor/github.com/clbanning/mxj/v2/misc.go
  58. 128 0
      vendor/github.com/clbanning/mxj/v2/mxj.go
  59. 184 0
      vendor/github.com/clbanning/mxj/v2/newmap.go
  60. 209 0
      vendor/github.com/clbanning/mxj/v2/readme.md
  61. 37 0
      vendor/github.com/clbanning/mxj/v2/remove.go
  62. 61 0
      vendor/github.com/clbanning/mxj/v2/rename.go
  63. 26 0
      vendor/github.com/clbanning/mxj/v2/set.go
  64. 20 0
      vendor/github.com/clbanning/mxj/v2/setfieldsep.go
  65. 29 0
      vendor/github.com/clbanning/mxj/v2/songtext.xml
  66. 30 0
      vendor/github.com/clbanning/mxj/v2/strict.go
  67. 54 0
      vendor/github.com/clbanning/mxj/v2/struct.go
  68. 258 0
      vendor/github.com/clbanning/mxj/v2/updatevalues.go
  69. 1440 0
      vendor/github.com/clbanning/mxj/v2/xml.go
  70. 902 0
      vendor/github.com/clbanning/mxj/v2/xmlseq.go
  71. 18 0
      vendor/github.com/clbanning/mxj/v2/xmlseq2.go
  72. 2 0
      vendor/github.com/gogf/gf/v2/.codecov.yml
  73. 3 0
      vendor/github.com/gogf/gf/v2/.gitattributes
  74. 19 0
      vendor/github.com/gogf/gf/v2/.gitignore
  75. 282 0
      vendor/github.com/gogf/gf/v2/.golangci.yml
  76. 82 0
      vendor/github.com/gogf/gf/v2/.set_version.sh
  77. 26 0
      vendor/github.com/gogf/gf/v2/Makefile
  78. 103 0
      vendor/github.com/gogf/gf/v2/README.MD
  79. 203 0
      vendor/github.com/gogf/gf/v2/crypto/gaes/gaes.go
  80. 97 0
      vendor/github.com/gogf/gf/v2/crypto/gmd5/gmd5.go
  81. 765 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb.go
  82. 826 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core.go
  83. 305 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_config.go
  84. 67 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_ctx.go
  85. 43 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_link.go
  86. 409 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_structure.go
  87. 86 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_trace.go
  88. 544 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_transaction.go
  89. 431 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_underlying.go
  90. 246 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_utility.go
  91. 46 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_driver_default.go
  92. 31 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_driver_wrapper.go
  93. 114 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_driver_wrapper_db.go
  94. 897 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_func.go
  95. 334 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_model.go
  96. 124 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder.go
  97. 161 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder_where.go
  98. 101 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder_where_prefix.go
  99. 125 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder_whereor.go
  100. 98 0
      vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder_whereor_prefix.go

+ 10 - 1
go.mod

@@ -35,6 +35,11 @@ require (
 	labix.org/v2/mgo v0.0.0-20140701140051-000000000287
 )
 
+require (
+	github.com/clbanning/mxj/v2 v2.7.0 // indirect
+	github.com/magiconair/properties v1.8.6 // indirect
+)
+
 require (
 	filesdk v1.0.0
 	github.com/BurntSushi/toml v1.3.2 // indirect
@@ -137,6 +142,10 @@ require (
 	gopkg.in/sourcemap.v1 v1.0.5 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect
+	weather-api-sdk v1.0.0
 )
 
-replace filesdk v1.0.0 => gogs.yehaoji.cn/yongxu/file-server-sdk.git v1.0.0
+replace (
+	filesdk v1.0.0 => gogs.yehaoji.cn/yongxu/file-server-sdk.git v1.0.0
+	weather-api-sdk v1.0.0 => gogs.yongxulvjian.com/yongxu/weather-api-sdk.git v1.0.2
+)

+ 2 - 0
go.sum

@@ -338,6 +338,8 @@ go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
 gogs.yehaoji.cn/yongxu/file-server-sdk.git v1.0.0 h1:Zjusu9Tq+WeFd59yJ01PPgX0e4smBGfJfeYiknzLAKA=
 gogs.yehaoji.cn/yongxu/file-server-sdk.git v1.0.0/go.mod h1:tGWbOVjXlvLPmVEZMDp+IfogSM7Y4094VrZ+y68K6qE=
+gogs.yongxulvjian.com/yongxu/weather-api-sdk.git v1.0.2 h1:URnwpqeQ1WBvql4QCha7Bv+JATwyvEfdq26sB0OiG8g=
+gogs.yongxulvjian.com/yongxu/weather-api-sdk.git v1.0.2/go.mod h1:pLHXQJPhIL2k5cBKn/Nzh775/ryTvVEpAtB2ezKZmt8=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=

+ 9 - 0
pkg/deviceStatus/deviceStatus.go

@@ -79,3 +79,12 @@ func (mgr *DevStatusManager) GetDeviceInfo(id, productId string) (string, error)
 	}
 	return result.String(), nil
 }
+
+func (mgr *DevStatusManager) GetDeviceStatusByKey(key string) (string, error) {
+	// get status from redis
+	result, err := mgr.redisClient.DoVar("GET", key)
+	if err != nil {
+		return "", err
+	}
+	return result.String(), nil
+}

+ 6 - 0
pkg/rpcs/access.go

@@ -15,6 +15,7 @@ type ArgsGetStatus struct {
 	ProductId   string
 	VendorId    string
 	SubDeviceId string
+	Key         string
 }
 type ReplyGetStatus struct {
 	Status []protocol.SubData
@@ -27,6 +28,11 @@ type ArgsSendCommand struct {
 	WaitTime  uint32
 	Params    map[string]interface{}
 }
+type ArgsPublishMessage struct {
+	Topic   string
+	Payload []byte
+}
+
 type ReplySendCommand ReplyEmptyResult
 
 type ArgsDeviceUpgrade struct {

+ 1 - 0
pkg/rpcs/common.go

@@ -18,4 +18,5 @@ const (
 	SceneAccessServiceName = "SceneAccess"
 	TimerServiceName       = "TimerService"
 	OneKeyServiceName      = "OneKeyService"
+	SceneServiceName       = "SceneService"
 )

+ 21 - 0
pkg/rpcs/devicemanager.go

@@ -69,3 +69,24 @@ type ArgsGetAlarm struct {
 type ReplayAlarm struct {
 	AlarmParams *deviceAlarm.AlarmParams
 }
+
+type ArgsScene struct {
+	Key     string
+	SceneId string
+	TaskId  string
+	Config  string
+}
+
+type ArgsGetAllScene struct {
+	SceneId string
+	TaskId  string
+	Config  string
+}
+
+type ReplyScene struct {
+	Config string
+}
+
+type ReplyScenes struct {
+	Result string
+}

+ 11 - 0
pkg/rpcs/task.go

@@ -13,3 +13,14 @@ type ArgsSubmitTaskLifecycle struct {
 }
 
 type ReplySubmitTask ReplyEmptyResult
+
+type ArgsSubmitSceneAction struct {
+	Id         string `json:"id"`
+	Name       string `json:"name"`
+	Status     int    `json:"status"`
+	Action     string `json:"action"`
+	SceneType  string `json:"scene_type"`
+	DeviceType string `json:"device_type"`
+	Config     string `json:"config"`
+}
+type ReplySubmitSceneAction ReplyEmptyResult

+ 106 - 0
pkg/scene/scene.go

@@ -0,0 +1,106 @@
+package scene
+
+import (
+	"context"
+	"fmt"
+	"github.com/gogf/gf/v2/database/gredis"
+)
+
+const (
+	scenePrefix = "scene-manager:alarm:"
+	dataExpires = 7200
+)
+
+type GetSceneParams struct {
+	Type       string `json:"type"`
+	Name       string `json:"name"`
+	DeviceCode string `json:"device_code"`
+}
+
+type InfoScene struct {
+	Id         string `json:"id"`
+	Name       string `json:"name"`
+	Status     int    `json:"status"`
+	Action     string `json:"action"`
+	Desc       string `json:"desc"`
+	SceneType  string `json:"scene_type"`
+	DeviceType string `json:"device_type"`
+	Config     string `json:"config"`
+	Time       string `json:"time"`
+}
+
+type Info struct {
+	Key     string `json:"key"`
+	SceneId string `json:"scene_id"`
+	TaskId  string `json:"task_id"`
+	Config  string `json:"config"`
+}
+
+type Manager struct {
+	redisClient *gredis.Redis
+	ctx         context.Context
+}
+
+func NewSceneManager(host string, port int) *Manager {
+	red, _ := gredis.New(&gredis.Config{
+		Address:   fmt.Sprintf("%s:%d", host, port),
+		Db:        2,
+		MaxActive: 100,
+	})
+	ctx := context.Background()
+	helper := &Manager{
+		redisClient: red,
+		ctx:         ctx,
+	}
+	return helper
+}
+
+func (a *Manager) SaveScene(info *Info) error {
+	_, err := a.redisClient.Do(a.ctx, "SET", info.Key, info.Config)
+	if err != nil {
+		return err
+	}
+	_, err = a.redisClient.Do(a.ctx, "EXPIRE", info.Key, dataExpires)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *Manager) GetScene(key string) (string, error) {
+	// get status from redis
+	result, err := a.redisClient.Do(a.ctx, "GET", key)
+	if err != nil {
+		return "", err
+	}
+	return result.String(), nil
+}
+
+func (a *Manager) DeleteScene(key string) error {
+	// get status from redis
+	_, err := a.redisClient.Do(a.ctx, "DEL", key)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ScanKeys 扫描所有匹配的key
+//func (a *Manager) ScanKeys(pattern string) ([]string, error) {
+//	var cursor uint64
+//	var allKeys []string
+//	for {
+//		// 扫描一批匹配的key
+//		_, err := a.redisClient.Do("Scan", cursor, pattern, 100)
+//		if err != nil {
+//			return nil, err
+//		}
+//		allKeys = append(allKeys, keys...)
+//		cursor = nextCursor
+//		// 游标归0时结束迭代
+//		if cursor == 0 {
+//			break
+//		}
+//	}
+//	return allKeys, nil
+//}

+ 1 - 1
pkg/server/config.go

@@ -34,7 +34,7 @@ var (
 
 	confRPCHost = flag.String(FlagRPCHost, "", "rpc server listen address, format ip:port")
 
-	confEtcd     = flag.String(FlagEtcd, "", "etcd service addr, format ip:port;ip:port")
+	confEtcd     = flag.String(FlagEtcd, "", "etcd service addr, format ip:port;ip:port")
 	confExIp     = flag.String(FlagExternalIp, "", "服务使用的外网IP")
 	confLogLevel = flag.String(FlagLogLevel, "info", "default log level, options are panic|fatal|error|warn|info|debug")
 	confProme    = flag.String(FlagPrometheusAddr, "", "prometheus metric addr")

+ 1 - 1
pkg/server/log.go

@@ -26,7 +26,7 @@ func InitLog(name string, level string) error {
 
 		// default fields
 		Log = logrus.WithFields(logrus.Fields{
-			"service": name,
+			"service": name,
 			"ip":      InternalIP,
 		})
 	}

+ 1 - 1
pkg/server/promethues.go

@@ -11,7 +11,7 @@ var rpcCallCnt = &Metric{
 	Name:        "rpc_call_total",
 	Description: "HOW MANY RPC CALL",
 	Type:        "counter_vec",
-	Args:        []string{"service"},
+	Args:        []string{"service"},
 }
 
 var rpcCallDur = &Metric{

+ 9 - 9
pkg/server/server.go

@@ -1,8 +1,8 @@
-// Package server package server provides service interfaces and libraries.
+// Package server package server provides manager interfaces and libraries.
 // including:
 // tcp/http server library.
-// rpc service library with addon functionality.
-// service discory and registration Logic.
+// rpc manager library with addon functionality.
+// manager discory and registration Logic.
 // statistic lib.
 package server
 
@@ -30,7 +30,7 @@ type Server struct {
 	timerTask  TimerTask   // timer task
 	udpServer  *UDPServer
 	// functions
-	serverManager *ServerManager // service registration&discovery manager
+	serverManager *ServerManager // manager registration&discovery manager
 	rpcClient     *RPCClient     // rpc client
 	prome         *Prometheus
 }
@@ -52,7 +52,7 @@ func Init(name string) error {
 		serverInstance = &Server{
 			name: name,
 		}
-		// init service manager
+		// init service manager
 		serverInstance.serverManager, err = NewServerManager(name, *confEtcd)
 		if err != nil {
 			return err
@@ -164,7 +164,7 @@ func RegisterRPCHandler(rcvr interface{}) error {
 
 		err = rpc.Register(rcvr)
 		if err != nil {
-			return errorf("Cannot Resgister RPC service: %s", err)
+			return errorf("Cannot Resgister RPC manager: %s", err)
 		}
 
 		handler := rpcHandler{}
@@ -173,7 +173,7 @@ func RegisterRPCHandler(rcvr interface{}) error {
 			TCPServer{
 				addr:    addr,
 				handler: &handler,
-				useTls:  false, // rpc service do not use tls because it's in internal network
+				useTls:  false, // rpc manager do not use tls because it's in internal network
 			},
 		}
 	}
@@ -208,7 +208,7 @@ func RPCCallByHost(host string, serverMethod string, args interface{}, reply int
 	return serverInstance.rpcClient.CallHost(host, serverMethod, args, reply)
 }
 
-// GetServerHosts get server's hosts by server name and service type
+// GetServerHosts get server's hosts by server name and manager type
 func GetServerHosts(serverName string, hostType string) ([]string, error) {
 	if serverInstance == nil {
 		return nil, errorf(errServerNotInit)
@@ -234,7 +234,7 @@ func GetHTTPHost() string {
 	return serverInstance.httpServer.addr.externalIp
 }
 
-// Run start service
+// Run start manager
 func Run() error {
 	if serverInstance == nil {
 		return errorf(errServerNotInit)

+ 1 - 1
pkg/server/server_manager.go

@@ -1,4 +1,4 @@
-// service registration and discovery
+// manager registration and discovery
 
 package server
 

+ 63 - 0
pkg/utils/util.go

@@ -6,6 +6,8 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strconv"
+	"strings"
 
 	uuid "github.com/satori/go.uuid"
 )
@@ -50,3 +52,64 @@ func UUID() string {
 	s := uuid.NewV4()
 	return s.String()
 }
+
+// CheckString 字符串比较
+func CheckString(target string, value string, operator int) bool {
+	switch operator {
+	case 1:
+		return target == value
+	case 2:
+		return target != value
+	case 3:
+		return strings.Contains(value, target)
+	case 4:
+		return !strings.Contains(value, target)
+	}
+	return false
+}
+
+// CheckInt 数值比较
+func CheckInt(target, value, operator int) bool {
+	switch operator {
+	case 1:
+		return value > target
+	case 2:
+		return value >= target
+	case 3:
+		return value == target
+	case 4:
+		return value <= target
+	case 5:
+		return value < target
+	case 6:
+		return value != target
+	}
+	return false
+}
+
+// CheckValue 目标值验证
+func CheckValue(target string, value interface{}, fieldType, operator int) bool {
+	var strValue string
+	var intValue int
+	switch value.(type) {
+	case string:
+		strValue = value.(string)
+	case int:
+		intValue = value.(int)
+	case float64:
+		floatValue := value.(float64)
+		intValue = int(floatValue)
+	default:
+		break
+	}
+	switch fieldType {
+	case 1:
+		if strValue != "" {
+			return CheckString(target, strValue, operator)
+		}
+	case 2:
+		tInt, _ := strconv.Atoi(target)
+		return CheckInt(tInt, intValue, operator)
+	}
+	return false
+}

+ 21 - 0
services/apiprovider/actions.go

@@ -476,3 +476,24 @@ func SubmitTaskLifecycle(req *http.Request, r render.Render) {
 	r.JSON(http.StatusOK, Common{})
 	return
 }
+
+func SubmitSceneAction(req *http.Request, r render.Render) {
+
+	var ruleReq rpcs.ArgsSubmitSceneAction
+	decoder := json.NewDecoder(req.Body)
+	err := decoder.Decode(&ruleReq)
+	if err != nil {
+		r.JSON(http.StatusOK, renderError(ErrWrongRequestFormat, err))
+		return
+	}
+	reply := rpcs.ReplySubmitSceneAction{}
+	err = server.RPCCallByName(nil, rpcs.SceneServiceName, "SceneService.SubmitAction", ruleReq, &reply)
+	if err != nil {
+		server.Log.Errorf("submit scene action error: %v", err)
+		r.JSON(http.StatusOK, renderError(ErrSystemFault, err))
+		return
+	}
+	r.JSON(http.StatusOK, Common{})
+	return
+}

+ 2 - 0
services/apiprovider/router.go

@@ -70,6 +70,8 @@ func route(m *martini.ClassicMartini) {
 		r.Post("/scene_task", SubmitSceneTask)
 
 		r.Post("/task_lifecycle", SubmitTaskLifecycle)
+
+		r.Post("/submit_scene", SubmitSceneAction)
 	})
 	m.Group("/application/v2", func(r martini.Router) {
 		// send a command to device

+ 2 - 2
services/devicemanager/main.go

@@ -13,11 +13,11 @@ func main() {
 		return
 	}
 
-	// register a rpc service
+	// register a rpc manager
 	dm := NewDeviceManager(*confRedisHost, *confRedisPort, *confRedisDb)
 	err = server.RegisterRPCHandler(dm)
 	if err != nil {
-		server.Log.Errorf("Register RPC service Error: %s", err)
+		server.Log.Errorf("Register RPC manager Error: %s", err)
 		return
 	}
 

+ 50 - 0
services/devicemanager/manager.go

@@ -6,6 +6,7 @@ import (
 	"sparrow/pkg/online"
 	"sparrow/pkg/otaUpgrade"
 	"sparrow/pkg/rpcs"
+	"sparrow/pkg/scene"
 	"sparrow/pkg/token"
 )
 
@@ -15,6 +16,7 @@ type DeviceManager struct {
 	otaManager    *otaUpgrade.OtaManager
 	statusManager *deviceStatus.DevStatusManager
 	alarmManager  *deviceAlarm.DevAlarmManager
+	sceneManager  *scene.Manager
 }
 
 func NewDeviceManager(redishost string, port, db int) *DeviceManager {
@@ -23,12 +25,14 @@ func NewDeviceManager(redishost string, port, db int) *DeviceManager {
 	otaMgr := otaUpgrade.NewOtaManager(redishost, port, db)
 	statusMgr := deviceStatus.NewDevStatusManager(redishost, port)
 	alarmMgr := deviceAlarm.NewDevAlarmManager(redishost, port)
+	sceneMgr := scene.NewSceneManager(redishost, port)
 	return &DeviceManager{
 		onlineManager: mgr,
 		tokenHelper:   helper,
 		otaManager:    otaMgr,
 		statusManager: statusMgr,
 		alarmManager:  alarmMgr,
+		sceneManager:  sceneMgr,
 	}
 }
 
@@ -145,6 +149,17 @@ func (dm *DeviceManager) GetDeviceInfo(args rpcs.ArgsGetStatus, reply *rpcs.Repl
 	return nil
 }
 
+func (dm *DeviceManager) GetDeviceStatusByKey(args rpcs.ArgsGetStatus, reply *rpcs.ReplyStatus) error {
+	status, err := dm.statusManager.GetDeviceStatusByKey(args.Key)
+	if err != nil {
+		return err
+	}
+	if status != "" {
+		reply.Status = status
+	}
+	return nil
+}
+
 func (dm *DeviceManager) SetAlarm(args rpcs.ArgsAlarmInfo, reply *rpcs.ArgsAlarmInfo) error {
 	return dm.alarmManager.SetAlarm(&deviceAlarm.AlarmParams{
 		DeviceCode:      args.DeviceCode,
@@ -167,3 +182,38 @@ func (dm *DeviceManager) GetAlarm(args rpcs.ArgsGetAlarm, reply *rpcs.ReplayAlar
 func (dm *DeviceManager) DelAlarm(args rpcs.ArgsGetAlarm, reply *rpcs.ReplayAlarm) error {
 	return dm.alarmManager.DeleteAlarm(args.DeviceCode)
 }
+
+func (dm *DeviceManager) GetAllScene(args rpcs.ArgsScene, reply *rpcs.ReplyScenes) error {
+	result, err := dm.sceneManager.GetScene(args.Key)
+	if err != nil {
+		return err
+	}
+	if result != "" {
+		reply.Result = result
+	}
+	return nil
+}
+
+func (dm *DeviceManager) SaveScene(args rpcs.ArgsScene, reply *rpcs.ReplyScene) error {
+	return dm.sceneManager.SaveScene(&scene.Info{
+		Key:     args.Key,
+		SceneId: args.SceneId,
+		TaskId:  args.TaskId,
+		Config:  args.Config,
+	})
+}
+
+func (dm *DeviceManager) GetScene(args rpcs.ArgsScene, reply *rpcs.ReplyScene) error {
+	info, err := dm.sceneManager.GetScene(args.Key)
+	if err != nil {
+		return err
+	}
+	if info != "" {
+		reply.Config = info
+	}
+	return nil
+}
+
+func (dm *DeviceManager) DeleteScene(args rpcs.ArgsScene, reply *rpcs.ReplyScene) error {
+	return dm.sceneManager.DeleteScene(args.Key)
+}

+ 5 - 0
services/emqx-agent/agent.go

@@ -421,3 +421,8 @@ func (a *Access) UnlockDevice() {
 		}
 	}
 }
+
+// PublishMessage 发送消息
+func (a *Access) PublishMessage(args rpcs.ArgsPublishMessage, reply *rpcs.ReplyEmptyResult) error {
+	return a.client.PublishToMsgToDev(args.Topic, args.Payload)
+}

+ 23 - 0
services/knowoapi/controllers/device.go

@@ -516,3 +516,26 @@ func (a *DeviceController) PostSendcommand() {
 	}
 	done(a.Ctx, params.DeviceId)
 }
+
+// PostScenes 获取场景列表
+// POST /device/scenes
+// NOTE(review): the original hunk re-declared PostSendcommand (duplicate method) and
+// passed rpcs.ArgsSendCommand to GetScenes(scene.GetSceneParams), which does not compile;
+// this version requires importing "sparrow/pkg/scene" in this file.
+func (a *DeviceController) PostScenes() {
+	params := new(scene.GetSceneParams)
+	if err := parseBody(a.Ctx, params); err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+
+	result, err := a.Service.GetScenes(*params)
+	if err != nil {
+		responseError(a.Ctx, ErrNormal, err.Error())
+		return
+	}
+	done(a.Ctx, result)
+}

+ 1 - 1
services/knowoapi/services/application.go

@@ -24,7 +24,7 @@ type appService struct {
 	keyGen *generator.KeyGenerator
 }
 
-// NewAppService new app service
+// NewAppService new app manager
 func NewAppService(model *model.All, gen *generator.KeyGenerator) ApplicationService {
 	return appService{
 		model:  model,

+ 19 - 1
services/knowoapi/services/device.go

@@ -5,6 +5,8 @@ import (
 	"math/rand"
 	"sparrow/pkg/models"
 	"sparrow/pkg/rpcs"
+	"github.com/gogf/gf/v2/encoding/gjson"
+	"sparrow/pkg/scene"
 	"sparrow/pkg/server"
 	"sparrow/services/knowoapi/model"
 )
@@ -59,13 +60,15 @@ type DeviceService interface {
 	SetMixedWaterLinkage(params models.SendSplitCommandParams) error
 	// SendCommand 下发指令
 	SendCommand(args rpcs.ArgsSendCommand) error
+	// GetScenes 获取场景
+	GetScenes(params scene.GetSceneParams) (*gjson.Json, error)
 }
 
 type deviceservice struct {
 	models *model.All
 }
 
-// NewDeviceService create device service
+// NewDeviceService create device manager
 func NewDeviceService(models *model.All) DeviceService {
 	return deviceservice{
 		models: models,
@@ -387,3 +390,18 @@ func (a deviceservice) SendCommand(args rpcs.ArgsSendCommand) error {
 	}
 	return nil
 }
+
+// GetScenes 获取场景
+func (a deviceservice) GetScenes(params scene.GetSceneParams) (*gjson.Json, error) {
+
+	var newArgs rpcs.ArgsScene
+	// NOTE(review): this key is a glob pattern, but DeviceManager.GetAllScene performs a
+	// plain redis GET, which cannot match a pattern; a SCAN-based lookup is needed there.
+	newArgs.Key = "scene-manager:*"
+	var reply rpcs.ReplyScenes
+
+	err := server.RPCCallByName(nil, rpcs.DeviceManagerName, "DeviceManager.GetAllScene", newArgs, &reply)
+	if err != nil {
+		server.Log.Errorf("设备状态数据获取失败:%v", err)
+		return nil, err
+	}
+	return gjson.New(reply.Result), nil
+}

+ 2 - 2
services/knowoapi/services/ota.go

@@ -10,7 +10,7 @@ import (
 	"sparrow/services/knowoapi/model"
 )
 
-// OtaService Ota service
+// OtaService Ota manager
 type OtaService interface {
 	Create(*models.Ota) error
 	Delete(*models.Ota) error
@@ -24,7 +24,7 @@ type otaService struct {
 	models *model.All
 }
 
-// NewOtaService new Ota service
+// NewOtaService new Ota manager
 func NewOtaService(models *model.All) OtaService {
 	return otaService{
 		models: models,

+ 1 - 1
services/knowoapi/services/protocal.go

@@ -21,7 +21,7 @@ type protocalService struct {
 	model *model.All
 }
 
-// NewProtocalService new protocal service
+// NewProtocalService new protocal manager
 func NewProtocalService(model *model.All) ProtocalService {
 	return protocalService{
 		model: model,

+ 2 - 2
services/knowoapi/services/role.go

@@ -6,7 +6,7 @@ import (
 	"sparrow/services/knowoapi/model"
 )
 
-// RoleService role service
+// RoleService role manager
 type RoleService interface {
 	Create(*models.Role) error
 	Delete(*models.Role) error
@@ -19,7 +19,7 @@ type roleService struct {
 	models *model.All
 }
 
-// NewRoleService new role service
+// NewRoleService new role manager
 func NewRoleService(models *model.All) RoleService {
 	return roleService{
 		models: models,

+ 1 - 1
services/knowoapi/services/rule_chain.go

@@ -22,7 +22,7 @@ type ruleChainService struct {
 	model *model.All
 }
 
-// NewRuleChainService  new ruleChain service
+// NewRuleChainService  new ruleChain manager
 func NewRuleChainService(model *model.All) RuleChainService {
 	return ruleChainService{
 		model: model,

+ 1 - 1
services/knowoapi/services/sensor.go

@@ -21,7 +21,7 @@ type sensorService struct {
 	model *model.All
 }
 
-// NewSensorService new sensor service
+// NewSensorService new sensor manager
 func NewSensorService(model *model.All) SensorService {
 	return sensorService{
 		model: model,

+ 1 - 1
services/knowoapi/services/sub_device.go

@@ -17,7 +17,7 @@ type subDeviceService struct {
 	models *model.All
 }
 
-// NewSubDeviceService create SubDevice service
+// NewSubDeviceService create SubDevice manager
 func NewSubDeviceService(models *model.All) SubDeviceService {
 	return subDeviceService{
 		models: models,

+ 103 - 0
services/scene-service/internal/service/executer.go

@@ -0,0 +1,103 @@
+package service
+
+import (
+	"sparrow/pkg/rpcs"
+	"sparrow/pkg/server"
+	"time"
+)
+
+type Action struct {
+	DeviceID         string                `json:"device_id"`         // 设备ID
+	SubDeviceId      string                `json:"sub_device_id"`     // 实体子设备Id,如果需要
+	ActionExecutor   string                `json:"action_executor"`   // 动作对象类型
+	ExecutorProperty *TaskExecutorProperty `json:"executor_property"` // 动作执行明细
+	PlcPubMessage    *PlcPubMessage        `json:"plc_pub_message"`   // PLC消息
+}
+
+// TaskExecutorProperty 定时任务执行动作执行参数
+type TaskExecutorProperty struct {
+	/*
+		指令 code。当 action_executor 是 device_issue 或 device_group_issue 时,此参数必填。
+	*/
+	FunctionCode string `json:"function_code"`
+	/*
+		指令 value。当 action_executor 是 device_issue 或 device_group_issue 时,此参数必填。
+	*/
+	FunctionValue map[string]interface{} `json:"function_value"`
+	/*
+		延时时间。当 action_executor 是 delay 时,此参数必填。
+	*/
+	DelaySeconds int64 `json:"delay_seconds"`
+}
+
+type PlcPubMessage struct {
+	Topic   string `json:"topic"`
+	Payload []byte `json:"payload"`
+}
+
+// TaskExecutor 任务执行器,用来执行具体的任务动作
+type TaskExecutor struct {
+	Actions []*Action
+}
+
+func NewTaskExecutor(actions []*Action) *TaskExecutor {
+	return &TaskExecutor{
+		Actions: actions,
+	}
+}
+
+func (a *TaskExecutor) Do() error {
+	for _, action := range a.Actions {
+		if err := a.doTask(action); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (a *TaskExecutor) doTask(action *Action) error {
+	// 调用设备接入服务
+	rpchost, err := getAccessRPCHost(action.DeviceID)
+	if err != nil {
+		return err
+	}
+	reply := &rpcs.ReplyEmptyResult{}
+	if rpchost != "" {
+		args := rpcs.ArgsSendCommand{
+			DeviceId:  action.DeviceID,
+			SubDevice: action.SubDeviceId,
+			Cmd:       action.ExecutorProperty.FunctionCode,
+			Params:    action.ExecutorProperty.FunctionValue,
+		}
+		server.Log.Debugf("do Device Issue task args:%v", args)
+		return server.RPCCallByHost(rpchost, "Access.SendCommand", args, reply)
+	}
+
+	// No access host for this device; fall back to PLC publish only when configured,
+	// otherwise PlcPubMessage is nil and dereferencing it would panic.
+	if action.PlcPubMessage == nil {
+		return nil
+	}
+	var publishArgs rpcs.ArgsPublishMessage
+	publishArgs.Topic = action.PlcPubMessage.Topic
+	publishArgs.Payload = action.PlcPubMessage.Payload
+	err = server.RPCCallByName(nil, rpcs.EmqxAgentServiceName, "Access.PublishMessage", publishArgs, reply)
+	if err != nil {
+		server.Log.Errorf("plc设备发送消息失败:%v", err)
+	}
+
+	return nil
+}
+
+// 执行延时任务
+func (a *TaskExecutor) doDelayTask(action *Action) error {
+	time.Sleep(time.Duration(action.ExecutorProperty.DelaySeconds) * time.Second)
+	return a.doTask(action)
+}
+
+func getAccessRPCHost(deviceid string) (string, error) {
+	args := rpcs.ArgsGetDeviceOnlineStatus{
+		Id: deviceid,
+	}
+	reply := &rpcs.ReplyGetDeviceOnlineStatus{}
+	err := server.RPCCallByName(nil, rpcs.DeviceManagerName, "DeviceManager.GetDeviceOnlineStatus", args, reply)
+	if err != nil {
+		return "", err
+	}
+	return reply.AccessRPCHost, nil
+}

+ 170 - 0
services/scene-service/internal/service/manager/device_status.go

@@ -0,0 +1,170 @@
+package manager
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/gogf/gf/container/gmap"
+	"github.com/gogf/gf/encoding/gjson"
+	"sparrow/pkg/rpcs"
+	"sparrow/pkg/server"
+	"sparrow/pkg/utils"
+	"sparrow/services/scene-service/internal/service"
+	"time"
+)
+
+// DeviceSceneConfig describes a device-status-triggered scene: device
+// conditions polled on a fixed interval, firing Actions when the decision
+// expression is satisfied.
+type DeviceSceneConfig struct {
+	SceneId      string             `json:"scene_id"`
+	DecisionExpr string             `json:"decision_expr"` // condition combinator: "and" / "or"
+	Conditions   []*DeviceCondition `json:"conditions"`    // device conditions to evaluate
+	Actions      []*service.Action  `json:"actions"`       // actions fired when conditions match
+	Interval     int                `json:"interval"`      // poll interval; original comment said seconds, but Add/Update build the ticker with time.Minute — TODO confirm intended unit
+	ticker       *time.Ticker       `json:"-"`             // drives the monitor goroutine
+	stopChan     chan struct{}      `json:"-"`             // signals the monitor goroutine to exit
+}
+
+// DeviceCondition is one comparison against a device's reported status.
+type DeviceCondition struct {
+	DeviceType  string `json:"device_type"`   // device type (part of the status cache key)
+	DeviceId    string `json:"device_id"`     // device id
+	SubDeviceId string `json:"sub_device_id"` // sub-device id (may be empty)
+	FieldType   int    `json:"field_type"`    // field type: 1 string, 2 numeric
+	Field       string `json:"field"`         // status field name to read
+	TargetValue string `json:"target_value"`  // value to compare against
+	Operator    int    `json:"operator"`      // numeric compare: 1 > 2 >= 3 = 4 <= 5 < 6 !=
+}
+
+// DeviceSceneService manages device-status scenes keyed by scene id.
+type DeviceSceneService struct {
+	tasks *gmap.HashMap // scene id -> DeviceSceneConfig; created with gmap.New(true)
+}
+
+// NewDeviceSceneService creates an empty service backed by a
+// concurrency-safe map (gmap.New(true)).
+func NewDeviceSceneService() *DeviceSceneService {
+	return &DeviceSceneService{
+		tasks: gmap.New(true),
+	}
+}
+
+// Add parses a device-status scene config, validates it, and starts a
+// background monitor goroutine that evaluates the conditions periodically.
+func (d *DeviceSceneService) Add(config string) error {
+	var c DeviceSceneConfig
+	// Bug fix: the unmarshal error used to be silently swallowed by an
+	// empty if-block, letting a zero-value config through.
+	if err := json.Unmarshal([]byte(config), &c); err != nil {
+		server.Log.Errorf("config to deviceSceneConfig error :%s", err.Error())
+		return err
+	}
+	if len(c.Conditions) == 0 {
+		// Bug fix: the message used to say "weather" (copy-paste).
+		return errors.New("设备状态监控任务配置错误:判断条件不能为空")
+	}
+	if c.Interval <= 0 {
+		// time.NewTicker panics on a non-positive duration.
+		return errors.New("设备状态监控任务配置错误:检查间隔必须大于0")
+	}
+	// Ticker drives the monitor goroutine; the stop channel is buffered so
+	// Stop can signal without blocking.
+	c.ticker = time.NewTicker(time.Duration(c.Interval) * time.Minute)
+	c.stopChan = make(chan struct{}, 1)
+
+	go d.monitorTask(c)
+	d.tasks.Set(c.SceneId, c)
+	return nil
+}
+
+// Update replaces a running device-status scene: stop the old monitor (if
+// any, best effort), then start a fresh one with the new config.
+func (d *DeviceSceneService) Update(config string) error {
+	var c DeviceSceneConfig
+	// Bug fix: the unmarshal error used to be logged but not returned, so
+	// the method continued with a zero-value config.
+	if err := json.Unmarshal([]byte(config), &c); err != nil {
+		server.Log.Errorf("config to deviceSceneConfig error :%s", err.Error())
+		return err
+	}
+	_ = d.Stop(c.SceneId)
+
+	// Fresh ticker and (buffered) stop channel for the new monitor.
+	c.ticker = time.NewTicker(time.Duration(c.Interval) * time.Minute)
+	c.stopChan = make(chan struct{}, 1)
+
+	go d.monitorTask(c)
+	d.tasks.Set(c.SceneId, c)
+	// Bug fix: the debug message used to say "UpdateWeatherScene".
+	server.Log.Debugf("UpdateDeviceScene :%s", config)
+	return nil
+}
+
+// Remove stops the scene's monitor goroutine (if running) and deletes the
+// scene from the task map.
+func (d *DeviceSceneService) Remove(config string) error {
+	var c DeviceSceneConfig
+	// Bug fix: the unmarshal error used to be logged but not returned.
+	if err := json.Unmarshal([]byte(config), &c); err != nil {
+		server.Log.Errorf("config to deviceSceneConfig error :%s", err.Error())
+		return err
+	}
+	// Bug fix: the old code removed the map entry without stopping the
+	// monitor, leaking the goroutine and its ticker forever.
+	_ = d.Stop(c.SceneId)
+	d.tasks.Remove(c.SceneId)
+	// Bug fix: the debug message used to say "RemoveTimeScene".
+	server.Log.Debugf("RemoveDeviceScene :%s", c.SceneId)
+	return nil
+}
+
+// Start (re)starts a stopped scene. The previous ticker was stopped by
+// Stop(), so a fresh ticker and stop channel are created and the updated
+// config is stored back so a later Stop signals the right channel.
+// (The old comment wrongly said "stop task", and the old code reused the
+// stopped ticker, so a restarted scene never ticked again.)
+func (d *DeviceSceneService) Start(id string) error {
+	task := d.tasks.Get(id)
+	if task == nil {
+		return errors.New("任务不存在")
+	}
+	// Checked assertion instead of a potential panic on a corrupt entry.
+	c, ok := task.(DeviceSceneConfig)
+	if !ok {
+		return errors.New("任务配置类型错误")
+	}
+	c.ticker = time.NewTicker(time.Duration(c.Interval) * time.Minute)
+	c.stopChan = make(chan struct{}, 1)
+	go d.monitorTask(c)
+	d.tasks.Set(id, c)
+	return nil
+}
+
+// Stop signals the scene's monitor goroutine to exit. The send is
+// non-blocking so a repeated Stop, or a Stop after the monitor has already
+// exited, cannot deadlock the caller (the old unconditional send could
+// block forever once the monitor was gone).
+func (d *DeviceSceneService) Stop(id string) error {
+	task := d.tasks.Get(id)
+	if task == nil {
+		return errors.New("任务不存在")
+	}
+	c, ok := task.(DeviceSceneConfig)
+	if !ok {
+		return errors.New("任务配置类型错误")
+	}
+	select {
+	case c.stopChan <- struct{}{}:
+	default:
+	}
+	return nil
+}
+
+// monitorTask is the per-scene polling loop: on each tick it evaluates the
+// device conditions and, when satisfied, runs the scene's actions; it exits
+// when the stop channel is signalled.
+func (d *DeviceSceneService) monitorTask(config DeviceSceneConfig) {
+	for {
+		select {
+		case <-config.ticker.C: // tick: evaluate conditions
+			result, err := d.checkDeviceCondition(config)
+			if err != nil {
+				// NOTE(review): these log texts say "weather" — copy-paste
+				// from the weather manager; this loop checks device status.
+				server.Log.Errorf("compare weather condition error :%s", err.Error())
+			}
+			if result {
+				if err = service.NewTaskExecutor(config.Actions).Do(); err != nil {
+					server.Log.Errorf("weather do taskid :%s error:%s", config.SceneId, err.Error())
+				}
+			}
+		case <-config.stopChan: // stop signal received
+			config.ticker.Stop()
+			return
+		}
+	}
+}
+
+// checkDeviceCondition fetches the latest reported status of every device
+// in the config and combines the per-condition results with the scene's
+// decision expression ("and" / "or"). An unknown expression, or "or" with
+// no matching condition, evaluates to false.
+func (d *DeviceSceneService) checkDeviceCondition(config DeviceSceneConfig) (bool, error) {
+	// Pre-size to avoid reallocations; one result per condition.
+	results := make([]bool, 0, len(config.Conditions))
+	for _, v := range config.Conditions {
+		var args rpcs.ArgsGetStatus
+		// Status snapshots are cached under device:<type>:status:<id><subId>.
+		args.Key = fmt.Sprintf("device:%s:status:%s%s", v.DeviceType, v.DeviceId, v.SubDeviceId)
+		var reply rpcs.ReplyStatus
+		err := server.RPCCallByName(nil, rpcs.DeviceManagerName, "DeviceManager.GetDeviceStatus", args, &reply)
+		if err != nil {
+			server.Log.Errorf("设备状态数据获取失败:%v", err)
+			return false, err
+		}
+		j := gjson.New(reply.Status)
+		results = append(results, utils.CheckValue(v.TargetValue, j.Get(v.Field), v.FieldType, v.Operator))
+	}
+	switch config.DecisionExpr {
+	case "and":
+		for _, ok := range results {
+			if !ok {
+				return false, nil
+			}
+		}
+		return true, nil
+	case "or":
+		for _, ok := range results {
+			if ok {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}

+ 130 - 0
services/scene-service/internal/service/manager/timer.go

@@ -0,0 +1,130 @@
+package manager
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"github.com/gogf/gf/container/gmap"
+	"github.com/gogf/gf/v2/os/gcron"
+	"sparrow/pkg/server"
+	"sparrow/services/scene-service/internal/service"
+)
+
+// TimerSceneConfig is a cron-driven scene: one cron entry per condition,
+// all firing the same action list.
+type TimerSceneConfig struct {
+	SceneId    string                `json:"scene_id"`
+	Conditions []*TimerTaskCondition `json:"conditions"` // one cron entry each
+	Actions    []*service.Action     `json:"actions"`    // actions run on every firing
+}
+
+// TimerTaskCondition is a single cron schedule within a timer scene.
+type TimerTaskCondition struct {
+	TaskId string `json:"task_id"` // also used as the cron entry name
+	Times  int    `json:"times"`   // number of runs; -1 means unlimited
+	Cron   string `json:"cron"`    // cron expression
+}
+
+// TimerSceneService implements timer (cron) scenes.
+// NOTE(review): unlike the other managers, task is a zero-value
+// gmap.HashMap rather than one created via gmap.New(true); confirm the
+// zero value is usable and concurrency-safe under concurrent RPC calls.
+type TimerSceneService struct {
+	task gmap.HashMap
+	cron *gcron.Cron
+}
+
+// NewTimerSceneService creates a timer scene manager with its own cron
+// scheduler; the task map is left at its zero value.
+func NewTimerSceneService() *TimerSceneService {
+	return &TimerSceneService{
+		cron: gcron.New(),
+	}
+}
+
+// Add registers one cron entry per condition and remembers the scene so it
+// can later be updated, removed, started or stopped.
+func (t *TimerSceneService) Add(config string) error {
+	var c TimerSceneConfig
+	if err := json.Unmarshal([]byte(config), &c); err != nil {
+		server.Log.Errorf("config to timerConfig error :%s", err.Error())
+		return err
+	}
+	for _, cond := range c.Conditions {
+		if err := t.addTask(cond, c.Actions); err != nil {
+			server.Log.Errorf("add timerTask error:sceneId:%s, taskId: %s, error: %v", c.SceneId, cond.TaskId, err)
+			return err
+		}
+	}
+	t.task.Set(c.SceneId, c)
+	return nil
+}
+// addTask registers one cron entry, named by the condition's TaskId, that
+// executes the scene's actions; Times limits the number of runs
+// (-1 = unlimited, per TimerTaskCondition).
+func (t *TimerSceneService) addTask(c *TimerTaskCondition, actions []*service.Action) error {
+	_, err := t.cron.AddTimes(context.Background(), c.Cron, c.Times, func(ctx context.Context) {
+		if err := service.NewTaskExecutor(actions).Do(); err != nil {
+			server.Log.Errorf("do task :%s error:%s", c.TaskId, err.Error())
+		}
+	}, c.TaskId)
+	return err
+}
+
+// Update re-registers a timer scene: remove the old cron entries (if the
+// scene exists), then add entries for the new config.
+func (t *TimerSceneService) Update(config string) error {
+	var c TimerSceneConfig
+	// Bug fix: the unmarshal error used to be logged but not returned, so
+	// the method continued with a zero-value config.
+	if err := json.Unmarshal([]byte(config), &c); err != nil {
+		server.Log.Errorf("config to timerConfig error :%s", err.Error())
+		return err
+	}
+	if t.task.Contains(c.SceneId) {
+		// Checked assertion instead of a potential panic on a corrupt entry.
+		if oldTask, ok := t.task.Get(c.SceneId).(TimerSceneConfig); ok {
+			for _, v := range oldTask.Conditions {
+				t.cron.Remove(v.TaskId)
+			}
+		}
+	}
+
+	for _, v := range c.Conditions {
+		if err := t.addTask(v, c.Actions); err != nil {
+			server.Log.Errorf("add timerTask error:sceneId:%s, taskId: %s, error: %v", c.SceneId, v.TaskId, err)
+			return err
+		}
+	}
+	t.task.Set(c.SceneId, c)
+	server.Log.Debugf("UpdateTimeScene :%s", config)
+	return nil
+}
+
+// Remove deletes a scene and all of its cron entries.
+// NOTE(review): this takes the scene id, while the device/weather managers'
+// Remove take a config JSON — the TaskManager interface is used
+// inconsistently; verify callers pass the right value.
+func (t *TimerSceneService) Remove(id string) error {
+	if !t.task.Contains(id) {
+		return errors.New("场景不存在")
+	}
+	scene := t.task.Get(id).(TimerSceneConfig)
+	for _, v := range scene.Conditions {
+		t.cron.Remove(v.TaskId)
+	}
+	server.Log.Debugf("RemoveTimeScene :%s", scene.SceneId)
+	t.task.Remove(id)
+	return nil
+}
+
+// Start re-enables all cron entries belonging to the scene.
+func (t *TimerSceneService) Start(id string) error {
+	if !t.task.Contains(id) {
+		return errors.New("场景不存在")
+	}
+	scene := t.task.Get(id).(TimerSceneConfig)
+	for _, v := range scene.Conditions {
+		t.cron.Start(v.TaskId)
+	}
+	server.Log.Debugf("StartTimeScene :%s", scene.SceneId)
+	return nil
+}
+
+// Stop pauses all cron entries belonging to the scene; they can be resumed
+// with Start.
+func (t *TimerSceneService) Stop(id string) error {
+	if !t.task.Contains(id) {
+		return errors.New("场景不存在")
+	}
+	scene := t.task.Get(id).(TimerSceneConfig)
+	for _, v := range scene.Conditions {
+		t.cron.Stop(v.TaskId)
+	}
+	server.Log.Debugf("StopTimeScene :%s", scene.SceneId)
+	return nil
+}

+ 173 - 0
services/scene-service/internal/service/manager/weather.go

@@ -0,0 +1,173 @@
+package manager
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/gogf/gf/container/gmap"
+	"github.com/gogf/gf/encoding/gjson"
+	"sparrow/pkg/server"
+	"sparrow/pkg/utils"
+	"sparrow/services/scene-service/internal/service"
+	"time"
+	weather "weather-api-sdk"
+)
+
+// WeatherSceneConfig describes a weather-triggered scene: conditions are
+// checked against live weather data every Interval minutes and, when the
+// decision expression is satisfied, the actions fire.
+type WeatherSceneConfig struct {
+	SceneId      string              `json:"scene_id"`      // scene/task id
+	DecisionExpr string              `json:"decision_expr"` // condition combinator: "and" / "or"
+	Conditions   []*WeatherCondition `json:"conditions"`    // weather conditions
+	Interval     int                 `json:"interval"`      // check interval (minutes)
+	Actions      []*service.Action   `json:"actions"`       // actions fired on match
+	ticker       *time.Ticker        `json:"-"`             // drives the monitor goroutine
+	stopChan     chan struct{}       `json:"-"`             // signals the monitor goroutine to exit
+}
+
+// WeatherCondition is one comparison against a weather field for a location.
+type WeatherCondition struct {
+	Location    string `json:"location"`     // location to query
+	FieldType   int    `json:"field_type"`   // field type: 1 string, 2 numeric
+	Operator    int    `json:"operator"`     // numeric: 1 > 2 >= 3 = 4 <= 5 < 6 != ; string: 1 equal, 2 not equal
+	Field       string `json:"field"`        // weather field name
+	TargetValue string `json:"target_value"` // target value
+}
+
+// WeatherSceneService manages weather scenes keyed by scene id.
+type WeatherSceneService struct {
+	tasks *gmap.HashMap // scene id -> WeatherSceneConfig; created with gmap.NewHashMap(true)
+}
+
+// NewWeatherSceneService creates an empty service backed by a
+// concurrency-safe map (gmap.NewHashMap(true)).
+func NewWeatherSceneService() *WeatherSceneService {
+	return &WeatherSceneService{
+		tasks: gmap.NewHashMap(true),
+	}
+}
+
+// Add parses a weather scene config, validates it, and starts a background
+// monitor goroutine that re-checks the weather on the configured interval.
+func (w *WeatherSceneService) Add(config string) error {
+	var c WeatherSceneConfig
+	// Bug fix: the unmarshal error used to be silently swallowed by an
+	// empty if-block, letting a zero-value config through.
+	if err := json.Unmarshal([]byte(config), &c); err != nil {
+		server.Log.Errorf("config to weatherSceneConfig error :%s", err.Error())
+		return err
+	}
+	if len(c.Conditions) == 0 {
+		return errors.New("天气监控任务配置错误:判断条件不能为空")
+	}
+	if c.Interval <= 0 {
+		// time.NewTicker panics on a non-positive duration.
+		return errors.New("天气监控任务配置错误:检查间隔必须大于0")
+	}
+	// Ticker drives the monitor goroutine; the stop channel is buffered so
+	// Stop can signal without blocking.
+	c.ticker = time.NewTicker(time.Duration(c.Interval) * time.Minute)
+	c.stopChan = make(chan struct{}, 1)
+
+	go w.monitorTask(c)
+	w.tasks.Set(c.SceneId, c)
+	return nil
+}
+
+// monitorTask is the per-scene polling loop: on each tick it re-checks the
+// weather conditions and runs the scene's actions when they are satisfied;
+// it exits when the stop channel is signalled.
+func (w *WeatherSceneService) monitorTask(task WeatherSceneConfig) {
+	for {
+		select {
+		case <-task.ticker.C: // tick: evaluate weather conditions
+			result, err := w.checkWeatherCondition(task)
+			if err != nil {
+				server.Log.Errorf("compare weather condition error :%s", err.Error())
+			}
+			if result {
+				if err = service.NewTaskExecutor(task.Actions).Do(); err != nil {
+					server.Log.Errorf("weather do taskid :%s error:%s", task.SceneId, err.Error())
+				}
+			}
+		case <-task.stopChan: // stop signal received
+			task.ticker.Stop()
+			return
+		}
+	}
+}
+
+// Update restarts a weather scene with a new config: stop the old monitor
+// (best effort), then launch a new one.
+func (w *WeatherSceneService) Update(config string) error {
+	var c WeatherSceneConfig
+	// Bug fix: the unmarshal error used to be logged but not returned, so
+	// the method continued with a zero-value config.
+	if err := json.Unmarshal([]byte(config), &c); err != nil {
+		server.Log.Errorf("config to weatherSceneConfig error :%s", err.Error())
+		return err
+	}
+	_ = w.Stop(c.SceneId)
+
+	// Fresh ticker and (buffered) stop channel for the new monitor.
+	c.ticker = time.NewTicker(time.Duration(c.Interval) * time.Minute)
+	c.stopChan = make(chan struct{}, 1)
+
+	go w.monitorTask(c)
+	w.tasks.Set(c.SceneId, c)
+	server.Log.Debugf("UpdateWeatherScene :%s", config)
+	return nil
+}
+
+// Remove stops the scene's monitor goroutine (if running) and deletes the
+// scene from the task map.
+func (w *WeatherSceneService) Remove(config string) error {
+	var c WeatherSceneConfig
+	// Bug fix: the unmarshal error used to be logged but not returned.
+	if err := json.Unmarshal([]byte(config), &c); err != nil {
+		server.Log.Errorf("config to weatherSceneConfig error :%s", err.Error())
+		return err
+	}
+	// Bug fix: the old code removed the map entry without stopping the
+	// monitor, leaking the goroutine and its ticker forever.
+	_ = w.Stop(c.SceneId)
+	w.tasks.Remove(c.SceneId)
+	// Bug fix: the debug message used to say "RemoveTimeScene".
+	server.Log.Debugf("RemoveWeatherScene :%s", c.SceneId)
+	return nil
+}
+
+// Start (re)starts a stopped weather scene. The previous ticker was stopped
+// by Stop(), so a fresh ticker and stop channel are created and the updated
+// config is stored back so a later Stop signals the right channel.
+// (The old comment wrongly said "stop task", and the old code reused the
+// stopped ticker, so a restarted scene never ticked again.)
+func (w *WeatherSceneService) Start(id string) error {
+	task := w.tasks.Get(id)
+	if task == nil {
+		return errors.New("任务不存在")
+	}
+	// Checked assertion instead of a potential panic on a corrupt entry.
+	c, ok := task.(WeatherSceneConfig)
+	if !ok {
+		return errors.New("任务配置类型错误")
+	}
+	c.ticker = time.NewTicker(time.Duration(c.Interval) * time.Minute)
+	c.stopChan = make(chan struct{}, 1)
+	go w.monitorTask(c)
+	w.tasks.Set(id, c)
+	return nil
+}
+
+// Stop signals the scene's monitor goroutine to exit. The send is
+// non-blocking so a repeated Stop, or a Stop after the monitor has already
+// exited, cannot deadlock the caller (the old unconditional send could
+// block forever once the monitor was gone).
+func (w *WeatherSceneService) Stop(id string) error {
+	task := w.tasks.Get(id)
+	if task == nil {
+		return errors.New("任务不存在")
+	}
+	c, ok := task.(WeatherSceneConfig)
+	if !ok {
+		return errors.New("任务配置类型错误")
+	}
+	select {
+	case c.stopChan <- struct{}{}:
+	default:
+	}
+	return nil
+}
+
+// checkWeatherCondition fetches the weather for every condition's location
+// and combines the per-condition results with the scene's decision
+// expression ("and" / "or"). An unknown expression, or "or" with no
+// matching condition, evaluates to false.
+func (w *WeatherSceneService) checkWeatherCondition(config WeatherSceneConfig) (bool, error) {
+	// Bug fix: the slice used to be built with make([]bool, len(...)) and
+	// then appended to, so it always began with len(...) false entries and
+	// an "and" expression could never be satisfied.
+	results := make([]bool, 0, len(config.Conditions))
+	for _, condition := range config.Conditions {
+		weatherInfo, err := getWeatherInfo(condition.Location)
+		// Bug fix: weatherInfo was previously printed before the error
+		// check, i.e. used while potentially nil.
+		if err != nil {
+			fmt.Printf("任务 %s: 获取天气数据失败: %v\n", config.SceneId, err)
+			return false, err
+		}
+		fmt.Printf("任务 %s: 获取天气数据成功: %v\n", config.SceneId, weatherInfo)
+		results = append(results, utils.CheckValue(condition.TargetValue, weatherInfo[condition.Field], condition.FieldType, condition.Operator))
+	}
+	switch config.DecisionExpr {
+	case "and":
+		for _, ok := range results {
+			if !ok {
+				return false, nil
+			}
+		}
+		return true, nil
+	case "or":
+		for _, ok := range results {
+			if ok {
+				return true, nil
+			}
+		}
+	}
+	// Bug fix: the old code fell through to `return true`, so "or" with all
+	// conditions false — or any unknown expression — wrongly fired actions.
+	return false, nil
+}
+
+// getWeatherInfo fetches the weather for a location and flattens the
+// response into a generic map keyed by field name.
+// SECURITY(review): the API key is hard-coded in source control; move it to
+// configuration/secret storage and rotate the committed key.
+func getWeatherInfo(location string) (map[string]interface{}, error) {
+	weatherInfo, err := weather.GetWeatherInfo(location, "SgqF9ZGBUj7R0dLAb")
+	if err != nil {
+		return nil, err
+	}
+	return gjson.New(weatherInfo).Map(), nil
+}

+ 161 - 0
services/scene-service/internal/service/scene.go

@@ -0,0 +1,161 @@
+package service
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/gogf/gf/container/gmap"
+	"github.com/gogf/gf/encoding/gjson"
+	"sparrow/pkg/rpcs"
+	"sparrow/pkg/server"
+	service2 "sparrow/services/scene-service/internal/service/manager"
+)
+
+// SceneInfo is the in-memory record of a registered scene and the last
+// lifecycle action applied to it.
+type SceneInfo struct {
+	Id        string `json:"id"`
+	Name      string `json:"name"`
+	Status    int    `json:"status"`     // 1 running, 0 stopped (set by start/stop)
+	SceneType string `json:"scene_type"` // oneKey / timer / weather / deviceStatus
+	Config    string `json:"config"`     // manager-specific config JSON
+	Action    string `json:"action"`     // last lifecycle action (add/update/remove/start/stop/do)
+	Time      string `json:"time"`
+}
+
+// SceneService is the RPC facade over all per-type scene managers.
+type SceneService struct {
+	scenes       *gmap.HashMap // scene id -> SceneInfo; created with gmap.New(true)
+	timer        TaskManager   // "timer" scenes (cron)
+	weather      TaskManager   // "weather" scenes (polling)
+	deviceStatus TaskManager   // "deviceStatus" scenes (polling)
+}
+
+// NewSceneService wires up one manager per supported scene type.
+func NewSceneService() *SceneService {
+	return &SceneService{
+		scenes:       gmap.New(true),
+		timer:        service2.NewTimerSceneService(),
+		weather:      service2.NewWeatherSceneService(),
+		deviceStatus: service2.NewDeviceSceneService(),
+	}
+}
+
+// TaskManager is the per-scene-type lifecycle contract.
+// NOTE(review): the string argument is a config JSON for Add/Update, but
+// the implementations' Start/Stop (and the timer manager's Remove) expect a
+// scene id — the uniform "config" parameter name is misleading; align
+// callers and implementations.
+type TaskManager interface {
+	Add(config string) error
+	Update(config string) error
+	Remove(config string) error
+	Start(config string) error
+	Stop(config string) error
+}
+
+// getServiceByType maps a scene type to its manager. "oneKey" scenes have
+// no background manager and return (nil, nil); callers must nil-check the
+// returned TaskManager before invoking methods on it.
+func (m *SceneService) getServiceByType(sceneType string) (TaskManager, error) {
+	switch sceneType {
+	case "oneKey":
+		return nil, nil
+	case "timer":
+		return m.timer, nil
+	case "weather":
+		return m.weather, nil
+	case "deviceStatus":
+		return m.deviceStatus, nil
+	default:
+		return nil, errors.New("不支持的场景类型: " + sceneType)
+	}
+}
+
+// SubmitAction is the RPC entry point for scene lifecycle operations
+// (add/update/remove/start/stop/do). It dispatches to the manager owning
+// the scene type and mirrors the scene state into redis for restart recovery.
+func (m *SceneService) SubmitAction(args rpcs.ArgsSubmitSceneAction, reply *rpcs.ReplySubmitSceneAction) error {
+	scene := SceneInfo{
+		Id:        args.Id,
+		Name:      args.Name,
+		Status:    args.Status,
+		SceneType: args.SceneType,
+		Config:    args.Config,
+		Action:    args.Action,
+	}
+	srv, err := m.getServiceByType(scene.SceneType)
+	if err != nil {
+		server.Log.Errorf("不支持的场景类型: %s, 场景ID: %s", scene.SceneType, scene.Id)
+		return err
+	}
+	// Bug fix: "oneKey" scenes have no manager (srv == nil); the old code
+	// called lifecycle methods on the nil interface and panicked. Only the
+	// direct "do" action is meaningful for them.
+	if srv == nil && scene.Action != "do" {
+		server.Log.Debugf("scene type %s has no manager; action %s ignored, id: %s", scene.SceneType, scene.Action, scene.Id)
+		return nil
+	}
+
+	switch scene.Action {
+	case "add":
+		if err = srv.Add(scene.Config); err != nil {
+			return err
+		}
+		m.scenes.Set(scene.Id, scene)
+		return m.saveSceneRedis(scene)
+	case "remove":
+		// NOTE(review): device/weather managers expect the config JSON here
+		// while the timer manager expects the scene id — align the interface.
+		if err = srv.Remove(scene.Config); err != nil {
+			server.Log.Errorf("delete scene-manager error: Id: %s, error: %v", scene.Id, err)
+		}
+		m.scenes.Remove(scene.Id)
+		return m.delSceneRedis(scene)
+	case "update":
+		if err = srv.Update(scene.Config); err != nil {
+			server.Log.Errorf("update scene-manager error: Id: %s, error: %v", scene.Id, err)
+			return err
+		}
+		m.scenes.Set(scene.Id, scene)
+		return m.saveSceneRedis(scene)
+	case "start":
+		// Bug fix: every manager's Start expects the scene id; the old
+		// srv.Start(scene.Config) passed the config JSON and never matched.
+		if err = srv.Start(scene.Id); err != nil {
+			server.Log.Errorf("start scene-manager error: Id: %s, error: %v", scene.Id, err)
+			return err
+		}
+		scene.Status = 1
+		m.scenes.Set(scene.Id, scene)
+		return m.saveSceneRedis(scene)
+	case "stop":
+		// Bug fix: Stop also expects the scene id (see "start").
+		if err = srv.Stop(scene.Id); err != nil {
+			server.Log.Errorf("stop scene-manager error: Id: %s, error: %v", scene.Id, err)
+			return err
+		}
+		scene.Status = 0
+		m.scenes.Set(scene.Id, scene)
+		return m.saveSceneRedis(scene)
+	case "do":
+		// Bug fix: the execution error used to be assigned and discarded.
+		// NOTE(review): scene.Action is the literal string "do" here — the
+		// actions payload probably belongs in a dedicated field; confirm.
+		return m.doAction(gjson.New(scene.Action).MustToJsonString())
+	}
+	return nil
+}
+
+// doAction decodes a JSON array of actions and executes them immediately.
+func (m *SceneService) doAction(action string) error {
+	var acts []*Action
+	if err := json.Unmarshal([]byte(action), &acts); err != nil {
+		server.Log.Errorf("unmarshal actions error :%v", err)
+		return err
+	}
+	return NewTaskExecutor(acts).Do()
+}
+
+// saveSceneRedis persists the scene config to redis via the device manager
+// so scenes can be re-registered after a service restart.
+// NOTE(review): the reply type is ReplyGetDeviceOnlineStatus, borrowed from
+// an unrelated RPC — presumably just a placeholder; confirm.
+func (m *SceneService) saveSceneRedis(scene SceneInfo) error {
+	sceneArgs := rpcs.ArgsScene{
+		Key:     fmt.Sprintf("scene-manager:%s:%s", scene.SceneType, scene.Id),
+		SceneId: scene.Id,
+		Config:  scene.Config,
+	}
+	sceneReply := rpcs.ReplyGetDeviceOnlineStatus{}
+	err := server.RPCCallByName(nil, rpcs.DeviceManagerName, "DeviceManager.SaveScene", sceneArgs, &sceneReply)
+	if err != nil {
+		server.Log.Errorf("save scene-manager error: Id: %s, error: %v", scene.Id, err)
+	}
+	return err
+}
+
+// delSceneRedis removes the scene's persisted config from redis via the
+// device manager.
+func (m *SceneService) delSceneRedis(scene SceneInfo) error {
+	sceneArgs := rpcs.ArgsScene{
+		Key:    fmt.Sprintf("scene-manager:%s:%s", scene.SceneType, scene.Id),
+		Config: scene.Config,
+	}
+	sceneReply := rpcs.ReplyGetDeviceOnlineStatus{}
+	err := server.RPCCallByName(nil, rpcs.DeviceManagerName, "DeviceManager.DeleteScene", sceneArgs, &sceneReply)
+	if err != nil {
+		// Bug fix: the log message used to say "save" on the delete path.
+		server.Log.Errorf("delete scene-manager error: Id: %s, error: %v", scene.Id, err)
+	}
+	return err
+}

+ 28 - 0
services/scene-service/main.go

@@ -0,0 +1,28 @@
+package main
+
+import (
+	"sparrow/pkg/rpcs"
+	"sparrow/pkg/server"
+	"sparrow/services/scene-service/internal/service"
+)
+
+// main boots the scene service: framework init, RPC handler registration,
+// then the blocking server loop.
+func main() {
+	// init server
+	err := server.Init(rpcs.SceneServiceName)
+	if err != nil {
+		server.Log.Fatal(err)
+		return
+	}
+	s := service.NewSceneService()
+
+	// Expose SceneService methods (e.g. SubmitAction) over RPC.
+	err = server.RegisterRPCHandler(s)
+	if err != nil {
+		server.Log.Errorf("Register RPC manager Error: %s", err)
+		return
+	}
+
+	// Run blocks serving requests until the process exits.
+	err = server.Run()
+	if err != nil {
+		server.Log.Fatal(err)
+	}
+}

+ 16 - 0
services/timer-service/internal/scheduler.go

@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"github.com/gogf/gf/container/gmap"
 	"github.com/gogf/gf/encoding/gjson"
+	"github.com/gogf/gf/frame/g"
 	"github.com/gogf/gf/v2/os/gcron"
 	"sparrow/pkg/rule"
 	"sparrow/pkg/server"
@@ -40,6 +41,8 @@ func (t *TaskSchedule) AddTask(msg []byte) error {
 		if err = NewTaskExecutor(task.Actions).Do(); err != nil {
 			server.Log.Errorf("do taskid :%s error:%s", task.TaskId, err.Error())
 		}
+		_ = t.executeNotice(task.TaskId)
+
 	}, task.TaskId)
 	if err != nil {
 		return err
@@ -97,3 +100,16 @@ func (t *TaskSchedule) StopMessageHandle(msg *rule.TaskLifecycleMessage) error {
 	server.Log.Debugf("StopMessageHandle :%s", msg.TaskId)
 	return nil
 }
+
+// executeNotice notifies the web API that a scene condition/task has just
+// executed, so the run can be recorded.
+func (t *TaskSchedule) executeNotice(taskId string) error {
+	client := g.Client()
+	// Bug fixes: the header was previously set on a client that was never
+	// used (a second g.Client() made the request), and the header name was
+	// "Content-NoticeType" instead of "Content-Type".
+	client.SetHeader("Content-Type", "application/json")
+	args := gjson.New("")
+	_ = args.Set("condition_id", taskId)
+	// NOTE(review): server.GetRPCHost() is passed where the URL is expected
+	// and the real endpoint is hard-coded as data — confirm the intended
+	// target and move the host/URL into configuration.
+	r, err := client.Post(server.GetRPCHost(), "http://192.168.0.62:8199/web/v1/scenes/execute_notice", args)
+	if err != nil {
+		server.Log.Errorf("execute notice error:%s", err.Error())
+		// Bug fix: on error r may be nil; the old defer r.Close() would panic.
+		return err
+	}
+	defer r.Close()
+	return nil
+}

+ 9 - 0
services/timer-service/internal/timer_service.go

@@ -2,6 +2,7 @@ package internal
 
 import (
 	"fmt"
+	"github.com/gogf/gf/frame/g"
 	"github.com/streadway/amqp"
 	"sparrow/pkg/rpcs"
 	"sparrow/pkg/rule"
@@ -30,6 +31,7 @@ func NewTimerService(host string) *TimerService {
 		reconnectChan: make(chan struct{}),
 	}
 	go ts.handleReconnect()
+	go ts.restartAllTask()
 	return ts
 }
 
@@ -147,3 +149,10 @@ func (s *TimerService) handleReInit(conn *amqp.Connection) bool {
 		}
 	}
 }
+
+// restartAllTask asks the web API to re-register all scenes after this
+// service (re)starts, so cron entries lost on restart are rebuilt.
+func (s *TimerService) restartAllTask() {
+	// NOTE(review): server.GetRPCHost() is passed where the URL is expected
+	// and the real endpoint is hard-coded as data — confirm the intended
+	// target and move the host/URL into configuration.
+	r, err := g.Client().Post(server.GetRPCHost(), "http://192.168.0.62:8199/web/v1/scenes/restart")
+	if err != nil {
+		server.Log.Errorf("restart all task error:%s", err.Error())
+		return
+	}
+	// Bug fix: the response used to be discarded, leaking the body/connection.
+	defer r.Close()
+}

+ 4 - 0
vendor/github.com/clbanning/mxj/v2/.travis.yml

@@ -0,0 +1,4 @@
+language: go
+
+go:
+- 1.x

+ 22 - 0
vendor/github.com/clbanning/mxj/v2/LICENSE

@@ -0,0 +1,22 @@
+Copyright (c) 2012-2021 Charles Banning <clbanning@gmail.com>.  All rights reserved.
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 201 - 0
vendor/github.com/clbanning/mxj/v2/anyxml.go

@@ -0,0 +1,201 @@
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"reflect"
+)
+
+const (
+	DefaultElementTag = "element"
+)
+
+// Encode arbitrary value as XML.
+//
+// Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/mxj"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := mxj.AnyXmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+		  <somekey>somevalue</somekey>
+		  <element>string</element>
+		  <element>3.14159265</element>
+		  <element>true</element>
+		</mydoc>
+
+An extreme example is available in examples/goofy_map.go.
+*/
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXml( v, myRootTag, myElementTag).
+func AnyXml(v interface{}, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte("<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte("<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.Marshal(v)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">"); err != nil {
+			return nil, err
+		}
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(false, s, tag, val, p)
+					}
+				} else {
+					err = marshalMapToXmlIndent(false, s, et, vv, p)
+				}
+			default:
+				err = marshalMapToXmlIndent(false, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString("</" + rt + ">"); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.Xml(rt)
+	default:
+		err = marshalMapToXmlIndent(false, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}
+
+// Encode an arbitrary value as a pretty XML string.
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXmlIndent( v, "", "  ", myRootTag, myElementTag).
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte(prefix + "<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte(prefix + "<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.MarshalIndent(v, prefix, indent)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">\n"); err != nil {
+			return nil, err
+		}
+		p.Indent()
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(true, s, tag, val, p)
+					}
+				} else {
+					p.start = 1 // we 1 tag in
+					err = marshalMapToXmlIndent(true, s, et, vv, p)
+					// *s += "\n"
+					if _, err = s.WriteString("\n"); err != nil {
+						return nil, err
+					}
+				}
+			default:
+				p.start = 0 // in case trailing p.start = 1
+				err = marshalMapToXmlIndent(true, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString(`</` + rt + `>`); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.XmlIndent(prefix, indent, rt)
+	default:
+		err = marshalMapToXmlIndent(true, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}

+ 54 - 0
vendor/github.com/clbanning/mxj/v2/atomFeedString.xml

@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
+</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
+  An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+  1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
+     feeds that will be pubsubhubbubbed.
+  2. every time one of those feeds changes, tell the hub
+     with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&amp;#39;t quite get the server to work, but I think the bug
+is not in my code.  I think that the server expects to be
+able to grab the feed and see the feed&amp;#39;s actual URL in
+the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+</summary></entry><entry><title>rietveld: correct tab handling
+</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
+  This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&amp;#39;t know where to put the tab stops.  Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites.  I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+</summary></entry></feed> 	   `
+

+ 143 - 0
vendor/github.com/clbanning/mxj/v2/doc.go

@@ -0,0 +1,143 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2019, Charles Banning. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file
+
+/*
+Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package.  The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them.
+
+Note: this library was designed for processing ad hoc anonymous messages.  Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly.
+
+Related Packages:
+	checkxml: github.com/clbanning/checkxml provides functions for validating XML data.
+
+Notes:
+	2022.11.28: v2.7 - add SetGlobalKeyMapPrefix to change default prefix, '#', for default keys
+	2022.11.20: v2.6 - add NewMapFormattedXmlSeq for XML docs formatted with whitespace character
+	2021.02.02: v2.5 - add XmlCheckIsValid toggle to force checking that the encoded XML is valid
+	2020.12.14: v2.4 - add XMLEscapeCharsDecoder to preserve XML escaped characters in Map values
+	2020.10.28: v2.3 - add TrimWhiteSpace option
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>].
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+
+SUMMARY
+
+   type Map map[string]interface{}
+
+   Create a Map value, 'mv', from any map[string]interface{} value, 'v':
+      mv := Map(v)
+
+   Unmarshal / marshal XML as a Map value, 'mv':
+      mv, err := NewMapXml(xmlValue) // unmarshal
+      xmlValue, err := mv.Xml()      // marshal
+
+   Unmarshal XML from an io.Reader as a Map value, 'mv':
+      mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+      mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
+
+   Marshal Map value, 'mv', to an XML Writer (io.Writer):
+      err := mv.XmlWriter(xmlWriter)
+      raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
+
+   Also, for prettified output:
+      xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+      err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+      raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
+
+   Bulk process XML with error handling (note: handlers must return a boolean value):
+      err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+      err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
+
+   Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader.
+
+   There are comparable functions and methods for JSON processing.
+
+   Arbitrary structure values can be decoded to / encoded from Map values:
+      mv, err := NewMapStruct(structVal)
+      err := mv.Struct(structPointer)
+
+   To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+   or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then:
+      paths := mv.PathsForKey(key)
+      path := mv.PathForKeyShortest(key)
+      values, err := mv.ValuesForKey(key, subkeys)
+      values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays.
+      count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
+
+   Get everything at once, irrespective of path depth:
+      leafnodes := mv.LeafNodes()
+      leafvalues := mv.LeafValues()
+
+   A new Map with whatever keys are desired can be created from the current Map and then encoded in XML
+   or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.)
+      newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+      newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+      newXml, err := newMap.Xml()   // for example
+      newJson, err := newMap.Json() // ditto
+
+XML PARSING CONVENTIONS
+
+   Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+   Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved: 
+      - <ns:key>something</ns.key> parses to map["ns:key"]interface{}{"something"}
+      - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"}
+
+   Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+XML ENCODING CONVENTIONS
+   
+   - 'nil' Map values, which may represent 'null' JSON values, are encoded as "<tag/>".
+     NOTE: the operation is not symmetric as "<tag/>" elements are decoded as 'tag:""' Map values,
+           which, then, encode in JSON as '"tag":""' values..
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+*/
+package mxj

+ 93 - 0
vendor/github.com/clbanning/mxj/v2/escapechars.go

@@ -0,0 +1,93 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+)
+
+var xmlEscapeChars bool
+
+// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values.
+// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is
+// then '&amp;' will be re-escaped as '&amp;amp;'.
+//
+/*
+	The values are:
+	"   &quot;
+	'   &apos;
+	<   &lt;
+	>   &gt;
+	&   &amp;
+*/
+//
+// Note: if XMLEscapeCharsDecoder(true) has been called - or the default, 'false,' value
+// has been toggled to 'true' - then XMLEscapeChars(true) is ignored.  If XMLEscapeChars(true)
+// has already been called before XMLEscapeCharsDecoder(true), XMLEscapeChars(false) is called
+// to turn escape encoding on mv.Xml, etc., to prevent double escaping ampersands, '&'.
+func XMLEscapeChars(b ...bool) {
+	var bb bool
+	if len(b) == 0 {
+		bb = !xmlEscapeChars
+	} else {
+		bb = b[0]
+	}
+	if bb == true && xmlEscapeCharsDecoder == false {
+		xmlEscapeChars = true
+	} else {
+		xmlEscapeChars = false
+	}
+}
+
+// Scan for '&' first, since 's' may contain "&amp;" that is parsed to "&amp;amp;"
+// - or "&lt;" that is parsed to "&amp;lt;".
+var escapechars = [][2][]byte{
+	{[]byte(`&`), []byte(`&amp;`)},
+	{[]byte(`<`), []byte(`&lt;`)},
+	{[]byte(`>`), []byte(`&gt;`)},
+	{[]byte(`"`), []byte(`&quot;`)},
+	{[]byte(`'`), []byte(`&apos;`)},
+}
+
+func escapeChars(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+
+	b := []byte(s)
+	for _, v := range escapechars {
+		n := bytes.Count(b, v[0])
+		if n == 0 {
+			continue
+		}
+		b = bytes.Replace(b, v[0], v[1], n)
+	}
+	return string(b)
+}
+
+// per issue #84, escape CharData values from xml.Decoder
+
+var xmlEscapeCharsDecoder bool
+
+// XMLEscapeCharsDecoder(b ...bool) escapes XML characters in xml.CharData values
+// returned by Decoder.Token.  Thus, the internal Map values will contain escaped
+// values, and you do not need to set XMLEscapeChars for proper encoding.
+//
+// By default, the Map values have the non-escaped values returned by Decoder.Token.
+// XMLEscapeCharsDecoder(true) - or, XMLEscapeCharsDecoder() - will toggle escape
+// encoding 'on.'
+//
+// Note: if XMLEscapeCharsDecoder(true) is called then XMLEscapeChars(false) is
+// called to prevent re-escaping the values on encoding using mv.Xml, etc.
+func XMLEscapeCharsDecoder(b ...bool) {
+	if len(b) == 0 {
+		xmlEscapeCharsDecoder = !xmlEscapeCharsDecoder
+	} else {
+		xmlEscapeCharsDecoder = b[0]
+	}
+	if xmlEscapeCharsDecoder == true && xmlEscapeChars == true {
+		xmlEscapeChars = false
+	}
+}

+ 9 - 0
vendor/github.com/clbanning/mxj/v2/exists.go

@@ -0,0 +1,9 @@
+package mxj
+
+// Checks whether the path exists. If err != nil then 'false' is returned
+// along with the error encountered parsing either the "path" or "subkeys"
+// argument.
+func (mv Map) Exists(path string, subkeys ...string) (bool, error) {
+	v, err := mv.ValuesForPath(path, subkeys...)
+	return (err == nil && len(v) > 0), err
+}

+ 287 - 0
vendor/github.com/clbanning/mxj/v2/files.go

@@ -0,0 +1,287 @@
+package mxj
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+type Maps []Map
+
+func NewMaps() Maps {
+	return make(Maps, 0)
+}
+
+type MapRaw struct {
+	M Map
+	R []byte
+}
+
+// NewMapsFromJsonFile - creates an array from a file of JSON values.
+func NewMapsFromJsonFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values.
+func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFile - creates an array from a file of XML values.
+func NewMapsFromXmlFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values.
+// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw().
+// It is slow at parsing a file from disk and is intended for relatively small utility files.
+func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// ------------------------ Maps writing -------------------------
+// These are handy-dandy methods for dumping configuration data, etc.
+
+// JsonString - analogous to mv.Json()
+func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) {
+	var s string
+	for _, v := range mvs {
+		j, err := v.Json()
+		if err != nil {
+			return s, err
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// JsonStringIndent - analogous to mv.JsonIndent()
+func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) {
+	var s string
+	var haveFirst bool
+	for _, v := range mvs {
+		j, err := v.JsonIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		if haveFirst {
+			s += "\n"
+		} else {
+			haveFirst = true
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// XmlString - analogous to mv.Xml()
+func (mvs Maps) XmlString() (string, error) {
+	var s string
+	for _, v := range mvs {
+		x, err := v.Xml()
+		if err != nil {
+			return s, err
+		}
+		s += string(x)
+	}
+	return s, nil
+}
+
+// XmlStringIndent - analogous to mv.XmlIndent()
+func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) {
+	var s string
+	for _, v := range mvs {
+		x, err := v.XmlIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		s += string(x)
+	}
+	return s, nil
+}
+
+// JsonFile - write Maps to named file as JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonWriter method.
+func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonString(encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// JsonFileIndent - write Maps to named file as pretty JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonIndentWriter method.
+func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonStringIndent(prefix, indent, encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// XmlFile - write Maps to named file as XML
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlWriter method.
+func (mvs Maps) XmlFile(file string) error {
+	s, err := mvs.XmlString()
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// XmlFileIndent - write Maps to named file as pretty XML
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlIndentWriter method.
+func (mvs Maps) XmlFileIndent(file, prefix, indent string) error {
+	s, err := mvs.XmlStringIndent(prefix, indent)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}

+ 2 - 0
vendor/github.com/clbanning/mxj/v2/files_test.badjson

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"some", "bad":JSON, "in":"it" }

+ 9 - 0
vendor/github.com/clbanning/mxj/v2/files_test.badxml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</other>
+	<for>test case</for>
+</msg>

+ 2 - 0
vendor/github.com/clbanning/mxj/v2/files_test.json

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"just", "two":2, "JSON":"values", "true":true }

+ 9 - 0
vendor/github.com/clbanning/mxj/v2/files_test.xml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</another>
+	<for>test case</for>
+</msg>

+ 1 - 0
vendor/github.com/clbanning/mxj/v2/files_test_dup.json

@@ -0,0 +1 @@
+{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"}

+ 1 - 0
vendor/github.com/clbanning/mxj/v2/files_test_dup.xml

@@ -0,0 +1 @@
+<doc><data>for files.go</data><some>test</some></doc><msg><another>doc</another><for>test case</for><just>some</just></msg>

+ 12 - 0
vendor/github.com/clbanning/mxj/v2/files_test_indent.json

@@ -0,0 +1,12 @@
+{
+  "a": "test",
+  "file": "for",
+  "files_test.go": "case",
+  "this": "is"
+}
+{
+  "JSON": "values",
+  "true": true,
+  "two": 2,
+  "with": "just"
+}

+ 8 - 0
vendor/github.com/clbanning/mxj/v2/files_test_indent.xml

@@ -0,0 +1,8 @@
+<doc>
+  <data>for files.go</data>
+  <some>test</some>
+</doc><msg>
+  <another>doc</another>
+  <for>test case</for>
+  <just>some</just>
+</msg>

+ 35 - 0
vendor/github.com/clbanning/mxj/v2/gob.go

@@ -0,0 +1,35 @@
+// gob.go - Encode/Decode a Map into a gob object.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/gob"
+)
+
+// NewMapGob returns a Map value for a gob object that has been
+// encoded from a map[string]interface{} (or compatible type) value.
+// It is intended to provide symmetric handling of Maps that have
+// been encoded using mv.Gob.
+func NewMapGob(gobj []byte) (Map, error) {
+	m := make(map[string]interface{}, 0)
+	if len(gobj) == 0 {
+		return m, nil
+	}
+	r := bytes.NewReader(gobj)
+	dec := gob.NewDecoder(r)
+	if err := dec.Decode(&m); err != nil {
+		return m, err
+	}
+	return m, nil
+}
+
+// Gob returns a gob-encoded value for the Map 'mv'.
+func (mv Map) Gob() ([]byte, error) {
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	if err := enc.Encode(map[string]interface{}(mv)); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}

+ 323 - 0
vendor/github.com/clbanning/mxj/v2/json.go

@@ -0,0 +1,323 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+)
+
+// ------------------------------ write JSON -----------------------
+
+// Just a wrapper on json.Marshal.
+// If option safeEncoding is 'true' then safe encoding of '<', '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) Json(safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.Marshal(mv)
+
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// Just a wrapper on json.MarshalIndent.
+// If option safeEncoding is 'true' then safe encoding of '<', '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.MarshalIndent(mv, prefix, indent)
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// The following implementation is provided for symmetry with NewMapJsonReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// Writes the Map as pretty JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// --------------------------- read JSON -----------------------------
+
+// Decode numeric values as json.Number type Map values - see encoding/json#Number.
+// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(), 
+// etc.; it does not affect NewMapXml(), etc.  The XML encoders mv.Xml() and mv.XmlIndent()
+// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number
+// value types and the resulting Map can be correctly encoded into a XML object.
+var JsonUseNumber bool
+
+// Just a wrapper on json.Unmarshal
+//	Converting JSON to XML is a simple as:
+//		...
+//		mapVal, merr := mxj.NewMapJson(jsonVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		xmlVal, xerr := mapVal.Xml()
+//		if xerr != nil {
+//			// handle error
+//		}
+// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}],
+// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map.
+// See mxj/j2x/j2x_test.go.
+func NewMapJson(jsonVal []byte) (Map, error) {
+	// empty or nil begets empty
+	if len(jsonVal) == 0 {
+		m := make(map[string]interface{}, 0)
+		return m, nil
+	}
+	// handle a goofy case ...
+	if jsonVal[0] == '[' {
+		jsonVal = []byte(`{"object":` + string(jsonVal) + `}`)
+	}
+	m := make(map[string]interface{})
+	// err := json.Unmarshal(jsonVal, &m)
+	buf := bytes.NewReader(jsonVal)
+	dec := json.NewDecoder(buf)
+	if JsonUseNumber {
+		dec.UseNumber()
+	}
+	err := dec.Decode(&m)
+	return m, err
+}
+
+// Retrieve a Map value from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object.
+func NewMapJsonReader(jsonReader io.Reader) (Map, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	return NewMapJson(*jb)
+}
+
+// Retrieve a Map value and raw JSON - []byte - from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object and retrieve the raw JSON in a single call.
+func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, *jb, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	m, merr := NewMapJson(*jb)
+	return m, *jb, merr
+}
+
+// Pull the next JSON string off the stream: just read from first '{' to its closing '}'.
+// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package.
+func getJson(rdr io.Reader) (*[]byte, error) {
+	bval := make([]byte, 1)
+	jb := make([]byte, 0)
+	var inQuote, inJson bool
+	var parenCnt int
+	var previous byte
+
+	// scan the input for a matched set of {...}
+	// json.Unmarshal will handle syntax checking.
+	for {
+		_, err := rdr.Read(bval)
+		if err != nil {
+			if err == io.EOF && inJson && parenCnt > 0 {
+				return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb))
+			}
+			return &jb, err
+		}
+		switch bval[0] {
+		case '{':
+			if !inQuote {
+				parenCnt++
+				inJson = true
+			}
+		case '}':
+			if !inQuote {
+				parenCnt--
+			}
+			if parenCnt < 0 {
+				return nil, fmt.Errorf("closing } without opening {: %s", string(jb))
+			}
+		case '"':
+			if inQuote {
+				if previous == '\\' {
+					break
+				}
+				inQuote = false
+			} else {
+				inQuote = true
+			}
+		case '\n', '\r', '\t', ' ':
+			if !inQuote {
+				continue
+			}
+		}
+		if inJson {
+			jb = append(jb, bval[0])
+			if parenCnt == 0 {
+				break
+			}
+		}
+		previous = bval[0]
+	}
+
+	return &jb, nil
+}
+
+// ------------------------------- JSON Reader handler via Map values  -----------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for input.
+var jhandlerPollInterval = time.Duration(1e6)
+
+// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader().
+// This avoids treating one or other as a special case and discussing the underlying stdlib logic.
+
+// Bulk process JSON using handlers that process a Map value.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader  processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapJsonReader(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// Bulk process JSON using handlers that process a Map value and the raw JSON.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapJsonReaderRaw(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}

+ 668 - 0
vendor/github.com/clbanning/mxj/v2/keyvalues.go

@@ -0,0 +1,668 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+//	keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters.
+
+package mxj
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ----------------------------- get everything FOR a single key -------------------------
+
+const (
+	minArraySize = 32
+)
+
+var defaultArraySize int = minArraySize
+
+// SetArraySize adjust the buffers for expected number of values to return from ValuesForKey() and ValuesForPath().
+// This can have the effect of significantly reducing memory allocation-copy functions for large data sets.
+// Returns the initial buffer size.
+func SetArraySize(size int) int {
+	if size > minArraySize {
+		defaultArraySize = size
+	} else {
+		defaultArraySize = minArraySize
+	}
+	return defaultArraySize
+}
+
+// ValuesForKey return all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match.
+// On error, the returned slice is 'nil'. NOTE: 'key' can be wildcard, "*".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - If the 'key' refers to a list, then "key:value" could select a list member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion criteria - e.g., "!author:William T. Gaddis".
+//             - If val contains ":" symbol, use SetFieldSeparator to an unused symbol, perhaps "|".
+func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ret := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	hasKey(m, key, &ret, &cnt, subKeyMap)
+	return ret[:cnt], nil
+}
+
+var KeyNotExistError = errors.New("Key does not exist")
+
+// ValueForKey is a wrapper on ValuesForKey.  It returns the first member of []interface{}, if any.
+// If there is no value, "nil, nil" is returned.
+func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) {
+	vals, err := mv.ValuesForKey(key, subkeys...)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, KeyNotExistError
+	}
+	return vals[0], nil
+}
+
+// hasKey - if the map 'key' exists append it to array
+//          if it doesn't do nothing except scan array and map values
+func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) {
+	// func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		// see if the current value is of interest
+		if v, ok := vv[key]; ok {
+			switch v.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(v, subkeys) {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			case []interface{}:
+				for _, av := range v.([]interface{}) {
+					if hasSubKeys(av, subkeys) {
+						*ret = append(*ret, av)
+						*cnt++
+					}
+				}
+			default:
+				if len(subkeys) == 0 {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			}
+		}
+
+		// wildcard case
+		if key == "*" {
+			for _, v := range vv {
+				switch v.(type) {
+				case map[string]interface{}:
+					if hasSubKeys(v, subkeys) {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				case []interface{}:
+					for _, av := range v.([]interface{}) {
+						if hasSubKeys(av, subkeys) {
+							*ret = append(*ret, av)
+							*cnt++
+						}
+					}
+				default:
+					if len(subkeys) == 0 {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				}
+			}
+		}
+
+		// scan the rest
+		for _, v := range vv {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	case []interface{}:
+		for _, v := range iv.([]interface{}) {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	}
+}
+
+// -----------------------  get everything for a node in the Map ---------------------------
+
+// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.)
+// 2014.04.28 - implementation note.
+// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion
+// of wildcards and unindexed arrays.  Embedding such logic into valuesForKeyPath() would have made the
+// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead.
+
+// ValuesForPath retrieves all values for a path from the Map.  If len(returned_values) == 0, then no match.
+// On error, the returned array is 'nil'.
+//   'path' is a dot-separated path of key values.
+//          - If a node in the path is '*', then everything beyond is walked.
+//          - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" -
+//            even "*[2].*[0].field".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - If the 'path' refers to a list, then "tag:value" would return member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion criteria - e.g., "!author:William T. Gaddis".
+//             - If val contains ":" symbol, use SetFieldSeparator to an unused symbol, perhaps "|".
+func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	// If there are no array indexes in path, use legacy ValuesForPath() logic.
+	if strings.Index(path, "[") < 0 {
+		return mv.oldValuesForPath(path, subkeys...)
+	}
+
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys, kerr := parsePath(path)
+	if kerr != nil {
+		return nil, kerr
+	}
+
+	vals, verr := valuesForArray(keys, mv)
+	if verr != nil {
+		return nil, verr // Vals may be nil, but return empty array.
+	}
+
+	// Need to handle subkeys ... only return members of vals that satisfy conditions.
+	retvals := make([]interface{}, 0)
+	for _, v := range vals {
+		if hasSubKeys(v, subKeyMap) {
+			retvals = append(retvals, v)
+		}
+	}
+	return retvals, nil
+}
+
+func valuesForArray(keys []*key, m Map) ([]interface{}, error) {
+	var tmppath string
+	var haveFirst bool
+	var vals []interface{}
+	var verr error
+
+	lastkey := len(keys) - 1
+	for i := 0; i <= lastkey; i++ {
+		if !haveFirst {
+			tmppath = keys[i].name
+			haveFirst = true
+		} else {
+			tmppath += "." + keys[i].name
+		}
+
+		// Look-ahead: explode wildcards and unindexed arrays.
+		// Need to handle un-indexed list recursively:
+		// e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]".
+		// Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ...
+		if !keys[i].isArray && i < lastkey && keys[i+1].isArray {
+			// Can't pass subkeys because we may not be at literal end of path.
+			vv, vverr := m.oldValuesForPath(tmppath)
+			if vverr != nil {
+				return nil, vverr
+			}
+			for _, v := range vv {
+				// See if we can walk the value.
+				am, ok := v.(map[string]interface{})
+				if !ok {
+					continue
+				}
+				// Work the backend.
+				nvals, nvalserr := valuesForArray(keys[i+1:], Map(am))
+				if nvalserr != nil {
+					return nil, nvalserr
+				}
+				vals = append(vals, nvals...)
+			}
+			break // have recursed the whole path - return
+		}
+
+		if keys[i].isArray || i == lastkey {
+			// Don't pass subkeys because may not be at literal end of path.
+			vals, verr = m.oldValuesForPath(tmppath)
+		} else {
+			continue
+		}
+		if verr != nil {
+			return nil, verr
+		}
+
+		if i == lastkey && !keys[i].isArray {
+			break
+		}
+
+		// Now we're looking at an array - supposedly.
+		// Is index in range of vals?
+		if len(vals) <= keys[i].position {
+			vals = nil
+			break
+		}
+
+		// Return the array member of interest, if at end of path.
+		if i == lastkey {
+			vals = vals[keys[i].position:(keys[i].position + 1)]
+			break
+		}
+
+		// Extract the array member of interest.
+		am := vals[keys[i].position:(keys[i].position + 1)]
+
+		// must be a map[string]interface{} value so we can keep walking the path
+		amm, ok := am[0].(map[string]interface{})
+		if !ok {
+			vals = nil
+			break
+		}
+
+		m = Map(amm)
+		haveFirst = false
+	}
+
+	return vals, nil
+}
+
+type key struct {
+	name     string
+	isArray  bool
+	position int
+}
+
+func parsePath(s string) ([]*key, error) {
+	keys := strings.Split(s, ".")
+
+	ret := make([]*key, 0)
+
+	for i := 0; i < len(keys); i++ {
+		if keys[i] == "" {
+			continue
+		}
+
+		newkey := new(key)
+		if strings.Index(keys[i], "[") < 0 {
+			newkey.name = keys[i]
+			ret = append(ret, newkey)
+			continue
+		}
+
+		p := strings.Split(keys[i], "[")
+		newkey.name = p[0]
+		p = strings.Split(p[1], "]")
+		if p[0] == "" { // no right bracket
+			return nil, fmt.Errorf("no right bracket on key index: %s", keys[i])
+		}
+		// convert p[0] to a int value
+		pos, nerr := strconv.ParseInt(p[0], 10, 32)
+		if nerr != nil {
+			return nil, fmt.Errorf("cannot convert index to int value: %s", p[0])
+		}
+		newkey.position = int(pos)
+		newkey.isArray = true
+		ret = append(ret, newkey)
+	}
+
+	return ret, nil
+}
+
+// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'.
+func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys := strings.Split(path, ".")
+	if keys[len(keys)-1] == "" {
+		keys = keys[:len(keys)-1]
+	}
+	ivals := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap)
+	return ivals[:cnt], nil
+}
+
+func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) {
+	lenKeys := len(keys)
+
+	// load 'm' values into 'ret'
+	// expand any lists
+	if lenKeys == 0 {
+		switch m.(type) {
+		case map[string]interface{}:
+			if subkeys != nil {
+				if ok := hasSubKeys(m, subkeys); !ok {
+					return
+				}
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		case []interface{}:
+			for i, v := range m.([]interface{}) {
+				if subkeys != nil {
+					if ok := hasSubKeys(v, subkeys); !ok {
+						continue // only load list members with subkeys
+					}
+				}
+				*ret = append(*ret, (m.([]interface{}))[i])
+				*cnt++
+			}
+		default:
+			if subkeys != nil {
+				return // must be map[string]interface{} if there are subkeys
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		}
+		return
+	}
+
+	// key of interest
+	key := keys[0]
+	switch key {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				default:
+					// valuesForKeyPath(ret, v, keys[1:], subkeys)
+					valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[key]; ok {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[key]; ok {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				}
+			}
+		}
+	}
+}
+
+// hasSubKeys() - interface{} equality works for string, float64, bool
+// 'v' must be a map[string]interface{} value to have subkeys
+// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard.
+func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool {
+	if len(subkeys) == 0 {
+		return true
+	}
+
+	switch v.(type) {
+	case map[string]interface{}:
+		// do all subKey name:value pairs match?
+		mv := v.(map[string]interface{})
+		for skey, sval := range subkeys {
+			isNotKey := false
+			if skey[:1] == "!" { // a NOT-key
+				skey = skey[1:]
+				isNotKey = true
+			}
+			vv, ok := mv[skey]
+			if !ok { // key doesn't exist
+				if isNotKey { // key not there, but that's what we want
+					if kv, ok := sval.(string); ok && kv == "*" {
+						continue
+					}
+				}
+				return false
+			}
+			// wildcard check
+			if kv, ok := sval.(string); ok && kv == "*" {
+				if isNotKey { // key is there, and we don't want it
+					return false
+				}
+				continue
+			}
+			switch sval.(type) {
+			case string:
+				if s, ok := vv.(string); ok && s == sval.(string) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case bool:
+				if b, ok := vv.(bool); ok && b == sval.(bool) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case float64:
+				if f, ok := vv.(float64); ok && f == sval.(float64) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			}
+			// key there but didn't match subkey value
+			if isNotKey { // that's what we want
+				continue
+			}
+			return false
+		}
+		// all subkeys matched
+		return true
+	}
+
+	// not a map[string]interface{} value, can't have subkeys
+	return false
+}
+
+// Generate map of key:value entries as map[string]string.
+//	'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'.
+//	If len(kv) == 0, the return is (nil, nil).
+func getSubKeyMap(kv ...string) (map[string]interface{}, error) {
+	if len(kv) == 0 {
+		return nil, nil
+	}
+	m := make(map[string]interface{}, 0)
+	for _, v := range kv {
+		vv := strings.Split(v, fieldSep)
+		switch len(vv) {
+		case 2:
+			m[vv[0]] = interface{}(vv[1])
+		case 3:
+			switch vv[2] {
+			case "string", "char", "text":
+				m[vv[0]] = interface{}(vv[1])
+			case "bool", "boolean":
+				// ParseBool treats "1"==true & "0"==false
+				b, err := strconv.ParseBool(vv[1])
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(b)
+			case "float", "float64", "num", "number", "numeric":
+				f, err := strconv.ParseFloat(vv[1], 64)
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(f)
+			default:
+				return nil, fmt.Errorf("unknown subkey conversion spec: %s", v)
+			}
+		default:
+			return nil, fmt.Errorf("unknown subkey spec: %s", v)
+		}
+	}
+	return m, nil
+}
+
+// -------------------------------  END of valuesFor ... ----------------------------
+
+// ----------------------- locate where a key value is in the tree -------------------
+
+//----------------------------- find all paths to a key --------------------------------
+
+// PathsForKey returns all paths through Map, 'mv', (in dot-notation) that terminate with the specified key.
+// Results can be used with ValuesForPath.
+func (mv Map) PathsForKey(key string) []string {
+	m := map[string]interface{}(mv)
+	breadbasket := make(map[string]bool, 0)
+	breadcrumbs := ""
+
+	hasKeyPath(breadcrumbs, m, key, breadbasket)
+	if len(breadbasket) == 0 {
+		return nil
+	}
+
+	// unpack map keys to return
+	res := make([]string, len(breadbasket))
+	var i int
+	for k := range breadbasket {
+		res[i] = k
+		i++
+	}
+
+	return res
+}
+
+// PathForKeyShortest extracts the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'..
+// Paths are strings using dot-notation.
+func (mv Map) PathForKeyShortest(key string) string {
+	paths := mv.PathsForKey(key)
+
+	lp := len(paths)
+	if lp == 0 {
+		return ""
+	}
+	if lp == 1 {
+		return paths[0]
+	}
+
+	shortest := paths[0]
+	shortestLen := len(strings.Split(shortest, "."))
+
+	for i := 1; i < len(paths); i++ {
+		vlen := len(strings.Split(paths[i], "."))
+		if vlen < shortestLen {
+			shortest = paths[i]
+			shortestLen = vlen
+		}
+	}
+
+	return shortest
+}
+
+// hasKeyPath - if the map 'key' exists append it to KeyPath.path and increment KeyPath.depth
+// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'.
+func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		if _, ok := vv[key]; ok {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = key
+			} else {
+				nbc = crumbs + "." + key
+			}
+			basket[nbc] = true
+		}
+		// walk on down the path, key could occur again at deeper node
+		for k, v := range vv {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = k
+			} else {
+				nbc = crumbs + "." + k
+			}
+			hasKeyPath(nbc, v, key, basket)
+		}
+	case []interface{}:
+		// crumb-trail doesn't change, pass it on
+		for _, v := range iv.([]interface{}) {
+			hasKeyPath(crumbs, v, key, basket)
+		}
+	}
+}
+
+var PathNotExistError = errors.New("Path does not exist")
+
+// ValueForPath wraps ValuesFor Path and returns the first value returned.
+// If no value is found it returns 'nil' and PathNotExistError.
+func (mv Map) ValueForPath(path string) (interface{}, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, PathNotExistError
+	}
+	return vals[0], nil
+}
+
+// ValuesForPathString returns the first found value for the path as a string.
+func (mv Map) ValueForPathString(path string) (string, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return "", err
+	}
+	if len(vals) == 0 {
+		return "", errors.New("ValueForPath: path not found")
+	}
+	val := vals[0]
+	return fmt.Sprintf("%v", val), nil
+}
+
+// ValueOrEmptyForPathString returns the first found value for the path as a string.
+// If the path is not found then it returns an empty string.
+func (mv Map) ValueOrEmptyForPathString(path string) string {
+	str, _ := mv.ValueForPathString(path)
+	return str
+}

+ 112 - 0
vendor/github.com/clbanning/mxj/v2/leafnode.go

@@ -0,0 +1,112 @@
+package mxj
+
+// leafnode.go - return leaf nodes with paths and values for the Map
+// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw
+
+import (
+	"strconv"
+	"strings"
+)
+
+const (
+	NoAttributes = true // suppress LeafNode values that are attributes
+)
+
+// LeafNode - a terminal path value in a Map.
+// For XML Map values it represents an attribute or simple element value  - of type
+// string unless Map was created using Cast flag. For JSON Map values it represents
+// a string, numeric, boolean, or null value.
+type LeafNode struct {
+	Path  string      // a dot-notation representation of the path with array subscripting
+	Value interface{} // the value at the path termination
+}
+
+// LeafNodes - returns an array of all LeafNode values for the Map.
+// The option no_attr argument suppresses attribute values (keys with prepended hyphen, '-')
+// as well as the "#text" key for the associated simple element value.
+//
+// PrependAttrWithHyphen(false) will result in attributes having .attr-name as 
+// terminal node in 'path' while the path for the element value, itself, will be 
+// the base path w/o "#text". 
+//
+// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax
+// rather than "[N]" syntax.
+func (mv Map) LeafNodes(no_attr ...bool) []LeafNode {
+	var a bool
+	if len(no_attr) == 1 {
+		a = no_attr[0]
+	}
+
+	l := make([]LeafNode, 0)
+	getLeafNodes("", "", map[string]interface{}(mv), &l, a)
+	return l
+}
+
+func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) {
+	// if stripping attributes, then also strip "#text" key
+	if !noattr || node != textK {
+		if path != "" && node[:1] != "[" {
+			path += "."
+		}
+		path += node
+	}
+	switch mv.(type) {
+	case map[string]interface{}:
+		for k, v := range mv.(map[string]interface{}) {
+			// if noattr && k[:1] == "-" {
+			if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
+				continue
+			}
+			getLeafNodes(path, k, v, l, noattr)
+		}
+	case []interface{}:
+		for i, v := range mv.([]interface{}) {
+			if useDotNotation {
+				getLeafNodes(path, strconv.Itoa(i), v, l, noattr)
+			} else {
+				getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr)
+			}
+		}
+	default:
+		// can't walk any further, so create leaf
+		n := LeafNode{path, mv}
+		*l = append(*l, n)
+	}
+}
+
+// LeafPaths - all paths that terminate in LeafNode values.
+func (mv Map) LeafPaths(no_attr ...bool) []string {
+	ln := mv.LeafNodes()
+	ss := make([]string, len(ln))
+	for i := 0; i < len(ln); i++ {
+		ss[i] = ln[i].Path
+	}
+	return ss
+}
+
+// LeafValues - all terminal values in the Map.
+func (mv Map) LeafValues(no_attr ...bool) []interface{} {
+	ln := mv.LeafNodes()
+	vv := make([]interface{}, len(ln))
+	for i := 0; i < len(ln); i++ {
+		vv[i] = ln[i].Value
+	}
+	return vv
+}
+
+// ====================== utilities ======================
+
+// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I
+var useDotNotation bool
+
+// LeafUseDotNotation sets a flag that list members in LeafNode paths
+// should be identified using ".N" syntax rather than the default "[N]"
+// syntax.  Calling LeafUseDotNotation with no arguments toggles the 
+// flag on/off; otherwise, the argument sets the flag value 'true'/'false'.
+func LeafUseDotNotation(b ...bool) {
+	if len(b) == 0 {
+		useDotNotation = !useDotNotation
+		return
+	}
+	useDotNotation = b[0]
+}

+ 86 - 0
vendor/github.com/clbanning/mxj/v2/misc.go

@@ -0,0 +1,86 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// misc.go - mimic functions (+others) called out in:
+//          https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ
+// Primarily these methods let you retrieve XML structure information.
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Return the root element of the Map. If there is not a single key in Map,
+// then an error is returned.
+func (mv Map) Root() (string, error) {
+	mm := map[string]interface{}(mv)
+	if len(mm) != 1 {
+		return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm))
+	}
+	for k, _ := range mm {
+		return k, nil
+	}
+	return "", nil
+}
+
+// If the path is an element with sub-elements, return a list of the sub-element
+// keys.  (The list is alphabetically sorted.)  NOTE: Map keys that are prefixed with
+// '-', a hyphen, are considered attributes; see m.Attributes(path).
+func (mv Map) Elements(path string) ([]string, error) {
+	e, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch e.(type) {
+	case map[string]interface{}:
+		ee := e.(map[string]interface{})
+		elems := make([]string, len(ee))
+		var i int
+		for k, _ := range ee {
+			if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
+				continue // skip attributes
+			}
+			elems[i] = k
+			i++
+		}
+		elems = elems[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(elems)
+		return elems, nil
+	}
+	return nil, fmt.Errorf("no elements for path: %s", path)
+}
+
+// If the path is an element with attributes, return a list of the attribute
+// keys.  (The list is alphabetically sorted.)  NOTE: Map keys that are not prefixed with
+// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the
+// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then
+// there are no identifiable attributes.
+func (mv Map) Attributes(path string) ([]string, error) {
+	a, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch a.(type) {
+	case map[string]interface{}:
+		aa := a.(map[string]interface{})
+		attrs := make([]string, len(aa))
+		var i int
+		for k, _ := range aa {
+			if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 {
+				continue // skip non-attributes
+			}
+			attrs[i] = k[len(attrPrefix):]
+			i++
+		}
+		attrs = attrs[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(attrs)
+		return attrs, nil
+	}
+	return nil, fmt.Errorf("no attributes for path: %s", path)
+}

+ 128 - 0
vendor/github.com/clbanning/mxj/v2/mxj.go

@@ -0,0 +1,128 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+)
+
+const (
+	Cast         = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast)
+	SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding)
+)
+
+type Map map[string]interface{}
+
+// Allocate a Map.
+func New() Map {
+	m := make(map[string]interface{}, 0)
+	return m
+}
+
+// Cast a Map to map[string]interface{}
+func (mv Map) Old() map[string]interface{} {
+	return mv
+}
+
+// Return a copy of mv as a newly allocated Map.  If the Map only contains string,
+// numeric, map[string]interface{}, and []interface{} values, then it can be thought
+// of as a "deep copy."  Copying a structure (or structure reference) value is subject
+// to the noted restrictions.
+//	NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags
+//	      then only public fields of the structure are in the new Map - and with
+//	      keys that conform to any encoding tag instructions. The structure itself will
+//	      be represented as a map[string]interface{} value.
+func (mv Map) Copy() (Map, error) {
+	// this is the poor-man's deep copy
+	// not efficient, but it works
+	j, jerr := mv.Json()
+	// must handle, we don't know how mv got built
+	if jerr != nil {
+		return nil, jerr
+	}
+	return NewMapJson(j)
+}
+
+// --------------- StringIndent ... from x2j.WriteMap -------------
+
+// Pretty print a Map.
+func (mv Map) StringIndent(offset ...int) string {
+	return writeMap(map[string]interface{}(mv), true, true, offset...)
+}
+
+// Pretty print a Map without the value type information - just key:value entries.
+func (mv Map) StringIndentNoTypeInfo(offset ...int) string {
+	return writeMap(map[string]interface{}(mv), false, true, offset...)
+}
+
+// writeMap - dumps the map[string]interface{} for examination.
+// 'typeInfo' causes value type to be printed.
+//	'offset' is initial indentation count; typically: Write(m).
+func writeMap(m interface{}, typeInfo, root bool, offset ...int) string {
+	var indent int
+	if len(offset) == 1 {
+		indent = offset[0]
+	}
+
+	var s string
+	switch m.(type) {
+	case []interface{}:
+		if typeInfo {
+			s += "[[]interface{}]"
+		}
+		for _, v := range m.([]interface{}) {
+			s += "\n"
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += writeMap(v, typeInfo, false, indent+1)
+		}
+	case map[string]interface{}:
+		list := make([][2]string, len(m.(map[string]interface{})))
+		var n int
+		for k, v := range m.(map[string]interface{}) {
+			list[n][0] = k
+			list[n][1] = writeMap(v, typeInfo, false, indent+1)
+			n++
+		}
+		sort.Sort(mapList(list))
+		for _, v := range list {
+			if root {
+				root = false
+			} else {
+				s += "\n"
+			}
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += v[0] + " : " + v[1]
+		}
+	default:
+		if typeInfo {
+			s += fmt.Sprintf("[%T] %+v", m, m)
+		} else {
+			s += fmt.Sprintf("%+v", m)
+		}
+	}
+	return s
+}
+
+// ======================== utility ===============
+
+type mapList [][2]string
+
+func (ml mapList) Len() int {
+	return len(ml)
+}
+
+func (ml mapList) Swap(i, j int) {
+	ml[i], ml[j] = ml[j], ml[i]
+}
+
+func (ml mapList) Less(i, j int) bool {
+	return ml[i][0] <= ml[j][0]
+}

+ 184 - 0
vendor/github.com/clbanning/mxj/v2/newmap.go

@@ -0,0 +1,184 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014, 2018 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// remap.go - build a new Map from the current Map based on keyOld:keyNew mappings
+//            keys can use dot-notation, keyOld can use wildcard, '*'
+//
+// Computational strategy -
+// Using the key path - []string - traverse a new map[string]interface{} and
+// insert the oldVal as the newVal when we arrive at the end of the path.
+// If the type at the end is nil, then that is newVal
+// If the type at the end is a singleton (string, float64, bool) an array is created.
+// If the type at the end is an array, newVal is just appended.
+// If the type at the end is a map, it is inserted if possible or the map value
+//    is converted into an array if necessary.
+
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// (Map)NewMap - create a new Map from data in the current Map.
+//	'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey'
+//	should be the value for 'newKey' in the returned Map.
+//		- 'oldKey' supports dot-notation as described for (Map)ValuesForPath()
+//		- 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays
+//		- "oldKey" is shorthand for the keypair value "oldKey:oldKey"
+//		- "oldKey:" and ":newKey" are invalid keypair values
+//		- if 'oldKey' does not exist in the current Map, it is not written to the new Map.
+//		  "null" is not supported unless it is the current Map.
+//		- see newmap_test.go for several syntax examples
+// 	- mv.NewMap() == mxj.New()
+//
+//	NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc.
+func (mv Map) NewMap(keypairs ...string) (Map, error) {
+	n := make(map[string]interface{}, 0)
+	if len(keypairs) == 0 {
+		return n, nil
+	}
+
+	// loop through the pairs
+	var oldKey, newKey string
+	var path []string
+	for _, v := range keypairs {
+		if len(v) == 0 {
+			continue // just skip over empty keypair arguments
+		}
+
+		// initialize oldKey, newKey and check
+		vv := strings.Split(v, ":")
+		if len(vv) > 2 {
+			return n, errors.New("oldKey:newKey keypair value not valid - " + v)
+		}
+		if len(vv) == 1 {
+			oldKey, newKey = vv[0], vv[0]
+		} else {
+			oldKey, newKey = vv[0], vv[1]
+		}
+		// FIX: strings.TrimSpace is a pure function - the original calls
+		// discarded the return values (staticcheck SA4017), so surrounding
+		// whitespace was never actually stripped from the keys.
+		oldKey = strings.TrimSpace(oldKey)
+		newKey = strings.TrimSpace(newKey)
+		if strings.Contains(newKey, "*") {
+			return n, errors.New("newKey value cannot contain wildcard character - " + v)
+		}
+		if strings.Contains(newKey, "[") {
+			return n, errors.New("newKey value cannot contain indexed arrays - " + v)
+		}
+		if oldKey == "" || newKey == "" {
+			return n, errors.New("oldKey or newKey is not specified - " + v)
+		}
+
+		// get oldKey value
+		oldVal, err := mv.ValuesForPath(oldKey)
+		if err != nil {
+			return n, err
+		}
+		if len(oldVal) == 0 {
+			continue // oldKey has no value, may not exist in mv
+		}
+
+		// break down path
+		path = strings.Split(newKey, ".")
+		if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec
+			path = path[:len(path)-1]
+		}
+
+		addNewVal(&n, path, oldVal)
+	}
+
+	return n, nil
+}
+
+// navigate 'n' to end of path and add val
+// addNewVal inserts 'val' into map 'n' at the location given by 'path'
+// (a pre-split dot-notation key list), creating intermediate maps as
+// needed.  Collision handling at the leaf: nil -> set directly,
+// existing array -> append, anything else -> promote to an array.
+func addNewVal(n *map[string]interface{}, path []string, val []interface{}) {
+	// newVal - either singleton or array
+	var newVal interface{}
+	if len(val) == 1 {
+		newVal = val[0] // is type interface{}
+	} else {
+		newVal = interface{}(val)
+	}
+
+	// walk to the position of interest, create it if necessary
+	m := (*n)           // initialize map walker
+	var k string        // key for m
+	lp := len(path) - 1 // when to stop looking
+	for i := 0; i < len(path); i++ {
+		k = path[i]
+		if i == lp {
+			break
+		}
+		var nm map[string]interface{} // holds position of next-map
+		switch m[k].(type) {
+		case nil: // need a map for next node in path, so go there
+			nm = make(map[string]interface{}, 0)
+			m[k] = interface{}(nm)
+			m = m[k].(map[string]interface{})
+		case map[string]interface{}:
+			// OK - got somewhere to walk to, go there
+			m = m[k].(map[string]interface{})
+		case []interface{}:
+			// add a map and nm points to new map unless there's already
+			// a map in the array, then nm points there
+			// The placement of the next value in the array is dependent
+			// on the sequence of members - could land on a map or a nil
+			// value first.  TODO: how to test this.
+			a := make([]interface{}, 0)
+			var foundmap bool
+			for _, vv := range m[k].([]interface{}) {
+				switch vv.(type) {
+				case nil: // doesn't appear that this occurs, need a test case
+					if foundmap { // use the first one in array
+						a = append(a, vv)
+						continue
+					}
+					nm = make(map[string]interface{}, 0)
+					a = append(a, interface{}(nm))
+					foundmap = true
+				case map[string]interface{}:
+					if foundmap { // use the first one in array
+						a = append(a, vv)
+						continue
+					}
+					nm = vv.(map[string]interface{})
+					a = append(a, vv)
+					foundmap = true
+				default:
+					a = append(a, vv)
+				}
+			}
+			// no map found in array
+			if !foundmap {
+				nm = make(map[string]interface{}, 0)
+				a = append(a, interface{}(nm))
+			}
+			m[k] = interface{}(a) // must insert in map
+			m = nm
+		default: // it's a string, float, bool, etc.
+			// singleton value: promote it into an array alongside a new map
+			aa := make([]interface{}, 0)
+			nm = make(map[string]interface{}, 0)
+			aa = append(aa, m[k], nm)
+			m[k] = interface{}(aa)
+			m = nm
+		}
+	}
+
+	// value is nil, array or a singleton of some kind
+	// initially m.(type) == map[string]interface{}
+	v := m[k]
+	switch v.(type) {
+	case nil: // initialized
+		m[k] = newVal
+	case []interface{}:
+		a := m[k].([]interface{})
+		a = append(a, newVal)
+		m[k] = interface{}(a)
+	default: // v exists:string, float64, bool, map[string]interface, etc.
+		a := make([]interface{}, 0)
+		a = append(a, v, newVal)
+		m[k] = interface{}(a)
+	}
+}

+ 209 - 0
vendor/github.com/clbanning/mxj/v2/readme.md

@@ -0,0 +1,209 @@
+<h2>mxj - to/from maps, XML and JSON</h2>
+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use mxj/x2j and mxj/j2x packages.
+
+<h4>Installation</h4>
+Using go.mod:
+<pre>
+go get github.com/clbanning/mxj/v2@v2.7	
+</pre>
+
+<pre>
+import "github.com/clbanning/mxj/v2"
+</pre>
+
+... or just vendor the package.
+
+<h4>Related Packages</h4>
+
+https://github.com/clbanning/checkxml provides functions for validating XML data.
+
+<h4>Refactor Encoder - 2020.05.01</h4>
+Issue #70 highlighted that encoding large maps does not scale well, since the original logic used string appends operations. Using bytes.Buffer results in linear scaling for very large XML docs. (Metrics based on MacBook Pro i7 w/ 16 GB.)
+
+	Nodes      m.XML() time
+	54809       12.53708ms
+	109780      32.403183ms
+	164678      59.826412ms
+	482598     109.358007ms
+
+<h4>Refactor Decoder - 2015.11.15</h4>
+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant.  I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that could be deployed on a Raspberry Pi.  Now the decoder is comparable to the stdlib JSON-to-map[string]interface{} decoder in terms of its additional processing overhead relative to decoding to a structure value.  As shown by:
+
+	BenchmarkNewMapXml-4         	  100000	     18043 ns/op
+	BenchmarkNewStructXml-4      	  100000	     14892 ns/op
+	BenchmarkNewMapJson-4        	  300000	      4633 ns/op
+	BenchmarkNewStructJson-4     	  300000	      3427 ns/op
+	BenchmarkNewMapXmlBooks-4    	   20000	     82850 ns/op
+	BenchmarkNewStructXmlBooks-4 	   20000	     67822 ns/op
+	BenchmarkNewMapJsonBooks-4   	  100000	     17222 ns/op
+	BenchmarkNewStructJsonBooks-4	  100000	     15309 ns/op
+
+<h4>Notices</h4>
+
+	2022.11.28: v2.7 - add SetGlobalKeyMapPrefix to change default prefix, '#', for default keys
+	2022.11.20: v2.6 - add NewMapFormattedXmlSeq for XML docs formatted with whitespace character
+	2021.02.02: v2.5 - add XmlCheckIsValid toggle to force checking that the encoded XML is valid
+	2020.12.14: v2.4 - add XMLEscapeCharsDecoder to preserve XML escaped characters in Map values
+	2020.10.28: v2.3 - add TrimWhiteSpace option
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>]
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq()
+	            and mv.XmlSeq() / mv.XmlSeqIndent().
+	2015-05-20: New: mv.StringIndentNoTypeInfo().
+	            Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+	            mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+	2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+	            (NOTE: PreserveXmlList() is similar and will be here soon.)
+	2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+	2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.
+
+<h4>Basic Unmarshal XML to map[string]interface{}</h4>
+<pre>type Map map[string]interface{}</pre>
+
+Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v':
+<pre>mv := Map(v)</pre>
+
+Unmarshal / marshal XML as a `Map` value, 'mv':
+<pre>mv, err := NewMapXml(xmlValue) // unmarshal
+xmlValue, err := mv.Xml()      // marshal</pre>
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'mv':
+<pre>mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded</pre>
+
+Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`):
+<pre>err := mv.XmlWriter(xmlWriter)
+raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter</pre>
+   
+Also, for prettified output:
+<pre>xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)</pre>
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+<pre>err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))</pre>
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+<pre>mv, err := NewMapStruct(structVal)
+err := mv.Struct(structPointer)</pre>
+
+<h4>Extract / modify Map values</h4>
+To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv', then:
+<pre>paths := mv.PathsForKey(key)
+path := mv.PathForKeyShortest(key)
+values, err := mv.ValuesForKey(key, subkeys)
+values, err := mv.ValuesForPath(path, subkeys)
+count, err := mv.UpdateValuesForPath(newVal, path, subkeys)</pre>
+
+Get everything at once, irrespective of path depth:
+<pre>leafnodes := mv.LeafNodes()
+leafvalues := mv.LeafValues()</pre>
+
+A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML
+or JSON. (Note: keys can use dot-notation.)
+<pre>newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+newXml, err := newMap.Xml()   // for example
+newJson, err := newMap.Json() // ditto</pre>
+
+<h4>Usage</h4>
+
+The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj).
+
+Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions.
+
+<h4>XML parsing conventions</h4>
+
+Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved: 
+      - `<ns:key>something</ns.key>` parses to `map["ns:key"]interface{}{"something"}`
+      - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}`
+
+Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+<h4>XML encoding conventions</h4>
+
+   - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+     NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+           which, then, encode in JSON as `"tag":""` values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+<h4>Running "go test"</h4>
+
+Because there are no guarantees on the sequence map elements are retrieved, the tests have been 
+written for visual verification in most cases.  One advantage is that you can easily use the 
+output from running "go test" as examples of calling the various functions and methods.
+
+<h4>Motivation</h4>
+
+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values.  This is easily done using `json.Unmarshal` from the
+standard Go libraries.  Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages and parses it into a `map[string]interface{}` value and then reuse
+all the JSON-based code.  The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values.  As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary value.  Thus, everything was refactored into the mxj package.
+

+ 37 - 0
vendor/github.com/clbanning/mxj/v2/remove.go

@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Remove deletes the last key of the dot-notation 'path' from the Map.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+// remove locates the map that holds the final key in 'path' and deletes
+// that key from it.  Returns an error if the path cannot be resolved.
+func remove(m interface{}, path string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	lastKey := lastKey(path)
+	delete(val, lastKey)
+
+	return nil
+}
+
+// returns the last key of the path.
+// lastKey("a.b.c") would return "c"
+func lastKey(path string) string {
+	keys := strings.Split(path, ".")
+	key := keys[len(keys)-1]
+	return key
+}
+
+// returns the path without the last key
+// parentPath("a.b.c") would return "a.b"
+func parentPath(path string) string {
+	keys := strings.Split(path, ".")
+	parentPath := strings.Join(keys[0:len(keys)-1], ".")
+	return parentPath
+}

+ 61 - 0
vendor/github.com/clbanning/mxj/v2/rename.go

@@ -0,0 +1,61 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps.
+// It doesn't work for cases when the key is in a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	var v bool
+	var err error
+	// the old path must exist ...
+	if v, err = mv.Exists(path); err == nil && !v {
+		return errors.New("RenameKey: path not found: " + path)
+	} else if err != nil {
+		return err
+	}
+	// ... and the new name must not collide with an existing sibling key
+	if v, err = mv.Exists(parentPath(path) + "." + newName); err == nil && v {
+		return errors.New("RenameKey: key already exists: " + newName)
+	} else if err != nil {
+		return err
+	}
+
+	m := map[string]interface{}(mv)
+	return renameKey(m, path, newName)
+}
+
+// renameKey copies the value of the path's last key to 'newName' in the
+// same parent map, then deletes the old key.
+func renameKey(m interface{}, path string, newName string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	oldName := lastKey(path)
+	val[newName] = val[oldName]
+	delete(val, oldName)
+
+	return nil
+}
+
+// returns a value which contains a last key in the path
+// For example: prevValueByPath("a.b.c", {a{b{c: 3}}}) returns {c: 3}
+// NOTE(review): only map[string]interface{} nodes are traversed; if the
+// path passes through a list ([]interface{}) the lookup fails with an error.
+func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) {
+	keys := strings.Split(path, ".")
+
+	switch mValue := m.(type) {
+	case map[string]interface{}:
+		for key, value := range mValue {
+			if key == keys[0] {
+				if len(keys) == 1 {
+					return mValue, nil
+				} else {
+					// keep looking for the full path to the key
+					return prevValueByPath(value, strings.Join(keys[1:], "."))
+				}
+			}
+		}
+	}
+	return nil, errors.New("prevValueByPath: didn't find path – " + path)
+}

+ 26 - 0
vendor/github.com/clbanning/mxj/v2/set.go

@@ -0,0 +1,26 @@
+package mxj
+
+import (
+	"strings"
+)
+
+// SetValueForPath sets the value for the dot-notation 'path' in the Map.
+// If the parent of the path does not resolve to a value the request is
+// silently ignored (nil error is returned).
+func (mv Map) SetValueForPath(value interface{}, path string) error {
+	pathAry := strings.Split(path, ".")
+	parentPathAry := pathAry[0 : len(pathAry)-1]
+	parentPath := strings.Join(parentPathAry, ".")
+
+	val, err := mv.ValueForPath(parentPath)
+	if err != nil {
+		return err
+	}
+	if val == nil {
+		return nil // we just ignore the request if there's no val
+	}
+
+	key := pathAry[len(pathAry)-1]
+	// NOTE(review): unchecked type assertion - this panics if the parent
+	// value is not a map[string]interface{} (e.g. a list or a scalar);
+	// confirm callers guarantee the parent path resolves to a map.
+	cVal := val.(map[string]interface{})
+	cVal[key] = value
+
+	return nil
+}

+ 20 - 0
vendor/github.com/clbanning/mxj/v2/setfieldsep.go

@@ -0,0 +1,20 @@
+package mxj
+
+// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862
+// fieldSep is the current key:value separator; see SetFieldSeparator.
+var fieldSep string = ":"
+
+// SetFieldSeparator changes the default field separator, ":", for the
+// newVal argument in mv.UpdateValuesForPath and the optional 'subkey' arguments
+// in mv.ValuesForKey and mv.ValuesForPath.
+//
+// E.g., if the newVal value is "http://blah/blah", setting the field separator
+// to "|" will allow the newVal specification, "<key>|http://blah/blah" to parse
+// properly.  If called with no argument or an empty string value, the field
+// separator is set to the default, ":".
+func SetFieldSeparator(s ...string) {
+	if len(s) == 0 || s[0] == "" {
+		fieldSep = ":" // the default
+		return
+	}
+	fieldSep = s[0]
+}

+ 29 - 0
vendor/github.com/clbanning/mxj/v2/songtext.xml

@@ -0,0 +1,29 @@
+<msg mtype="alert" mpriority="1">
+	<text>help me!</text>
+	<song title="A Long Time" author="Mayer Hawthorne">
+		<verses>
+			<verse name="verse 1" no="1">
+				<line no="1">Henry was a renegade</line>
+				<line no="2">Didn't like to play it safe</line>
+				<line no="3">One component at a time</line>
+				<line no="4">There's got to be a better way</line>
+				<line no="5">Oh, people came from miles around</line>
+				<line no="6">Searching for a steady job</line>
+				<line no="7">Welcome to the Motor Town</line>
+				<line no="8">Booming like an atom bomb</line>
+			</verse>
+			<verse name="verse 2" no="2">
+				<line no="1">Oh, Henry was the end of the story</line>
+				<line no="2">Then everything went wrong</line>
+				<line no="3">And we'll return it to its former glory</line>
+				<line no="4">But it just takes so long</line>
+			</verse>
+		</verses>
+		<chorus>
+			<line no="1">It's going to take a long time</line>
+			<line no="2">It's going to take it, but we'll make it one day</line>
+			<line no="3">It's going to take a long time</line>
+			<line no="4">It's going to take it, but we'll make it one day</line>
+		</chorus>
+	</song>
+</msg>

+ 30 - 0
vendor/github.com/clbanning/mxj/v2/strict.go

@@ -0,0 +1,30 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// strict.go actually addresses setting xml.Decoder attribute
+// values.  This'll let you parse non-standard XML.
+
+package mxj
+
+import (
+	"encoding/xml"
+)
+
+// CustomDecoder can be used to specify xml.Decoder attribute
+// values, e.g., Strict:false, to be used.  By default CustomDecoder
+// is nil.  If CustomDecoder != nil, then mxj.XmlCharsetReader variable is
+// ignored and must be set as part of the CustomDecoder value, if needed.
+//	Usage:
+//		mxj.CustomDecoder = &xml.Decoder{Strict:false}
+var CustomDecoder *xml.Decoder
+
+// useCustomDecoder copies the public attributes from CustomDecoder onto 'd'.
+// Callers must check CustomDecoder != nil before calling.
+func useCustomDecoder(d *xml.Decoder) {
+	d.Strict = CustomDecoder.Strict
+	d.AutoClose = CustomDecoder.AutoClose
+	d.Entity = CustomDecoder.Entity
+	d.CharsetReader = CustomDecoder.CharsetReader
+	d.DefaultSpace = CustomDecoder.DefaultSpace
+}
+

+ 54 - 0
vendor/github.com/clbanning/mxj/v2/struct.go

@@ -0,0 +1,54 @@
+// Copyright 2012-2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+
+	// "github.com/fatih/structs"
+)
+
+// Create a new Map value from a structure.  Error returned if argument is not a structure.
+// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map
+// for handling of "structs" tags.
+
+// DEPRECATED - import github.com/fatih/structs and cast result of structs.Map to mxj.Map.
+//	import "github.com/fatih/structs"
+//	...
+//	   sm, err := structs.Map(<some struct>)
+//	   if err != nil {
+//	      // handle error
+//	   }
+//	   m := mxj.Map(sm)
+// Alternatively uncomment the old source and import in struct.go.
+func NewMapStruct(structVal interface{}) (Map, error) {
+	// always errors: implementation was removed to drop the structs dependency
+	return nil, errors.New("deprecated - see package documentation")
+	/*
+		if !structs.IsStruct(structVal) {
+			return nil, errors.New("NewMapStruct() error: argument is not type Struct")
+		}
+		return structs.Map(structVal), nil
+	*/
+}
+
+// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned
+// if argument is not a pointer or if json.Unmarshal returns an error.
+//	json.Unmarshal structure encoding rules are followed to encode public structure fields.
+func (mv Map) Struct(structPtr interface{}) error {
+	// should check that we're getting a pointer.
+	if reflect.ValueOf(structPtr).Kind() != reflect.Ptr {
+		return errors.New("mv.Struct() error: argument is not type Ptr")
+	}
+
+	// round-trip through JSON: Map -> []byte -> struct
+	m := map[string]interface{}(mv)
+	j, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(j, structPtr)
+}

+ 258 - 0
vendor/github.com/clbanning/mxj/v2/updatevalues.go

@@ -0,0 +1,258 @@
+// Copyright 2012-2014, 2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// updatevalues.go - modify a value based on path and possibly sub-keys
+// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values.
+
+package mxj
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Update value based on path and possible sub-key values.
+// A count of the number of values changed and any error are returned.
+// If the count == 0, then no path (and subkeys) matched.
+//	'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified
+//	             or a string value "key:value[:type]" where type is "bool" or "num" to cast the value.
+//	'path' is dot-notation list of keys to traverse; last key in path can be newVal key
+//	       NOTE: 'path' spec does not currently support indexed array references.
+//	'subkeys' are "key:value[:type]" entries that must match for path node
+//             - For attributes prefix the label with the attribute prefix character, by default a
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//	              exclusion criteria - e.g., "!author:William T. Gaddis".
+//
+//	NOTES:
+//		1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value.
+//		2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key.
+//		3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to an unused symbol,
+//	      perhaps "|".
+func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) {
+	m := map[string]interface{}(mv)
+
+	// extract the subkeys
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// extract key and value from newVal
+	var key string
+	var val interface{}
+	switch newVal.(type) {
+	case map[string]interface{}, Map:
+		switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec)
+		case Map:
+			newVal = newVal.(Map).Old()
+		}
+		if len(newVal.(map[string]interface{})) != 1 {
+			return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal)
+		}
+		for key, val = range newVal.(map[string]interface{}) {
+		}
+	case string: // split it as a key:value pair
+		ss := strings.Split(newVal.(string), fieldSep)
+		n := len(ss)
+		if n < 2 || n > 3 {
+			return 0, fmt.Errorf("unknown newVal spec - %+v", newVal)
+		}
+		key = ss[0]
+		if n == 2 {
+			val = interface{}(ss[1])
+		} else if n == 3 {
+			// optional third field casts the string value: bool or float64
+			switch ss[2] {
+			case "bool", "boolean":
+				nv, err := strconv.ParseBool(ss[1])
+				if err != nil {
+					return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal)
+				}
+				val = interface{}(nv)
+			case "num", "numeric", "float", "int":
+				nv, err := strconv.ParseFloat(ss[1], 64)
+				if err != nil {
+					return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal)
+				}
+				val = interface{}(nv)
+			default:
+				return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal)
+			}
+		}
+	default:
+		return 0, fmt.Errorf("invalid newVal type - %+v", newVal)
+	}
+
+	// parse path
+	keys := strings.Split(path, ".")
+
+	var count int
+	updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count)
+
+	return count, nil
+}
+
+// navigate the path
+// updateValuesForKeyPath recursively walks 'm' along 'keys'; when a single
+// key remains it delegates to updateValue to apply the change ('cnt' is
+// incremented there for every value actually modified).
+func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) {
+	// ----- at end node: looking at possible node to get 'key' ----
+	if len(keys) == 1 {
+		updateValue(key, value, m, keys[0], subkeys, cnt)
+		return
+	}
+
+	// ----- here we are navigating the path thru the penultimate node --------
+	// key of interest is keys[0] - the next in the path
+	switch keys[0] {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				default:
+					updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[keys[0]]; ok {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[keys[0]]; ok {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				}
+			}
+		}
+	}
+}
+
+// change value if key and subkeys are present
+// updateValue applies 'key' -> 'value' at the path-end node 'm', where
+// 'keys0' is the final path segment ("*" scans all keys).  Each successful
+// replacement increments *cnt.  Subkey filters are checked via hasSubKeys.
+func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) {
+	// there are two possible options for the value of 'keys0': map[string]interface, []interface{}
+	// and 'key' is a key in the map or is a key in a map in a list.
+	switch m.(type) {
+	case map[string]interface{}: // gotta have the last key
+		if keys0 == "*" {
+			for k := range m.(map[string]interface{}) {
+				updateValue(key, value, m, k, subkeys, cnt)
+			}
+			return
+		}
+		endVal, _ := m.(map[string]interface{})[keys0]
+
+		// if newV key is the end of path, replace the value for path-end
+		// may be []interface{} - means replace just an entry w/ subkeys
+		// otherwise replace the keys0 value if subkeys are there
+		// NOTE: this will replace the subkeys, also
+		if key == keys0 {
+			switch endVal.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+				}
+			case []interface{}:
+				// without subkeys can't select list member to modify
+				// so key:value spec is it ...
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+					break
+				}
+				// with subkeys: rebuild the list, replacing only members
+				// whose subkeys match
+				nv := make([]interface{}, 0)
+				var valmodified bool
+				for _, v := range endVal.([]interface{}) {
+					// check entry subkeys
+					if hasSubKeys(v, subkeys) {
+						// replace v with value
+						nv = append(nv, value)
+						valmodified = true
+						(*cnt)++
+						continue
+					}
+					nv = append(nv, v)
+				}
+				if valmodified {
+					(m.(map[string]interface{}))[keys0] = interface{}(nv)
+				}
+			default: // anything else is a strict replacement
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+				}
+			}
+			return
+		}
+
+		// so value is for an element of endVal
+		// if endVal is a map then 'key' must be there w/ subkeys
+		// if endVal is a list then 'key' must be in a list member w/ subkeys
+		switch endVal.(type) {
+		case map[string]interface{}:
+			if !hasSubKeys(endVal, subkeys) {
+				return
+			}
+			if _, ok := (endVal.(map[string]interface{}))[key]; ok {
+				(endVal.(map[string]interface{}))[key] = value
+				(*cnt)++
+			}
+		case []interface{}: // keys0 points to a list, check subkeys
+			for _, v := range endVal.([]interface{}) {
+				// got to be a map so we can replace value for 'key'
+				vv, vok := v.(map[string]interface{})
+				if !vok {
+					continue
+				}
+				if _, ok := vv[key]; !ok {
+					continue
+				}
+				if !hasSubKeys(vv, subkeys) {
+					continue
+				}
+				vv[key] = value
+				(*cnt)++
+			}
+		}
+	case []interface{}: // key may be in a list member
+		// don't need to handle keys0 == "*"; we're looking at everything, anyway.
+		for _, v := range m.([]interface{}) {
+			// only map values - we're looking for 'key'
+			mm, ok := v.(map[string]interface{})
+			if !ok {
+				continue
+			}
+			if _, ok := mm[key]; !ok {
+				continue
+			}
+			if !hasSubKeys(mm, subkeys) {
+				continue
+			}
+			mm[key] = value
+			(*cnt)++
+		}
+	}
+
+	// return
+}

+ 1440 - 0
vendor/github.com/clbanning/mxj/v2/xml.go

@@ -0,0 +1,1440 @@
+// Copyright 2012-2016, 2018-2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xml.go - basically the core of X2j for map[string]interface{} values.
+//          NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter
+// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
var (
	textK      = "#text"
	seqK       = "#seq"
	commentK   = "#comment"
	attrK      = "#attr"
	directiveK = "#directive"
	procinstK  = "#procinst"
	targetK    = "#target"
	instK      = "#inst"
)

// Support overriding default Map keys prefix

// SetGlobalKeyMapPrefix overrides the default "#" prefix of the special
// decoding keys - "#text", "#seq", "#comment", "#attr", "#directive",
// "#procinst", "#target" and "#inst".
//
// The keys are rebuilt from their fixed suffixes rather than by replacing
// the current first character. (Replacing the first character is not safe:
// with a multi-character prefix, or across repeated calls, the previous
// prefix character can also occur later in the key and would be clobbered,
// e.g. SetGlobalKeyMapPrefix("@@") followed by SetGlobalKeyMapPrefix("!")
// used to yield "!!text" instead of "!text".)
func SetGlobalKeyMapPrefix(s string) {
	textK = s + "text"
	seqK = s + "seq"
	commentK = s + "comment"
	attrK = s + "attr"
	directiveK = s + "directive"
	procinstK = s + "procinst"
	targetK = s + "target"
	instK = s + "inst"
}
+
+// ------------------- NewMapXml & NewMapXmlReader ... -------------------------
+
+// If XmlCharsetReader != nil, it will be used to decode the XML, if required.
+// Note: if CustomDecoder != nil, then XmlCharsetReader is ignored;
+// set the CustomDecoder attribute instead.
+//   import (
+//	     charset "code.google.com/p/go-charset/charset"
+//	     github.com/clbanning/mxj
+//	 )
+//   ...
+//   mxj.XmlCharsetReader = charset.NewReader
+//   m, merr := mxj.NewMapXml(xmlValue)
+var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+// NewMapXml - convert a XML doc into a Map
+// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().)
+//	If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+//
+//	Converting XML to JSON is a simple as:
+//		...
+//		mapVal, merr := mxj.NewMapXml(xmlVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		jsonVal, jerr := mapVal.Json()
+//		if jerr != nil {
+//			// handle error
+//		}
+//
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//	   5. If DisableTrimWhiteSpace(b bool) has been called, then all values will be trimmed or not. 'true' by default.
+func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlToMap(xmlVal, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlReaderToMap(xmlReader, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value and slice with the raw XML.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	   3. The 'raw' return value may be larger than the XML text value.
+//	   4. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   5. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   6. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb) // see code at EOF
+
+	m, err := xmlReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	if err != nil {
+		return nil, b, err
+	}
+
+	return m, b, nil
+}
+
+// xmlReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// xmlToMap - convert a XML doc into map[string]interface{} value
+func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// PrependAttrWithHyphen. Prepend attribute tags with a hyphen.
+// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.)
+//	Note:
+//		If 'false', unmarshaling and marshaling is not symmetric. Attributes will be
+//		marshal'd as <attr_tag>attr</attr_tag> and may be part of a list.
+func PrependAttrWithHyphen(v bool) {
+	if v {
+		attrPrefix = "-"
+		lenAttrPrefix = len(attrPrefix)
+		return
+	}
+	attrPrefix = ""
+	lenAttrPrefix = len(attrPrefix)
+}
+
+// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com.
+var includeTagSeqNum bool
+
+// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting
+// its position when parsed. This is of limited usefulness, since list values cannot
+// be tagged with "_seq" without changing their depth in the Map.
+// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what
+// you get.
+/*
+		<Obj c="la" x="dee" h="da">
+			<IntObj id="3"/>
+			<IntObj1 id="1"/>
+			<IntObj id="2"/>
+			<StrObj>hello</StrObj>
+		</Obj>
+
+	parses as:
+
+		{
+		Obj:{
+			"-c":"la",
+			"-h":"da",
+			"-x":"dee",
+			"intObj":[
+				{
+					"-id"="3",
+					"_seq":"0" // if mxj.Cast is passed, then: "_seq":0
+				},
+				{
+					"-id"="2",
+					"_seq":"2"
+				}],
+			"intObj1":{
+				"-id":"1",
+				"_seq":"1"
+				},
+			"StrObj":{
+				"#text":"hello", // simple element value gets "#text" tag
+				"_seq":"3"
+				}
+			}
+		}
+*/
+func IncludeTagSeqNum(b ...bool) {
+	if len(b) == 0 {
+		includeTagSeqNum = !includeTagSeqNum
+	} else if len(b) == 1 {
+		includeTagSeqNum = b[0]
+	}
+}
+
+// all keys will be "lower case"
+var lowerCase bool
+
+// Coerce all tag values to keys in lower case.  This is useful if you've got sources with variable
+// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec
+// in lower case.
+//	CoerceKeysToLower() will toggle the coercion flag true|false - on|off
+//	CoerceKeysToLower(true|false) will set the coercion flag on|off
+//
+//	NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as
+//	      the associated HandleXmlReader and HandleXmlReaderRaw.
+func CoerceKeysToLower(b ...bool) {
+	if len(b) == 0 {
+		lowerCase = !lowerCase
+	} else if len(b) == 1 {
+		lowerCase = b[0]
+	}
+}
+
+// disableTrimWhiteSpace sets if the white space should be removed or not
+var disableTrimWhiteSpace bool
+var trimRunes = "\t\r\b\n "
+
+// DisableTrimWhiteSpace set if the white space should be trimmed or not. By default white space is always trimmed. If
+// no argument is provided, trim white space will be disabled.
+func DisableTrimWhiteSpace(b ...bool) {
+	if len(b) == 0 {
+		disableTrimWhiteSpace = true
+	} else {
+		disableTrimWhiteSpace = b[0]
+	}
+
+	if disableTrimWhiteSpace {
+		trimRunes = "\t\r\b\n"
+	} else {
+		trimRunes = "\t\r\b\n "
+	}
+}
+
+// 25jun16: Allow user to specify the "prefix" character for XML attribute key labels.
+// We do this by replacing '`' constant with attrPrefix var, replacing useHyphen with attrPrefix = "",
+// and adding a SetAttrPrefix(s string) function.
+
+var attrPrefix string = `-` // the default
+var lenAttrPrefix int = 1   // the default
+
// SetAttrPrefix changes the default, "-", to the specified value, s.
// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false).
// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.)
func SetAttrPrefix(s string) {
	attrPrefix = s
	// cache the length - it is consulted on every key during encode/decode
	lenAttrPrefix = len(attrPrefix)
}
+
+// 18jan17: Allows user to specify if the map keys should be in snake case instead
+// of the default hyphenated notation.
+var snakeCaseKeys bool
+
+// CoerceKeysToSnakeCase changes the default, false, to the specified value, b.
+// Note: the attribute prefix will be a hyphen, '-', or what ever string value has
+// been specified using SetAttrPrefix.
+func CoerceKeysToSnakeCase(b ...bool) {
+	if len(b) == 0 {
+		snakeCaseKeys = !snakeCaseKeys
+	} else if len(b) == 1 {
+		snakeCaseKeys = b[0]
+	}
+}
+
+// 10jan19: use of pull request #57 should be conditional - legacy code assumes
+// numeric values are float64.
+var castToInt bool
+
+// CastValuesToInt tries to coerce numeric valus to int64 or uint64 instead of the
+// default float64. Repeated calls with no argument will toggle this on/off, or this
+// handling will be set with the value of 'b'.
+func CastValuesToInt(b ...bool) {
+	if len(b) == 0 {
+		castToInt = !castToInt
+	} else if len(b) == 1 {
+		castToInt = b[0]
+	}
+}
+
+// 05feb17: support processing XMPP streams (issue #36)
+var handleXMPPStreamTag bool
+
+// HandleXMPPStreamTag causes decoder to parse XMPP <stream:stream> elements.
+// If called with no argument, XMPP stream element handling is toggled on/off.
+// (See xmppStream_test.go for example.)
+//	If called with NewMapXml, NewMapXmlReader, New MapXmlReaderRaw the "stream"
+//	element will be  returned as:
+//		map["stream"]interface{}{map[-<attrs>]interface{}}.
+//	If called with NewMapSeq, NewMapSeqReader, NewMapSeqReaderRaw the "stream"
+//	element will be returned as:
+//		map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}}
+//		where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.)
+func HandleXMPPStreamTag(b ...bool) {
+	if len(b) == 0 {
+		handleXMPPStreamTag = !handleXMPPStreamTag
+	} else if len(b) == 1 {
+		handleXMPPStreamTag = b[0]
+	}
+}
+
+// 21jan18 - decode all values as map["#text":value] (issue #56)
+var decodeSimpleValuesAsMap bool
+
+// DecodeSimpleValuesAsMap forces all values to be decoded as map["#text":<value>].
+// If called with no argument, the decoding is toggled on/off.
+//
+// By default the NewMapXml functions decode simple values without attributes as
+// map[<tag>:<value>]. This function causes simple values without attributes to be
+// decoded the same as simple values with attributes - map[<tag>:map["#text":<value>]].
+func DecodeSimpleValuesAsMap(b ...bool) {
+	if len(b) == 0 {
+		decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap
+	} else if len(b) == 1 {
+		decodeSimpleValuesAsMap = b[0]
+	}
+}
+
// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly.
// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one.
// We've removed the intermediate *node tree with the allocation and subsequent rescanning.
//
// 'skey' is the current element's tag ("" on the initial call), 'a' its
// attributes, 'p' the shared decoder, and 'r' requests value casting.
// Recursion depth mirrors element nesting depth.
func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
	if lowerCase {
		skey = strings.ToLower(skey)
	}
	if snakeCaseKeys {
		skey = strings.Replace(skey, "-", "_", -1)
	}

	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
	// Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value.
	var n, na map[string]interface{}
	var seq int // for includeTagSeqNum

	// Allocate maps and load attributes, if any.
	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
	//       to get StartElement then recurse with skey==xml.StartElement.Name.Local
	//       where we begin allocating map[string]interface{} values 'n' and 'na'.
	if skey != "" {
		n = make(map[string]interface{})  // old n
		na = make(map[string]interface{}) // old n.nodes
		if len(a) > 0 {
			for _, v := range a {
				if snakeCaseKeys {
					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
				}
				var key string
				key = attrPrefix + v.Name.Local
				if lowerCase {
					key = strings.ToLower(key)
				}
				if xmlEscapeCharsDecoder { // per issue#84
					v.Value = escapeChars(v.Value)
				}
				na[key] = cast(v.Value, r, key)
			}
		}
	}
	// Return XMPP <stream:stream> message.
	if handleXMPPStreamTag && skey == "stream" {
		n[skey] = na
		return n, nil
	}

	// Consume tokens until this element's EndElement (or EOF at the top level).
	for {
		t, err := p.Token()
		if err != nil {
			if err != io.EOF {
				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
			}
			return nil, err
		}
		switch t.(type) {
		case xml.StartElement:
			tt := t.(xml.StartElement)

			// First call to xmlToMapParser() doesn't pass xml.StartElement - the map key.
			// So when the loop is first entered, the first token is the root tag along
			// with any attributes, which we process here.
			//
			// Subsequent calls to xmlToMapParser() will pass in tag+attributes for
			// processing before getting the next token which is the element value,
			// which is done above.
			if skey == "" {
				return xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
			}

			// If not initializing the map, parse the element.
			// len(nn) == 1, necessarily - it is just an 'n'.
			nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
			if err != nil {
				return nil, err
			}

			// The nn map[string]interface{} value is a na[nn_key] value.
			// We need to see if nn_key already exists - means we're parsing a list.
			// This may require converting na[nn_key] value into []interface{} type.
			// First, extract the key:val for the map - it's a singleton.
			// Note:
			// * if CoerceKeysToLower() called, then key will be lower case.
			// * if CoerceKeysToSnakeCase() called, then key will be converted to snake case.
			var key string
			var val interface{}
			for key, val = range nn {
				break
			}

			// IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element.
			// In theory, we don't need this if len(na) == 1. But, we don't know what might
			// come next - we're only parsing forward.  So if you ask for 'includeTagSeqNum' you
			// get it on every element. (Personally, I never liked this, but I added it on request
			// and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!)
			if includeTagSeqNum {
				switch val.(type) {
				case []interface{}:
					// noop - There's no clean way to handle this w/o changing message structure.
				case map[string]interface{}:
					val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag
					seq++
				case interface{}: // a non-nil simple element: string, float64, bool
					v := map[string]interface{}{textK: val}
					v["_seq"] = seq
					seq++
					val = v
				}
			}

			// 'na' holding sub-elements of n.
			// See if 'key' already exists.
			// If 'key' exists, then this is a list, if not just add key:val to na.
			if v, ok := na[key]; ok {
				var a []interface{}
				switch v.(type) {
				case []interface{}:
					a = v.([]interface{})
				default: // anything else - note: v.(type) != nil
					a = []interface{}{v}
				}
				a = append(a, val)
				na[key] = a
			} else {
				na[key] = val // save it as a singleton
			}
		case xml.EndElement:
			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
			if len(n) == 0 {
				// If len(na)==0 we have an empty element == "";
				// it has no xml.Attr nor xml.CharData.
				// Note: in original node-tree parser, val defaulted to "";
				// so we always had the default if len(node.nodes) == 0.
				if len(na) > 0 {
					n[skey] = na
				} else {
					n[skey] = "" // empty element
				}
			} else if len(n) == 1 && len(na) > 0 {
				// it's a simple element w/ no attributes w/ subelements
				for _, v := range n {
					na[textK] = v
				}
				n[skey] = na
			}
			return n, nil
		case xml.CharData:
			// clean up possible noise
			tt := strings.Trim(string(t.(xml.CharData)), trimRunes)
			if xmlEscapeCharsDecoder { // issue#84
				tt = escapeChars(tt)
			}
			if len(tt) > 0 {
				if len(na) > 0 || decodeSimpleValuesAsMap {
					na[textK] = cast(tt, r, textK)
				} else if skey != "" {
					n[skey] = cast(tt, r, skey)
				} else {
					// per Adrian (http://www.adrianlungu.com/) catch stray text
					// in decoder stream -
					// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
					// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
					// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
					continue
				}
			}
		default:
			// noop - declarations, directives, process instructions and comments
		}
	}
}
+
+var castNanInf bool
+
+// Cast "Nan", "Inf", "-Inf" XML values to 'float64'.
+// By default, these values will be decoded as 'string'.
+func CastNanInf(b ...bool) {
+	if len(b) == 0 {
+		castNanInf = !castNanInf
+	} else if len(b) == 1 {
+		castNanInf = b[0]
+	}
+}
+
// cast - try to cast string values to bool or float64
// 't' is the tag key that can be checked for 'not-casting'
//
// Casting only happens when 'r' is true, and the attempt order is fixed:
// int64/uint64 (if CastValuesToInt), then float64 (if CastValuesToFloat),
// then bool (if CastValuesToBool). On any failure the original string is
// returned unchanged.
func cast(s string, r bool, t string) interface{} {
	if checkTagToSkip != nil && t != "" && checkTagToSkip(t) {
		// call the check-function here with 't[0]'
		// if 'true' return s
		return s
	}

	if r {
		// handle nan and inf
		if !castNanInf {
			switch strings.ToLower(s) {
			case "nan", "inf", "-inf":
				return s
			}
		}

		// handle numeric strings ahead of boolean
		if castToInt {
			if f, err := strconv.ParseInt(s, 10, 64); err == nil {
				return f
			}
			if f, err := strconv.ParseUint(s, 10, 64); err == nil {
				return f
			}
		}

		if castToFloat {
			if f, err := strconv.ParseFloat(s, 64); err == nil {
				return f
			}
		}

		// ParseBool treats "1"==true & "0"==false, we've already scanned those
		// values as float64. See if value has 't' or 'f' as initial screen to
		// minimize calls to ParseBool; also, see if len(s) < 6.
		if castToBool {
			if len(s) > 0 && len(s) < 6 {
				switch s[:1] {
				case "t", "T", "f", "F":
					if b, err := strconv.ParseBool(s); err == nil {
						return b
					}
				}
			}
		}
	}
	return s
}
+
+// pull request, #59
+var castToFloat = true
+
+// CastValuesToFloat can be used to skip casting to float64 when
+// "cast" argument is 'true' in NewMapXml, etc.
+// Default is true.
+func CastValuesToFloat(b ...bool) {
+	if len(b) == 0 {
+		castToFloat = !castToFloat
+	} else if len(b) == 1 {
+		castToFloat = b[0]
+	}
+}
+
+var castToBool = true
+
+// CastValuesToBool can be used to skip casting to bool when
+// "cast" argument is 'true' in NewMapXml, etc.
+// Default is true.
+func CastValuesToBool(b ...bool) {
+	if len(b) == 0 {
+		castToBool = !castToBool
+	} else if len(b) == 1 {
+		castToBool = b[0]
+	}
+}
+
// checkTagToSkip - switch to address Issue #58

var checkTagToSkip func(string) bool

// SetCheckTagToSkipFunc registers function to test whether the value
// for a tag should be cast to bool or float64 when "cast" argument is 'true'.
// (Dot tag path notation is not supported.)
// NOTE: key may be "#text" if it's a simple element with attributes
//       or "decodeSimpleValuesAsMap == true".
// NOTE: does not apply to NewMapXmlSeq... functions.
func SetCheckTagToSkipFunc(fn func(string) bool) {
	// a 'true' return from fn(tag) leaves that tag's value as a string (see cast())
	checkTagToSkip = fn
}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------
+
+const (
+	DefaultRootTag = "doc"
+)
+
+var useGoXmlEmptyElemSyntax bool
+
+// XmlGoEmptyElemSyntax() - <tag ...></tag> rather than <tag .../>.
+//	Go's encoding/xml package marshals empty XML elements as <tag ...></tag>.  By default this package
+//	encodes empty elements as <tag .../>.  If you're marshaling Map values that include structures
+//	(which are passed to xml.Marshal for encoding), this will let you conform to the standard package.
func XmlGoEmptyElemSyntax() {
	// marshal empty elements as <tag ...></tag>, matching encoding/xml
	useGoXmlEmptyElemSyntax = true
}
+
+// XmlDefaultEmptyElemSyntax() - <tag .../> rather than <tag ...></tag>.
+// Return XML encoding for empty elements to the default package setting.
+// Reverses effect of XmlGoEmptyElemSyntax().
func XmlDefaultEmptyElemSyntax() {
	// restore this package's default <tag .../> form for empty elements
	useGoXmlEmptyElemSyntax = false
}
+
+// ------- issue #88 ----------
+// xmlCheckIsValid set switch to force decoding the encoded XML to
+// see if it is valid XML.
+var xmlCheckIsValid bool
+
+// XmlCheckIsValid forces the encoded XML to be checked for validity.
+func XmlCheckIsValid(b ...bool) {
+	if len(b) == 1 {
+		xmlCheckIsValid = b[0]
+		return
+	}
+	xmlCheckIsValid = !xmlCheckIsValid
+}
+
+// Encode a Map as XML.  The companion of NewMapXml().
+// The following rules apply.
+//    - The key label "#text" is treated as the value for a simple element with attributes.
+//    - Map keys that begin with a hyphen, '-', are interpreted as attributes.
+//      It is an error if the attribute doesn't have a []byte, string, number, or boolean value.
+//    - Map value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formating
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements with only attribute values or are null are terminated using "/>".
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+//    - To encode empty elements in a syntax consistent with encoding/xml call UseGoXmlEmptyElementSyntax().
+// The attributes tag=value pairs are alphabetized by "tag".  Also, when encoding map[string]interface{} values -
+// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted.
func (mv Map) Xml(rootTag ...string) ([]byte, error) {
	m := map[string]interface{}(mv)
	var err error
	b := new(bytes.Buffer)
	p := new(pretty) // just a stub

	if len(m) == 1 && len(rootTag) == 0 {
		for key, value := range m {
			// if it an array, see if all values are map[string]interface{}
			// we force a new root tag if we'll end up with no key:value in the list
			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
			switch value.(type) {
			case []interface{}:
				for _, v := range value.([]interface{}) {
					switch v.(type) {
					case map[string]interface{}: // noop
					default: // anything else
						err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
						goto done
					}
				}
			}
			err = marshalMapToXmlIndent(false, b, key, value, p)
		}
	} else if len(rootTag) == 1 {
		err = marshalMapToXmlIndent(false, b, rootTag[0], m, p)
	} else {
		err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
	}
done:
	// issue #88 - optionally decode the output to confirm it is well-formed XML
	if xmlCheckIsValid {
		d := xml.NewDecoder(bytes.NewReader(b.Bytes()))
		for {
			_, err = d.Token()
			if err == io.EOF {
				err = nil
				break
			} else if err != nil {
				return nil, err
			}
		}
	}
	return b.Bytes(), err
}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as  XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as  XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// Writes the Map as pretty XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// --------------  Handle XML stream by processing Map value --------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for imput.
+var xhandlerPollInterval = time.Millisecond
+
+// Bulk process XML using handlers that process a Map value.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
	var n int // count of docs read so far, used to annotate errors
	for {
		m, merr := NewMapXmlReader(xmlReader)
		n++

		// handle error condition with errhandler
		if merr != nil && merr != io.EOF {
			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
			if ok := errHandler(merr); !ok {
				// caused reader termination
				return merr
			}
			continue
		}

		// pass to maphandler
		if len(m) != 0 {
			if ok := mapHandler(m); !ok {
				break
			}
		} else if merr != io.EOF {
			// empty doc but the stream is still open - back off briefly so we
			// don't spin on, e.g., an idle os.Stdin
			time.Sleep(xhandlerPollInterval)
		}

		if merr == io.EOF {
			break
		}
	}
	return nil
}
+
+// Bulk process XML using handlers that process a Map value and the raw XML.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+//	See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader.
func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
	var n int // count of docs read so far, used to annotate errors
	for {
		m, raw, merr := NewMapXmlReaderRaw(xmlReader)
		n++

		// handle error condition with errhandler
		if merr != nil && merr != io.EOF {
			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
			if ok := errHandler(merr, raw); !ok {
				// caused reader termination
				return merr
			}
			continue
		}

		// pass to maphandler
		if len(m) != 0 {
			if ok := mapHandler(m, raw); !ok {
				break
			}
		} else if merr != io.EOF {
			// empty doc but the stream is still open - back off briefly so we
			// don't spin on, e.g., an idle os.Stdin
			time.Sleep(xhandlerPollInterval)
		}

		if merr == io.EOF {
			break
		}
	}
	return nil
}
+
+// ----------------- END: Handle XML stream by processing Map value --------------
+
+// --------  a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ----------
+
+// This is a clone of io.TeeReader with the additional method t.ReadByte().
+// Thus, this TeeReader is also an io.ByteReader.
+// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written
+// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte()..
+// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with
+// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic.
+
// teeReader duplicates everything read from 'r' into 'w', one byte at a
// time, and - unlike io.TeeReader - also implements io.ByteReader, which
// xml.NewDecoder requires to avoid wrapping the reader in a bufio.Reader
// (which would bypass the tee).
type teeReader struct {
	r io.Reader
	w io.Writer
	b []byte
}

// myTeeReader returns a ByteReader-capable tee of 'r' into 'w'.
func myTeeReader(r io.Reader, w io.Writer) io.Reader {
	return &teeReader{r: r, w: w, b: make([]byte, 1)}
}

// Read satisfies io.Reader; the xml.Decoder never calls it, so it is a stub.
func (t *teeReader) Read(p []byte) (int, error) {
	return 0, nil
}

// ReadByte reads one byte from the underlying reader and, when a byte was
// read, copies it to the tee writer before returning it.
func (t *teeReader) ReadByte() (byte, error) {
	n, err := t.r.Read(t.b)
	if n > 0 {
		if _, werr := t.w.Write(t.b[:1]); werr != nil {
			return t.b[0], werr
		}
	}
	return t.b[0], err
}
+
// byteReader wraps an io.Reader with a one-byte buffer so it satisfies
// io.ByteReader for xml.NewDecoder. For use with NewMapXmlReader &
// NewMapXmlSeqReader.
type byteReader struct {
	r io.Reader
	b []byte
}

// myByteReader returns a ByteReader-capable wrapper of 'r'.
func myByteReader(r io.Reader) io.Reader {
	return &byteReader{r: r, b: make([]byte, 1)}
}

// Read passes through to the wrapped reader.
// Needed if reading a malformed http.Request.Body - issue #38.
func (b *byteReader) Read(p []byte) (int, error) {
	return b.r.Read(p)
}

// ReadByte reads a single byte, returning whatever landed in the buffer
// along with the underlying reader's error (issue #38).
func (b *byteReader) ReadByte() (byte, error) {
	_, err := b.r.Read(b.b)
	if len(b.b) > 0 {
		// issue #38
		return b.b[0], err
	}
	var c byte
	return c, err
}
+
+// ----------------------- END: io.TeeReader hack -----------------------------------
+
+// ---------------------- XmlIndent - from j2x package ----------------------------
+
// Encode a map[string]interface{} as a pretty XML string.
// See Xml for encoding rules.
func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
	m := map[string]interface{}(mv)

	var err error
	b := new(bytes.Buffer)
	p := new(pretty)
	p.indent = indent
	p.padding = prefix

	if len(m) == 1 && len(rootTag) == 0 {
		// this can extract the key for the single map element
		// use it if it isn't a key for a list
		for key, value := range m {
			if _, ok := value.([]interface{}); ok {
				err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p)
			} else {
				err = marshalMapToXmlIndent(true, b, key, value, p)
			}
		}
	} else if len(rootTag) == 1 {
		err = marshalMapToXmlIndent(true, b, rootTag[0], m, p)
	} else {
		err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p)
	}
	// issue #88 - optionally decode the output to confirm it is well-formed XML
	if xmlCheckIsValid {
		d := xml.NewDecoder(bytes.NewReader(b.Bytes()))
		for {
			_, err = d.Token()
			if err == io.EOF {
				err = nil
				break
			} else if err != nil {
				return nil, err
			}
		}
	}
	return b.Bytes(), err
}
+
+type pretty struct {
+	indent   string
+	cnt      int
+	padding  string
+	mapDepth int
+	start    int
+}
+
+func (p *pretty) Indent() {
+	p.padding += p.indent
+	p.cnt++
+}
+
+func (p *pretty) Outdent() {
+	if p.cnt > 0 {
+		p.padding = p.padding[:len(p.padding)-len(p.indent)]
+		p.cnt--
+	}
+}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+// NOTE: 01may20 - replaces mapToXmlIndent(); uses bytes.Buffer instead for string appends.
+func marshalMapToXmlIndent(doIndent bool, b *bytes.Buffer, key string, value interface{}, pp *pretty) error {
+	var err error
+	var endTag bool
+	var isSimple bool
+	var elen int
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	// per issue #48, 18apr18 - try and coerce maps to map[string]interface{}
+	// Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq().
+	if reflect.ValueOf(value).Kind() == reflect.Map {
+		switch value.(type) {
+		case map[string]interface{}:
+		default:
+			val := make(map[string]interface{})
+			vv := reflect.ValueOf(value)
+			keys := vv.MapKeys()
+			for _, k := range keys {
+				val[fmt.Sprint(k)] = vv.MapIndex(k).Interface()
+			}
+			value = val
+		}
+	}
+
+	// 14jul20.  The following block of code has become something of a catch all for odd stuff
+	// that might be passed in as a result of casting an arbitrary map[<T>]<T> to an mxj.Map
+	// value and then call m.Xml or m.XmlIndent. See issue #71 (and #73) for such edge cases.
+	switch value.(type) {
+	// these types are handled during encoding
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number:
+	case []map[string]interface{}, []string, []float64, []bool, []int, []int32, []int64, []float32, []json.Number:
+	case []interface{}:
+	case nil:
+		value = ""
+	default:
+		// see if value is a struct, if so marshal using encoding/xml package
+		if reflect.ValueOf(value).Kind() == reflect.Struct {
+			if v, err := xml.Marshal(value); err != nil {
+				return err
+			} else {
+				value = string(v)
+			}
+		} else {
+			// coerce everything else into a string value
+			value = fmt.Sprint(value)
+		}
+	}
+
+	// start the XML tag with required indentation and padding
+	if doIndent {
+		switch value.(type) {
+		case []interface{}, []string:
+			// list processing handles indentation for all elements
+		default:
+			if _, err = b.WriteString(p.padding); err != nil {
+				return err
+			}
+		}
+	}
+	switch value.(type) {
+	case []interface{}:
+	default:
+		if _, err = b.WriteString(`<` + key); err != nil {
+			return err
+		}
+	}
+
+	switch value.(type) {
+	case map[string]interface{}:
+		vv := value.(map[string]interface{})
+		lenvv := len(vv)
+		// scan out attributes - attribute keys have prepended attrPrefix
+		attrlist := make([][2]string, len(vv))
+		var n int
+		var ss string
+		for k, v := range vv {
+			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
+				switch v.(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(v.(string))
+					} else {
+						ss = v.(string)
+					}
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = ss
+				case float64, bool, int, int32, int64, float32, json.Number:
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = fmt.Sprintf("%v", v)
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(v.([]byte)))
+					} else {
+						ss = string(v.([]byte))
+					}
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = ss
+				default:
+					return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v)
+				}
+				n++
+			}
+		}
+		if n > 0 {
+			attrlist = attrlist[:n]
+			sort.Sort(attrList(attrlist))
+			for _, v := range attrlist {
+				if _, err = b.WriteString(` ` + v[0] + `="` + v[1] + `"`); err != nil {
+					return err
+				}
+			}
+		}
+		// only attributes?
+		if n == lenvv {
+			if useGoXmlEmptyElemSyntax {
+				if _, err = b.WriteString(`</` + key + ">"); err != nil {
+					return err
+				}
+			} else {
+				if _, err = b.WriteString(`/>`); err != nil {
+					return err
+				}
+			}
+			break
+		}
+
+		// simple element? Note: '#text' is an invalid XML tag.
+		isComplex := false
+		if v, ok := vv[textK]; ok && n+1 == lenvv {
+			// just the value and attributes
+			switch v.(type) {
+			case string:
+				if xmlEscapeChars {
+					v = escapeChars(v.(string))
+				} else {
+					v = v.(string)
+				}
+			case []byte:
+				if xmlEscapeChars {
+					v = escapeChars(string(v.([]byte)))
+				} else {
+					v = string(v.([]byte))
+				}
+			}
+			if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil {
+				return err
+			}
+			endTag = true
+			elen = 1
+			isSimple = true
+			break
+		} else if ok {
+			// need to handle when there are subelements in addition to the simple element value
+			// issue #90
+			switch v.(type) {
+			case string:
+				if xmlEscapeChars {
+					v = escapeChars(v.(string))
+				} else {
+					v = v.(string)
+				}
+			case []byte:
+				if xmlEscapeChars {
+					v = escapeChars(string(v.([]byte)))
+				} else {
+					v = string(v.([]byte))
+				}
+			}
+			if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil {
+				return err
+			}
+			isComplex = true
+		}
+
+		// close tag with possible attributes
+		if !isComplex {
+			if _, err = b.WriteString(">"); err != nil {
+				return err
+			}
+		}
+		if doIndent {
+			// *s += "\n"
+			if _, err = b.WriteString("\n"); err != nil {
+				return err
+			}
+		}
+		// something more complex
+		p.mapDepth++
+		// extract the map k:v pairs and sort on key
+		elemlist := make([][2]interface{}, len(vv))
+		n = 0
+		for k, v := range vv {
+			if k == textK {
+				// simple element handled above
+				continue
+			}
+			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
+				continue
+			}
+			elemlist[n][0] = k
+			elemlist[n][1] = v
+			n++
+		}
+		elemlist = elemlist[:n]
+		sort.Sort(elemList(elemlist))
+		var i int
+		for _, v := range elemlist {
+			switch v[1].(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := marshalMapToXmlIndent(doIndent, b, v[0].(string), v[1], p); err != nil {
+				return err
+			}
+			switch v[1].(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content ...
+	case []interface{}:
+		// special case - found during implementing Issue #23
+		if len(value.([]interface{})) == 0 {
+			if doIndent {
+				if _, err = b.WriteString(p.padding + p.indent); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString("<" + key); err != nil {
+				return err
+			}
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case []string:
+		// This was added by https://github.com/slotix ... not a type that
+		// would be encountered if mv generated from NewMapXml, NewMapJson.
+		// Could be encountered in AnyXml(), so we'll let it stay, though
+		// it should be merged with case []interface{}, above.
+		//quick fix for []string type
+	//[]string should be treated exactly as []interface{}
+		if len(value.([]string)) == 0 {
+			if doIndent {
+				if _, err = b.WriteString(p.padding + p.indent); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString("<" + key); err != nil {
+				return err
+			}
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]string) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			// *s += p.padding
+			if _, err = b.WriteString(p.padding); err != nil {
+				return err
+			}
+		}
+		if _, err = b.WriteString("<" + key); err != nil {
+			return err
+		}
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			v := value.(string)
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				// *s += ">" + v
+				if _, err = b.WriteString(">" + v); err != nil {
+					return err
+				}
+			}
+		case float64, bool, int, int32, int64, float32, json.Number:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v) // always > 0
+			if _, err = b.WriteString(">" + v); err != nil {
+				return err
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			v := string(value.([]byte))
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				// *s += ">" + v
+				if _, err = b.WriteString(">" + v); err != nil {
+					return err
+				}
+			}
+		default:
+			if _, err = b.WriteString(">"); err != nil {
+				return err
+			}
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				if _, err = b.WriteString(">UNKNOWN"); err != nil {
+					return err
+				}
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					if _, err = b.Write(v); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag {
+		if doIndent {
+			if !isSimple {
+				if _, err = b.WriteString(p.padding); err != nil {
+					return err
+				}
+			}
+		}
+		if elen > 0 || useGoXmlEmptyElemSyntax {
+			if elen == 0 {
+				if _, err = b.WriteString(">"); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString(`</` + key + ">"); err != nil {
+				return err
+			}
+		} else {
+			if _, err = b.WriteString(`/>`); err != nil {
+				return err
+			}
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			if _, err = b.WriteString("\n"); err != nil {
+				return err
+			}
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// ============================ sort interface implementation =================
+
+type attrList [][2]string
+
+func (a attrList) Len() int {
+	return len(a)
+}
+
+func (a attrList) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a attrList) Less(i, j int) bool {
+	return a[i][0] <= a[j][0]
+}
+
+type elemList [][2]interface{}
+
+func (e elemList) Len() int {
+	return len(e)
+}
+
+func (e elemList) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemList) Less(i, j int) bool {
+	return e[i][0].(string) <= e[j][0].(string)
+}

+ 902 - 0
vendor/github.com/clbanning/mxj/v2/xmlseq.go

@@ -0,0 +1,902 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+// MapSeq is like Map but contains sequencing indices to allow recovering the original order of
+// the XML elements when the map[string]interface{} is marshaled. Element attributes are
+// stored as a map["#attr"]map[<attr_key>]map[string]interface{}{"#text":"<value>", "#seq":<attr_index>}
+// value instead of denoting the keys with a prefix character.  Also, comments, directives and
+// process instructions are preserved.
+type MapSeq map[string]interface{}
+
+// NoRoot is returned by NewXmlSeq, etc., when a comment, directive or procinstr element is parsed
+// in the XML data stream and the element is not contained in an XML object with a root element.
+var NoRoot = errors.New("no root key")
+var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// NewMapXmlSeq converts a XML doc into a MapSeq value with elements id'd with decoding sequence key represented
+// as map["#seq"]<int value>.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with msv.Xml() / msv.XmlIndent().
+//	• attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<aval>, "#seq":<num>}
+//	• all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+//	• lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//	  include a "#seq" k:v pair based on sequence they are decoded.  Thus, XML like:
+//	      <doc>
+//	         <ltag>value 1</ltag>
+//	         <newtag>value 2</newtag>
+//	         <ltag>value 3</ltag>
+//	      </doc>
+//	  is decoded as:
+//	    doc :
+//	      ltag :[[]interface{}]
+//	        [item: 0]
+//	          #seq :[int] 0
+//	          #text :[string] value 1
+//	        [item: 1]
+//	          #seq :[int] 2
+//	          #text :[string] value 3
+//	      newtag :
+//	        #seq :[int] 1
+//	        #text :[string] value 2
+//	  It will encode in proper sequence even though the MapSeq representation merges all "ltag" elements in an array.
+//	• comments - "<!--comment-->" -  are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+//	• directives - "<!text>" - are decoded as map["#directive"]map[#text"]"directive_text" with a "#seq" k:v pair.
+//	• process instructions  - "<?instr?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//	  is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+//	• comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//	  map[string]interface{} and the error value 'NoRoot'.
+//	• note: "<![CDATA[" syntax is lost in xml.Decode parser - and is not handled here, either.
+//	   and: "\r\n" is converted to "\n"
+//
+//	NOTES:
+//	   1. The 'xmlVal' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+//	NAME SPACES:
+//	   1. Keys in the MapSeq value that are parsed from a <name space prefix>:<local name> tag preserve the
+//	      "<prefix>:" notation rather than stripping it as with NewMapXml().
+//	   2. Attribute keys for name space prefix declarations preserve "xmlns:<prefix>" notation.
+//
+//	ERRORS:
+//	   1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment",
+//	      "#directive" or #procinst" key.
+//	   2. Unmarshaling an XML doc that is formatted using the whitespace character, " ", will error, since
+//	      Decoder.RawToken treats such occurrences as significant. See NewMapFormattedXmlSeq().
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
+
+// NewMapFormattedXmlSeq performs the same as NewMapXmlSeq but is useful for processing XML objects that
+// are formatted using the whitespace character, " ".  (The stdlib xml.Decoder, by default, treats all
+// whitespace as significant; Decoder.Token() and Decoder.RawToken() will return strings of one or more
+// whitespace characters and without alphanumeric or punctuation characters as xml.CharData values.)
+//
+// If you're processing such XML, then this will convert all occurrences of whitespace-only strings
+// into an empty string, "", prior to parsing the XML - irrespective of whether the occurrence is
+// formatting or is an actual element value.
+func NewMapFormattedXmlSeq(xmlVal []byte, cast ...bool) (MapSeq, error) {
+	var c bool
+	if len(cast) == 1 {
+		c = cast[0]
+	}
+
+	// Per PR #104 - clean out formatting characters so they don't show up in Decoder.RawToken() stream.
+	// NOTE: Also replaces element values that are solely comprised of formatting/whitespace characters
+	// with empty string, "".
+	r := regexp.MustCompile(`>[\n\t\r ]*<`)
+	xmlVal = r.ReplaceAll(xmlVal, []byte("><"))
+	return xmlSeqToMap(xmlVal, c)
+}
+
+// NewMapXmlSeqReader returns the next XML doc from an io.Reader as a MapSeq value.
+//	NOTES:
+//	   1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+//	ERRORS:
+//	   1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment",
+//	      "#directive" or #procinst" key.
+func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlSeqReaderToMap(xmlReader, r)
+}
+
+// NewMapXmlSeqReaderRaw returns the next XML doc from an io.Reader as a MapSeq value.
+// Returns MapSeq value, slice with the raw XML, and any error.
+//	NOTES:
+//	   1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	    2. The 'raw' return value may be larger than the XML text value.
+//	    3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	       extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	    4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	       re-encode the message in its original structure.
+//	    5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+//	ERRORS:
+//	    1. If a NoRoot error, "no root key," is returned, check if the initial map key is "#comment",
+//	       "#directive" or #procinst" key.
+func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (MapSeq, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb)
+
+	m, err := xmlSeqReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	// err may be NoRoot
+	return m, b, err
+}
+
+// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// xmlSeqToMap - convert a XML doc into map[string]interface{} value
+func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly.
+// Add #seq tag value for each element decoded - to be used for Encoding later.
+func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
+	if snakeCaseKeys {
+		skey = strings.Replace(skey, "-", "_", -1)
+	}
+
+	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
+	var n, na map[string]interface{}
+	var seq int // for including seq num when decoding
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	//       to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	//       where we begin allocating map[string]interface{} values 'n' and 'na'.
+	if skey != "" {
+		// 'n' only needs one slot - save call to runtime.hashGrow()
+		// 'na' we don't know
+		n = make(map[string]interface{}, 1)
+		na = make(map[string]interface{})
+		if len(a) > 0 {
+			// xml.Attr is decoded into: map["#attr"]map[<attr_label>]interface{}
+			// where interface{} is map[string]interface{}{"#text":<attr_val>, "#seq":<attr_seq>}
+			aa := make(map[string]interface{}, len(a))
+			for i, v := range a {
+				if snakeCaseKeys {
+					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
+				}
+				if xmlEscapeCharsDecoder { // per issue#84
+					v.Value = escapeChars(v.Value)
+				}
+				if len(v.Name.Space) > 0 {
+					aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{textK: cast(v.Value, r, ""), seqK: i}
+				} else {
+					aa[v.Name.Local] = map[string]interface{}{textK: cast(v.Value, r, ""), seqK: i}
+				}
+			}
+			na[attrK] = aa
+		}
+	}
+
+	// Return XMPP <stream:stream> message.
+	if handleXMPPStreamTag && skey == "stream:stream" {
+		n[skey] = na
+		return n, nil
+	}
+
+	for {
+		t, err := p.RawToken()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
+			}
+			return nil, err
+		}
+		switch t.(type) {
+		case xml.StartElement:
+			tt := t.(xml.StartElement)
+
+			// First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key.
+			// So when the loop is first entered, the first token is the root tag along
+			// with any attributes, which we process here.
+			//
+			// Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for
+			// processing before getting the next token which is the element value,
+			// which is done above.
+			if skey == "" {
+				if len(tt.Name.Space) > 0 {
+					return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+				} else {
+					return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+				}
+			}
+
+			// If not initializing the map, parse the element.
+			// len(nn) == 1, necessarily - it is just an 'n'.
+			var nn map[string]interface{}
+			if len(tt.Name.Space) > 0 {
+				nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+			} else {
+				nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			// The nn map[string]interface{} value is a na[nn_key] value.
+			// We need to see if nn_key already exists - means we're parsing a list.
+			// This may require converting na[nn_key] value into []interface{} type.
+			// First, extract the key:val for the map - it's a singleton.
+			var key string
+			var val interface{}
+			for key, val = range nn {
+				break
+			}
+
+			// add "#seq" k:v pair -
+			// Sequence number included even in list elements - this should allow us
+			// to properly resequence even something goofy like:
+			//     <list>item 1</list>
+			//     <subelement>item 2</subelement>
+			//     <list>item 3</list>
+			// where all the "list" subelements are decoded into an array.
+			switch val.(type) {
+			case map[string]interface{}:
+				val.(map[string]interface{})[seqK] = seq
+				seq++
+			case interface{}: // a non-nil simple element: string, float64, bool
+				v := map[string]interface{}{textK: val, seqK: seq}
+				seq++
+				val = v
+			}
+
+			// 'na' holding sub-elements of n.
+			// See if 'key' already exists.
+			// If 'key' exists, then this is a list, if not just add key:val to na.
+			if v, ok := na[key]; ok {
+				var a []interface{}
+				switch v.(type) {
+				case []interface{}:
+					a = v.([]interface{})
+				default: // anything else - note: v.(type) != nil
+					a = []interface{}{v}
+				}
+				a = append(a, val)
+				na[key] = a
+			} else {
+				na[key] = val // save it as a singleton
+			}
+		case xml.EndElement:
+			if skey != "" {
+				tt := t.(xml.EndElement)
+				if snakeCaseKeys {
+					tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1)
+				}
+				var name string
+				if len(tt.Name.Space) > 0 {
+					name = tt.Name.Space + `:` + tt.Name.Local
+				} else {
+					name = tt.Name.Local
+				}
+				if skey != name {
+					return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d",
+						skey, name, p.InputOffset())
+				}
+			}
+			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
+			if len(n) == 0 {
+				// If len(na)==0 we have an empty element == "";
+				// it has no xml.Attr nor xml.CharData.
+				// Empty element content will be  map["etag"]map["#text"]""
+				// after #seq injection - map["etag"]map["#seq"]seq - after return.
+				if len(na) > 0 {
+					n[skey] = na
+				} else {
+					n[skey] = "" // empty element
+				}
+			}
+			return n, nil
+		case xml.CharData:
+			// clean up possible noise
+			tt := strings.Trim(string(t.(xml.CharData)), trimRunes)
+			if xmlEscapeCharsDecoder { // issue#84
+				tt = escapeChars(tt)
+			}
+			if skey == "" {
+				// per Adrian (http://www.adrianlungu.com/) catch stray text
+				// in decoder stream -
+				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+				continue
+			}
+			if len(tt) > 0 {
+				// every simple element is a #text and has #seq associated with it
+				na[textK] = cast(tt, r, "")
+				na[seqK] = seq
+				seq++
+			}
+		case xml.Comment:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{commentK: string(t.(xml.Comment))}
+				return n, NoRoot
+			}
+			cm := make(map[string]interface{}, 2)
+			cm[textK] = string(t.(xml.Comment))
+			cm[seqK] = seq
+			seq++
+			na[commentK] = cm
+		case xml.Directive:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{directiveK: string(t.(xml.Directive))}
+				return n, NoRoot
+			}
+			dm := make(map[string]interface{}, 2)
+			dm[textK] = string(t.(xml.Directive))
+			dm[seqK] = seq
+			seq++
+			na[directiveK] = dm
+		case xml.ProcInst:
+			if n == nil {
+				na = map[string]interface{}{targetK: t.(xml.ProcInst).Target, instK: string(t.(xml.ProcInst).Inst)}
+				n = map[string]interface{}{procinstK: na}
+				return n, NoRoot
+			}
+			pm := make(map[string]interface{}, 3)
+			pm[targetK] = t.(xml.ProcInst).Target
+			pm[instK] = string(t.(xml.ProcInst).Inst)
+			pm[seqK] = seq
+			seq++
+			na[procinstK] = pm
+		default:
+			// noop - shouldn't ever get here, now, since we handle all token types
+		}
+	}
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// --------------------- mv.XmlSeq & mv.XmlSeqWriter -------------------------
+
+// Xml encodes a MapSeq as XML with elements sorted on #seq.  The companion of NewMapXmlSeq().
+// The following rules apply.
+//    - The "#seq" key value is used to sequence the subelements or attributes only.
+//    - The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+//    - The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+//    - The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+//    - The "#procinst" map key identifies a process instruction in the value "#target" and "#inst"
+//      map entries - <?target inst?>.
+//    - Value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements with only attribute values or are null are terminated using "/>" unless XmlGoEmptyElemSyntax() called.
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+func (mv MapSeq) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlSeqIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	if xmlCheckIsValid {
+		d := xml.NewDecoder(bytes.NewReader([]byte(*s)))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return []byte(*s), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// XmlWriter Writes the MapSeq value as  XML on the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlWriteRaw writes the MapSeq value as XML on the Writer. []byte is the raw XML that was written.
+// See Map.XmlSeq() for encoding rules.
+/*
+func (mv MapSeq) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// XmlIndentWriter encodes the MapSeq as pretty-printed XML (per MapSeq.Xml
+// encoding rules) and writes the result to xmlWriter.
+func (mv MapSeq) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	b, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err == nil {
+		_, err = xmlWriter.Write(b)
+	}
+	return err
+}
+
+// XmlIndentWriterRaw writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Map.XmlSeq() for encoding rules.
+/*
+func (mv MapSeq) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// ---------------------- XmlSeqIndent ----------------------------
+
+// XmlIndent encodes a map[string]interface{} as a pretty XML string.
+// See MapSeq.Xml() for encoding rules. prefix is the left padding applied to
+// every line; indent is the per-nesting-level indentation.
+func (mv MapSeq) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	// NOTE(review): when xmlCheckIsValid is set the document is validated
+	// twice here -- a full NewMapXml re-parse, then the token scan below.
+	// MapSeq.Xml() performs only the token scan; the extra re-parse looks
+	// redundant -- confirm against upstream intent before removing.
+	if xmlCheckIsValid {
+		if _, err = NewMapXml([]byte(*s)); err != nil {
+			return nil, err
+		}
+		d := xml.NewDecoder(bytes.NewReader([]byte(*s)))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return []byte(*s), err
+}
+
+// mapToXmlSeqIndent does the real encoding work for MapSeq.Xml and
+// MapSeq.XmlIndent: it appends the XML encoding of (key, value) to *s.
+// Map values decoded by NewMapXmlSeq carry pseudo-keys (seqK/attrK/textK and
+// commentK/directiveK/procinstK) that drive attribute emission and sibling
+// ordering; doIndent selects pretty printing using pp's indent/padding.
+// returns an error if an attribute is not atomic
+func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
+	var endTag bool   // a closing tag must be emitted at the end
+	var isSimple bool // value was rendered inline (no child elements)
+	var noEndTag bool // comment/directive/procinst: already self-delimited
+	var elen int      // length of emitted content; 0 selects the "/>" form
+	var ss string
+	// Work on a copy so recursion into siblings doesn't see our indent state.
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	switch value.(type) {
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+		if doIndent {
+			*s += p.padding
+		}
+		// comments, directives and processing instructions supply their own
+		// delimiters; everything else opens a normal element tag here.
+		if key != commentK && key != directiveK && key != procinstK {
+			*s += `<` + key
+		}
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		val := value.(map[string]interface{})
+
+		if key == commentK {
+			*s += `<!--` + val[textK].(string) + `-->`
+			noEndTag = true
+			break
+		}
+
+		if key == directiveK {
+			*s += `<!` + val[textK].(string) + `>`
+			noEndTag = true
+			break
+		}
+
+		if key == procinstK {
+			*s += `<?` + val[targetK].(string) + ` ` + val[instK].(string) + `?>`
+			noEndTag = true
+			break
+		}
+
+		haveAttrs := false
+		// process attributes first
+		if v, ok := val[attrK].(map[string]interface{}); ok {
+			// First, unroll the map[string]interface{} into a []keyval array.
+			// Then sequence it.
+			kv := make([]keyval, len(v))
+			n := 0
+			for ak, av := range v {
+				kv[n] = keyval{ak, av}
+				n++
+			}
+			sort.Sort(elemListSeq(kv))
+			// Now encode the attributes in original decoding sequence, using keyval array.
+			for _, a := range kv {
+				vv := a.v.(map[string]interface{})
+				switch vv[textK].(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(vv[textK].(string))
+					} else {
+						ss = vv[textK].(string)
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				case float64, bool, int, int32, int64, float32:
+					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv[textK]) + `"`
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(vv[textK].([]byte)))
+					} else {
+						ss = string(vv[textK].([]byte))
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				default:
+					return fmt.Errorf("invalid attribute value for: %s", a.k)
+				}
+			}
+			haveAttrs = true
+		}
+
+		// simple element?
+		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
+		_, seqOK := val[seqK] // have key
+		if v, ok := val[textK]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
+			// Only seqK [+ attrK] + textK present: render the text inline.
+			if stmp, ok := v.(string); ok && stmp != "" {
+				if xmlEscapeChars {
+					stmp = escapeChars(stmp)
+				}
+				*s += ">" + stmp
+				endTag = true
+				elen = 1
+			}
+			isSimple = true
+			break
+		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
+			// here no #text but have #seq or #seq+#attr
+			endTag = false
+			break
+		}
+
+		// we now need to sequence everything except attributes
+		// 'kv' will hold everything that needs to be written
+		kv := make([]keyval, 0)
+		for k, v := range val {
+			if k == attrK { // already processed
+				continue
+			}
+			if k == seqK { // ignore - just for sorting
+				continue
+			}
+			switch v.(type) {
+			case []interface{}:
+				// unwind the array as separate entries
+				for _, vv := range v.([]interface{}) {
+					kv = append(kv, keyval{k, vv})
+				}
+			default:
+				kv = append(kv, keyval{k, v})
+			}
+		}
+
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		// Restore the original document order of the children via "#seq".
+		sort.Sort(elemListSeq(kv))
+		i := 0
+		for _, v := range kv {
+			switch v.v.(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil {
+				return err
+			}
+			switch v.v.(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content other than attrs
+	case []interface{}:
+		// Repeated element: emit each member under the same key.
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			if xmlEscapeChars {
+				ss = escapeChars(value.(string))
+			} else {
+				ss = value.(string)
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		case float64, bool, int, int32, int64, float32:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			if xmlEscapeChars {
+				ss = escapeChars(string(value.([]byte)))
+			} else {
+				ss = string(value.([]byte))
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		default:
+			// Unrecognized type: fall back to encoding/xml marshaling.
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	// Close the element: elen == 0 means no content was emitted, so the short
+	// "/>" form is used unless Go-style empty elements were requested.
+	if endTag && !noEndTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		switch value.(type) {
+		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+			if elen > 0 || useGoXmlEmptyElemSyntax {
+				if elen == 0 {
+					*s += ">"
+				}
+				*s += `</` + key + ">"
+			} else {
+				*s += `/>`
+			}
+		}
+	} else if !noEndTag {
+		if useGoXmlEmptyElemSyntax {
+			*s += `</` + key + ">"
+			// *s += "></" + key + ">"
+		} else {
+			*s += "/>"
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// the element sort implementation
+
+// keyval pairs an element tag (k) with its decoded value (v) so that
+// sibling elements can be re-ordered by their "#seq" member.
+type keyval struct {
+	k string
+	v interface{}
+}
+
+// elemListSeq orders []keyval entries by each value's seqK member; entries
+// without a usable sequence value sort last via the 9999999 sentinel.
+type elemListSeq []keyval
+
+// Len implements sort.Interface.
+func (e elemListSeq) Len() int {
+	return len(e)
+}
+
+// Swap implements sort.Interface.
+func (e elemListSeq) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+// Less implements sort.Interface. The seqK member may have decoded as an
+// int or (e.g. after a JSON round trip) a float64; anything else gets the
+// max sentinel so it sorts after sequenced entries.
+func (e elemListSeq) Less(i, j int) bool {
+	var iseq, jseq int
+	var fiseq, fjseq float64
+	var ok bool
+	if iseq, ok = e[i].v.(map[string]interface{})[seqK].(int); !ok {
+		if fiseq, ok = e[i].v.(map[string]interface{})[seqK].(float64); ok {
+			iseq = int(fiseq)
+		} else {
+			iseq = 9999999
+		}
+	}
+
+	if jseq, ok = e[j].v.(map[string]interface{})[seqK].(int); !ok {
+		if fjseq, ok = e[j].v.(map[string]interface{})[seqK].(float64); ok {
+			jseq = int(fjseq)
+		} else {
+			jseq = 9999999
+		}
+	}
+
+	// Strict '<', not '<=': sort.Interface requires Less to be a strict weak
+	// ordering, and '<=' reported Less(i,j) && Less(j,i) for equal sequence
+	// values, violating the contract. Equal keys only arise via the sentinel
+	// fallback, so the observable ordering is unchanged.
+	return iseq < jseq
+}
+
+// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio
+
+// BeautifyXml (re)formats an XML doc similar to Map.XmlIndent().
+// It preserves comments, directives and processing instructions because it
+// round-trips through NewMapXmlSeq, which retains sequence information.
+// prefix and indent are passed through to MapSeq.XmlIndent.
+func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) {
+	x, err := NewMapXmlSeq(b)
+	if err != nil {
+		return nil, err
+	}
+	return x.XmlIndent(prefix, indent)
+}

+ 18 - 0
vendor/github.com/clbanning/mxj/v2/xmlseq2.go

@@ -0,0 +1,18 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+// ---------------- expose Map methods to MapSeq type ---------------------------
+
+// StringIndent pretty prints the MapSeq with value type information,
+// delegating to the shared Map writer.
+func (msv MapSeq) StringIndent(offset ...int) string {
+	return writeMap(map[string]interface{}(msv), true, true, offset...)
+}
+
+// StringIndentNoTypeInfo pretty prints the MapSeq without value type
+// information - just key:value entries - delegating to the shared Map writer.
+func (msv MapSeq) StringIndentNoTypeInfo(offset ...int) string {
+	return writeMap(map[string]interface{}(msv), false, true, offset...)
+}
+

+ 2 - 0
vendor/github.com/gogf/gf/v2/.codecov.yml

@@ -0,0 +1,2 @@
+ignore:
+  - "cmd"  # ignore cmd folders and all its contents

+ 3 - 0
vendor/github.com/gogf/gf/v2/.gitattributes

@@ -0,0 +1,3 @@
+*.js linguist-language=GO
+*.css linguist-language=GO
+*.html linguist-language=GO

+ 19 - 0
vendor/github.com/gogf/gf/v2/.gitignore

@@ -0,0 +1,19 @@
+.buildpath
+.hgignore.swp
+.project
+.orig
+.swp
+.idea/
+.settings/
+.vscode/
+vendor/
+pkg/
+bin/
+**/.DS_Store
+.test/
+cmd/gf/main
+cmd/gf/gf
+temp/
+go.work
+go.work.sum
+!cmd/gf/go.work

+ 282 - 0
vendor/github.com/gogf/gf/v2/.golangci.yml

@@ -0,0 +1,282 @@
+## This file contains all available configuration options
+## with their default values.
+
+# See https://github.com/golangci/golangci-lint#config-file
+# See https://golangci-lint.run/usage/configuration/
+
+# Options for analysis running.
+run:
+  # Exit code when at least one issue was found.
+  # Default: 1
+  issues-exit-code: 2
+
+  # Include test files or not.
+  # Default: true
+  tests: false
+
+  # Which dirs to skip: issues from them won't be reported.
+  # Can use regexp here: `generated.*`, regexp is applied on full path.
+  # Default value is empty list,
+  # but default dirs are skipped independently of this option's value (see skip-dirs-use-default).
+  # "/" will be replaced by current OS file path separator to properly work on Windows.
+  skip-dirs: []
+
+  # Which files to skip: they will be analyzed, but issues from them won't be reported.
+  # Default value is empty list,
+  # but there is no need to include all autogenerated files,
+  # we confidently recognize autogenerated files.
+  # If it's not please let us know.
+  # "/" will be replaced by current OS file path separator to properly work on Windows.
+  skip-files: []
+
+
+# Main linters configurations.
+# See https://golangci-lint.run/usage/linters
+linters:
+  # Disable all default enabled linters.
+  disable-all: true
+  # Custom enable linters we want to use.
+  enable:
+    - errcheck      # Errcheck is a program for checking for unchecked errors in go programs.
+    - errchkjson    # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted.
+    - funlen        # Tool for detection of long functions
+    - goconst       # Finds repeated strings that could be replaced by a constant
+    - gocritic      # Provides diagnostics that check for bugs, performance and style issues.
+    - gofmt         # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
+    - gosimple      # Linter for Go source code that specializes in simplifying code
+    - govet         # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
+    - misspell      # Finds commonly misspelled English words in comments
+    - nolintlint    # Reports ill-formed or insufficient nolint directives
+    - revive        # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint.
+    - staticcheck   # It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary.
+    - typecheck     # Like the front-end of a Go compiler, parses and type-checks Go code
+    - usestdlibvars # A linter that detect the possibility to use variables/constants from the Go standard library.
+    - whitespace    # Tool for detection of leading and trailing whitespace
+
+
+issues:
+  exclude-rules:
+    # helpers in tests often (rightfully) pass a *testing.T as their first argument
+    - path: _test\.go
+      text: "context.Context should be the first parameter of a function"
+      linters:
+        - revive
+    # Yes, they are, but it's okay in a test
+    - path: _test\.go
+      text: "exported func.*returns unexported type.*which can be annoying to use"
+      linters:
+        - revive
+    # https://github.com/go-critic/go-critic/issues/926
+    - linters:
+        - gocritic
+      text: "unnecessaryDefer:"
+
+
+# https://golangci-lint.run/usage/linters
+linters-settings:
+  # https://golangci-lint.run/usage/linters/#misspell
+  misspell:
+    locale: US
+    ignore-words:
+      - cancelled
+
+  # https://golangci-lint.run/usage/linters/#revive
+  # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+  revive:
+    ignore-generated-header: true
+    severity: error
+    rules:
+      - name: atomic
+      - name: line-length-limit
+        severity: error
+        arguments: [ 380 ]
+      - name: unhandled-error
+        severity: warning
+        disabled: true
+        arguments: []
+      - name: var-naming
+        severity: warning
+        disabled: true
+        arguments:
+          # AllowList
+          - [ "ID","URL","IP","HTTP","JSON","API","UID","Id","Api","Uid","Http","Json","Ip","Url" ]
+          # DenyList
+          - [ "VM" ]
+      - name: string-format
+        severity: warning
+        disabled: false
+        arguments:
+          - - 'core.WriteError[1].Message'
+            - '/^([^A-Z]|$)/'
+            - must not start with a capital letter
+          - - 'fmt.Errorf[0]'
+            - '/(^|[^\.!?])$/'
+            - must not end in punctuation
+          - - panic
+            - '/^[^\n]*$/'
+            - must not contain line breaks
+      - name: function-result-limit
+        severity: warning
+        disabled: false
+        arguments: [ 4 ]
+
+  # https://golangci-lint.run/usage/linters/#funlen
+  funlen:
+    # Checks the number of lines in a function.
+    # If lower than 0, disable the check.
+    # Default: 60
+    lines: 330
+    # Checks the number of statements in a function.
+    # If lower than 0, disable the check.
+    # Default: 40
+    statements: -1
+
+  # https://golangci-lint.run/usage/linters/#goconst
+  goconst:
+    # Minimal length of string constant.
+    # Default: 3
+    min-len: 4
+    # Minimum occurrences of constant string count to trigger issue.
+    # Default: 3
+    # For subsequent optimization, the value is reduced.
+    min-occurrences: 30
+    # Ignore test files.
+    # Default: false
+    ignore-tests: true
+    # Look for existing constants matching the values.
+    # Default: true
+    match-constant: false
+    # Search also for duplicated numbers.
+    # Default: false
+    numbers: true
+    # Minimum value, only works with goconst.numbers
+    # Default: 3
+    min: 5
+    # Maximum value, only works with goconst.numbers
+    # Default: 3
+    max: 20
+    # Ignore when constant is not used as function argument.
+    # Default: true
+    ignore-calls: false
+
+  # https://golangci-lint.run/usage/linters/#gocritic
+  gocritic:
+    disabled-checks:
+      - ifElseChain
+      - assignOp
+      - appendAssign
+      - singleCaseSwitch
+      - regexpMust
+      - typeSwitchVar
+      - elseif
+
+  # https://golangci-lint.run/usage/linters/#gosimple
+  gosimple:
+    # Select the Go version to target.
+    # Default: 1.13
+    # Deprecated: use the global `run.go` instead.
+    go: "1.15"
+    # Sxxxx checks in https://staticcheck.io/docs/configuration/options/#checks
+    # Default: ["*"]
+    checks: [
+      "all", "-S1000", "-S1001", "-S1002", "-S1008", "-S1009", "-S1016", "-S1023", "-S1025", "-S1029", "-S1034", "-S1040"
+    ]
+
+  # https://golangci-lint.run/usage/linters/#govet
+  govet:
+    # Report about shadowed variables.
+    # Default: false
+    check-shadowing: true
+    # Settings per analyzer.
+    settings:
+      # Analyzer name, run `go tool vet help` to see all analyzers.
+      printf:
+        # Comma-separated list of print function names to check (in addition to default, see `go tool vet help printf`).
+        # Default: []
+        funcs:
+          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
+          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
+          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
+          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
+        # shadow:
+        # Whether to be strict about shadowing; can be noisy.
+        # Default: false
+        # strict: false
+      unusedresult:
+        # Comma-separated list of functions whose results must be used
+        # (in addition to defaults context.WithCancel,context.WithDeadline,context.WithTimeout,context.WithValue,
+        # errors.New,fmt.Errorf,fmt.Sprint,fmt.Sprintf,sort.Reverse)
+        # Default []
+        funcs:
+          - pkg.MyFunc
+          - context.WithCancel
+        # Comma-separated list of names of methods of type func() string whose results must be used
+        # (in addition to default Error,String)
+        # Default []
+        stringmethods:
+          - MyMethod
+    # Enable all analyzers.
+    # Default: false
+    enable-all: true
+    # Disable analyzers by name.
+    # Run `go tool vet help` to see all analyzers.
+    # Default: []
+    disable:
+      - asmdecl
+      - assign
+      - atomic
+      - atomicalign
+      - bools
+      - buildtag
+      - cgocall
+      - composites
+      - copylocks
+      - deepequalerrors
+      - errorsas
+      - fieldalignment
+      - findcall
+      - framepointer
+      - httpresponse
+      - ifaceassert
+      - loopclosure
+      - lostcancel
+      - nilfunc
+      - nilness
+      - reflectvaluecompare
+      - shift
+      - shadow
+      - sigchanyzer
+      - sortslice
+      - stdmethods
+      - stringintconv
+      - structtag
+      - testinggoroutine
+      - tests
+      - unmarshal
+      - unreachable
+      - unsafeptr
+      - unusedwrite
+
+  # https://golangci-lint.run/usage/linters/#staticcheck
+  staticcheck:
+    # Select the Go version to target.
+    # Default: "1.13"
+    # Deprecated: use the global `run.go` instead.
+    go: "1.15"
+    # SAxxxx checks in https://staticcheck.io/docs/configuration/options/#checks
+    # Default: ["*"]
+    checks: [ "all","-SA1019","-SA4015","-SA1029","-SA1016","-SA9003","-SA4006","-SA6003" ]
+
+  # https://golangci-lint.run/usage/linters/#gofmt
+  gofmt:
+    # Simplify code: gofmt with `-s` option.
+    # Default: true
+    simplify: true
+    # Apply the rewrite rules to the source before reformatting.
+    # https://pkg.go.dev/cmd/gofmt
+    # Default: []
+    rewrite-rules: [ ]
+      # - pattern: 'interface{}'
+      #   replacement: 'any'
+      # - pattern: 'a[b:len(a)]'
+      #   replacement: 'a[b:]'

+ 82 - 0
vendor/github.com/gogf/gf/v2/.set_version.sh

@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+# Bulk version bumper: rewrites version.go and upgrades every GF module
+# dependency found in the repo's go.mod files to the given version.
+# Usage: ./.set_version.sh <directory> <vX.Y.Z>
+if [ $# -ne 2 ]; then
+    echo "Parameter exception, please execute in the format of $0 [directory] [version number]"
+    echo "PS:$0 ./ v2.4.0"
+    exit 1
+fi
+
+if [ ! -d "$1" ]; then
+    echo "Error: Directory does not exist"
+    exit 1
+fi
+
+if [[ "$2" != v* ]]; then
+    echo "Error: Version number must start with v"
+    exit 1
+fi
+
+# NOTE(review): the validated directory argument "$1" is never used below;
+# workdir is hard-coded to "." -- confirm whether $1 should be honored.
+workdir=.
+newVersion=$2
+echo "Prepare to replace the GF library version numbers in all go.mod files in the ${workdir} directory with ${newVersion}"
+
+# check find command support or not
+# (only the exit status matters; the captured output is unused)
+output=$(find "${workdir}" -name go.mod 2>&1)
+if [[ $? -ne 0 ]]; then
+    echo "Error: please use bash or zsh to run!"
+    exit 1
+fi
+
+# Regenerate version.go with the new VERSION constant.
+# NOTE(review): `[[ true ]]` is always true (non-empty string test).
+if [[ true ]]; then
+    echo "package gf" > version.go
+    echo "" >> version.go
+    echo "const (" >> version.go
+    echo -e "\t// VERSION is the current GoFrame version." >> version.go
+    echo -e "\tVERSION = \"${newVersion}\"" >> version.go
+    echo ")" >> version.go
+fi
+
+if [ -f "go.work" ]; then
+    mv go.work go.work.version.bak
+    echo "Back up the go.work file to avoid affecting the upgrade"
+fi
+
+for file in `find ${workdir} -name go.mod`; do
+    goModPath=$(dirname $file)
+    echo ""
+    echo "processing dir: $goModPath"
+    cd $goModPath
+    # cmd/gf needs local replace directives while tidying so it resolves the
+    # not-yet-published version against the working tree.
+    if [ $goModPath = "./cmd/gf" ]; then
+        mv go.work go.work.version.bak
+        go mod edit -replace github.com/gogf/gf/v2=../../
+        go mod edit -replace github.com/gogf/gf/contrib/drivers/clickhouse/v2=../../contrib/drivers/clickhouse
+        go mod edit -replace github.com/gogf/gf/contrib/drivers/mssql/v2=../../contrib/drivers/mssql
+        go mod edit -replace github.com/gogf/gf/contrib/drivers/mysql/v2=../../contrib/drivers/mysql
+        go mod edit -replace github.com/gogf/gf/contrib/drivers/oracle/v2=../../contrib/drivers/oracle
+        go mod edit -replace github.com/gogf/gf/contrib/drivers/pgsql/v2=../../contrib/drivers/pgsql
+        go mod edit -replace github.com/gogf/gf/contrib/drivers/sqlite/v2=../../contrib/drivers/sqlite
+    # else
+    #     cd -
+    #     continue 1
+    fi
+    go mod tidy
+    # Upgrading only GF related libraries, sometimes even if a version number is specified, it may not be possible to successfully upgrade. Please confirm before submitting the code
+    go list -f "{{if and (not .Indirect) (not .Main)}}{{.Path}}@${newVersion}{{end}}" -m all | grep "^github.com/gogf/gf"
+    go list -f "{{if and (not .Indirect) (not .Main)}}{{.Path}}@${newVersion}{{end}}" -m all | grep "^github.com/gogf/gf" | xargs -L1 go get -v 
+    go mod tidy
+    if [ $goModPath = "./cmd/gf" ]; then
+        go mod edit -dropreplace github.com/gogf/gf/v2
+        go mod edit -dropreplace github.com/gogf/gf/contrib/drivers/clickhouse/v2
+        go mod edit -dropreplace github.com/gogf/gf/contrib/drivers/mssql/v2
+        go mod edit -dropreplace github.com/gogf/gf/contrib/drivers/mysql/v2
+        go mod edit -dropreplace github.com/gogf/gf/contrib/drivers/oracle/v2
+        go mod edit -dropreplace github.com/gogf/gf/contrib/drivers/pgsql/v2
+        go mod edit -dropreplace github.com/gogf/gf/contrib/drivers/sqlite/v2
+        mv go.work.version.bak go.work
+    fi
+    cd -
+done
+
+if [ -f "go.work.version.bak" ]; then
+    mv go.work.version.bak go.work
+    echo "Restore the go.work file"
+fi

+ 26 - 0
vendor/github.com/gogf/gf/v2/Makefile

@@ -0,0 +1,26 @@
+SHELL := /bin/bash
+
+# tidy: run `go mod tidy` in every module (each directory containing a go.mod).
+.PHONY: tidy
+tidy:
+	$(eval files=$(shell find . -name go.mod))
+	@set -e; \
+	for file in ${files}; do \
+		goModPath=$$(dirname $$file); \
+		cd $$goModPath; \
+		go mod tidy; \
+		cd -; \
+	done
+
+# lint: run golangci-lint using the repo's .golangci.yml configuration.
+.PHONY: lint
+lint:
+	golangci-lint run
+
+# version: bump the GF version across all modules via .set_version.sh.
+# make version to=v2.4.0
+.PHONY: version
+version:
+	@set -e; \
+	newVersion=$(to); \
+	./.set_version.sh ./ $$newVersion; \
+	echo "make version to=$(to) done"
+

+ 103 - 0
vendor/github.com/gogf/gf/v2/README.MD

@@ -0,0 +1,103 @@
+# GoFrame
+
+<div align=center>
+<img src="https://goframe.org/statics/image/logo2.png?v=1" width="300"/>
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/gogf/gf/v2.svg)](https://pkg.go.dev/github.com/gogf/gf/v2)
+[![GoFrame CI](https://github.com/gogf/gf/actions/workflows/ci-main.yml/badge.svg)](https://github.com/gogf/gf/actions/workflows/ci-main.yml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/gogf/gf/v2)](https://goreportcard.com/report/github.com/gogf/gf/v2)
+[![Code Coverage](https://codecov.io/gh/gogf/gf/branch/master/graph/badge.svg)](https://codecov.io/gh/gogf/gf)
+[![Production Ready](https://img.shields.io/badge/production-ready-blue.svg)](https://github.com/gogf/gf)
+[![License](https://img.shields.io/github/license/gogf/gf.svg?style=flat)](https://github.com/gogf/gf)
+
+[![Release](https://img.shields.io/github/v/release/gogf/gf)](https://github.com/gogf/gf/releases)
+[![GitHub pull requests](https://img.shields.io/github/issues-pr/gogf/gf)](https://github.com/gogf/gf/pulls)
+[![GitHub closed pull requests](https://img.shields.io/github/issues-pr-closed/gogf/gf)](https://github.com/gogf/gf/pulls?q=is%3Apr+is%3Aclosed)
+[![GitHub issues](https://img.shields.io/github/issues/gogf/gf)](https://github.com/gogf/gf/issues)
+[![GitHub closed issues](https://img.shields.io/github/issues-closed/gogf/gf)](https://github.com/gogf/gf/issues?q=is%3Aissue+is%3Aclosed)
+![Stars](https://img.shields.io/github/stars/gogf/gf)
+![Forks](https://img.shields.io/github/forks/gogf/gf)
+
+</div>
+
+`GoFrame` is a modular, powerful, high-performance and enterprise-class application development framework of Golang.
+
+# Features
+
+- modular, loosely coupled design
+- rich components, out-of-the-box
+- automatic codes generating for efficiency
+- simple and easy to use, detailed documentation
+- interface designed components, with high scalability
+- fully supported tracing and error stack feature
+- specially developed and powerful ORM component
+- robust engineering design specifications
+- convenient development CLI tool provided
+- OpenTelemetry observability features support
+- OpenAPIV3 documentation generating, automatically
+- much, much more...ready to explore?
+
+# Installation
+
+Enter your repo. directory and execute following command:
+
+## primary module
+
+```bash
+go get -u -v github.com/gogf/gf/v2
+```
+
+## cli tool
+
+```bash
+go install github.com/gogf/gf/cmd/gf/v2@latest
+```
+
+# Limitation
+
+```
+golang version >= 1.18
+```
+
+# Documentation
+
+- Chinese Official Site(中文官网): [https://goframe.org](https://goframe.org/display/gf)
+- GoDoc API: [https://pkg.go.dev/github.com/gogf/gf/v2](https://pkg.go.dev/github.com/gogf/gf/v2)
+
+# License
+
+`GoFrame` is licensed under the [MIT License](LICENSE), 100% free and open-source, forever.
+
+# Part Of Users
+
+- [Tencent](https://www.tencent.com/)
+- [ZTE](https://www.zte.com.cn/china/)
+- [Ant Financial Services](https://www.antfin.com/)
+- [VIVO](https://www.vivo.com/)
+- [MedLinker](https://www.medlinker.com/)
+- [KuCoin](https://www.kucoin.io/)
+- [LeYouJia](https://www.leyoujia.com/)
+- [IGG](https://igg.com)
+- [37](https://www.37.com)
+- [XiMaLaYa](https://www.ximalaya.com)
+- [ZYBang](https://www.zybang.com/)
+
+> We list part of the users here, if your company or products are using `GoFrame`, please let us know [here](https://goframe.org/pages/viewpage.action?pageId=1114415).
+
+# Contributors
+
+This project exists thanks to all the people who contribute. [[Contributors](https://github.com/gogf/gf/graphs/contributors)].
+<a href="https://github.com/gogf/gf/graphs/contributors"><img src="https://contributors-img.web.app/image?repo=gogf/gf" /></a>
+
+# Donators
+
+If you love `GoFrame`, why not [buy developer a cup of coffee](https://goframe.org/pages/viewpage.action?pageId=1115633)?
+
+# Sponsors
+
+We appreciate any kind of sponsorship for `GoFrame` development. If you've got something interesting, please contact WeChat `389961817` / Email `john@goframe.org`.
+
+# Thanks
+
+<a href="https://www.jetbrains.com/?from=GoFrame"><img src="https://goframe.org/download/thumbnails/1114119/jetbrains.png" height="120" alt="JetBrains"/></a>
+<a href="https://www.atlassian.com/?from=GoFrame"><img src="https://goframe.org/download/attachments/1114119/atlassian.jpg" height="120" alt="Atlassian"/></a>

+ 203 - 0
vendor/github.com/gogf/gf/v2/crypto/gaes/gaes.go

@@ -0,0 +1,203 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+// Package gaes provides useful API for AES encryption/decryption algorithms.
+package gaes
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"fmt"
+
+	"github.com/gogf/gf/v2/errors/gcode"
+	"github.com/gogf/gf/v2/errors/gerror"
+)
+
+const (
+	// IVDefaultValue is the default value for IV.
+	IVDefaultValue = "I Love Go Frame!"
+)
+
// Encrypt is alias of EncryptCBC.
// It encrypts `plainText` with AES in CBC mode using PKCS#7 padding; when no
// `iv` is supplied, the fixed package-level IVDefaultValue is used.
func Encrypt(plainText []byte, key []byte, iv ...[]byte) ([]byte, error) {
	return EncryptCBC(plainText, key, iv...)
}
+
// Decrypt is alias of DecryptCBC.
// It decrypts `cipherText` with AES in CBC mode and strips PKCS#7 padding;
// when no `iv` is supplied, the fixed package-level IVDefaultValue is used.
func Decrypt(cipherText []byte, key []byte, iv ...[]byte) ([]byte, error) {
	return DecryptCBC(cipherText, key, iv...)
}
+
// EncryptCBC encrypts `plainText` using CBC mode with PKCS#7 padding.
// Note that the key must be 16, 24 or 32 bytes long, selecting
// AES-128/192/256 respectively (enforced by aes.NewCipher).
// The parameter `iv` initialization vector is optional; when omitted the
// fixed package-level IVDefaultValue is used.
func EncryptCBC(plainText []byte, key []byte, iv ...[]byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		// NOTE(review): the raw key is interpolated into the error message,
		// which can leak secret material into logs — confirm this is
		// acceptable for the deployment.
		err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `aes.NewCipher failed for key "%s"`, key)
		return nil, err
	}
	blockSize := block.BlockSize()
	// Pad the plaintext to a whole number of AES blocks.
	plainText = PKCS7Padding(plainText, blockSize)
	ivValue := ([]byte)(nil)
	if len(iv) > 0 {
		ivValue = iv[0]
	} else {
		ivValue = []byte(IVDefaultValue)
	}
	blockMode := cipher.NewCBCEncrypter(block, ivValue)
	cipherText := make([]byte, len(plainText))
	blockMode.CryptBlocks(cipherText, plainText)

	return cipherText, nil
}
+
// DecryptCBC decrypts `cipherText` using CBC mode and removes the PKCS#7
// padding from the result.
// Note that the key must be 16, 24 or 32 bytes long, selecting
// AES-128/192/256 respectively (enforced by aes.NewCipher).
// The parameter `iv` initialization vector is optional; when omitted the
// fixed package-level IVDefaultValue is used.
func DecryptCBC(cipherText []byte, key []byte, iv ...[]byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		// NOTE(review): the raw key is interpolated into the error message,
		// which can leak secret material into logs — confirm this is
		// acceptable for the deployment.
		err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `aes.NewCipher failed for key "%s"`, key)
		return nil, err
	}
	blockSize := block.BlockSize()
	// CBC input must contain at least one block and be block-aligned;
	// reject anything else before touching the cipher.
	if len(cipherText) < blockSize {
		return nil, gerror.NewCode(gcode.CodeInvalidParameter, "cipherText too short")
	}
	ivValue := ([]byte)(nil)
	if len(iv) > 0 {
		ivValue = iv[0]
	} else {
		ivValue = []byte(IVDefaultValue)
	}
	if len(cipherText)%blockSize != 0 {
		return nil, gerror.NewCode(gcode.CodeInvalidParameter, "cipherText is not a multiple of the block size")
	}
	blockModel := cipher.NewCBCDecrypter(block, ivValue)
	plainText := make([]byte, len(cipherText))
	blockModel.CryptBlocks(plainText, cipherText)
	// Validate and strip the PKCS#7 padding; a malformed pad yields an error.
	plainText, e := PKCS7UnPadding(plainText, blockSize)
	if e != nil {
		return nil, e
	}
	return plainText, nil
}
+
+// PKCS5Padding applies PKCS#5 padding to the source byte slice to match the given block size.
+//
+// If the block size is not provided, it defaults to 8.
+func PKCS5Padding(src []byte, blockSize ...int) []byte {
+	blockSizeTemp := 8
+	if len(blockSize) > 0 {
+		blockSizeTemp = blockSize[0]
+	}
+	return PKCS7Padding(src, blockSizeTemp)
+}
+
+// PKCS5UnPadding removes PKCS#5 padding from the source byte slice based on the given block size.
+//
+// If the block size is not provided, it defaults to 8.
+func PKCS5UnPadding(src []byte, blockSize ...int) ([]byte, error) {
+	blockSizeTemp := 8
+	if len(blockSize) > 0 {
+		blockSizeTemp = blockSize[0]
+	}
+	return PKCS7UnPadding(src, blockSizeTemp)
+}
+
// PKCS7Padding pads `src` up to a multiple of `blockSize` following PKCS#7:
// n bytes, each holding the value n, are appended (1 <= n <= blockSize).
// An already block-aligned input receives one full block of padding.
func PKCS7Padding(src []byte, blockSize int) []byte {
	padCount := blockSize - len(src)%blockSize
	return append(src, bytes.Repeat([]byte{byte(padCount)}, padCount)...)
}
+
+// PKCS7UnPadding removes PKCS#7 padding from the source byte slice based on the given block size.
+func PKCS7UnPadding(src []byte, blockSize int) ([]byte, error) {
+	length := len(src)
+	if blockSize <= 0 {
+		return nil, gerror.NewCode(gcode.CodeInvalidParameter, fmt.Sprintf("invalid blockSize: %d", blockSize))
+	}
+
+	if length%blockSize != 0 || length == 0 {
+		return nil, gerror.NewCode(gcode.CodeInvalidParameter, "invalid data len")
+	}
+
+	unpadding := int(src[length-1])
+	if unpadding > blockSize || unpadding == 0 {
+		return nil, gerror.NewCode(gcode.CodeInvalidParameter, "invalid unpadding")
+	}
+
+	padding := src[length-unpadding:]
+	for i := 0; i < unpadding; i++ {
+		if padding[i] != byte(unpadding) {
+			return nil, gerror.NewCode(gcode.CodeInvalidParameter, "invalid padding")
+		}
+	}
+
+	return src[:(length - unpadding)], nil
+}
+
+// EncryptCFB encrypts `plainText` using CFB mode.
+// Note that the key must be 16/24/32 bit length.
+// The parameter `iv` initialization vector is unnecessary.
+func EncryptCFB(plainText []byte, key []byte, padding *int, iv ...[]byte) ([]byte, error) {
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `aes.NewCipher failed for key "%s"`, key)
+		return nil, err
+	}
+	blockSize := block.BlockSize()
+	plainText, *padding = ZeroPadding(plainText, blockSize)
+	ivValue := ([]byte)(nil)
+	if len(iv) > 0 {
+		ivValue = iv[0]
+	} else {
+		ivValue = []byte(IVDefaultValue)
+	}
+	stream := cipher.NewCFBEncrypter(block, ivValue)
+	cipherText := make([]byte, len(plainText))
+	stream.XORKeyStream(cipherText, plainText)
+	return cipherText, nil
+}
+
// DecryptCFB decrypts `cipherText` using CFB mode.
// Note that the key must be 16, 24 or 32 bytes long (AES-128/192/256,
// enforced by aes.NewCipher).
// The parameter `unPadding` is the zero-padding count previously produced by
// EncryptCFB; it is stripped from the tail of the decrypted data.
// The parameter `iv` initialization vector is optional; when omitted the
// fixed package-level IVDefaultValue is used.
func DecryptCFB(cipherText []byte, key []byte, unPadding int, iv ...[]byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		// NOTE(review): the raw key is interpolated into the error message,
		// which can leak secret material into logs — confirm this is
		// acceptable for the deployment.
		err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `aes.NewCipher failed for key "%s"`, key)
		return nil, err
	}
	if len(cipherText) < aes.BlockSize {
		return nil, gerror.NewCode(gcode.CodeInvalidParameter, "cipherText too short")
	}
	ivValue := ([]byte)(nil)
	if len(iv) > 0 {
		ivValue = iv[0]
	} else {
		ivValue = []byte(IVDefaultValue)
	}
	stream := cipher.NewCFBDecrypter(block, ivValue)
	plainText := make([]byte, len(cipherText))
	stream.XORKeyStream(plainText, cipherText)
	// Strip the zero padding appended by EncryptCFB. `unPadding` is not
	// validated here — assumes callers pass the count produced by
	// EncryptCFB; TODO confirm.
	plainText = ZeroUnPadding(plainText, unPadding)
	return plainText, nil
}
+
// ZeroPadding appends zero bytes to `cipherText` so its length becomes a
// multiple of `blockSize`, returning the padded slice together with the
// number of bytes appended. An already block-aligned input still receives a
// full block of zeros.
func ZeroPadding(cipherText []byte, blockSize int) ([]byte, int) {
	padCount := blockSize - len(cipherText)%blockSize
	padded := append(cipherText, bytes.Repeat([]byte{0}, padCount)...)
	return padded, padCount
}
+
// ZeroUnPadding removes the trailing `unPadding` bytes that ZeroPadding
// appended, returning the original payload.
// The count is clamped to [0, len(plaintext)] so that an inconsistent or
// corrupted value can no longer trigger a slice-bounds panic: a
// non-positive count returns the input unchanged and an oversized count
// returns an empty slice.
func ZeroUnPadding(plaintext []byte, unPadding int) []byte {
	length := len(plaintext)
	if unPadding <= 0 {
		return plaintext
	}
	if unPadding > length {
		return plaintext[:0]
	}
	return plaintext[:length-unPadding]
}

+ 97 - 0
vendor/github.com/gogf/gf/v2/crypto/gmd5/gmd5.go

@@ -0,0 +1,97 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+// Package gmd5 provides useful API for MD5 encryption algorithms.
+package gmd5
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/gogf/gf/v2/errors/gerror"
+	"github.com/gogf/gf/v2/util/gconv"
+)
+
// Encrypt encrypts any type of variable using MD5 algorithms.
// It uses gconv package to convert `data` to its bytes type, then returns
// the lowercase hexadecimal digest string.
func Encrypt(data interface{}) (encrypt string, err error) {
	return EncryptBytes(gconv.Bytes(data))
}
+
+// MustEncrypt encrypts any type of variable using MD5 algorithms.
+// It uses gconv package to convert `v` to its bytes type.
+// It panics if any error occurs.
+func MustEncrypt(data interface{}) string {
+	result, err := Encrypt(data)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// EncryptBytes encrypts `data` using MD5 algorithms.
+func EncryptBytes(data []byte) (encrypt string, err error) {
+	h := md5.New()
+	if _, err = h.Write(data); err != nil {
+		err = gerror.Wrap(err, `hash.Write failed`)
+		return "", err
+	}
+	return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
+// MustEncryptBytes encrypts `data` using MD5 algorithms.
+// It panics if any error occurs.
+func MustEncryptBytes(data []byte) string {
+	result, err := EncryptBytes(data)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
// EncryptString encrypts string `data` using MD5 algorithms, returning the
// lowercase hexadecimal digest string.
func EncryptString(data string) (encrypt string, err error) {
	return EncryptBytes([]byte(data))
}
+
+// MustEncryptString encrypts string `data` using MD5 algorithms.
+// It panics if any error occurs.
+func MustEncryptString(data string) string {
+	result, err := EncryptString(data)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// EncryptFile encrypts file content of `path` using MD5 algorithms.
+func EncryptFile(path string) (encrypt string, err error) {
+	f, err := os.Open(path)
+	if err != nil {
+		err = gerror.Wrapf(err, `os.Open failed for name "%s"`, path)
+		return "", err
+	}
+	defer f.Close()
+	h := md5.New()
+	_, err = io.Copy(h, f)
+	if err != nil {
+		err = gerror.Wrap(err, `io.Copy failed`)
+		return "", err
+	}
+	return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
+// MustEncryptFile encrypts file content of `path` using MD5 algorithms.
+// It panics if any error occurs.
+func MustEncryptFile(path string) string {
+	result, err := EncryptFile(path)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}

+ 765 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb.go

@@ -0,0 +1,765 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+// Package gdb provides ORM features for popular relationship databases.
+package gdb
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"time"
+
+	"github.com/gogf/gf/v2/container/garray"
+	"github.com/gogf/gf/v2/container/gmap"
+	"github.com/gogf/gf/v2/container/gtype"
+	"github.com/gogf/gf/v2/container/gvar"
+	"github.com/gogf/gf/v2/errors/gcode"
+	"github.com/gogf/gf/v2/errors/gerror"
+	"github.com/gogf/gf/v2/os/gcache"
+	"github.com/gogf/gf/v2/os/gcmd"
+	"github.com/gogf/gf/v2/os/gctx"
+	"github.com/gogf/gf/v2/os/glog"
+	"github.com/gogf/gf/v2/util/grand"
+	"github.com/gogf/gf/v2/util/gutil"
+)
+
+// DB defines the interfaces for ORM operations.
+type DB interface {
+	// ===========================================================================
+	// Model creation.
+	// ===========================================================================
+
+	// Model creates and returns a new ORM model from given schema.
+	// The parameter `table` can be more than one table names, and also alias name, like:
+	// 1. Model names:
+	//    Model("user")
+	//    Model("user u")
+	//    Model("user, user_detail")
+	//    Model("user u, user_detail ud")
+	// 2. Model name with alias: Model("user", "u")
+	// Also see Core.Model.
+	Model(tableNameOrStruct ...interface{}) *Model
+
+	// Raw creates and returns a model based on a raw sql not a table.
+	Raw(rawSql string, args ...interface{}) *Model
+
+	// Schema creates and returns a schema.
+	// Also see Core.Schema.
+	Schema(schema string) *Schema
+
+	// With creates and returns an ORM model based on metadata of given object.
+	// Also see Core.With.
+	With(objects ...interface{}) *Model
+
+	// Open creates a raw connection object for database with given node configuration.
+	// Note that it is not recommended using the function manually.
+	// Also see DriverMysql.Open.
+	Open(config *ConfigNode) (*sql.DB, error)
+
+	// Ctx is a chaining function, which creates and returns a new DB that is a shallow copy
+	// of current DB object and with given context in it.
+	// Also see Core.Ctx.
+	Ctx(ctx context.Context) DB
+
+	// Close closes the database and prevents new queries from starting.
+	// Close then waits for all queries that have started processing on the server
+	// to finish.
+	//
+	// It is rare to Close a DB, as the DB handle is meant to be
+	// long-lived and shared between many goroutines.
+	Close(ctx context.Context) error
+
+	// ===========================================================================
+	// Query APIs.
+	// ===========================================================================
+
+	Query(ctx context.Context, sql string, args ...interface{}) (Result, error)    // See Core.Query.
+	Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) // See Core.Exec.
+	Prepare(ctx context.Context, sql string, execOnMaster ...bool) (*Stmt, error)  // See Core.Prepare.
+
+	// ===========================================================================
+	// Common APIs for CURD.
+	// ===========================================================================
+
+	Insert(ctx context.Context, table string, data interface{}, batch ...int) (sql.Result, error)                               // See Core.Insert.
+	InsertIgnore(ctx context.Context, table string, data interface{}, batch ...int) (sql.Result, error)                         // See Core.InsertIgnore.
+	InsertAndGetId(ctx context.Context, table string, data interface{}, batch ...int) (int64, error)                            // See Core.InsertAndGetId.
+	Replace(ctx context.Context, table string, data interface{}, batch ...int) (sql.Result, error)                              // See Core.Replace.
+	Save(ctx context.Context, table string, data interface{}, batch ...int) (sql.Result, error)                                 // See Core.Save.
+	Update(ctx context.Context, table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error) // See Core.Update.
+	Delete(ctx context.Context, table string, condition interface{}, args ...interface{}) (sql.Result, error)                   // See Core.Delete.
+
+	// ===========================================================================
+	// Internal APIs for CURD, which can be overwritten by custom CURD implements.
+	// ===========================================================================
+
+	DoSelect(ctx context.Context, link Link, sql string, args ...interface{}) (result Result, err error)                                           // See Core.DoSelect.
+	DoInsert(ctx context.Context, link Link, table string, data List, option DoInsertOption) (result sql.Result, err error)                        // See Core.DoInsert.
+	DoUpdate(ctx context.Context, link Link, table string, data interface{}, condition string, args ...interface{}) (result sql.Result, err error) // See Core.DoUpdate.
+	DoDelete(ctx context.Context, link Link, table string, condition string, args ...interface{}) (result sql.Result, err error)                   // See Core.DoDelete.
+
+	DoQuery(ctx context.Context, link Link, sql string, args ...interface{}) (result Result, err error)    // See Core.DoQuery.
+	DoExec(ctx context.Context, link Link, sql string, args ...interface{}) (result sql.Result, err error) // See Core.DoExec.
+
+	DoFilter(ctx context.Context, link Link, sql string, args []interface{}) (newSql string, newArgs []interface{}, err error) // See Core.DoFilter.
+	DoCommit(ctx context.Context, in DoCommitInput) (out DoCommitOutput, err error)                                            // See Core.DoCommit.
+
+	DoPrepare(ctx context.Context, link Link, sql string) (*Stmt, error) // See Core.DoPrepare.
+
+	// ===========================================================================
+	// Query APIs for convenience purpose.
+	// ===========================================================================
+
+	GetAll(ctx context.Context, sql string, args ...interface{}) (Result, error)                // See Core.GetAll.
+	GetOne(ctx context.Context, sql string, args ...interface{}) (Record, error)                // See Core.GetOne.
+	GetValue(ctx context.Context, sql string, args ...interface{}) (Value, error)               // See Core.GetValue.
+	GetArray(ctx context.Context, sql string, args ...interface{}) ([]Value, error)             // See Core.GetArray.
+	GetCount(ctx context.Context, sql string, args ...interface{}) (int, error)                 // See Core.GetCount.
+	GetScan(ctx context.Context, objPointer interface{}, sql string, args ...interface{}) error // See Core.GetScan.
+	Union(unions ...*Model) *Model                                                              // See Core.Union.
+	UnionAll(unions ...*Model) *Model                                                           // See Core.UnionAll.
+
+	// ===========================================================================
+	// Master/Slave specification support.
+	// ===========================================================================
+
+	Master(schema ...string) (*sql.DB, error) // See Core.Master.
+	Slave(schema ...string) (*sql.DB, error)  // See Core.Slave.
+
+	// ===========================================================================
+	// Ping-Pong.
+	// ===========================================================================
+
+	PingMaster() error // See Core.PingMaster.
+	PingSlave() error  // See Core.PingSlave.
+
+	// ===========================================================================
+	// Transaction.
+	// ===========================================================================
+
+	Begin(ctx context.Context) (TX, error)                                           // See Core.Begin.
+	Transaction(ctx context.Context, f func(ctx context.Context, tx TX) error) error // See Core.Transaction.
+
+	// ===========================================================================
+	// Configuration methods.
+	// ===========================================================================
+
+	GetCache() *gcache.Cache            // See Core.GetCache.
+	SetDebug(debug bool)                // See Core.SetDebug.
+	GetDebug() bool                     // See Core.GetDebug.
+	GetSchema() string                  // See Core.GetSchema.
+	GetPrefix() string                  // See Core.GetPrefix.
+	GetGroup() string                   // See Core.GetGroup.
+	SetDryRun(enabled bool)             // See Core.SetDryRun.
+	GetDryRun() bool                    // See Core.GetDryRun.
+	SetLogger(logger glog.ILogger)      // See Core.SetLogger.
+	GetLogger() glog.ILogger            // See Core.GetLogger.
+	GetConfig() *ConfigNode             // See Core.GetConfig.
+	SetMaxIdleConnCount(n int)          // See Core.SetMaxIdleConnCount.
+	SetMaxOpenConnCount(n int)          // See Core.SetMaxOpenConnCount.
+	SetMaxConnLifeTime(d time.Duration) // See Core.SetMaxConnLifeTime.
+
+	// ===========================================================================
+	// Utility methods.
+	// ===========================================================================
+
+	GetCtx() context.Context                                                                                 // See Core.GetCtx.
+	GetCore() *Core                                                                                          // See Core.GetCore
+	GetChars() (charLeft string, charRight string)                                                           // See Core.GetChars.
+	Tables(ctx context.Context, schema ...string) (tables []string, err error)                               // See Core.Tables. The driver must implement this function.
+	TableFields(ctx context.Context, table string, schema ...string) (map[string]*TableField, error)         // See Core.TableFields. The driver must implement this function.
+	ConvertValueForField(ctx context.Context, fieldType string, fieldValue interface{}) (interface{}, error) // See Core.ConvertValueForField
+	ConvertValueForLocal(ctx context.Context, fieldType string, fieldValue interface{}) (interface{}, error) // See Core.ConvertValueForLocal
+	CheckLocalTypeForField(ctx context.Context, fieldType string, fieldValue interface{}) (LocalType, error) // See Core.CheckLocalTypeForField
+}
+
+// TX defines the interfaces for ORM transaction operations.
+type TX interface {
+	Link
+
+	Ctx(ctx context.Context) TX
+	Raw(rawSql string, args ...interface{}) *Model
+	Model(tableNameQueryOrStruct ...interface{}) *Model
+	With(object interface{}) *Model
+
+	// ===========================================================================
+	// Nested transaction if necessary.
+	// ===========================================================================
+
+	Begin() error
+	Commit() error
+	Rollback() error
+	Transaction(ctx context.Context, f func(ctx context.Context, tx TX) error) (err error)
+
+	// ===========================================================================
+	// Core method.
+	// ===========================================================================
+
+	Query(sql string, args ...interface{}) (result Result, err error)
+	Exec(sql string, args ...interface{}) (sql.Result, error)
+	Prepare(sql string) (*Stmt, error)
+
+	// ===========================================================================
+	// Query.
+	// ===========================================================================
+
+	GetAll(sql string, args ...interface{}) (Result, error)
+	GetOne(sql string, args ...interface{}) (Record, error)
+	GetStruct(obj interface{}, sql string, args ...interface{}) error
+	GetStructs(objPointerSlice interface{}, sql string, args ...interface{}) error
+	GetScan(pointer interface{}, sql string, args ...interface{}) error
+	GetValue(sql string, args ...interface{}) (Value, error)
+	GetCount(sql string, args ...interface{}) (int64, error)
+
+	// ===========================================================================
+	// CURD.
+	// ===========================================================================
+
+	Insert(table string, data interface{}, batch ...int) (sql.Result, error)
+	InsertIgnore(table string, data interface{}, batch ...int) (sql.Result, error)
+	InsertAndGetId(table string, data interface{}, batch ...int) (int64, error)
+	Replace(table string, data interface{}, batch ...int) (sql.Result, error)
+	Save(table string, data interface{}, batch ...int) (sql.Result, error)
+	Update(table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error)
+	Delete(table string, condition interface{}, args ...interface{}) (sql.Result, error)
+
+	// ===========================================================================
+	// Utility methods.
+	// ===========================================================================
+
+	GetCtx() context.Context
+	GetDB() DB
+	GetSqlTX() *sql.Tx
+	IsClosed() bool
+
+	// ===========================================================================
+	// Save point feature.
+	// ===========================================================================
+
+	SavePoint(point string) error
+	RollbackTo(point string) error
+}
+
// Core is the base struct for database management.
// Drivers receive a *Core via Driver.New; its accessors are exposed through
// the DB interface (GetCore, GetCache, GetConfig, ...).
type Core struct {
	db            DB              // DB interface object.
	ctx           context.Context // Context for chaining operation only. Do not set a default value in Core initialization.
	group         string          // Configuration group name.
	schema        string          // Custom schema for this object.
	debug         *gtype.Bool     // Enable debug mode for the database, which can be changed in runtime.
	cache         *gcache.Cache   // Cache manager, SQL result cache only.
	links         *gmap.StrAnyMap // links caches all created links by node.
	logger        glog.ILogger    // Logger for logging functionality.
	config        *ConfigNode     // Current config node.
	dynamicConfig dynamicConfig   // Dynamic configurations, which can be changed in runtime.
}
+
// dynamicConfig holds connection-pool settings that may be changed at
// runtime via SetMaxIdleConnCount/SetMaxOpenConnCount/SetMaxConnLifeTime.
type dynamicConfig struct {
	MaxIdleConnCount int           // Max idle connections kept in the pool.
	MaxOpenConnCount int           // Max open connections; 0 means no limit.
	MaxConnLifeTime  time.Duration // Max lifetime of a single pooled connection.
}
+
+// DoCommitInput is the input parameters for function DoCommit.
+type DoCommitInput struct {
+	Db            *sql.DB
+	Tx            *sql.Tx
+	Stmt          *sql.Stmt
+	Link          Link
+	Sql           string
+	Args          []interface{}
+	Type          string
+	IsTransaction bool
+}
+
+// DoCommitOutput is the output parameters for function DoCommit.
+type DoCommitOutput struct {
+	Result    sql.Result  // Result is the result of exec statement.
+	Records   []Record    // Records is the result of query statement.
+	Stmt      *Stmt       // Stmt is the Statement object result for Prepare.
+	Tx        TX          // Tx is the transaction object result for Begin.
+	RawResult interface{} // RawResult is the underlying result, which might be sql.Result/*sql.Rows/*sql.Row.
+}
+
+// Driver is the interface for integrating sql drivers into package gdb.
+type Driver interface {
+	// New creates and returns a database object for specified database server.
+	New(core *Core, node *ConfigNode) (DB, error)
+}
+
+// Link is a common database function wrapper interface.
+// Note that, any operation using `Link` will have no SQL logging.
+type Link interface {
+	QueryContext(ctx context.Context, sql string, args ...interface{}) (*sql.Rows, error)
+	ExecContext(ctx context.Context, sql string, args ...interface{}) (sql.Result, error)
+	PrepareContext(ctx context.Context, sql string) (*sql.Stmt, error)
+	IsOnMaster() bool
+	IsTransaction() bool
+}
+
+// Sql is the sql recording struct.
+type Sql struct {
+	Sql           string        // SQL string(may contain reserved char '?').
+	Type          string        // SQL operation type.
+	Args          []interface{} // Arguments for this sql.
+	Format        string        // Formatted sql which contains arguments in the sql.
+	Error         error         // Execution result.
+	Start         int64         // Start execution timestamp in milliseconds.
+	End           int64         // End execution timestamp in milliseconds.
+	Group         string        // Group is the group name of the configuration that the sql is executed from.
+	Schema        string        // Schema is the schema name of the configuration that the sql is executed from.
+	IsTransaction bool          // IsTransaction marks whether this sql is executed in transaction.
+	RowsAffected  int64         // RowsAffected marks retrieved or affected number with current sql statement.
+}
+
+// DoInsertOption is the input struct for function DoInsert.
+type DoInsertOption struct {
+	OnDuplicateStr string                 // Custom string for `on duplicated` statement.
+	OnDuplicateMap map[string]interface{} // Custom key-value map from `OnDuplicateEx` function for `on duplicated` statement.
+	InsertOption   InsertOption           // Insert operation in constant value.
+	BatchCount     int                    // Batch count for batch inserting.
+}
+
+// TableField is the struct for table field.
+type TableField struct {
+	Index   int         // For ordering purpose as map is unordered.
+	Name    string      // Field name.
+	Type    string      // Field type. Eg: 'int(10) unsigned', 'varchar(64)'.
+	Null    bool        // Field can be null or not.
+	Key     string      // The index information(empty if it's not an index). Eg: PRI, MUL.
+	Default interface{} // Default value for the field.
+	Extra   string      // Extra information. Eg: auto_increment.
+	Comment string      // Field comment.
+}
+
+// Counter  is the type for update count.
+type Counter struct {
+	Field string
+	Value float64
+}
+
+type (
+	Raw    string                   // Raw is a raw sql that will not be treated as argument but as a direct sql part.
+	Value  = *gvar.Var              // Value is the field value type.
+	Record map[string]Value         // Record is the row record of the table.
+	Result []Record                 // Result is the row record array.
+	Map    = map[string]interface{} // Map is alias of map[string]interface{}, which is the most common usage map type.
+	List   = []Map                  // List is type of map array.
+)
+
+type CatchSQLManager struct {
+	SQLArray *garray.StrArray
+	DoCommit bool // DoCommit marks it will be committed to underlying driver or not.
+}
+
+const (
+	defaultModelSafe                      = false
+	defaultCharset                        = `utf8`
+	defaultProtocol                       = `tcp`
+	unionTypeNormal                       = 0
+	unionTypeAll                          = 1
+	defaultMaxIdleConnCount               = 10               // Max idle connection count in pool.
+	defaultMaxOpenConnCount               = 0                // Max open connection count in pool. Default is no limit.
+	defaultMaxConnLifeTime                = 30 * time.Second // Max lifetime for per connection in pool in seconds.
+	ctxTimeoutTypeExec                    = 0
+	ctxTimeoutTypeQuery                   = 1
+	ctxTimeoutTypePrepare                 = 2
+	cachePrefixTableFields                = `TableFields:`
+	cachePrefixSelectCache                = `SelectCache:`
+	commandEnvKeyForDryRun                = "gf.gdb.dryrun"
+	modelForDaoSuffix                     = `ForDao`
+	dbRoleSlave                           = `slave`
+	ctxKeyForDB               gctx.StrKey = `CtxKeyForDB`
+	ctxKeyCatchSQL            gctx.StrKey = `CtxKeyCatchSQL`
+	ctxKeyInternalProducedSQL gctx.StrKey = `CtxKeyInternalProducedSQL`
+
+	// type:[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+	linkPattern = `(\w+):([\w\-\$]*):(.*?)@(\w+?)\((.+?)\)/{0,1}([^\?]*)\?{0,1}(.*)`
+)
+
+type queryType int
+
+const (
+	queryTypeNormal queryType = 0
+	queryTypeCount  queryType = 1
+	queryTypeValue  queryType = 2
+)
+
+type joinOperator string
+
+const (
+	joinOperatorLeft  joinOperator = "LEFT"
+	joinOperatorRight joinOperator = "RIGHT"
+	joinOperatorInner joinOperator = "INNER"
+)
+
+type InsertOption int
+
+const (
+	InsertOptionDefault        InsertOption = 0
+	InsertOptionReplace        InsertOption = 1
+	InsertOptionSave           InsertOption = 2
+	InsertOptionIgnore         InsertOption = 3
+	InsertOperationInsert                   = "INSERT"
+	InsertOperationReplace                  = "REPLACE"
+	InsertOperationIgnore                   = "INSERT IGNORE"
+	InsertOnDuplicateKeyUpdate              = "ON DUPLICATE KEY UPDATE"
+)
+
+const (
+	SqlTypeBegin               = "DB.Begin"
+	SqlTypeTXCommit            = "TX.Commit"
+	SqlTypeTXRollback          = "TX.Rollback"
+	SqlTypeExecContext         = "DB.ExecContext"
+	SqlTypeQueryContext        = "DB.QueryContext"
+	SqlTypePrepareContext      = "DB.PrepareContext"
+	SqlTypeStmtExecContext     = "DB.Statement.ExecContext"
+	SqlTypeStmtQueryContext    = "DB.Statement.QueryContext"
+	SqlTypeStmtQueryRowContext = "DB.Statement.QueryRowContext"
+)
+
+type LocalType string
+
+const (
+	LocalTypeString      LocalType = "string"
+	LocalTypeDate        LocalType = "date"
+	LocalTypeDatetime    LocalType = "datetime"
+	LocalTypeInt         LocalType = "int"
+	LocalTypeUint        LocalType = "uint"
+	LocalTypeInt64       LocalType = "int64"
+	LocalTypeUint64      LocalType = "uint64"
+	LocalTypeIntSlice    LocalType = "[]int"
+	LocalTypeInt64Slice  LocalType = "[]int64"
+	LocalTypeUint64Slice LocalType = "[]uint64"
+	LocalTypeInt64Bytes  LocalType = "int64-bytes"
+	LocalTypeUint64Bytes LocalType = "uint64-bytes"
+	LocalTypeFloat32     LocalType = "float32"
+	LocalTypeFloat64     LocalType = "float64"
+	LocalTypeBytes       LocalType = "[]byte"
+	LocalTypeBool        LocalType = "bool"
+	LocalTypeJson        LocalType = "json"
+	LocalTypeJsonb       LocalType = "jsonb"
+)
+
+const (
+	fieldTypeBinary     = "binary"
+	fieldTypeVarbinary  = "varbinary"
+	fieldTypeBlob       = "blob"
+	fieldTypeTinyblob   = "tinyblob"
+	fieldTypeMediumblob = "mediumblob"
+	fieldTypeLongblob   = "longblob"
+	fieldTypeInt        = "int"
+	fieldTypeTinyint    = "tinyint"
+	fieldTypeSmallInt   = "small_int"
+	fieldTypeSmallint   = "smallint"
+	fieldTypeMediumInt  = "medium_int"
+	fieldTypeMediumint  = "mediumint"
+	fieldTypeSerial     = "serial"
+	fieldTypeBigInt     = "big_int"
+	fieldTypeBigint     = "bigint"
+	fieldTypeBigserial  = "bigserial"
+	fieldTypeReal       = "real"
+	fieldTypeFloat      = "float"
+	fieldTypeDouble     = "double"
+	fieldTypeDecimal    = "decimal"
+	fieldTypeMoney      = "money"
+	fieldTypeNumeric    = "numeric"
+	fieldTypeSmallmoney = "smallmoney"
+	fieldTypeBool       = "bool"
+	fieldTypeBit        = "bit"
+	fieldTypeDate       = "date"
+	fieldTypeDatetime   = "datetime"
+	fieldTypeTimestamp  = "timestamp"
+	fieldTypeTimestampz = "timestamptz"
+	fieldTypeJson       = "json"
+	fieldTypeJsonb      = "jsonb"
+)
+
+var (
+	// instances is the management map for instances.
+	instances = gmap.NewStrAnyMap(true)
+
+	// driverMap manages all custom registered driver.
+	driverMap = map[string]Driver{}
+
+	// lastOperatorRegPattern is the regular expression pattern for a string
+	// which has operator at its tail.
+	lastOperatorRegPattern = `[<>=]+\s*$`
+
+	// regularFieldNameRegPattern is the regular expression pattern for a string
+	// which is a regular field name of table.
+	regularFieldNameRegPattern = `^[\w\.\-]+$`
+
+	// regularFieldNameWithoutDotRegPattern is similar to regularFieldNameRegPattern but not allows '.'.
+	// Note that, although some databases allow char '.' in the field name, but it here does not allow '.'
+	// in the field name as it conflicts with "db.table.field" pattern in SOME situations.
+	regularFieldNameWithoutDotRegPattern = `^[\w\-]+$`
+
+	// allDryRun sets dry-run feature for all database connections.
+	// It is commonly used for command options for convenience.
+	allDryRun = false
+
+	// tableFieldsMap caches the table information retrieved from database.
+	tableFieldsMap = gmap.NewStrAnyMap(true)
+)
+
+func init() {
+	// allDryRun is initialized from environment or command options.
+	allDryRun = gcmd.GetOptWithEnv(commandEnvKeyForDryRun, false).Bool()
+}
+
+// Register registers custom database driver to gdb.
+func Register(name string, driver Driver) error {
+	driverMap[name] = newDriverWrapper(driver)
+	return nil
+}
+
+// New creates and returns an ORM object with given configuration node.
+func New(node ConfigNode) (db DB, err error) {
+	return newDBByConfigNode(&node, "")
+}
+
+// NewByGroup creates and returns an ORM object with global configurations.
+// The parameter `name` specifies the configuration group name,
+// which is DefaultGroupName in default.
+func NewByGroup(group ...string) (db DB, err error) {
+	groupName := configs.group
+	if len(group) > 0 && group[0] != "" {
+		groupName = group[0]
+	}
+	configs.RLock()
+	defer configs.RUnlock()
+
+	if len(configs.config) < 1 {
+		return nil, gerror.NewCode(
+			gcode.CodeInvalidConfiguration,
+			"database configuration is empty, please set the database configuration before using",
+		)
+	}
+	if _, ok := configs.config[groupName]; ok {
+		var node *ConfigNode
+		if node, err = getConfigNodeByGroup(groupName, true); err == nil {
+			return newDBByConfigNode(node, groupName)
+		}
+		return nil, err
+	}
+	return nil, gerror.NewCodef(
+		gcode.CodeInvalidConfiguration,
+		`database configuration node "%s" is not found, did you misspell group name "%s" or miss the database configuration?`,
+		groupName, groupName,
+	)
+}
+
+// newDBByConfigNode creates and returns an ORM object with given configuration node and group name.
+//
+// Very Note:
+// The parameter `node` is used for DB creation, not for underlying connection creation.
+// So all db type configurations in the same group should be the same.
+func newDBByConfigNode(node *ConfigNode, group string) (db DB, err error) {
+	if node.Link != "" {
+		node = parseConfigNodeLink(node)
+	}
+	c := &Core{
+		group:  group,
+		debug:  gtype.NewBool(),
+		cache:  gcache.New(),
+		links:  gmap.NewStrAnyMap(true),
+		logger: glog.New(),
+		config: node,
+		dynamicConfig: dynamicConfig{
+			MaxIdleConnCount: node.MaxIdleConnCount,
+			MaxOpenConnCount: node.MaxOpenConnCount,
+			MaxConnLifeTime:  node.MaxConnLifeTime,
+		},
+	}
+	if v, ok := driverMap[node.Type]; ok {
+		if c.db, err = v.New(c, node); err != nil {
+			return nil, err
+		}
+		return c.db, nil
+	}
+	errorMsg := `cannot find database driver for specified database type "%s"`
+	errorMsg += `, did you misspell type name "%s" or forget importing the database driver? `
+	errorMsg += `possible reference: https://github.com/gogf/gf/tree/master/contrib/drivers`
+	return nil, gerror.NewCodef(gcode.CodeInvalidConfiguration, errorMsg, node.Type, node.Type)
+}
+
+// Instance returns an instance for DB operations.
+// The parameter `name` specifies the configuration group name,
+// which is DefaultGroupName in default.
+func Instance(name ...string) (db DB, err error) {
+	group := configs.group
+	if len(name) > 0 && name[0] != "" {
+		group = name[0]
+	}
+	v := instances.GetOrSetFuncLock(group, func() interface{} {
+		db, err = NewByGroup(group)
+		return db
+	})
+	if v != nil {
+		return v.(DB), nil
+	}
+	return
+}
+
+// getConfigNodeByGroup calculates and returns a configuration node of given group. It
+// calculates the value internally using weight algorithm for load balance.
+//
+// The parameter `master` specifies whether retrieving a master node, or else a slave node
+// if master-slave configured.
+func getConfigNodeByGroup(group string, master bool) (*ConfigNode, error) {
+	if list, ok := configs.config[group]; ok {
+		// Separates master and slave configuration nodes array.
+		var (
+			masterList = make(ConfigGroup, 0)
+			slaveList  = make(ConfigGroup, 0)
+		)
+		for i := 0; i < len(list); i++ {
+			if list[i].Role == dbRoleSlave {
+				slaveList = append(slaveList, list[i])
+			} else {
+				masterList = append(masterList, list[i])
+			}
+		}
+		if len(masterList) < 1 {
+			return nil, gerror.NewCode(
+				gcode.CodeInvalidConfiguration,
+				"at least one master node configuration's need to make sense",
+			)
+		}
+		if len(slaveList) < 1 {
+			slaveList = masterList
+		}
+		if master {
+			return getConfigNodeByWeight(masterList), nil
+		} else {
+			return getConfigNodeByWeight(slaveList), nil
+		}
+	}
+	return nil, gerror.NewCodef(
+		gcode.CodeInvalidConfiguration,
+		"empty database configuration for item name '%s'",
+		group,
+	)
+}
+
+// getConfigNodeByWeight calculates the configuration weights and randomly returns a node.
+//
+// Calculation algorithm brief:
+// 1. If we have 2 nodes, and their weights are both 1, then the weight range is [0, 199];
+// 2. Node1 weight range is [0, 99], and node2 weight range is [100, 199], ratio is 1:1;
+// 3. If the random number is 99, it then chooses and returns node1;.
+func getConfigNodeByWeight(cg ConfigGroup) *ConfigNode {
+	if len(cg) < 2 {
+		return &cg[0]
+	}
+	var total int
+	for i := 0; i < len(cg); i++ {
+		total += cg[i].Weight * 100
+	}
+	// If total is 0 means all the nodes have no weight attribute configured.
+	// It then defaults each node's weight attribute to 1.
+	if total == 0 {
+		for i := 0; i < len(cg); i++ {
+			cg[i].Weight = 1
+			total += cg[i].Weight * 100
+		}
+	}
+	// Exclude the right border value.
+	var (
+		min    = 0
+		max    = 0
+		random = grand.N(0, total-1)
+	)
+	for i := 0; i < len(cg); i++ {
+		max = min + cg[i].Weight*100
+		if random >= min && random < max {
+			// ====================================================
+			// Return a COPY of the ConfigNode.
+			// ====================================================
+			node := ConfigNode{}
+			node = cg[i]
+			return &node
+		}
+		min = max
+	}
+	return nil
+}
+
+// getSqlDb retrieves and returns an underlying database connection object.
+// The parameter `master` specifies whether retrieves master node connection if
+// master-slave nodes are configured.
+func (c *Core) getSqlDb(master bool, schema ...string) (sqlDb *sql.DB, err error) {
+	var (
+		node *ConfigNode
+		ctx  = c.db.GetCtx()
+	)
+	if c.group != "" {
+		// Load balance.
+		configs.RLock()
+		defer configs.RUnlock()
+		// Value COPY for node.
+		node, err = getConfigNodeByGroup(c.group, master)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// Value COPY for node.
+		n := *c.db.GetConfig()
+		node = &n
+	}
+	if node.Charset == "" {
+		node.Charset = defaultCharset
+	}
+	// Changes the schema.
+	nodeSchema := gutil.GetOrDefaultStr(c.schema, schema...)
+	if nodeSchema != "" {
+		node.Name = nodeSchema
+	}
+	// Update the configuration object in internal data.
+	internalData := c.GetInternalCtxDataFromCtx(ctx)
+	if internalData != nil {
+		internalData.ConfigNode = node
+	}
+	// Cache the underlying connection pool object by node.
+	instanceNameByNode := fmt.Sprintf(`%+v`, node)
+	instanceValue := c.links.GetOrSetFuncLock(instanceNameByNode, func() interface{} {
+		if sqlDb, err = c.db.Open(node); err != nil {
+			return nil
+		}
+		if sqlDb == nil {
+			return nil
+		}
+		if c.dynamicConfig.MaxIdleConnCount > 0 {
+			sqlDb.SetMaxIdleConns(c.dynamicConfig.MaxIdleConnCount)
+		} else {
+			sqlDb.SetMaxIdleConns(defaultMaxIdleConnCount)
+		}
+		if c.dynamicConfig.MaxOpenConnCount > 0 {
+			sqlDb.SetMaxOpenConns(c.dynamicConfig.MaxOpenConnCount)
+		} else {
+			sqlDb.SetMaxOpenConns(defaultMaxOpenConnCount)
+		}
+		if c.dynamicConfig.MaxConnLifeTime > 0 {
+			sqlDb.SetConnMaxLifetime(c.dynamicConfig.MaxConnLifeTime)
+		} else {
+			sqlDb.SetConnMaxLifetime(defaultMaxConnLifeTime)
+		}
+		return sqlDb
+	})
+	if instanceValue != nil && sqlDb == nil {
+		// It reads from instance map.
+		sqlDb = instanceValue.(*sql.DB)
+	}
+	if node.Debug {
+		c.db.SetDebug(node.Debug)
+	}
+	if node.DryRun {
+		c.db.SetDryRun(node.DryRun)
+	}
+	return
+}

+ 826 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core.go

@@ -0,0 +1,826 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+//
+
+package gdb
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/gogf/gf/v2/container/gmap"
+	"github.com/gogf/gf/v2/container/gset"
+	"github.com/gogf/gf/v2/container/gvar"
+	"github.com/gogf/gf/v2/errors/gcode"
+	"github.com/gogf/gf/v2/errors/gerror"
+	"github.com/gogf/gf/v2/internal/intlog"
+	"github.com/gogf/gf/v2/internal/reflection"
+	"github.com/gogf/gf/v2/internal/utils"
+	"github.com/gogf/gf/v2/text/gregex"
+	"github.com/gogf/gf/v2/text/gstr"
+	"github.com/gogf/gf/v2/util/gconv"
+	"github.com/gogf/gf/v2/util/gutil"
+)
+
+// GetCore returns the underlying *Core object.
+func (c *Core) GetCore() *Core {
+	return c
+}
+
+// Ctx is a chaining function, which creates and returns a new DB that is a shallow copy
+// of current DB object and with given context in it.
+// Note that this returned DB object can be used only once, so do not assign it to
+// a global or package variable for long using.
+func (c *Core) Ctx(ctx context.Context) DB {
+	if ctx == nil {
+		return c.db
+	}
+	// It makes a shallow copy of current db and changes its context for next chaining operation.
+	var (
+		err        error
+		newCore    = &Core{}
+		configNode = c.db.GetConfig()
+	)
+	*newCore = *c
+	// It creates a new DB object(NOT NEW CONNECTION), which is commonly a wrapper for object `Core`.
+	newCore.db, err = driverMap[configNode.Type].New(newCore, configNode)
+	if err != nil {
+		// It is really a serious error here.
+		// Do not let it continue.
+		panic(err)
+	}
+	newCore.ctx = WithDB(ctx, newCore.db)
+	newCore.ctx = c.InjectInternalCtxData(newCore.ctx)
+	return newCore.db
+}
+
+// GetCtx returns the context for current DB.
+// It returns `context.Background()` is there's no context previously set.
+func (c *Core) GetCtx() context.Context {
+	ctx := c.ctx
+	if ctx == nil {
+		ctx = context.TODO()
+	}
+	return c.InjectInternalCtxData(ctx)
+}
+
+// GetCtxTimeout returns the context and cancel function for specified timeout type.
+func (c *Core) GetCtxTimeout(ctx context.Context, timeoutType int) (context.Context, context.CancelFunc) {
+	if ctx == nil {
+		ctx = c.db.GetCtx()
+	} else {
+		ctx = context.WithValue(ctx, "WrappedByGetCtxTimeout", nil)
+	}
+	switch timeoutType {
+	case ctxTimeoutTypeExec:
+		if c.db.GetConfig().ExecTimeout > 0 {
+			return context.WithTimeout(ctx, c.db.GetConfig().ExecTimeout)
+		}
+	case ctxTimeoutTypeQuery:
+		if c.db.GetConfig().QueryTimeout > 0 {
+			return context.WithTimeout(ctx, c.db.GetConfig().QueryTimeout)
+		}
+	case ctxTimeoutTypePrepare:
+		if c.db.GetConfig().PrepareTimeout > 0 {
+			return context.WithTimeout(ctx, c.db.GetConfig().PrepareTimeout)
+		}
+	default:
+		panic(gerror.NewCodef(gcode.CodeInvalidParameter, "invalid context timeout type: %d", timeoutType))
+	}
+	return ctx, func() {}
+}
+
+// Close closes the database and prevents new queries from starting.
+// Close then waits for all queries that have started processing on the server
+// to finish.
+//
+// It is rare to Close a DB, as the DB handle is meant to be
+// long-lived and shared between many goroutines.
+func (c *Core) Close(ctx context.Context) (err error) {
+	if err = c.cache.Close(ctx); err != nil {
+		return err
+	}
+	c.links.LockFunc(func(m map[string]interface{}) {
+		for k, v := range m {
+			if db, ok := v.(*sql.DB); ok {
+				err = db.Close()
+				if err != nil {
+					err = gerror.WrapCode(gcode.CodeDbOperationError, err, `db.Close failed`)
+				}
+				intlog.Printf(ctx, `close link: %s, err: %v`, k, err)
+				if err != nil {
+					return
+				}
+				delete(m, k)
+			}
+		}
+	})
+	return
+}
+
+// Master creates and returns a connection from master node if master-slave configured.
+// It returns the default connection if master-slave not configured.
+func (c *Core) Master(schema ...string) (*sql.DB, error) {
+	var (
+		usedSchema   = gutil.GetOrDefaultStr(c.schema, schema...)
+		charL, charR = c.db.GetChars()
+	)
+	return c.getSqlDb(true, gstr.Trim(usedSchema, charL+charR))
+}
+
+// Slave creates and returns a connection from slave node if master-slave configured.
+// It returns the default connection if master-slave not configured.
+func (c *Core) Slave(schema ...string) (*sql.DB, error) {
+	var (
+		usedSchema   = gutil.GetOrDefaultStr(c.schema, schema...)
+		charL, charR = c.db.GetChars()
+	)
+	return c.getSqlDb(false, gstr.Trim(usedSchema, charL+charR))
+}
+
+// GetAll queries and returns data records from database.
+func (c *Core) GetAll(ctx context.Context, sql string, args ...interface{}) (Result, error) {
+	return c.db.DoSelect(ctx, nil, sql, args...)
+}
+
+// DoSelect queries and returns data records from database.
+func (c *Core) DoSelect(ctx context.Context, link Link, sql string, args ...interface{}) (result Result, err error) {
+	return c.db.DoQuery(ctx, link, sql, args...)
+}
+
+// GetOne queries and returns one record from database.
+func (c *Core) GetOne(ctx context.Context, sql string, args ...interface{}) (Record, error) {
+	list, err := c.db.GetAll(ctx, sql, args...)
+	if err != nil {
+		return nil, err
+	}
+	if len(list) > 0 {
+		return list[0], nil
+	}
+	return nil, nil
+}
+
+// GetArray queries and returns data values as slice from database.
+// Note that if there are multiple columns in the result, it returns just one column values randomly.
+func (c *Core) GetArray(ctx context.Context, sql string, args ...interface{}) ([]Value, error) {
+	all, err := c.db.DoSelect(ctx, nil, sql, args...)
+	if err != nil {
+		return nil, err
+	}
+	return all.Array(), nil
+}
+
+// doGetStruct queries one record from database and converts it to given struct.
+// The parameter `pointer` should be a pointer to struct.
+func (c *Core) doGetStruct(ctx context.Context, pointer interface{}, sql string, args ...interface{}) error {
+	one, err := c.db.GetOne(ctx, sql, args...)
+	if err != nil {
+		return err
+	}
+	return one.Struct(pointer)
+}
+
+// doGetStructs queries records from database and converts them to given struct.
+// The parameter `pointer` should be type of struct slice: []struct/[]*struct.
+func (c *Core) doGetStructs(ctx context.Context, pointer interface{}, sql string, args ...interface{}) error {
+	all, err := c.db.GetAll(ctx, sql, args...)
+	if err != nil {
+		return err
+	}
+	return all.Structs(pointer)
+}
+
+// GetScan queries one or more records from database and converts them to given struct or
+// struct array.
+//
+// If parameter `pointer` is type of struct pointer, it calls GetStruct internally for
+// the conversion. If parameter `pointer` is type of slice, it calls GetStructs internally
+// for conversion.
+func (c *Core) GetScan(ctx context.Context, pointer interface{}, sql string, args ...interface{}) error {
+	reflectInfo := reflection.OriginTypeAndKind(pointer)
+	if reflectInfo.InputKind != reflect.Ptr {
+		return gerror.NewCodef(
+			gcode.CodeInvalidParameter,
+			"params should be type of pointer, but got: %v",
+			reflectInfo.InputKind,
+		)
+	}
+	switch reflectInfo.OriginKind {
+	case reflect.Array, reflect.Slice:
+		return c.db.GetCore().doGetStructs(ctx, pointer, sql, args...)
+
+	case reflect.Struct:
+		return c.db.GetCore().doGetStruct(ctx, pointer, sql, args...)
+	}
+	return gerror.NewCodef(
+		gcode.CodeInvalidParameter,
+		`in valid parameter type "%v", of which element type should be type of struct/slice`,
+		reflectInfo.InputType,
+	)
+}
+
+// GetValue queries and returns the field value from database.
+// The sql should query only one field from database, or else it returns only one
+// field of the result.
+func (c *Core) GetValue(ctx context.Context, sql string, args ...interface{}) (Value, error) {
+	one, err := c.db.GetOne(ctx, sql, args...)
+	if err != nil {
+		return gvar.New(nil), err
+	}
+	for _, v := range one {
+		return v, nil
+	}
+	return gvar.New(nil), nil
+}
+
+// GetCount queries and returns the count from database.
+func (c *Core) GetCount(ctx context.Context, sql string, args ...interface{}) (int, error) {
+	// If the query fields do not contain function "COUNT",
+	// it replaces the sql string and adds the "COUNT" function to the fields.
+	if !gregex.IsMatchString(`(?i)SELECT\s+COUNT\(.+\)\s+FROM`, sql) {
+		sql, _ = gregex.ReplaceString(`(?i)(SELECT)\s+(.+)\s+(FROM)`, `$1 COUNT($2) $3`, sql)
+	}
+	value, err := c.db.GetValue(ctx, sql, args...)
+	if err != nil {
+		return 0, err
+	}
+	return value.Int(), nil
+}
+
+// Union does "(SELECT xxx FROM xxx) UNION (SELECT xxx FROM xxx) ..." statement.
+func (c *Core) Union(unions ...*Model) *Model {
+	var ctx = c.db.GetCtx()
+	return c.doUnion(ctx, unionTypeNormal, unions...)
+}
+
+// UnionAll does "(SELECT xxx FROM xxx) UNION ALL (SELECT xxx FROM xxx) ..." statement.
+func (c *Core) UnionAll(unions ...*Model) *Model {
+	var ctx = c.db.GetCtx()
+	return c.doUnion(ctx, unionTypeAll, unions...)
+}
+
+func (c *Core) doUnion(ctx context.Context, unionType int, unions ...*Model) *Model {
+	var (
+		unionTypeStr   string
+		composedSqlStr string
+		composedArgs   = make([]interface{}, 0)
+	)
+	if unionType == unionTypeAll {
+		unionTypeStr = "UNION ALL"
+	} else {
+		unionTypeStr = "UNION"
+	}
+	for _, v := range unions {
+		sqlWithHolder, holderArgs := v.getFormattedSqlAndArgs(ctx, queryTypeNormal, false)
+		if composedSqlStr == "" {
+			composedSqlStr += fmt.Sprintf(`(%s)`, sqlWithHolder)
+		} else {
+			composedSqlStr += fmt.Sprintf(` %s (%s)`, unionTypeStr, sqlWithHolder)
+		}
+		composedArgs = append(composedArgs, holderArgs...)
+	}
+	return c.db.Raw(composedSqlStr, composedArgs...)
+}
+
+// PingMaster pings the master node to check authentication or keeps the connection alive.
+func (c *Core) PingMaster() error {
+	var ctx = c.db.GetCtx()
+	if master, err := c.db.Master(); err != nil {
+		return err
+	} else {
+		if err = master.PingContext(ctx); err != nil {
+			err = gerror.WrapCode(gcode.CodeDbOperationError, err, `master.Ping failed`)
+		}
+		return err
+	}
+}
+
+// PingSlave pings the slave node to check authentication or keeps the connection alive.
+func (c *Core) PingSlave() error {
+	var ctx = c.db.GetCtx()
+	if slave, err := c.db.Slave(); err != nil {
+		return err
+	} else {
+		if err = slave.PingContext(ctx); err != nil {
+			err = gerror.WrapCode(gcode.CodeDbOperationError, err, `slave.Ping failed`)
+		}
+		return err
+	}
+}
+
+// Insert does "INSERT INTO ..." statement for the table.
+// If there's already one unique record of the data in the table, it returns error.
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// The parameter `batch` specifies the batch operation count when given data is slice.
+func (c *Core) Insert(ctx context.Context, table string, data interface{}, batch ...int) (sql.Result, error) {
+	if len(batch) > 0 {
+		return c.Model(table).Ctx(ctx).Data(data).Batch(batch[0]).Insert()
+	}
+	return c.Model(table).Ctx(ctx).Data(data).Insert()
+}
+
+// InsertIgnore does "INSERT IGNORE INTO ..." statement for the table.
+// If there's already one unique record of the data in the table, it ignores the inserting.
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// The parameter `batch` specifies the batch operation count when given data is slice.
+func (c *Core) InsertIgnore(ctx context.Context, table string, data interface{}, batch ...int) (sql.Result, error) {
+	if len(batch) > 0 {
+		return c.Model(table).Ctx(ctx).Data(data).Batch(batch[0]).InsertIgnore()
+	}
+	return c.Model(table).Ctx(ctx).Data(data).InsertIgnore()
+}
+
+// InsertAndGetId performs action Insert and returns the last insert id that automatically generated.
+func (c *Core) InsertAndGetId(ctx context.Context, table string, data interface{}, batch ...int) (int64, error) {
+	if len(batch) > 0 {
+		return c.Model(table).Ctx(ctx).Data(data).Batch(batch[0]).InsertAndGetId()
+	}
+	return c.Model(table).Ctx(ctx).Data(data).InsertAndGetId()
+}
+
+// Replace does "REPLACE INTO ..." statement for the table.
+// If there's already one unique record of the data in the table, it deletes the record
+// and inserts a new one.
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// If given data is type of slice, it then does batch replacing, and the optional parameter
+// `batch` specifies the batch operation count.
+func (c *Core) Replace(ctx context.Context, table string, data interface{}, batch ...int) (sql.Result, error) {
+	if len(batch) > 0 {
+		return c.Model(table).Ctx(ctx).Data(data).Batch(batch[0]).Replace()
+	}
+	return c.Model(table).Ctx(ctx).Data(data).Replace()
+}
+
+// Save does "INSERT INTO ... ON DUPLICATE KEY UPDATE..." statement for the table.
+// It updates the record if there's primary or unique index in the saving data,
+// or else it inserts a new record into the table.
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// If given data is type of slice, it then does batch saving, and the optional parameter
+// `batch` specifies the batch operation count.
+func (c *Core) Save(ctx context.Context, table string, data interface{}, batch ...int) (sql.Result, error) {
+	if len(batch) > 0 {
+		return c.Model(table).Ctx(ctx).Data(data).Batch(batch[0]).Save()
+	}
+	return c.Model(table).Ctx(ctx).Data(data).Save()
+}
+
+func (c *Core) fieldsToSequence(ctx context.Context, table string, fields []string) ([]string, error) {
+	var (
+		fieldSet               = gset.NewStrSetFrom(fields)
+		fieldsResultInSequence = make([]string, 0)
+		tableFields, err       = c.db.TableFields(ctx, table)
+	)
+	if err != nil {
+		return nil, err
+	}
+	// Sort the fields in order.
+	var fieldsOfTableInSequence = make([]string, len(tableFields))
+	for _, field := range tableFields {
+		fieldsOfTableInSequence[field.Index] = field.Name
+	}
+	// Sort the input fields.
+	for _, fieldName := range fieldsOfTableInSequence {
+		if fieldSet.Contains(fieldName) {
+			fieldsResultInSequence = append(fieldsResultInSequence, fieldName)
+		}
+	}
+	return fieldsResultInSequence, nil
+}
+
+// DoInsert inserts or updates data forF given table.
+// This function is usually used for custom interface definition, you do not need call it manually.
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// The parameter `option` values are as follows:
+// InsertOptionDefault: just insert, if there's unique/primary key in the data, it returns error;
+// InsertOptionReplace: if there's unique/primary key in the data, it deletes it from table and inserts a new one;
+// InsertOptionSave:    if there's unique/primary key in the data, it updates it or else inserts a new one;
+// InsertOptionIgnore:  if there's unique/primary key in the data, it ignores the inserting;
+func (c *Core) DoInsert(ctx context.Context, link Link, table string, list List, option DoInsertOption) (result sql.Result, err error) {
+	var (
+		keys           []string      // Field names.
+		values         []string      // Value holder string array, like: (?,?,?)
+		params         []interface{} // Values that will be committed to underlying database driver.
+		onDuplicateStr string        // onDuplicateStr is used in "ON DUPLICATE KEY UPDATE" statement.
+	)
+	// ============================================================================================
+	// Group the list by fields. Different fields to different list.
+	// It here uses ListMap to keep sequence for data inserting.
+	// ============================================================================================
+	var keyListMap = gmap.NewListMap()
+	for _, item := range list {
+		var (
+			tmpKeys              = make([]string, 0)
+			tmpKeysInSequenceStr string
+		)
+		for k := range item {
+			tmpKeys = append(tmpKeys, k)
+		}
+		keys, err = c.fieldsToSequence(ctx, table, tmpKeys)
+		if err != nil {
+			return nil, err
+		}
+		tmpKeysInSequenceStr = gstr.Join(keys, ",")
+		if !keyListMap.Contains(tmpKeysInSequenceStr) {
+			keyListMap.Set(tmpKeysInSequenceStr, make(List, 0))
+		}
+		tmpKeysInSequenceList := keyListMap.Get(tmpKeysInSequenceStr).(List)
+		tmpKeysInSequenceList = append(tmpKeysInSequenceList, item)
+		keyListMap.Set(tmpKeysInSequenceStr, tmpKeysInSequenceList)
+	}
+	if keyListMap.Size() > 1 {
+		var (
+			tmpResult    sql.Result
+			sqlResult    SqlResult
+			rowsAffected int64
+		)
+		keyListMap.Iterator(func(key, value interface{}) bool {
+			tmpResult, err = c.DoInsert(ctx, link, table, value.(List), option)
+			if err != nil {
+				return false
+			}
+			rowsAffected, err = tmpResult.RowsAffected()
+			if err != nil {
+				return false
+			}
+			sqlResult.Result = tmpResult
+			sqlResult.Affected += rowsAffected
+			return true
+		})
+		return &sqlResult, err
+	}
+
+	// Prepare the batch result pointer.
+	var (
+		charL, charR = c.db.GetChars()
+		batchResult  = new(SqlResult)
+		keysStr      = charL + strings.Join(keys, charR+","+charL) + charR
+		operation    = GetInsertOperationByOption(option.InsertOption)
+	)
+	if option.InsertOption == InsertOptionSave {
+		onDuplicateStr = c.formatOnDuplicate(keys, option)
+	}
+	var (
+		listLength  = len(list)
+		valueHolder = make([]string, 0)
+	)
+	for i := 0; i < listLength; i++ {
+		values = values[:0]
+		// Note that the map type is unordered,
+		// so it should use slice+key to retrieve the value.
+		for _, k := range keys {
+			if s, ok := list[i][k].(Raw); ok {
+				values = append(values, gconv.String(s))
+			} else {
+				values = append(values, "?")
+				params = append(params, list[i][k])
+			}
+		}
+		valueHolder = append(valueHolder, "("+gstr.Join(values, ",")+")")
+		// Batch package checks: It meets the batch number, or it is the last element.
+		if len(valueHolder) == option.BatchCount || (i == listLength-1 && len(valueHolder) > 0) {
+			var (
+				stdSqlResult sql.Result
+				affectedRows int64
+			)
+			stdSqlResult, err = c.db.DoExec(ctx, link, fmt.Sprintf(
+				"%s INTO %s(%s) VALUES%s %s",
+				operation, c.QuotePrefixTableName(table), keysStr,
+				gstr.Join(valueHolder, ","),
+				onDuplicateStr,
+			), params...)
+			if err != nil {
+				return stdSqlResult, err
+			}
+			if affectedRows, err = stdSqlResult.RowsAffected(); err != nil {
+				err = gerror.WrapCode(gcode.CodeDbOperationError, err, `sql.Result.RowsAffected failed`)
+				return stdSqlResult, err
+			} else {
+				batchResult.Result = stdSqlResult
+				batchResult.Affected += affectedRows
+			}
+			params = params[:0]
+			valueHolder = valueHolder[:0]
+		}
+	}
+	return batchResult, nil
+}
+
+func (c *Core) formatOnDuplicate(columns []string, option DoInsertOption) string {
+	var onDuplicateStr string
+	if option.OnDuplicateStr != "" {
+		onDuplicateStr = option.OnDuplicateStr
+	} else if len(option.OnDuplicateMap) > 0 {
+		for k, v := range option.OnDuplicateMap {
+			if len(onDuplicateStr) > 0 {
+				onDuplicateStr += ","
+			}
+			switch v.(type) {
+			case Raw, *Raw:
+				onDuplicateStr += fmt.Sprintf(
+					"%s=%s",
+					c.QuoteWord(k),
+					v,
+				)
+			default:
+				onDuplicateStr += fmt.Sprintf(
+					"%s=VALUES(%s)",
+					c.QuoteWord(k),
+					c.QuoteWord(gconv.String(v)),
+				)
+			}
+		}
+	} else {
+		for _, column := range columns {
+			// If it's SAVE operation, do not automatically update the creating time.
+			if c.isSoftCreatedFieldName(column) {
+				continue
+			}
+			if len(onDuplicateStr) > 0 {
+				onDuplicateStr += ","
+			}
+			onDuplicateStr += fmt.Sprintf(
+				"%s=VALUES(%s)",
+				c.QuoteWord(column),
+				c.QuoteWord(column),
+			)
+		}
+	}
+	return InsertOnDuplicateKeyUpdate + " " + onDuplicateStr
+}
+
+// Update does "UPDATE ... " statement for the table.
+//
+// The parameter `data` can be type of string/map/gmap/struct/*struct, etc.
+// Eg: "uid=10000", "uid", 10000, g.Map{"uid": 10000, "name":"john"}
+//
+// The parameter `condition` can be type of string/map/gmap/slice/struct/*struct, etc.
+// It is commonly used with parameter `args`.
+// Eg:
+// "uid=10000",
+// "uid", 10000
+// "money>? AND name like ?", 99999, "vip_%"
+// "status IN (?)", g.Slice{1,2,3}
+// "age IN(?,?)", 18, 50
+// User{ Id : 1, UserName : "john"}.
+func (c *Core) Update(ctx context.Context, table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error) {
+	return c.Model(table).Ctx(ctx).Data(data).Where(condition, args...).Update()
+}
+
+// DoUpdate does "UPDATE ... " statement for the table.
+// This function is usually used for custom interface definition, you do not need to call it manually.
+//
+// `data` of map/struct kind is converted column-by-column (supporting Counter
+// increments/decrements and Raw SQL expressions); any other kind is stringified
+// and used as a raw SET clause. The `condition` string is appended verbatim
+// after the SET clause, so it must include its own "WHERE" if one is wanted.
+func (c *Core) DoUpdate(ctx context.Context, link Link, table string, data interface{}, condition string, args ...interface{}) (result sql.Result, err error) {
+	table = c.QuotePrefixTableName(table)
+	var (
+		rv   = reflect.ValueOf(data)
+		kind = rv.Kind()
+	)
+	// Unwrap pointer input so *map/*struct are handled like their values.
+	if kind == reflect.Ptr {
+		rv = rv.Elem()
+		kind = rv.Kind()
+	}
+	var (
+		params  []interface{}
+		updates string
+	)
+	switch kind {
+	case reflect.Map, reflect.Struct:
+		var (
+			fields         []string
+			dataMap        map[string]interface{}
+			// counterHandler renders a Counter as "col=ref(+|-)?", appending the
+			// absolute delta to params; zero-valued counters are skipped entirely.
+			counterHandler = func(column string, counter Counter) {
+				if counter.Value != 0 {
+					column = c.QuoteWord(column)
+					var (
+						columnRef = c.QuoteWord(counter.Field)
+						columnVal = counter.Value
+						operator  = "+"
+					)
+					if columnVal < 0 {
+						operator = "-"
+						columnVal = -columnVal
+					}
+					fields = append(fields, fmt.Sprintf("%s=%s%s?", column, columnRef, operator))
+					params = append(params, columnVal)
+				}
+			}
+		)
+		dataMap, err = c.ConvertDataForRecord(ctx, data, table)
+		if err != nil {
+			return nil, err
+		}
+		// Sort the data keys in sequence of table fields.
+		var (
+			dataKeys       = make([]string, 0)
+			keysInSequence = make([]string, 0)
+		)
+		for k := range dataMap {
+			dataKeys = append(dataKeys, k)
+		}
+		keysInSequence, err = c.fieldsToSequence(ctx, table, dataKeys)
+		if err != nil {
+			return nil, err
+		}
+		for _, k := range keysInSequence {
+			v := dataMap[k]
+			switch value := v.(type) {
+			case *Counter:
+				counterHandler(k, *value)
+
+			case Counter:
+				counterHandler(k, value)
+
+			default:
+				// Raw values are embedded verbatim; everything else is parameterized.
+				if s, ok := v.(Raw); ok {
+					fields = append(fields, c.QuoteWord(k)+"="+gconv.String(s))
+				} else {
+					fields = append(fields, c.QuoteWord(k)+"=?")
+					params = append(params, v)
+				}
+			}
+		}
+		updates = strings.Join(fields, ",")
+
+	default:
+		updates = gconv.String(data)
+	}
+	if len(updates) == 0 {
+		return nil, gerror.NewCode(gcode.CodeMissingParameter, "data cannot be empty")
+	}
+	// SET-clause parameters must precede the condition arguments.
+	if len(params) > 0 {
+		args = append(params, args...)
+	}
+	// If no link passed, it then uses the master link.
+	if link == nil {
+		if link, err = c.MasterLink(); err != nil {
+			return nil, err
+		}
+	}
+	return c.db.DoExec(ctx, link, fmt.Sprintf(
+		"UPDATE %s SET %s%s",
+		table, updates, condition,
+	),
+		args...,
+	)
+}
+
+// Delete does "DELETE FROM ... " statement for the table.
+//
+// `condition` together with `args` forms the WHERE clause and accepts
+// string/map/gmap/slice/struct/*struct, e.g.:
+// "uid=10000",
+// "uid", 10000
+// "money>? AND name like ?", 99999, "vip_%"
+// "status IN (?)", g.Slice{1,2,3}
+// "age IN(?,?)", 18, 50
+// User{ Id : 1, UserName : "john"}.
+func (c *Core) Delete(ctx context.Context, table string, condition interface{}, args ...interface{}) (result sql.Result, err error) {
+	model := c.Model(table).Ctx(ctx)
+	return model.Where(condition, args...).Delete()
+}
+
+// DoDelete does "DELETE FROM ... " statement for the table.
+// This function is usually used for custom interface definition, you do not need call it manually.
+// The `condition` string is appended verbatim after the table name.
+func (c *Core) DoDelete(ctx context.Context, link Link, table string, condition string, args ...interface{}) (result sql.Result, err error) {
+	// Fall back to the master link when the caller supplies none.
+	if link == nil {
+		link, err = c.MasterLink()
+		if err != nil {
+			return nil, err
+		}
+	}
+	table = c.QuotePrefixTableName(table)
+	query := fmt.Sprintf("DELETE FROM %s%s", table, condition)
+	return c.db.DoExec(ctx, link, query, args...)
+}
+
+// FilteredLink retrieves and returns filtered `linkInfo` that can be using for
+// logging or tracing purpose. The password is not included.
+func (c *Core) FilteredLink() string {
+	cfg := c.config
+	return fmt.Sprintf(
+		`%s@%s(%s:%s)/%s`,
+		cfg.User, cfg.Protocol, cfg.Host, cfg.Port, cfg.Name,
+	)
+}
+
+// MarshalJSON implements the interface MarshalJSON for json.Marshal.
+// It returns a plain `%+v` formatted description of the Core value rather
+// than serializing its fields.
+//
+// Note that this interface implements mainly for workaround for a json infinite loop bug
+// of Golang version < v1.14.
+func (c Core) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`%+v`, c)), nil
+}
+
+// writeSqlToLogger outputs the Sql object to logger.
+// It is enabled only if configuration "debug" is true.
+// Failed statements are logged at error level, the rest at debug level.
+func (c *Core) writeSqlToLogger(ctx context.Context, sql *Sql) {
+	var txID string
+	if sql.IsTransaction {
+		if v := ctx.Value(transactionIdForLoggerCtx); v != nil {
+			txID = fmt.Sprintf(`[txid:%d] `, v.(uint64))
+		}
+	}
+	msg := fmt.Sprintf(
+		"[%3d ms] [%s] [%s] [rows:%-3d] %s%s",
+		sql.End-sql.Start, sql.Group, sql.Schema, sql.RowsAffected, txID, sql.Format,
+	)
+	if sql.Error == nil {
+		c.logger.Debug(ctx, msg)
+		return
+	}
+	c.logger.Error(ctx, msg+"\nError: "+sql.Error.Error())
+}
+
+// HasTable determine whether the table name exists in the database.
+// It uses the cached table list, so a freshly created table may not be
+// visible until the cache is refreshed.
+func (c *Core) HasTable(name string) (bool, error) {
+	tables, err := c.GetTablesWithCache()
+	if err != nil {
+		return false, err
+	}
+	// Exact, case-sensitive match against the cached table names.
+	for _, table := range tables {
+		if table == name {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// GetTablesWithCache retrieves and returns the table names of current database with cache.
+// The table list is fetched once per group and cached without expiry (the last
+// argument of GetOrSetFuncLock is 0).
+func (c *Core) GetTablesWithCache() ([]string, error) {
+	var (
+		ctx      = c.db.GetCtx()
+		cacheKey = fmt.Sprintf(`Tables: %s`, c.db.GetGroup())
+	)
+	result, err := c.GetCache().GetOrSetFuncLock(
+		ctx, cacheKey, func(ctx context.Context) (interface{}, error) {
+			tableList, err := c.db.Tables(ctx)
+			if err != nil {
+				// Return nil (not a bool sentinel) as the value on error so no
+				// bogus entry can ever be interpreted as a cached result.
+				return nil, err
+			}
+			return tableList, nil
+		}, 0,
+	)
+	if err != nil {
+		return nil, err
+	}
+	return result.Strings(), nil
+}
+
+// isSoftCreatedFieldName checks and returns whether given field name is an automatic-filled created time.
+//
+// When a custom CreatedAt field is configured, the name is first matched against
+// it ignoring case and special chars, then against the default created-field
+// names by exact comparison. Without a configured CreatedAt only the default
+// names are checked, ignoring case and special chars.
+// NOTE(review): the two branches use different comparison semantics
+// (EqualFoldWithoutChars vs exact InArray) — confirm this asymmetry is intended.
+func (c *Core) isSoftCreatedFieldName(fieldName string) bool {
+	if fieldName == "" {
+		return false
+	}
+	if config := c.db.GetConfig(); config.CreatedAt != "" {
+		if utils.EqualFoldWithoutChars(fieldName, config.CreatedAt) {
+			return true
+		}
+		return gstr.InArray(append([]string{config.CreatedAt}, createdFieldNames...), fieldName)
+	}
+	for _, v := range createdFieldNames {
+		if utils.EqualFoldWithoutChars(fieldName, v) {
+			return true
+		}
+	}
+	return false
+}
+
+// FormatSqlBeforeExecuting formats the sql string and its arguments before executing.
+// The internal handleArguments function might be called twice during the SQL procedure,
+// but do not worry about it, it's safe and efficient.
+// It returns the formatted sql together with its flattened argument list.
+func (c *Core) FormatSqlBeforeExecuting(sql string, args []interface{}) (newSql string, newArgs []interface{}) {
+	// DO NOT do this as there may be multiple lines and comments in the sql.
+	// sql = gstr.Trim(sql)
+	// sql = gstr.Replace(sql, "\n", " ")
+	// sql, _ = gregex.ReplaceString(`\s{2,}`, ` `, sql)
+	return handleArguments(sql, args)
+}

+ 305 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_config.go

@@ -0,0 +1,305 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"sync"
+	"time"
+
+	"github.com/gogf/gf/v2/os/gcache"
+	"github.com/gogf/gf/v2/os/glog"
+	"github.com/gogf/gf/v2/text/gregex"
+	"github.com/gogf/gf/v2/text/gstr"
+	"github.com/gogf/gf/v2/util/gconv"
+)
+
+// Config is the configuration management object.
+// It maps a group name to its list of configuration nodes.
+type Config map[string]ConfigGroup
+
+// ConfigGroup is a slice of configuration node for specified named group.
+type ConfigGroup []ConfigNode
+
+// ConfigNode is configuration for one node.
+type ConfigNode struct {
+	Host                 string        `json:"host"`                 // Host of server, ip or domain like: 127.0.0.1, localhost
+	Port                 string        `json:"port"`                 // Port, it's commonly 3306.
+	User                 string        `json:"user"`                 // Authentication username.
+	Pass                 string        `json:"pass"`                 // Authentication password.
+	Name                 string        `json:"name"`                 // Default used database name.
+	Type                 string        `json:"type"`                 // Database type: mysql, sqlite, mssql, pgsql, oracle.
+	Link                 string        `json:"link"`                 // (Optional) Custom link information for all configuration in one single string.
+	Extra                string        `json:"extra"`                // (Optional) Extra configuration according the registered third-party database driver.
+	Role                 string        `json:"role"`                 // (Optional, "master" in default) Node role, used for master-slave mode: master, slave.
+	Debug                bool          `json:"debug"`                // (Optional) Debug mode enables debug information logging and output.
+	Prefix               string        `json:"prefix"`               // (Optional) Table prefix.
+	DryRun               bool          `json:"dryRun"`               // (Optional) Dry run, which does SELECT but no INSERT/UPDATE/DELETE statements.
+	Weight               int           `json:"weight"`               // (Optional) Weight for load balance calculating, it's useless if there's just one node.
+	Charset              string        `json:"charset"`              // (Optional, "utf8" in default) Custom charset when operating on database.
+	Protocol             string        `json:"protocol"`             // (Optional, "tcp" in default) See net.Dial for more information which networks are available.
+	Timezone             string        `json:"timezone"`             // (Optional) Sets the time zone for displaying and interpreting time stamps.
+	Namespace            string        `json:"namespace"`            // (Optional) Namespace for some databases. Eg, in pgsql, the `Name` acts as the `catalog`, the `NameSpace` acts as the `schema`.
+	MaxIdleConnCount     int           `json:"maxIdle"`              // (Optional) Max idle connection configuration for underlying connection pool.
+	MaxOpenConnCount     int           `json:"maxOpen"`              // (Optional) Max open connection configuration for underlying connection pool.
+	MaxConnLifeTime      time.Duration `json:"maxLifeTime"`          // (Optional) Max amount of time a connection may be idle before being closed.
+	QueryTimeout         time.Duration `json:"queryTimeout"`         // (Optional) Max query time for per dql.
+	ExecTimeout          time.Duration `json:"execTimeout"`          // (Optional) Max exec time for dml.
+	TranTimeout          time.Duration `json:"tranTimeout"`          // (Optional) Max exec time for a transaction.
+	PrepareTimeout       time.Duration `json:"prepareTimeout"`       // (Optional) Max exec time for prepare operation.
+	CreatedAt            string        `json:"createdAt"`            // (Optional) The field name of table for automatic-filled created datetime.
+	UpdatedAt            string        `json:"updatedAt"`            // (Optional) The field name of table for automatic-filled updated datetime.
+	DeletedAt            string        `json:"deletedAt"`            // (Optional) The field name of table for automatic-filled deleted datetime (soft delete).
+	TimeMaintainDisabled bool          `json:"timeMaintainDisabled"` // (Optional) Disable the automatic time maintaining feature.
+}
+
+const (
+	DefaultGroupName = "default" // Default group name.
+)
+
+// configs specifies internal used configuration object.
+// The embedded RWMutex guards both the configuration map and the default
+// group name against concurrent access.
+var configs struct {
+	sync.RWMutex
+	config Config // All configurations.
+	group  string // Default configuration group.
+}
+
+// init prepares the package configuration with an empty map and the default
+// group name.
+func init() {
+	configs.config = make(Config)
+	configs.group = DefaultGroupName
+}
+
+// SetConfig sets the global configuration for package.
+// It will overwrite the old configuration of package.
+// Every node is normalized through parseConfigNode before being stored, and
+// all cached db instances are cleared afterwards.
+func SetConfig(config Config) {
+	defer instances.Clear()
+	configs.Lock()
+	defer configs.Unlock()
+	for group, nodes := range config {
+		for i := range nodes {
+			nodes[i] = parseConfigNode(nodes[i])
+		}
+		config[group] = nodes
+	}
+	configs.config = config
+}
+
+// SetConfigGroup sets the configuration for given group.
+// Nodes are normalized through parseConfigNode, and the cached db instances
+// are cleared afterwards.
+func SetConfigGroup(group string, nodes ConfigGroup) {
+	defer instances.Clear()
+	configs.Lock()
+	defer configs.Unlock()
+	for i := range nodes {
+		nodes[i] = parseConfigNode(nodes[i])
+	}
+	configs.config[group] = nodes
+}
+
+// AddConfigNode adds one node configuration to configuration of given group.
+// The node is normalized through parseConfigNode, and the cached db instances
+// are cleared afterwards.
+func AddConfigNode(group string, node ConfigNode) {
+	defer instances.Clear()
+	configs.Lock()
+	defer configs.Unlock()
+	parsed := parseConfigNode(node)
+	configs.config[group] = append(configs.config[group], parsed)
+}
+
+// parseConfigNode parses `Link` configuration syntax.
+// If `Link` is set it is expanded into individual fields; when the database
+// type is still missing afterwards, it is extracted from the "type:rest"
+// prefix of the remaining `Link` string.
+func parseConfigNode(node ConfigNode) ConfigNode {
+	if node.Link != "" {
+		node = *parseConfigNodeLink(&node)
+	}
+	// Fallback: derive the database type from a "type:link" shaped Link value.
+	if node.Link != "" && node.Type == "" {
+		match, _ := gregex.MatchString(`([a-z]+):(.+)`, node.Link)
+		if len(match) == 3 {
+			node.Type = gstr.Trim(match[1])
+			node.Link = gstr.Trim(match[2])
+		}
+	}
+	return node
+}
+
+// AddDefaultConfigNode adds one node configuration to configuration of default group.
+func AddDefaultConfigNode(node ConfigNode) {
+	AddConfigNode(DefaultGroupName, node)
+}
+
+// AddDefaultConfigGroup adds multiple node configurations to configuration of default group.
+// Note that it replaces any existing nodes of the default group rather than appending.
+func AddDefaultConfigGroup(nodes ConfigGroup) {
+	SetConfigGroup(DefaultGroupName, nodes)
+}
+
+// GetConfig retrieves and returns the configuration of given group.
+// It returns nil when the group is not configured.
+func GetConfig(group string) ConfigGroup {
+	configs.RLock()
+	defer configs.RUnlock()
+	nodes := configs.config[group]
+	return nodes
+}
+
+// SetDefaultGroup sets the group name for default configuration.
+// The cached db instances are cleared afterwards.
+func SetDefaultGroup(name string) {
+	defer instances.Clear()
+	configs.Lock()
+	configs.group = name
+	configs.Unlock()
+}
+
+// GetDefaultGroup returns the name of the default configuration group.
+//
+// Note: this is a read-only accessor; it no longer clears the cached db
+// instances, which a plain getter must never do (the previous
+// `defer instances.Clear()` here flushed every cached DB on each read).
+func GetDefaultGroup() string {
+	configs.RLock()
+	defer configs.RUnlock()
+	return configs.group
+}
+
+// IsConfigured checks and returns whether the database configured.
+// It returns true if any configuration exists.
+func IsConfigured() bool {
+	configs.RLock()
+	defer configs.RUnlock()
+	return len(configs.config) != 0
+}
+
+// SetLogger sets the logger for orm.
+func (c *Core) SetLogger(logger glog.ILogger) {
+	c.logger = logger
+}
+
+// GetLogger returns the logger of the orm.
+func (c *Core) GetLogger() glog.ILogger {
+	return c.logger
+}
+
+// SetMaxIdleConnCount sets the maximum number of connections in the idle
+// connection pool.
+//
+// If MaxOpenConns is greater than 0 but less than the new MaxIdleConns,
+// then the new MaxIdleConns will be reduced to match the MaxOpenConns limit.
+//
+// If n <= 0, no idle connections are retained.
+//
+// The default max idle connections is currently 2. This may change in
+// a future release.
+//
+// NOTE(review): the value is only stored on dynamicConfig here; presumably it
+// is applied to the underlying sql.DB pool elsewhere — confirm in pool setup.
+func (c *Core) SetMaxIdleConnCount(n int) {
+	c.dynamicConfig.MaxIdleConnCount = n
+}
+
+// SetMaxOpenConnCount sets the maximum number of open connections to the database.
+//
+// If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than
+// MaxIdleConns, then MaxIdleConns will be reduced to match the new
+// MaxOpenConns limit.
+//
+// If n <= 0, then there is no limit on the number of open connections.
+// The default is 0 (unlimited).
+func (c *Core) SetMaxOpenConnCount(n int) {
+	c.dynamicConfig.MaxOpenConnCount = n
+}
+
+// SetMaxConnLifeTime sets the maximum amount of time a connection may be reused.
+//
+// Expired connections may be closed lazily before reuse.
+//
+// If d <= 0, connections are not closed due to a connection's age.
+func (c *Core) SetMaxConnLifeTime(d time.Duration) {
+	c.dynamicConfig.MaxConnLifeTime = d
+}
+
+// GetConfig returns the current used node configuration.
+func (c *Core) GetConfig() *ConfigNode {
+	internalData := c.GetInternalCtxDataFromCtx(c.db.GetCtx())
+	if internalData != nil && internalData.ConfigNode != nil {
+		// Prefer the config node carried in context only when it belongs to the
+		// same database name as this Core. In nested transaction scenarios the
+		// context travels through the whole procedure, so its ConfigNode may
+		// still be the one from the first transaction object, while the current
+		// DB may operate on a different schema.
+		if c.config.Name == internalData.ConfigNode.Name {
+			return internalData.ConfigNode
+		}
+	}
+	return c.config
+}
+
+// SetDebug enables/disables the debug mode.
+func (c *Core) SetDebug(debug bool) {
+	c.debug.Set(debug)
+}
+
+// GetDebug returns the debug value.
+func (c *Core) GetDebug() bool {
+	return c.debug.Val()
+}
+
+// GetCache returns the internal cache object.
+func (c *Core) GetCache() *gcache.Cache {
+	return c.cache
+}
+
+// GetGroup returns the group string configured.
+func (c *Core) GetGroup() string {
+	return c.group
+}
+
+// SetDryRun enables/disables the DryRun feature.
+func (c *Core) SetDryRun(enabled bool) {
+	c.config.DryRun = enabled
+}
+
+// GetDryRun returns the DryRun value.
+// It is true when either this node's config or the package-wide allDryRun
+// flag enables dry running.
+func (c *Core) GetDryRun() bool {
+	return c.config.DryRun || allDryRun
+}
+
+// GetPrefix returns the table prefix string configured.
+func (c *Core) GetPrefix() string {
+	return c.config.Prefix
+}
+
+// GetSchema returns the schema configured.
+// An empty schema falls back to the configured database name.
+func (c *Core) GetSchema() string {
+	schema := c.schema
+	if schema == "" {
+		schema = c.db.GetConfig().Name
+	}
+	return schema
+}
+
+// parseConfigNodeLink expands the `Link` string of `node` into individual
+// configuration fields (type, user, pass, protocol, host/port or file name,
+// database name and extra options), parses `Extra` into the node, and applies
+// default charset/protocol values.
+func parseConfigNodeLink(node *ConfigNode) *ConfigNode {
+	var match []string
+	if node.Link != "" {
+		match, _ = gregex.MatchString(linkPattern, node.Link)
+		if len(match) > 5 {
+			node.Type = match[1]
+			node.User = match[2]
+			node.Pass = match[3]
+			node.Protocol = match[4]
+			array := gstr.Split(match[5], ":")
+			if len(array) == 2 && node.Protocol != "file" {
+				node.Host = array[0]
+				node.Port = array[1]
+				node.Name = match[6]
+			} else {
+				// File-based databases (e.g. sqlite) carry the file name here.
+				node.Name = match[5]
+			}
+			// Guard with len(match) > 7 before indexing match[7]; the previous
+			// bound of len(match) > 6 did not actually prove that index 7 exists.
+			if len(match) > 7 && match[7] != "" {
+				node.Extra = match[7]
+			}
+			node.Link = ""
+		}
+	}
+	if node.Extra != "" {
+		if m, _ := gstr.Parse(node.Extra); len(m) > 0 {
+			_ = gconv.Struct(m, &node)
+		}
+	}
+	// Default value checks.
+	if node.Charset == "" {
+		node.Charset = defaultCharset
+	}
+	if node.Protocol == "" {
+		node.Protocol = defaultProtocol
+	}
+	return node
+}

+ 67 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_ctx.go

@@ -0,0 +1,67 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"context"
+
+	"github.com/gogf/gf/v2/os/gctx"
+)
+
+// internalCtxData stores data in ctx for internal usage purpose.
+// It is injected by InjectInternalCtxData and read back by
+// GetInternalCtxDataFromCtx.
+type internalCtxData struct {
+	// Operation DB.
+	DB DB
+
+	// Used configuration node in current operation.
+	ConfigNode *ConfigNode
+
+	// The first column in result response from database server.
+	// This attribute is used for Value/Count selection statement purpose,
+	// which is to avoid HOOK handler that might modify the result columns
+	// that can confuse the Value/Count selection statement logic.
+	FirstResultColumn string
+}
+
+const (
+	// Context key under which the *internalCtxData object is stored.
+	internalCtxDataKeyInCtx gctx.StrKey = "InternalCtxData"
+
+	// `ignoreResultKeyInCtx` is a mark for some db drivers that do not support `RowsAffected` function,
+	// for example: `clickhouse`. The `clickhouse` does not support fetching insert/update results,
+	// but returns errors when execute `RowsAffected`. It here ignores the calling of `RowsAffected`
+	// to avoid triggering errors, rather than ignoring errors after they are triggered.
+	ignoreResultKeyInCtx gctx.StrKey = "IgnoreResult"
+)
+
+// InjectInternalCtxData injects the internal data object into `ctx` for
+// internal usage, unless the context already carries one.
+func (c *Core) InjectInternalCtxData(ctx context.Context) context.Context {
+	if ctx.Value(internalCtxDataKeyInCtx) != nil {
+		return ctx
+	}
+	data := &internalCtxData{
+		DB:         c.db,
+		ConfigNode: c.config,
+	}
+	return context.WithValue(ctx, internalCtxDataKeyInCtx, data)
+}
+
+// GetInternalCtxDataFromCtx retrieves the internal data object from `ctx`,
+// or nil when the context carries none.
+func (c *Core) GetInternalCtxDataFromCtx(ctx context.Context) *internalCtxData {
+	v := ctx.Value(internalCtxDataKeyInCtx)
+	if v == nil {
+		return nil
+	}
+	return v.(*internalCtxData)
+}
+
+// InjectIgnoreResult marks `ctx` so that RowsAffected calls are skipped for
+// drivers that do not support them, unless the mark is already present.
+func (c *Core) InjectIgnoreResult(ctx context.Context) context.Context {
+	if v := ctx.Value(ignoreResultKeyInCtx); v != nil {
+		return ctx
+	}
+	return context.WithValue(ctx, ignoreResultKeyInCtx, true)
+}
+
+// GetIgnoreResultFromCtx reports whether `ctx` carries the ignore-result mark.
+func (c *Core) GetIgnoreResultFromCtx(ctx context.Context) bool {
+	v := ctx.Value(ignoreResultKeyInCtx)
+	return v != nil
+}

+ 43 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_link.go

@@ -0,0 +1,43 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"database/sql"
+)
+
+// dbLink is used to implement interface Link for DB.
+type dbLink struct {
+	*sql.DB         // Underlying DB object.
+	isOnMaster bool // isOnMaster marks whether current link is operated on master node.
+}
+
+// txLink is used to implement interface Link for TX.
+// Transactions carry no master flag: they always run on the master node.
+type txLink struct {
+	*sql.Tx
+}
+
+// IsTransaction returns if current Link is a transaction.
+// A dbLink is never a transaction.
+func (l *dbLink) IsTransaction() bool {
+	return false
+}
+
+// IsOnMaster checks and returns whether current link is operated on master node.
+func (l *dbLink) IsOnMaster() bool {
+	return l.isOnMaster
+}
+
+// IsTransaction returns if current Link is a transaction.
+// A txLink always is.
+func (l *txLink) IsTransaction() bool {
+	return true
+}
+
+// IsOnMaster checks and returns whether current link is operated on master node.
+// Note that, transaction operation is always operated on master node.
+func (l *txLink) IsOnMaster() bool {
+	return true
+}

+ 409 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_structure.go

@@ -0,0 +1,409 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"context"
+	"database/sql/driver"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/gogf/gf/v2/encoding/gbinary"
+	"github.com/gogf/gf/v2/errors/gerror"
+	"github.com/gogf/gf/v2/internal/intlog"
+	"github.com/gogf/gf/v2/internal/json"
+	"github.com/gogf/gf/v2/os/gtime"
+	"github.com/gogf/gf/v2/text/gregex"
+	"github.com/gogf/gf/v2/text/gstr"
+	"github.com/gogf/gf/v2/util/gconv"
+	"github.com/gogf/gf/v2/util/gutil"
+)
+
+// GetFieldTypeStr retrieves and returns the field type string for certain field by name.
+// It returns an empty string when the field cannot be found.
+func (c *Core) GetFieldTypeStr(ctx context.Context, fieldName, table, schema string) string {
+	if field := c.GetFieldType(ctx, fieldName, table, schema); field != nil {
+		return field.Type
+	}
+	return ""
+}
+
+// GetFieldType retrieves and returns the field type object for certain field by name.
+// It returns nil when the table fields cannot be retrieved or the field does not exist.
+func (c *Core) GetFieldType(ctx context.Context, fieldName, table, schema string) *TableField {
+	fieldsMap, err := c.db.TableFields(ctx, table, schema)
+	if err != nil {
+		intlog.Errorf(
+			ctx,
+			`TableFields failed for table "%s", schema "%s": %+v`,
+			table, schema, err,
+		)
+		return nil
+	}
+	// Direct map lookup instead of the previous O(n) linear scan over the map
+	// for an exact key match.
+	if field, ok := fieldsMap[fieldName]; ok {
+		return field
+	}
+	return nil
+}
+
+// ConvertDataForRecord is a very important function, which does converting for any data that
+// will be inserted into table/collection as a record.
+//
+// The parameter `value` should be type of *map/map/*struct/struct.
+// It supports embedded struct definition for struct.
+// Each field value is converted according to the database type of the
+// corresponding table column via ConvertValueForField.
+func (c *Core) ConvertDataForRecord(ctx context.Context, value interface{}, table string) (map[string]interface{}, error) {
+	var (
+		err  error
+		data = MapOrStructToMapDeep(value, true)
+	)
+	for fieldName, fieldValue := range data {
+		data[fieldName], err = c.db.ConvertValueForField(
+			ctx,
+			c.GetFieldTypeStr(ctx, fieldName, table, c.GetSchema()),
+			fieldValue,
+		)
+		if err != nil {
+			return nil, gerror.Wrapf(err, `ConvertDataForRecord failed for value: %#v`, fieldValue)
+		}
+	}
+	return data, nil
+}
+
+// ConvertValueForField converts value to the type of the record field.
+// The parameter `fieldType` is the target record field.
+// The parameter `fieldValue` is the value that to be committed to record field.
+func (c *Core) ConvertValueForField(ctx context.Context, fieldType string, fieldValue interface{}) (interface{}, error) {
+	var (
+		err            error
+		convertedValue = fieldValue
+	)
+	// If `value` implements interface `driver.Valuer`, it then uses the interface for value converting.
+	// (The previous code had a redundant nested `if err != nil` duplicating this check.)
+	if valuer, ok := fieldValue.(driver.Valuer); ok {
+		if convertedValue, err = valuer.Value(); err != nil {
+			return nil, err
+		}
+		return convertedValue, nil
+	}
+	// Default value converting.
+	var (
+		rvValue = reflect.ValueOf(fieldValue)
+		rvKind  = rvValue.Kind()
+	)
+	for rvKind == reflect.Ptr {
+		rvValue = rvValue.Elem()
+		rvKind = rvValue.Kind()
+	}
+	switch rvKind {
+	case reflect.Slice, reflect.Array, reflect.Map:
+		// It should ignore the bytes type.
+		if _, ok := fieldValue.([]byte); !ok {
+			// Convert the value to JSON.
+			convertedValue, err = json.Marshal(fieldValue)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+	case reflect.Struct:
+		switch r := fieldValue.(type) {
+		// If the time is zero, it then updates it to nil,
+		// which will insert/update the value to database as "null".
+		case time.Time:
+			if r.IsZero() {
+				convertedValue = nil
+			}
+
+		case gtime.Time:
+			if r.IsZero() {
+				convertedValue = nil
+			} else {
+				convertedValue = r.Time
+			}
+
+		case *gtime.Time:
+			if r.IsZero() {
+				convertedValue = nil
+			} else {
+				convertedValue = r.Time
+			}
+
+		case *time.Time:
+			// Nothing to do.
+
+		case Counter, *Counter:
+			// Nothing to do.
+
+		default:
+			// If `value` implements interface iNil,
+			// check its IsNil() function, if got true,
+			// which will insert/update the value to database as "null".
+			if v, ok := fieldValue.(iNil); ok && v.IsNil() {
+				convertedValue = nil
+			} else if s, ok := fieldValue.(iString); ok {
+				// Use string conversion in default.
+				convertedValue = s.String()
+			} else {
+				// Convert the value to JSON.
+				convertedValue, err = json.Marshal(fieldValue)
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+	return convertedValue, nil
+}
+
+// CheckLocalTypeForField checks and returns corresponding type for given db type.
+// `fieldType` may carry a size/pattern suffix like "varchar(100)" or "bit(1)";
+// `fieldValue` is only consulted for ambiguous types such as "bit".
+func (c *Core) CheckLocalTypeForField(ctx context.Context, fieldType string, fieldValue interface{}) (LocalType, error) {
+	var (
+		typeName    string
+		typePattern string
+	)
+	// Split "name(pattern)" into its two parts when a pattern is present.
+	match, _ := gregex.MatchString(`(.+?)\((.+)\)`, fieldType)
+	if len(match) == 3 {
+		typeName = gstr.Trim(match[1])
+		typePattern = gstr.Trim(match[2])
+	} else {
+		typeName = gstr.Split(fieldType, " ")[0]
+	}
+
+	typeName = strings.ToLower(typeName)
+
+	switch typeName {
+	case
+		fieldTypeBinary,
+		fieldTypeVarbinary,
+		fieldTypeBlob,
+		fieldTypeTinyblob,
+		fieldTypeMediumblob,
+		fieldTypeLongblob:
+		return LocalTypeBytes, nil
+
+	case
+		fieldTypeInt,
+		fieldTypeTinyint,
+		fieldTypeSmallInt,
+		fieldTypeSmallint,
+		fieldTypeMediumInt,
+		fieldTypeMediumint,
+		fieldTypeSerial:
+		// "unsigned" in the full type string selects the unsigned local type.
+		if gstr.ContainsI(fieldType, "unsigned") {
+			return LocalTypeUint, nil
+		}
+		return LocalTypeInt, nil
+
+	case
+		fieldTypeBigInt,
+		fieldTypeBigint,
+		fieldTypeBigserial:
+		if gstr.ContainsI(fieldType, "unsigned") {
+			return LocalTypeUint64, nil
+		}
+		return LocalTypeInt64, nil
+
+	case
+		fieldTypeReal:
+		return LocalTypeFloat32, nil
+
+	case
+		fieldTypeDecimal,
+		fieldTypeMoney,
+		fieldTypeNumeric,
+		fieldTypeSmallmoney:
+		// Exact-precision numerics are kept as strings to avoid float loss.
+		return LocalTypeString, nil
+	case
+		fieldTypeFloat,
+		fieldTypeDouble:
+		return LocalTypeFloat64, nil
+
+	case
+		fieldTypeBit:
+		// It is suggested using bit(1) as boolean.
+		if typePattern == "1" {
+			return LocalTypeBool, nil
+		}
+		s := gconv.String(fieldValue)
+		// mssql is true|false string.
+		if strings.EqualFold(s, "true") || strings.EqualFold(s, "false") {
+			return LocalTypeBool, nil
+		}
+		if gstr.ContainsI(fieldType, "unsigned") {
+			return LocalTypeUint64Bytes, nil
+		}
+		return LocalTypeInt64Bytes, nil
+
+	case
+		fieldTypeBool:
+		return LocalTypeBool, nil
+
+	case
+		fieldTypeDate:
+		return LocalTypeDate, nil
+
+	case
+		fieldTypeDatetime,
+		fieldTypeTimestamp,
+		fieldTypeTimestampz:
+		return LocalTypeDatetime, nil
+
+	case
+		fieldTypeJson:
+		return LocalTypeJson, nil
+
+	case
+		fieldTypeJsonb:
+		return LocalTypeJsonb, nil
+
+	default:
+		// Auto-detect field type, using key match.
+		// These substring heuristics cover driver-specific type names that the
+		// explicit cases above do not enumerate.
+		switch {
+		case strings.Contains(typeName, "text") || strings.Contains(typeName, "char") || strings.Contains(typeName, "character"):
+			return LocalTypeString, nil
+
+		case strings.Contains(typeName, "float") || strings.Contains(typeName, "double") || strings.Contains(typeName, "numeric"):
+			return LocalTypeFloat64, nil
+
+		case strings.Contains(typeName, "bool"):
+			return LocalTypeBool, nil
+
+		case strings.Contains(typeName, "binary") || strings.Contains(typeName, "blob"):
+			return LocalTypeBytes, nil
+
+		case strings.Contains(typeName, "int"):
+			if gstr.ContainsI(fieldType, "unsigned") {
+				return LocalTypeUint, nil
+			}
+			return LocalTypeInt, nil
+
+		case strings.Contains(typeName, "time"):
+			return LocalTypeDatetime, nil
+
+		case strings.Contains(typeName, "date"):
+			return LocalTypeDatetime, nil
+
+		default:
+			return LocalTypeString, nil
+		}
+	}
+}
+
+// ConvertValueForLocal converts value to local Golang type of value according field type name from database.
+// The parameter `fieldType` is in lower case, like:
+// `float(5,2)`, `unsigned double(5,2)`, `decimal(10,2)`, `char(45)`, `varchar(100)`, etc.
+func (c *Core) ConvertValueForLocal(ctx context.Context, fieldType string, fieldValue interface{}) (interface{}, error) {
+	// If there's no type retrieved, it returns the `fieldValue` directly
+	// to use its original data type, as `fieldValue` is type of interface{}.
+	if fieldType == "" {
+		return fieldValue, nil
+	}
+	typeName, err := c.db.CheckLocalTypeForField(ctx, fieldType, fieldValue)
+	if err != nil {
+		return nil, err
+	}
+	switch typeName {
+	case LocalTypeBytes:
+		// NOTE(review): typeNameStr is the LOCAL type name here, not the db
+		// field type, so the "binary"/"blob" containment check below looks
+		// like it can never match — confirm against upstream intent.
+		var typeNameStr = string(typeName)
+		if strings.Contains(typeNameStr, "binary") || strings.Contains(typeNameStr, "blob") {
+			return fieldValue, nil
+		}
+		return gconv.Bytes(fieldValue), nil
+
+	case LocalTypeInt:
+		return gconv.Int(gconv.String(fieldValue)), nil
+
+	case LocalTypeUint:
+		return gconv.Uint(gconv.String(fieldValue)), nil
+
+	case LocalTypeInt64:
+		return gconv.Int64(gconv.String(fieldValue)), nil
+
+	case LocalTypeUint64:
+		return gconv.Uint64(gconv.String(fieldValue)), nil
+
+	case LocalTypeInt64Bytes:
+		// Big-endian byte payloads (e.g. mysql "bit") decoded to integers.
+		return gbinary.BeDecodeToInt64(gconv.Bytes(fieldValue)), nil
+
+	case LocalTypeUint64Bytes:
+		return gbinary.BeDecodeToUint64(gconv.Bytes(fieldValue)), nil
+
+	case LocalTypeFloat32:
+		return gconv.Float32(gconv.String(fieldValue)), nil
+
+	case LocalTypeFloat64:
+		return gconv.Float64(gconv.String(fieldValue)), nil
+
+	case LocalTypeBool:
+		s := gconv.String(fieldValue)
+		// mssql is true|false string.
+		if strings.EqualFold(s, "true") {
+			return 1, nil
+		}
+		if strings.EqualFold(s, "false") {
+			return 0, nil
+		}
+		return gconv.Bool(fieldValue), nil
+
+	case LocalTypeDate:
+		// Date without time.
+		if t, ok := fieldValue.(time.Time); ok {
+			return gtime.NewFromTime(t).Format("Y-m-d"), nil
+		}
+		t, _ := gtime.StrToTime(gconv.String(fieldValue))
+		return t.Format("Y-m-d"), nil
+
+	case LocalTypeDatetime:
+		if t, ok := fieldValue.(time.Time); ok {
+			return gtime.NewFromTime(t), nil
+		}
+		t, _ := gtime.StrToTime(gconv.String(fieldValue))
+		return t, nil
+
+	default:
+		// Everything else (json, jsonb, strings) is returned as string.
+		return gconv.String(fieldValue), nil
+	}
+}
+
+// mappingAndFilterData automatically mappings the map key to table field and removes
+// all key-value pairs that are not the field of given table.
+// When `filter` is true, any key that still does not match a table field after
+// mapping is dropped from `data`. Note that `data` is modified in place.
+func (c *Core) mappingAndFilterData(ctx context.Context, schema, table string, data map[string]interface{}, filter bool) (map[string]interface{}, error) {
+	fieldsMap, err := c.db.TableFields(ctx, c.guessPrimaryTableName(table), schema)
+	if err != nil {
+		return nil, err
+	}
+	fieldsKeyMap := make(map[string]interface{}, len(fieldsMap))
+	for k := range fieldsMap {
+		fieldsKeyMap[k] = nil
+	}
+	// Automatic data key to table field name mapping.
+	// Unmatched keys are renamed to the closest field name, but never at the
+	// expense of a key the caller already set explicitly.
+	var foundKey string
+	for dataKey, dataValue := range data {
+		if _, ok := fieldsKeyMap[dataKey]; !ok {
+			foundKey, _ = gutil.MapPossibleItemByKey(fieldsKeyMap, dataKey)
+			if foundKey != "" {
+				if _, ok = data[foundKey]; !ok {
+					data[foundKey] = dataValue
+				}
+				delete(data, dataKey)
+			}
+		}
+	}
+	// Data filtering.
+	// It deletes all key-value pairs that has incorrect field name.
+	if filter {
+		for dataKey := range data {
+			if _, ok := fieldsMap[dataKey]; !ok {
+				delete(data, dataKey)
+			}
+		}
+	}
+	return data, nil
+}

+ 86 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_trace.go

@@ -0,0 +1,86 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+//
+
+package gdb
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/gogf/gf/v2/net/gtrace"
+)
+
+// Attribute and event keys used when reporting SQL executions to OpenTelemetry.
+const (
+	traceInstrumentName       = "github.com/gogf/gf/v2/database/gdb"
+	traceAttrDbType           = "db.type"
+	traceAttrDbHost           = "db.host"
+	traceAttrDbPort           = "db.port"
+	traceAttrDbName           = "db.name"
+	traceAttrDbUser           = "db.user"
+	traceAttrDbLink           = "db.link"
+	traceAttrDbGroup          = "db.group"
+	traceEventDbExecution     = "db.execution"
+	traceEventDbExecutionSql  = "db.execution.sql"
+	traceEventDbExecutionCost = "db.execution.cost"
+	traceEventDbExecutionRows = "db.execution.rows"
+	traceEventDbExecutionTxID = "db.execution.txid"
+	traceEventDbExecutionType = "db.execution.type"
+)
+
+// addSqlToTracing adds sql information to tracer if it's enabled.
+func (c *Core) traceSpanEnd(ctx context.Context, span trace.Span, sql *Sql) {
+	if gtrace.IsUsingDefaultProvider() || !gtrace.IsTracingInternal() {
+		return
+	}
+	if sql.Error != nil {
+		span.SetStatus(codes.Error, fmt.Sprintf(`%+v`, sql.Error))
+	}
+	labels := make([]attribute.KeyValue, 0)
+	labels = append(labels, gtrace.CommonLabels()...)
+	labels = append(labels,
+		attribute.String(traceAttrDbType, c.db.GetConfig().Type),
+		semconv.DBStatementKey.String(sql.Format),
+	)
+	if c.db.GetConfig().Host != "" {
+		labels = append(labels, attribute.String(traceAttrDbHost, c.db.GetConfig().Host))
+	}
+	if c.db.GetConfig().Port != "" {
+		labels = append(labels, attribute.String(traceAttrDbPort, c.db.GetConfig().Port))
+	}
+	if c.db.GetConfig().Name != "" {
+		labels = append(labels, attribute.String(traceAttrDbName, c.db.GetConfig().Name))
+	}
+	if c.db.GetConfig().User != "" {
+		labels = append(labels, attribute.String(traceAttrDbUser, c.db.GetConfig().User))
+	}
+	if filteredLink := c.db.GetCore().FilteredLink(); filteredLink != "" {
+		labels = append(labels, attribute.String(traceAttrDbLink, c.db.GetCore().FilteredLink()))
+	}
+	if group := c.db.GetGroup(); group != "" {
+		labels = append(labels, attribute.String(traceAttrDbGroup, group))
+	}
+	span.SetAttributes(labels...)
+	events := []attribute.KeyValue{
+		attribute.String(traceEventDbExecutionSql, sql.Format),
+		attribute.String(traceEventDbExecutionCost, fmt.Sprintf(`%d ms`, sql.End-sql.Start)),
+		attribute.String(traceEventDbExecutionRows, fmt.Sprintf(`%d`, sql.RowsAffected)),
+	}
+	if sql.IsTransaction {
+		if v := ctx.Value(transactionIdForLoggerCtx); v != nil {
+			events = append(events, attribute.String(
+				traceEventDbExecutionTxID, fmt.Sprintf(`%d`, v.(uint64)),
+			))
+		}
+	}
+	events = append(events, attribute.String(traceEventDbExecutionType, sql.Type))
+	span.AddEvent(traceEventDbExecution, trace.WithAttributes(events...))
+}

+ 544 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_transaction.go

@@ -0,0 +1,544 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"context"
+	"database/sql"
+	"reflect"
+
+	"github.com/gogf/gf/v2/container/gtype"
+	"github.com/gogf/gf/v2/errors/gcode"
+	"github.com/gogf/gf/v2/errors/gerror"
+	"github.com/gogf/gf/v2/internal/reflection"
+	"github.com/gogf/gf/v2/text/gregex"
+	"github.com/gogf/gf/v2/util/gconv"
+)
+
+// TXCore is the struct for transaction management.
+// It implements the TX interface and supports nested transactions via SAVEPOINTs
+// (see transactionCount and the nested Begin/Commit/Rollback methods).
+type TXCore struct {
+	db               DB              // db is the current gdb database manager.
+	tx               *sql.Tx         // tx is the raw and underlying transaction manager.
+	ctx              context.Context // ctx is the context for this transaction only.
+	master           *sql.DB         // master is the raw and underlying database manager.
+	transactionId    string          // transactionId is a unique id generated by this object for this transaction.
+	transactionCount int             // transactionCount marks the times that Begins.
+	isClosed         bool            // isClosed marks this transaction has already been committed or rolled back.
+}
+
+const (
+	transactionPointerPrefix    = "transaction" // Prefix for nested-transaction SAVEPOINT names.
+	contextTransactionKeyPrefix = "TransactionObjectForGroup_" // Context key prefix, suffixed with the group name.
+	transactionIdForLoggerCtx   = "TransactionId" // Context key carrying the numeric transaction id for logging/tracing.
+)
+
+// transactionIdGenerator produces monotonically increasing transaction ids.
+var transactionIdGenerator = gtype.NewUint64()
+
+// Begin starts and returns the transaction object.
+// You should call Commit or Rollback functions of the transaction object
+// if you no longer use the transaction. Commit or Rollback functions will also
+// close the transaction automatically.
+func (c *Core) Begin(ctx context.Context) (tx TX, err error) {
+	return c.doBeginCtx(ctx)
+}
+
+// doBeginCtx opens a transaction on the master node.
+// The BEGIN statement is routed through DoCommit so it participates in the
+// same tracing/logging pipeline as every other statement.
+func (c *Core) doBeginCtx(ctx context.Context) (TX, error) {
+	master, err := c.db.Master()
+	if err != nil {
+		return nil, err
+	}
+	var out DoCommitOutput
+	out, err = c.db.DoCommit(ctx, DoCommitInput{
+		Db:            master,
+		Sql:           "BEGIN",
+		Type:          SqlTypeBegin,
+		IsTransaction: true,
+	})
+	return out.Tx, err
+}
+
+// Transaction wraps the transaction logic using function `f`.
+// It rollbacks the transaction and returns the error from function `f` if
+// it returns non-nil error. It commits the transaction and returns nil if
+// function `f` returns nil.
+//
+// Note that, you should not Commit or Rollback the transaction in function `f`
+// as it is automatically handled by this function.
+func (c *Core) Transaction(ctx context.Context, f func(ctx context.Context, tx TX) error) (err error) {
+	if ctx == nil {
+		ctx = c.db.GetCtx()
+	}
+	ctx = c.InjectInternalCtxData(ctx)
+	// Check transaction object from context: if one already exists for this
+	// group, delegate to it so the call becomes a nested transaction.
+	var tx TX
+	tx = TXFromCtx(ctx, c.db.GetGroup())
+	if tx != nil {
+		return tx.Transaction(ctx, f)
+	}
+	tx, err = c.doBeginCtx(ctx)
+	if err != nil {
+		return err
+	}
+	// Inject transaction object into context.
+	tx = tx.Ctx(WithTX(tx.GetCtx(), tx))
+	defer func() {
+		if err == nil {
+			// Convert a panic from `f` into an error so the transaction is rolled back.
+			if exception := recover(); exception != nil {
+				if v, ok := exception.(error); ok && gerror.HasStack(v) {
+					err = v
+				} else {
+					err = gerror.NewCodef(gcode.CodeInternalPanic, "%+v", exception)
+				}
+			}
+		}
+		// Note: a Rollback/Commit error replaces the error returned by `f`.
+		if err != nil {
+			if e := tx.Rollback(); e != nil {
+				err = e
+			}
+		} else {
+			if e := tx.Commit(); e != nil {
+				err = e
+			}
+		}
+	}()
+	err = f(tx.GetCtx(), tx)
+	return
+}
+
+// WithTX injects given transaction object into context and returns a new context.
+// It is a no-op (returning the input or the transaction's own context) if a
+// transaction for the same group is already present.
+func WithTX(ctx context.Context, tx TX) context.Context {
+	if tx == nil {
+		return ctx
+	}
+	// Check repeat injection from given.
+	group := tx.GetDB().GetGroup()
+	if ctxTx := TXFromCtx(ctx, group); ctxTx != nil && ctxTx.GetDB().GetGroup() == group {
+		return ctx
+	}
+	dbCtx := tx.GetDB().GetCtx()
+	if ctxTx := TXFromCtx(dbCtx, group); ctxTx != nil && ctxTx.GetDB().GetGroup() == group {
+		return dbCtx
+	}
+	// Inject transaction object and id into context.
+	ctx = context.WithValue(ctx, transactionKeyForContext(group), tx)
+	return ctx
+}
+
+// TXFromCtx retrieves and returns transaction object from context.
+// It is usually used in nested transaction feature, and it returns nil if it is not set previously.
+func TXFromCtx(ctx context.Context, group string) TX {
+	if ctx == nil {
+		return nil
+	}
+	value := ctx.Value(transactionKeyForContext(group))
+	if value == nil {
+		return nil
+	}
+	tx := value.(TX)
+	// A committed/rolled-back transaction is treated as absent.
+	if tx.IsClosed() {
+		return nil
+	}
+	return tx.Ctx(ctx)
+}
+
+// transactionKeyForContext forms and returns a string for storing transaction object
+// of certain database group into context.
+func transactionKeyForContext(group string) string {
+	return contextTransactionKeyPrefix + group
+}
+
+// transactionKeyForNestedPoint forms and returns the quoted SAVEPOINT name for the
+// current nesting level, e.g. `transaction1` at nesting depth 1.
+func (tx *TXCore) transactionKeyForNestedPoint() string {
+	return tx.db.GetCore().QuoteWord(transactionPointerPrefix + gconv.String(tx.transactionCount))
+}
+
+// Ctx sets the context for current transaction.
+// Note that it mutates the receiver and returns the same transaction object.
+func (tx *TXCore) Ctx(ctx context.Context) TX {
+	tx.ctx = ctx
+	if tx.ctx != nil {
+		tx.ctx = tx.db.GetCore().InjectInternalCtxData(tx.ctx)
+	}
+	return tx
+}
+
+// GetCtx returns the context for current transaction.
+func (tx *TXCore) GetCtx() context.Context {
+	return tx.ctx
+}
+
+// GetDB returns the DB for current transaction.
+func (tx *TXCore) GetDB() DB {
+	return tx.db
+}
+
+// GetSqlTX returns the underlying *sql.Tx object for current transaction.
+func (tx *TXCore) GetSqlTX() *sql.Tx {
+	return tx.tx
+}
+
+// Commit commits current transaction.
+// Note that it releases previous saved transaction point if it's in a nested transaction procedure,
+// or else it commits the whole transaction.
+func (tx *TXCore) Commit() error {
+	if tx.transactionCount > 0 {
+		// Nested level: release the SAVEPOINT instead of committing.
+		tx.transactionCount--
+		_, err := tx.Exec("RELEASE SAVEPOINT " + tx.transactionKeyForNestedPoint())
+		return err
+	}
+	_, err := tx.db.DoCommit(tx.ctx, DoCommitInput{
+		Tx:            tx.tx,
+		Sql:           "COMMIT",
+		Type:          SqlTypeTXCommit,
+		IsTransaction: true,
+	})
+	if err == nil {
+		tx.isClosed = true
+	}
+	return err
+}
+
+// Rollback aborts current transaction.
+// Note that it aborts current transaction if it's in a nested transaction procedure,
+// or else it aborts the whole transaction.
+func (tx *TXCore) Rollback() error {
+	if tx.transactionCount > 0 {
+		// Nested level: roll back to the SAVEPOINT instead of aborting everything.
+		tx.transactionCount--
+		_, err := tx.Exec("ROLLBACK TO SAVEPOINT " + tx.transactionKeyForNestedPoint())
+		return err
+	}
+	_, err := tx.db.DoCommit(tx.ctx, DoCommitInput{
+		Tx:            tx.tx,
+		Sql:           "ROLLBACK",
+		Type:          SqlTypeTXRollback,
+		IsTransaction: true,
+	})
+	if err == nil {
+		tx.isClosed = true
+	}
+	return err
+}
+
+// IsClosed checks and returns whether this transaction has already been committed or rolled back.
+func (tx *TXCore) IsClosed() bool {
+	return tx.isClosed
+}
+
+// Begin starts a nested transaction procedure by creating a SAVEPOINT.
+// The nesting counter is only incremented after the statement succeeds.
+func (tx *TXCore) Begin() error {
+	_, err := tx.Exec("SAVEPOINT " + tx.transactionKeyForNestedPoint())
+	if err != nil {
+		return err
+	}
+	tx.transactionCount++
+	return nil
+}
+
+// SavePoint performs `SAVEPOINT xxx` SQL statement that saves transaction at current point.
+// The parameter `point` specifies the point name that will be saved to server.
+func (tx *TXCore) SavePoint(point string) error {
+	_, err := tx.Exec("SAVEPOINT " + tx.db.GetCore().QuoteWord(point))
+	return err
+}
+
+// RollbackTo performs `ROLLBACK TO SAVEPOINT xxx` SQL statement that rollbacks to specified saved transaction.
+// The parameter `point` specifies the point name that was saved previously.
+func (tx *TXCore) RollbackTo(point string) error {
+	_, err := tx.Exec("ROLLBACK TO SAVEPOINT " + tx.db.GetCore().QuoteWord(point))
+	return err
+}
+
+// Transaction wraps the transaction logic using function `f`.
+// It rollbacks the transaction and returns the error from function `f` if
+// it returns non-nil error. It commits the transaction and returns nil if
+// function `f` returns nil.
+//
+// Note that, you should not Commit or Rollback the transaction in function `f`
+// as it is automatically handled by this function.
+func (tx *TXCore) Transaction(ctx context.Context, f func(ctx context.Context, tx TX) error) (err error) {
+	if ctx != nil {
+		tx.ctx = ctx
+	}
+	// Check transaction object from context.
+	if TXFromCtx(tx.ctx, tx.db.GetGroup()) == nil {
+		// Inject transaction object into context.
+		tx.ctx = WithTX(tx.ctx, tx)
+	}
+	// On an already-open transaction this starts a nested level (SAVEPOINT).
+	err = tx.Begin()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err == nil {
+			// Convert a panic from `f` into an error so the level is rolled back.
+			if exception := recover(); exception != nil {
+				if v, ok := exception.(error); ok && gerror.HasStack(v) {
+					err = v
+				} else {
+					err = gerror.NewCodef(gcode.CodeInternalPanic, "%+v", exception)
+				}
+			}
+		}
+		// Note: a Rollback/Commit error replaces the error returned by `f`.
+		if err != nil {
+			if e := tx.Rollback(); e != nil {
+				err = e
+			}
+		} else {
+			if e := tx.Commit(); e != nil {
+				err = e
+			}
+		}
+	}()
+	err = f(tx.ctx, tx)
+	return
+}
+
+// Query does query operation on transaction.
+// See Core.Query.
+func (tx *TXCore) Query(sql string, args ...interface{}) (result Result, err error) {
+	return tx.db.DoQuery(tx.ctx, &txLink{tx.tx}, sql, args...)
+}
+
+// Exec does non-query operation on transaction.
+// See Core.Exec.
+func (tx *TXCore) Exec(sql string, args ...interface{}) (sql.Result, error) {
+	return tx.db.DoExec(tx.ctx, &txLink{tx.tx}, sql, args...)
+}
+
+// Prepare creates a prepared statement for later queries or executions.
+// Multiple queries or executions may be run concurrently from the
+// returned statement.
+// The caller must call the statement's Close method
+// when the statement is no longer needed.
+func (tx *TXCore) Prepare(sql string) (*Stmt, error) {
+	return tx.db.DoPrepare(tx.ctx, &txLink{tx.tx}, sql)
+}
+
+// GetAll queries and returns data records from database. It is an alias of Query.
+func (tx *TXCore) GetAll(sql string, args ...interface{}) (Result, error) {
+	return tx.Query(sql, args...)
+}
+
+// GetOne queries and returns the first matching record from database.
+// It returns nil (with a nil error) when the query matches no rows.
+func (tx *TXCore) GetOne(sql string, args ...interface{}) (Record, error) {
+	records, err := tx.GetAll(sql, args...)
+	if err != nil || len(records) == 0 {
+		return nil, err
+	}
+	return records[0], nil
+}
+
+// GetStruct queries one record from database and converts it to given struct.
+// The parameter `obj` should be a pointer to struct.
+// NOTE(review): when the query matches no rows, `one` is nil; presumably
+// Record.Struct handles a nil receiver — verify against its implementation.
+func (tx *TXCore) GetStruct(obj interface{}, sql string, args ...interface{}) error {
+	one, err := tx.GetOne(sql, args...)
+	if err != nil {
+		return err
+	}
+	return one.Struct(obj)
+}
+
+// GetStructs queries records from database and converts them to given struct slice.
+// The parameter `objPointerSlice` should be type of struct slice: []struct/[]*struct.
+func (tx *TXCore) GetStructs(objPointerSlice interface{}, sql string, args ...interface{}) error {
+	all, err := tx.GetAll(sql, args...)
+	if err != nil {
+		return err
+	}
+	return all.Structs(objPointerSlice)
+}
+
+// GetScan queries one or more records from database and converts them to given struct or
+// struct array.
+//
+// If parameter `pointer` is type of struct pointer, it calls GetStruct internally for
+// the conversion. If parameter `pointer` is type of slice, it calls GetStructs internally
+// for conversion.
+func (tx *TXCore) GetScan(pointer interface{}, sql string, args ...interface{}) error {
+	reflectInfo := reflection.OriginTypeAndKind(pointer)
+	if reflectInfo.InputKind != reflect.Ptr {
+		return gerror.NewCodef(
+			gcode.CodeInvalidParameter,
+			"params should be type of pointer, but got: %v",
+			reflectInfo.InputKind,
+		)
+	}
+	// Dispatch on the pointed-to kind.
+	switch reflectInfo.OriginKind {
+	case reflect.Array, reflect.Slice:
+		return tx.GetStructs(pointer, sql, args...)
+
+	case reflect.Struct:
+		return tx.GetStruct(pointer, sql, args...)
+	}
+	// NOTE(review): "in valid" in the message below looks like a typo for "invalid";
+	// left unchanged as it is a runtime string.
+	return gerror.NewCodef(
+		gcode.CodeInvalidParameter,
+		`in valid parameter type "%v", of which element type should be type of struct/slice`,
+		reflectInfo.InputType,
+	)
+}
+
+// GetValue queries and returns the field value from database.
+// The sql should query only one field from database, or else it returns only one
+// field of the result. Note that map iteration order is random, so with multiple
+// columns the returned field is arbitrary.
+func (tx *TXCore) GetValue(sql string, args ...interface{}) (Value, error) {
+	one, err := tx.GetOne(sql, args...)
+	if err != nil {
+		return nil, err
+	}
+	for _, v := range one {
+		return v, nil
+	}
+	return nil, nil
+}
+
+// GetCount queries and returns the count from database.
+// If the given sql has no COUNT(...) aggregation, its select list is rewritten
+// into COUNT(...) before execution.
+func (tx *TXCore) GetCount(sql string, args ...interface{}) (int64, error) {
+	if !gregex.IsMatchString(`(?i)SELECT\s+COUNT\(.+\)\s+FROM`, sql) {
+		sql, _ = gregex.ReplaceString(`(?i)(SELECT)\s+(.+)\s+(FROM)`, `$1 COUNT($2) $3`, sql)
+	}
+	value, err := tx.GetValue(sql, args...)
+	if err != nil {
+		return 0, err
+	}
+	return value.Int64(), nil
+}
+
+// Insert does "INSERT INTO ..." statement for the table.
+// If there's already one unique record of the data in the table, it returns error.
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// The parameter `batch` specifies the batch operation count when given data is slice.
+func (tx *TXCore) Insert(table string, data interface{}, batch ...int) (sql.Result, error) {
+	if len(batch) > 0 {
+		return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).Insert()
+	}
+	return tx.Model(table).Ctx(tx.ctx).Data(data).Insert()
+}
+
+// InsertIgnore does "INSERT IGNORE INTO ..." statement for the table.
+// If there's already one unique record of the data in the table, it ignores the inserting.
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// The parameter `batch` specifies the batch operation count when given data is slice.
+func (tx *TXCore) InsertIgnore(table string, data interface{}, batch ...int) (sql.Result, error) {
+	if len(batch) > 0 {
+		return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).InsertIgnore()
+	}
+	return tx.Model(table).Ctx(tx.ctx).Data(data).InsertIgnore()
+}
+
+// InsertAndGetId performs action Insert and returns the last insert id that automatically generated.
+func (tx *TXCore) InsertAndGetId(table string, data interface{}, batch ...int) (int64, error) {
+	if len(batch) > 0 {
+		return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).InsertAndGetId()
+	}
+	return tx.Model(table).Ctx(tx.ctx).Data(data).InsertAndGetId()
+}
+
+// Replace does "REPLACE INTO ..." statement for the table.
+// If there's already one unique record of the data in the table, it deletes the record
+// and inserts a new one.
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// If given data is type of slice, it then does batch replacing, and the optional parameter
+// `batch` specifies the batch operation count.
+func (tx *TXCore) Replace(table string, data interface{}, batch ...int) (sql.Result, error) {
+	if len(batch) > 0 {
+		return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).Replace()
+	}
+	return tx.Model(table).Ctx(tx.ctx).Data(data).Replace()
+}
+
+// Save does "INSERT INTO ... ON DUPLICATE KEY UPDATE..." statement for the table.
+// It updates the record if there's primary or unique index in the saving data,
+// or else it inserts a new record into the table.
+//
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
+//
+// If given data is type of slice, it then does batch saving, and the optional parameter
+// `batch` specifies the batch operation count.
+func (tx *TXCore) Save(table string, data interface{}, batch ...int) (sql.Result, error) {
+	if len(batch) > 0 {
+		return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).Save()
+	}
+	return tx.Model(table).Ctx(tx.ctx).Data(data).Save()
+}
+
+// Update does "UPDATE ... " statement for the table.
+//
+// The parameter `data` can be type of string/map/gmap/struct/*struct, etc.
+// Eg: "uid=10000", "uid", 10000, g.Map{"uid": 10000, "name":"john"}
+//
+// The parameter `condition` can be type of string/map/gmap/slice/struct/*struct, etc.
+// It is commonly used with parameter `args`.
+// Eg:
+// "uid=10000",
+// "uid", 10000
+// "money>? AND name like ?", 99999, "vip_%"
+// "status IN (?)", g.Slice{1,2,3}
+// "age IN(?,?)", 18, 50
+// User{ Id : 1, UserName : "john"}.
+func (tx *TXCore) Update(table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error) {
+	return tx.Model(table).Ctx(tx.ctx).Data(data).Where(condition, args...).Update()
+}
+
+// Delete does "DELETE FROM ... " statement for the table.
+//
+// The parameter `condition` can be type of string/map/gmap/slice/struct/*struct, etc.
+// It is commonly used with parameter `args`.
+// Eg:
+// "uid=10000",
+// "uid", 10000
+// "money>? AND name like ?", 99999, "vip_%"
+// "status IN (?)", g.Slice{1,2,3}
+// "age IN(?,?)", 18, 50
+// User{ Id : 1, UserName : "john"}.
+func (tx *TXCore) Delete(table string, condition interface{}, args ...interface{}) (sql.Result, error) {
+	return tx.Model(table).Ctx(tx.ctx).Where(condition, args...).Delete()
+}
+
+// QueryContext implements interface function Link.QueryContext.
+// It delegates directly to the underlying *sql.Tx.
+func (tx *TXCore) QueryContext(ctx context.Context, sql string, args ...interface{}) (*sql.Rows, error) {
+	return tx.tx.QueryContext(ctx, sql, args...)
+}
+
+// ExecContext implements interface function Link.ExecContext.
+// It delegates directly to the underlying *sql.Tx.
+func (tx *TXCore) ExecContext(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
+	return tx.tx.ExecContext(ctx, sql, args...)
+}
+
+// PrepareContext implements interface function Link.PrepareContext.
+// It delegates directly to the underlying *sql.Tx.
+func (tx *TXCore) PrepareContext(ctx context.Context, sql string) (*sql.Stmt, error) {
+	return tx.tx.PrepareContext(ctx, sql)
+}
+
+// IsOnMaster implements interface function Link.IsOnMaster.
+// A transaction always runs on the master node.
+func (tx *TXCore) IsOnMaster() bool {
+	return true
+}
+
+// IsTransaction implements interface function Link.IsTransaction.
+func (tx *TXCore) IsTransaction() bool {
+	return true
+}

+ 431 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_underlying.go

@@ -0,0 +1,431 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+//
+
+package gdb
+
+import (
+	"context"
+	"database/sql"
+	"reflect"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/gogf/gf/v2/util/gconv"
+
+	"github.com/gogf/gf/v2"
+	"github.com/gogf/gf/v2/container/gvar"
+	"github.com/gogf/gf/v2/errors/gcode"
+	"github.com/gogf/gf/v2/errors/gerror"
+	"github.com/gogf/gf/v2/internal/intlog"
+	"github.com/gogf/gf/v2/os/gtime"
+	"github.com/gogf/gf/v2/util/guid"
+)
+
+// Query commits one query SQL to underlying driver and returns the execution result.
+// It is most commonly used for data querying.
+func (c *Core) Query(ctx context.Context, sql string, args ...interface{}) (result Result, err error) {
+	return c.db.DoQuery(ctx, nil, sql, args...)
+}
+
+// DoQuery commits the sql string and its arguments to underlying driver
+// through given link object and returns the execution result.
+// When `link` is nil, it resolves a link from an in-context transaction or a slave node.
+func (c *Core) DoQuery(ctx context.Context, link Link, sql string, args ...interface{}) (result Result, err error) {
+	// Transaction checks.
+	if link == nil {
+		if tx := TXFromCtx(ctx, c.db.GetGroup()); tx != nil {
+			// Firstly, check and retrieve transaction link from context.
+			link = &txLink{tx.GetSqlTX()}
+		} else if link, err = c.SlaveLink(); err != nil {
+			// Or else it creates one from slave node.
+			return nil, err
+		}
+	} else if !link.IsTransaction() {
+		// If current link is not transaction link, it checks and retrieves transaction from context.
+		if tx := TXFromCtx(ctx, c.db.GetGroup()); tx != nil {
+			link = &txLink{tx.GetSqlTX()}
+		}
+	}
+
+	if c.db.GetConfig().QueryTimeout > 0 {
+		// Deferring the cancel is safe here: the result records are fully
+		// materialized by DoCommit before this function returns. It also
+		// releases the timeout timer promptly (fixes `go vet` lostcancel),
+		// matching the handling in DoExec.
+		var cancelFunc context.CancelFunc
+		ctx, cancelFunc = context.WithTimeout(ctx, c.db.GetConfig().QueryTimeout)
+		defer cancelFunc()
+	}
+
+	// Sql filtering.
+	sql, args = c.FormatSqlBeforeExecuting(sql, args)
+	sql, args, err = c.db.DoFilter(ctx, link, sql, args)
+	if err != nil {
+		return nil, err
+	}
+	// SQL format and retrieve.
+	if v := ctx.Value(ctxKeyCatchSQL); v != nil {
+		var (
+			manager      = v.(*CatchSQLManager)
+			formattedSql = FormatSqlWithArgs(sql, args)
+		)
+		manager.SQLArray.Append(formattedSql)
+		// In catch-only mode, record the sql without executing it.
+		if !manager.DoCommit && ctx.Value(ctxKeyInternalProducedSQL) == nil {
+			return nil, nil
+		}
+	}
+	// Link execution.
+	var out DoCommitOutput
+	out, err = c.db.DoCommit(ctx, DoCommitInput{
+		Link:          link,
+		Sql:           sql,
+		Args:          args,
+		Stmt:          nil,
+		Type:          SqlTypeQueryContext,
+		IsTransaction: link.IsTransaction(),
+	})
+	return out.Records, err
+}
+
+// Exec commits one non-query SQL to underlying driver and returns the execution result.
+// It is most commonly used for data inserting and updating.
+func (c *Core) Exec(ctx context.Context, sql string, args ...interface{}) (result sql.Result, err error) {
+	return c.db.DoExec(ctx, nil, sql, args...)
+}
+
+// DoExec commits the sql string and its arguments to underlying driver
+// through given link object and returns the execution result.
+// When `link` is nil, it resolves a link from an in-context transaction or the master node.
+func (c *Core) DoExec(ctx context.Context, link Link, sql string, args ...interface{}) (result sql.Result, err error) {
+	// Transaction checks.
+	if link == nil {
+		if tx := TXFromCtx(ctx, c.db.GetGroup()); tx != nil {
+			// Firstly, check and retrieve transaction link from context.
+			link = &txLink{tx.GetSqlTX()}
+		} else if link, err = c.MasterLink(); err != nil {
+			// Or else it creates one from master node.
+			return nil, err
+		}
+	} else if !link.IsTransaction() {
+		// If current link is not transaction link, it checks and retrieves transaction from context.
+		if tx := TXFromCtx(ctx, c.db.GetGroup()); tx != nil {
+			link = &txLink{tx.GetSqlTX()}
+		}
+	}
+
+	if c.db.GetConfig().ExecTimeout > 0 {
+		var cancelFunc context.CancelFunc
+		ctx, cancelFunc = context.WithTimeout(ctx, c.db.GetConfig().ExecTimeout)
+		defer cancelFunc()
+	}
+
+	// SQL filtering.
+	sql, args = c.FormatSqlBeforeExecuting(sql, args)
+	sql, args, err = c.db.DoFilter(ctx, link, sql, args)
+	if err != nil {
+		return nil, err
+	}
+	// SQL format and retrieve.
+	if v := ctx.Value(ctxKeyCatchSQL); v != nil {
+		var (
+			manager      = v.(*CatchSQLManager)
+			formattedSql = FormatSqlWithArgs(sql, args)
+		)
+		manager.SQLArray.Append(formattedSql)
+		// In catch-only mode, record the sql and return an empty result
+		// without executing it.
+		if !manager.DoCommit && ctx.Value(ctxKeyInternalProducedSQL) == nil {
+			return new(SqlResult), nil
+		}
+	}
+	// Link execution.
+	var out DoCommitOutput
+	out, err = c.db.DoCommit(ctx, DoCommitInput{
+		Link:          link,
+		Sql:           sql,
+		Args:          args,
+		Stmt:          nil,
+		Type:          SqlTypeExecContext,
+		IsTransaction: link.IsTransaction(),
+	})
+	return out.Result, err
+}
+
+// DoFilter is a hook function, which filters the sql and its arguments before it's committed to underlying driver.
+// The parameter `link` specifies the current database connection operation object. You can modify the sql
+// string `sql` and its arguments `args` as you wish before they're committed to driver.
+// The default implementation passes both through unchanged; drivers override it.
+func (c *Core) DoFilter(ctx context.Context, link Link, sql string, args []interface{}) (newSql string, newArgs []interface{}, err error) {
+	return sql, args, nil
+}
+
+// DoCommit commits current sql and arguments to underlying sql driver.
+// It is the single choke point for every statement type (begin/commit/rollback,
+// exec, query, prepare and statement execution), so that tracing, logging and
+// error wrapping happen uniformly.
+func (c *Core) DoCommit(ctx context.Context, in DoCommitInput) (out DoCommitOutput, err error) {
+	// Inject internal data into ctx, especially for transaction creating.
+	ctx = c.InjectInternalCtxData(ctx)
+
+	var (
+		sqlTx                *sql.Tx
+		sqlStmt              *sql.Stmt
+		sqlRows              *sql.Rows
+		sqlResult            sql.Result
+		stmtSqlRows          *sql.Rows
+		stmtSqlRow           *sql.Row
+		rowsAffected         int64
+		cancelFuncForTimeout context.CancelFunc
+		formattedSql         = FormatSqlWithArgs(in.Sql, in.Args)
+		timestampMilli1      = gtime.TimestampMilli()
+	)
+
+	// Trace span start.
+	tr := otel.GetTracerProvider().Tracer(traceInstrumentName, trace.WithInstrumentationVersion(gf.VERSION))
+	ctx, span := tr.Start(ctx, in.Type, trace.WithSpanKind(trace.SpanKindInternal))
+	defer span.End()
+
+	// Execution cased by type.
+	switch in.Type {
+	case SqlTypeBegin:
+		if sqlTx, err = in.Db.Begin(); err == nil {
+			// Wrap the raw *sql.Tx into a TXCore with its own id-bearing context.
+			out.Tx = &TXCore{
+				db:            c.db,
+				tx:            sqlTx,
+				ctx:           context.WithValue(ctx, transactionIdForLoggerCtx, transactionIdGenerator.Add(1)),
+				master:        in.Db,
+				transactionId: guid.S(),
+			}
+			ctx = out.Tx.GetCtx()
+		}
+		out.RawResult = sqlTx
+
+	case SqlTypeTXCommit:
+		err = in.Tx.Commit()
+
+	case SqlTypeTXRollback:
+		err = in.Tx.Rollback()
+
+	case SqlTypeExecContext:
+		// Dry-run mode skips the actual write and reports an empty result.
+		if c.db.GetDryRun() {
+			sqlResult = new(SqlResult)
+		} else {
+			sqlResult, err = in.Link.ExecContext(ctx, in.Sql, in.Args...)
+		}
+		out.RawResult = sqlResult
+
+	case SqlTypeQueryContext:
+		sqlRows, err = in.Link.QueryContext(ctx, in.Sql, in.Args...)
+		out.RawResult = sqlRows
+
+	case SqlTypePrepareContext:
+		sqlStmt, err = in.Link.PrepareContext(ctx, in.Sql)
+		out.RawResult = sqlStmt
+
+	case SqlTypeStmtExecContext:
+		ctx, cancelFuncForTimeout = c.GetCtxTimeout(ctx, ctxTimeoutTypeExec)
+		defer cancelFuncForTimeout()
+		if c.db.GetDryRun() {
+			sqlResult = new(SqlResult)
+		} else {
+			sqlResult, err = in.Stmt.ExecContext(ctx, in.Args...)
+		}
+		out.RawResult = sqlResult
+
+	case SqlTypeStmtQueryContext:
+		ctx, cancelFuncForTimeout = c.GetCtxTimeout(ctx, ctxTimeoutTypeQuery)
+		defer cancelFuncForTimeout()
+		stmtSqlRows, err = in.Stmt.QueryContext(ctx, in.Args...)
+		out.RawResult = stmtSqlRows
+
+	case SqlTypeStmtQueryRowContext:
+		ctx, cancelFuncForTimeout = c.GetCtxTimeout(ctx, ctxTimeoutTypeQuery)
+		defer cancelFuncForTimeout()
+		stmtSqlRow = in.Stmt.QueryRowContext(ctx, in.Args...)
+		out.RawResult = stmtSqlRow
+
+	default:
+		panic(gerror.NewCodef(gcode.CodeInvalidParameter, `invalid SqlType "%s"`, in.Type))
+	}
+	// Result handling.
+	switch {
+	case sqlResult != nil && !c.GetIgnoreResultFromCtx(ctx):
+		rowsAffected, err = sqlResult.RowsAffected()
+		out.Result = sqlResult
+
+	case sqlRows != nil:
+		// Query rows are fully materialized (and closed) here.
+		out.Records, err = c.RowsToResult(ctx, sqlRows)
+		rowsAffected = int64(len(out.Records))
+
+	case sqlStmt != nil:
+		out.Stmt = &Stmt{
+			Stmt: sqlStmt,
+			core: c,
+			link: in.Link,
+			sql:  in.Sql,
+		}
+	}
+	var (
+		timestampMilli2 = gtime.TimestampMilli()
+		sqlObj          = &Sql{
+			Sql:           in.Sql,
+			Type:          in.Type,
+			Args:          in.Args,
+			Format:        formattedSql,
+			Error:         err,
+			Start:         timestampMilli1,
+			End:           timestampMilli2,
+			Group:         c.db.GetGroup(),
+			Schema:        c.db.GetSchema(),
+			RowsAffected:  rowsAffected,
+			IsTransaction: in.IsTransaction,
+		}
+	)
+
+	// Tracing.
+	c.traceSpanEnd(ctx, span, sqlObj)
+
+	// Logging.
+	if c.db.GetDebug() {
+		c.writeSqlToLogger(ctx, sqlObj)
+	}
+	// sql.ErrNoRows is not treated as an operation error.
+	if err != nil && err != sql.ErrNoRows {
+		err = gerror.WrapCode(
+			gcode.CodeDbOperationError,
+			err,
+			FormatSqlWithArgs(in.Sql, in.Args),
+		)
+	}
+	return out, err
+}
+
+// Prepare creates a prepared statement for later queries or executions.
+// Multiple queries or executions may be run concurrently from the
+// returned statement.
+// The caller must call the statement's Close method
+// when the statement is no longer needed.
+//
+// The parameter `execOnMaster` specifies whether executing the sql on master node,
+// or else it executes the sql on slave node if master-slave configured.
+func (c *Core) Prepare(ctx context.Context, sql string, execOnMaster ...bool) (*Stmt, error) {
+	var (
+		err  error
+		link Link
+	)
+	if len(execOnMaster) > 0 && execOnMaster[0] {
+		if link, err = c.MasterLink(); err != nil {
+			return nil, err
+		}
+	} else {
+		if link, err = c.SlaveLink(); err != nil {
+			return nil, err
+		}
+	}
+	return c.db.DoPrepare(ctx, link, sql)
+}
+
+// DoPrepare calls prepare function on given link object and returns the statement object.
+// When `link` is nil, it resolves a link from an in-context transaction or the master node.
+func (c *Core) DoPrepare(ctx context.Context, link Link, sql string) (stmt *Stmt, err error) {
+	// Transaction checks.
+	if link == nil {
+		if tx := TXFromCtx(ctx, c.db.GetGroup()); tx != nil {
+			// Firstly, check and retrieve transaction link from context.
+			link = &txLink{tx.GetSqlTX()}
+		} else {
+			// Or else it creates one from master node.
+			// Use the named return `err` directly instead of declaring a
+			// shadowing local variable (previous code had `var err error`).
+			if link, err = c.MasterLink(); err != nil {
+				return nil, err
+			}
+		}
+	} else if !link.IsTransaction() {
+		// If current link is not transaction link, it checks and retrieves transaction from context.
+		if tx := TXFromCtx(ctx, c.db.GetGroup()); tx != nil {
+			link = &txLink{tx.GetSqlTX()}
+		}
+	}
+
+	if c.db.GetConfig().PrepareTimeout > 0 {
+		// DO NOT USE cancel function in prepare statement:
+		// the returned Stmt outlives this call and must not be cancelled here.
+		ctx, _ = context.WithTimeout(ctx, c.db.GetConfig().PrepareTimeout)
+	}
+
+	// Link execution.
+	var out DoCommitOutput
+	out, err = c.db.DoCommit(ctx, DoCommitInput{
+		Link:          link,
+		Sql:           sql,
+		Type:          SqlTypePrepareContext,
+		IsTransaction: link.IsTransaction(),
+	})
+	return out.Stmt, err
+}
+
+// RowsToResult converts underlying data record type sql.Rows to Result type.
+// It consumes and closes `rows` entirely; it returns (nil, nil) when `rows`
+// is nil or yields no rows.
+func (c *Core) RowsToResult(ctx context.Context, rows *sql.Rows) (Result, error) {
+	if rows == nil {
+		return nil, nil
+	}
+	defer func() {
+		if err := rows.Close(); err != nil {
+			intlog.Errorf(ctx, `%+v`, err)
+		}
+	}()
+	if !rows.Next() {
+		return nil, nil
+	}
+	// Column names and types.
+	columnTypes, err := rows.ColumnTypes()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(columnTypes) > 0 {
+		if internalData := c.GetInternalCtxDataFromCtx(ctx); internalData != nil {
+			internalData.FirstResultColumn = columnTypes[0].Name()
+		}
+	}
+	var (
+		values   = make([]interface{}, len(columnTypes))
+		result   = make(Result, 0)
+		scanArgs = make([]interface{}, len(values))
+	)
+	// Scan into pointers to the shared values slice.
+	for i := range values {
+		scanArgs[i] = &values[i]
+	}
+	// Note: the first rows.Next() above already advanced to the first row,
+	// hence the scan-then-advance loop shape.
+	for {
+		if err = rows.Scan(scanArgs...); err != nil {
+			return result, err
+		}
+		record := Record{}
+		for i, value := range values {
+			if value == nil {
+				// DO NOT use `gvar.New(nil)` here as it creates an initialized object
+				// which will cause struct converting issue.
+				record[columnTypes[i].Name()] = nil
+			} else {
+				var convertedValue interface{}
+				if convertedValue, err = c.columnValueToLocalValue(ctx, value, columnTypes[i]); err != nil {
+					return nil, err
+				}
+				record[columnTypes[i].Name()] = gvar.New(convertedValue)
+			}
+		}
+		result = append(result, record)
+		if !rows.Next() {
+			break
+		}
+	}
+	return result, nil
+}
+
+// columnValueToLocalValue converts a raw driver column value to its local Go value.
+// Basic builtin kinds are converted via the driver-reported scan type; anything
+// else is delegated to the driver's ConvertValueForLocal using the database type name.
+func (c *Core) columnValueToLocalValue(ctx context.Context, value interface{}, columnType *sql.ColumnType) (interface{}, error) {
+	var scanType = columnType.ScanType()
+	if scanType != nil {
+		// Common basic builtin types.
+		switch scanType.Kind() {
+		case
+			reflect.Bool,
+			reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+			reflect.Float32, reflect.Float64:
+			return gconv.Convert(
+				gconv.String(value),
+				columnType.ScanType().String(),
+			), nil
+		}
+	}
+	// Other complex types, especially custom types.
+	return c.db.ConvertValueForLocal(ctx, columnType.DatabaseTypeName(), value)
+}

+ 246 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_core_utility.go

@@ -0,0 +1,246 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+//
+
+package gdb
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/gogf/gf/v2/crypto/gmd5"
+	"github.com/gogf/gf/v2/errors/gcode"
+	"github.com/gogf/gf/v2/errors/gerror"
+	"github.com/gogf/gf/v2/text/gregex"
+	"github.com/gogf/gf/v2/text/gstr"
+	"github.com/gogf/gf/v2/util/gconv"
+	"github.com/gogf/gf/v2/util/gutil"
+)
+
+// GetDB returns the underlying DB.
+func (c *Core) GetDB() DB {
+	return c.db
+}
+
+// GetLink creates and returns the underlying database link object with transaction checks.
+// The parameter `master` specifies whether using the master node if master-slave configured.
+func (c *Core) GetLink(ctx context.Context, master bool, schema string) (Link, error) {
+	tx := TXFromCtx(ctx, c.db.GetGroup())
+	if tx != nil {
+		return &txLink{tx.GetSqlTX()}, nil
+	}
+	if master {
+		link, err := c.db.GetCore().MasterLink(schema)
+		if err != nil {
+			return nil, err
+		}
+		return link, nil
+	}
+	link, err := c.db.GetCore().SlaveLink(schema)
+	if err != nil {
+		return nil, err
+	}
+	return link, nil
+}
+
+// MasterLink acts like function Master but with additional `schema` parameter specifying
+// the schema for the connection. It is defined for internal usage.
+// Also see Master.
+func (c *Core) MasterLink(schema ...string) (Link, error) {
+	db, err := c.db.Master(schema...)
+	if err != nil {
+		return nil, err
+	}
+	return &dbLink{
+		DB:         db,
+		isOnMaster: true,
+	}, nil
+}
+
+// SlaveLink acts like function Slave but with additional `schema` parameter specifying
+// the schema for the connection. It is defined for internal usage.
+// Also see Slave.
+func (c *Core) SlaveLink(schema ...string) (Link, error) {
+	db, err := c.db.Slave(schema...)
+	if err != nil {
+		return nil, err
+	}
+	return &dbLink{
+		DB:         db,
+		isOnMaster: false,
+	}, nil
+}
+
+// QuoteWord checks given string `s` a word,
+// if true it quotes `s` with security chars of the database
+// and returns the quoted string; or else it returns `s` without any change.
+//
+// The meaning of a `word` can be considered as a column name.
+func (c *Core) QuoteWord(s string) string {
+	s = gstr.Trim(s)
+	if s == "" {
+		return s
+	}
+	charLeft, charRight := c.db.GetChars()
+	return doQuoteWord(s, charLeft, charRight)
+}
+
+// QuoteString quotes string with quote chars. Strings like:
+// "user", "user u", "user,user_detail", "user u, user_detail ut", "u.id asc".
+//
+// The meaning of a `string` can be considered as part of a statement string including columns.
+func (c *Core) QuoteString(s string) string {
+	charLeft, charRight := c.db.GetChars()
+	return doQuoteString(s, charLeft, charRight)
+}
+
+// QuotePrefixTableName adds prefix string and quotes chars for the table.
+// It handles table string like:
+// "user", "user u",
+// "user,user_detail",
+// "user u, user_detail ut",
+// "user as u, user_detail as ut".
+//
+// Note that, this will automatically checks the table prefix whether already added,
+// if true it does nothing to the table name, or else adds the prefix to the table name.
+func (c *Core) QuotePrefixTableName(table string) string {
+	charLeft, charRight := c.db.GetChars()
+	return doQuoteTableName(table, c.db.GetPrefix(), charLeft, charRight)
+}
+
+// GetChars returns the security char for current database.
+// It does nothing in default.
+func (c *Core) GetChars() (charLeft string, charRight string) {
+	return "", ""
+}
+
+// Tables retrieves and returns the tables of current schema.
+// It's mainly used in cli tool chain for automatically generating the models.
+func (c *Core) Tables(ctx context.Context, schema ...string) (tables []string, err error) {
+	return
+}
+
+// TableFields retrieves and returns the fields' information of specified table of current
+// schema.
+//
+// The parameter `link` is optional, if given nil it automatically retrieves a raw sql connection
+// as its link to proceed necessary sql query.
+//
+// Note that it returns a map containing the field name and its corresponding fields.
+// As a map is unsorted, the TableField struct has an "Index" field marks its sequence in
+// the fields.
+//
+// It's using cache feature to enhance the performance, which is never expired util the
+// process restarts.
+func (c *Core) TableFields(ctx context.Context, table string, schema ...string) (fields map[string]*TableField, err error) {
+	return
+}
+
+// ClearTableFields removes certain cached table fields of current configuration group.
+func (c *Core) ClearTableFields(ctx context.Context, table string, schema ...string) (err error) {
+	tableFieldsMap.Remove(fmt.Sprintf(
+		`%s%s@%s#%s`,
+		cachePrefixTableFields,
+		c.db.GetGroup(),
+		gutil.GetOrDefaultStr(c.db.GetSchema(), schema...),
+		table,
+	))
+	return
+}
+
+// ClearTableFieldsAll removes all cached table fields of current configuration group.
+func (c *Core) ClearTableFieldsAll(ctx context.Context) (err error) {
+	var (
+		keys        = tableFieldsMap.Keys()
+		cachePrefix = fmt.Sprintf(`%s@%s`, cachePrefixTableFields, c.db.GetGroup())
+		removedKeys = make([]string, 0)
+	)
+	for _, key := range keys {
+		if gstr.HasPrefix(key, cachePrefix) {
+			removedKeys = append(removedKeys, key)
+		}
+	}
+	if len(removedKeys) > 0 {
+		tableFieldsMap.Removes(removedKeys)
+	}
+	return
+}
+
+// ClearCache removes cached sql result of certain table.
+// NOTE(review): the `table` parameter is currently ignored — this clears the
+// WHOLE cache of the group, exactly like ClearCacheAll. Do not rely on
+// per-table invalidation here; confirm intended behavior against upstream.
+func (c *Core) ClearCache(ctx context.Context, table string) (err error) {
+	return c.db.GetCache().Clear(ctx)
+}
+
+// ClearCacheAll removes all cached sql result from cache
+func (c *Core) ClearCacheAll(ctx context.Context) (err error) {
+	return c.db.GetCache().Clear(ctx)
+}
+
+// makeSelectCacheKey creates and returns the cache key for a SELECT statement result.
+// If `name` is empty, a key is derived from the group, schema and table plus an
+// md5 digest of the sql text and its arguments, so identical queries share a key.
+// The returned key is always prefixed with cachePrefixSelectCache.
+func (c *Core) makeSelectCacheKey(name, schema, table, sql string, args ...interface{}) string {
+	if name == "" {
+		name = fmt.Sprintf(
+			`%s@%s#%s:%s`,
+			c.db.GetGroup(),
+			schema,
+			table,
+			gmd5.MustEncryptString(sql+", @PARAMS:"+gconv.String(args)),
+		)
+	}
+	return fmt.Sprintf(`%s%s`, cachePrefixSelectCache, name)
+}
+
+// HasField determine whether the field exists in the table.
+func (c *Core) HasField(ctx context.Context, table, field string, schema ...string) (bool, error) {
+	table = c.guessPrimaryTableName(table)
+	tableFields, err := c.db.TableFields(ctx, table, schema...)
+	if err != nil {
+		return false, err
+	}
+	if len(tableFields) == 0 {
+		return false, gerror.NewCodef(
+			gcode.CodeNotFound,
+			`empty table fields for table "%s"`, table,
+		)
+	}
+	fieldsArray := make([]string, len(tableFields))
+	for k, v := range tableFields {
+		fieldsArray[v.Index] = k
+	}
+	charLeft, charRight := c.db.GetChars()
+	field = gstr.Trim(field, charLeft+charRight)
+	for _, f := range fieldsArray {
+		if f == field {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// guessPrimaryTableName parses and returns the primary table name.
+func (c *Core) guessPrimaryTableName(tableStr string) string {
+	if tableStr == "" {
+		return ""
+	}
+	var (
+		guessedTableName string
+		array1           = gstr.SplitAndTrim(tableStr, ",")
+		array2           = gstr.SplitAndTrim(array1[0], " ")
+		array3           = gstr.SplitAndTrim(array2[0], ".")
+	)
+	if len(array3) >= 2 {
+		guessedTableName = array3[1]
+	} else {
+		guessedTableName = array3[0]
+	}
+	charL, charR := c.db.GetChars()
+	if charL != "" || charR != "" {
+		guessedTableName = gstr.Trim(guessedTableName, charL+charR)
+	}
+	if !gregex.IsMatchString(regularFieldNameRegPattern, guessedTableName) {
+		return ""
+	}
+	return guessedTableName
+}

+ 46 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_driver_default.go

@@ -0,0 +1,46 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"database/sql"
+)
+
+// DriverDefault is the default driver for mysql database, which does nothing.
+type DriverDefault struct {
+	*Core
+}
+
+func init() {
+	if err := Register("default", &DriverDefault{}); err != nil {
+		panic(err)
+	}
+}
+
+// New creates and returns a database object for the default driver, which does nothing.
+// It implements the interface of gdb.Driver for extra database driver installation.
+func (d *DriverDefault) New(core *Core, node *ConfigNode) (DB, error) {
+	return &DriverDefault{
+		Core: core,
+	}, nil
+}
+
+// Open creates and returns an underlying sql.DB object.
+// The default driver is a placeholder: it returns nil here and is expected to be
+// overridden by a concrete driver implementation.
+func (d *DriverDefault) Open(config *ConfigNode) (db *sql.DB, err error) {
+	return
+}
+
+// PingMaster pings the master node to check authentication or keeps the connection alive.
+func (d *DriverDefault) PingMaster() error {
+	return nil
+}
+
+// PingSlave pings the slave node to check authentication or keeps the connection alive.
+func (d *DriverDefault) PingSlave() error {
+	return nil
+}

+ 31 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_driver_wrapper.go

@@ -0,0 +1,31 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+// DriverWrapper is a driver wrapper for extending features with embedded driver.
+type DriverWrapper struct {
+	driver Driver
+}
+
+// New creates and returns a database object wrapped by DriverWrapperDB,
+// which extends the DB produced by the embedded driver with common features.
+// It implements the interface of gdb.Driver for extra database driver installation.
+func (d *DriverWrapper) New(core *Core, node *ConfigNode) (DB, error) {
+	db, err := d.driver.New(core, node)
+	if err != nil {
+		return nil, err
+	}
+	return &DriverWrapperDB{
+		DB: db,
+	}, nil
+}
+
+// newDriverWrapper creates and returns a driver wrapper.
+func newDriverWrapper(driver Driver) Driver {
+	return &DriverWrapper{
+		driver: driver,
+	}
+}

+ 114 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_driver_wrapper_db.go

@@ -0,0 +1,114 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+
+	"github.com/gogf/gf/v2/encoding/gjson"
+	"github.com/gogf/gf/v2/errors/gcode"
+	"github.com/gogf/gf/v2/errors/gerror"
+	"github.com/gogf/gf/v2/internal/intlog"
+	"github.com/gogf/gf/v2/text/gstr"
+	"github.com/gogf/gf/v2/util/gutil"
+)
+
+// DriverWrapperDB is a DB wrapper for extending features with embedded DB.
+type DriverWrapperDB struct {
+	DB
+}
+
+// Open creates and returns an underlying sql.DB object by delegating to the
+// wrapped DB, logging the connection configuration beforehand for debugging.
+func (d *DriverWrapperDB) Open(node *ConfigNode) (db *sql.DB, err error) {
+	var ctx = d.GetCtx()
+	intlog.PrintFunc(ctx, func() string {
+		return fmt.Sprintf(`open new connection:%s`, gjson.MustEncode(node))
+	})
+	return d.DB.Open(node)
+}
+
+// Tables retrieves and returns the tables of current schema.
+// It's mainly used in cli tool chain for automatically generating the models.
+func (d *DriverWrapperDB) Tables(ctx context.Context, schema ...string) (tables []string, err error) {
+	ctx = context.WithValue(ctx, ctxKeyInternalProducedSQL, struct{}{})
+	return d.DB.Tables(ctx, schema...)
+}
+
+// TableFields retrieves and returns the fields' information of specified table of current
+// schema.
+//
+// The parameter `link` is optional, if given nil it automatically retrieves a raw sql connection
+// as its link to proceed necessary sql query.
+//
+// Note that it returns a map containing the field name and its corresponding fields.
+// As a map is unsorted, the TableField struct has an "Index" field marks its sequence in
+// the fields.
+//
+// It's using cache feature to enhance the performance, which is never expired util the
+// process restarts.
+func (d *DriverWrapperDB) TableFields(
+	ctx context.Context, table string, schema ...string,
+) (fields map[string]*TableField, err error) {
+	if table == "" {
+		return nil, nil
+	}
+	charL, charR := d.GetChars()
+	table = gstr.Trim(table, charL+charR)
+	if gstr.Contains(table, " ") {
+		return nil, gerror.NewCode(
+			gcode.CodeInvalidParameter,
+			"function TableFields supports only single table operations",
+		)
+	}
+	var (
+		cacheKey = fmt.Sprintf(
+			`%s%s@%s#%s`,
+			cachePrefixTableFields,
+			d.GetGroup(),
+			gutil.GetOrDefaultStr(d.GetSchema(), schema...),
+			table,
+		)
+		value = tableFieldsMap.GetOrSetFuncLock(cacheKey, func() interface{} {
+			ctx = context.WithValue(ctx, ctxKeyInternalProducedSQL, struct{}{})
+			fields, err = d.DB.TableFields(ctx, table, schema...)
+			if err != nil {
+				return nil
+			}
+			return fields
+		})
+	)
+	if value != nil {
+		fields = value.(map[string]*TableField)
+	}
+	return
+}
+
+// DoInsert inserts or updates data for given table.
+// This function is usually used for custom interface definition, you do not need call it manually.
+// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
+// Eg:
+// Data(g.Map{"uid": 10000, "name":"john"})
+// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"}})
+//
+// The parameter `option` values are as follows:
+// InsertOptionDefault:  just insert, if there's unique/primary key in the data, it returns error;
+// InsertOptionReplace: if there's unique/primary key in the data, it deletes it from table and inserts a new one;
+// InsertOptionSave:    if there's unique/primary key in the data, it updates it or else inserts a new one;
+// InsertOptionIgnore:  if there's unique/primary key in the data, it ignores the inserting;
+func (d *DriverWrapperDB) DoInsert(ctx context.Context, link Link, table string, list List, option DoInsertOption) (result sql.Result, err error) {
+	// Convert data type before commit it to underlying db driver.
+	for i, item := range list {
+		list[i], err = d.GetCore().ConvertDataForRecord(ctx, item, table)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return d.DB.DoInsert(ctx, link, table, list, option)
+}

+ 897 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_func.go

@@ -0,0 +1,897 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/gogf/gf/v2/container/garray"
+	"github.com/gogf/gf/v2/encoding/gjson"
+	"github.com/gogf/gf/v2/internal/empty"
+	"github.com/gogf/gf/v2/internal/reflection"
+	"github.com/gogf/gf/v2/internal/utils"
+	"github.com/gogf/gf/v2/os/gstructs"
+	"github.com/gogf/gf/v2/os/gtime"
+	"github.com/gogf/gf/v2/text/gregex"
+	"github.com/gogf/gf/v2/text/gstr"
+	"github.com/gogf/gf/v2/util/gconv"
+	"github.com/gogf/gf/v2/util/gmeta"
+	"github.com/gogf/gf/v2/util/gutil"
+)
+
+// iString is the type assert api for String.
+type iString interface {
+	String() string
+}
+
+// iIterator is the type assert api for Iterator.
+type iIterator interface {
+	Iterator(f func(key, value interface{}) bool)
+}
+
+// iInterfaces is the type assert api for Interfaces.
+type iInterfaces interface {
+	Interfaces() []interface{}
+}
+
+// iNil is the type assert api for IsNil.
+type iNil interface {
+	IsNil() bool
+}
+
+// iTableName is the interface for retrieving table name for struct.
+type iTableName interface {
+	TableName() string
+}
+
+const (
+	OrmTagForStruct    = "orm"
+	OrmTagForTable     = "table"
+	OrmTagForWith      = "with"
+	OrmTagForWithWhere = "where"
+	OrmTagForWithOrder = "order"
+	OrmTagForDo        = "do"
+)
+
+var (
+	// quoteWordReg is the regular expression object for a word check.
+	quoteWordReg = regexp.MustCompile(`^[a-zA-Z0-9\-_]+$`)
+
+	// structTagPriority tags for struct converting for orm field mapping.
+	structTagPriority = append([]string{OrmTagForStruct}, gconv.StructTagPriority...)
+)
+
+// WithDB injects given db object into context and returns a new context.
+func WithDB(ctx context.Context, db DB) context.Context {
+	if db == nil {
+		return ctx
+	}
+	dbCtx := db.GetCtx()
+	if ctxDb := DBFromCtx(dbCtx); ctxDb != nil {
+		return dbCtx
+	}
+	ctx = context.WithValue(ctx, ctxKeyForDB, db)
+	return ctx
+}
+
+// DBFromCtx retrieves and returns DB object from context.
+func DBFromCtx(ctx context.Context) DB {
+	if ctx == nil {
+		return nil
+	}
+	v := ctx.Value(ctxKeyForDB)
+	if v != nil {
+		return v.(DB)
+	}
+	return nil
+}
+
+// ToSQL formats and returns the last one of sql statements in given closure function
+// WITHOUT TRULY EXECUTING IT.
+// Be caution that, all the following sql statements should use the context object passing by function `f`.
+func ToSQL(ctx context.Context, f func(ctx context.Context) error) (sql string, err error) {
+	var manager = &CatchSQLManager{
+		SQLArray: garray.NewStrArray(),
+		DoCommit: false,
+	}
+	ctx = context.WithValue(ctx, ctxKeyCatchSQL, manager)
+	err = f(ctx)
+	sql, _ = manager.SQLArray.PopRight()
+	return
+}
+
+// CatchSQL catches and returns all sql statements that are EXECUTED in given closure function.
+// Be caution that, all the following sql statements should use the context object passing by function `f`.
+func CatchSQL(ctx context.Context, f func(ctx context.Context) error) (sqlArray []string, err error) {
+	var manager = &CatchSQLManager{
+		SQLArray: garray.NewStrArray(),
+		DoCommit: true,
+	}
+	ctx = context.WithValue(ctx, ctxKeyCatchSQL, manager)
+	err = f(ctx)
+	return manager.SQLArray.Slice(), err
+}
+
+// isDoStruct checks and returns whether given type is a DO struct.
+func isDoStruct(object interface{}) bool {
+	// It checks by struct name like "XxxForDao", to be compatible with old version.
+	// TODO remove this compatible codes in future.
+	reflectType := reflect.TypeOf(object)
+	if gstr.HasSuffix(reflectType.String(), modelForDaoSuffix) {
+		return true
+	}
+	// It checks by struct meta for DO struct in version.
+	if ormTag := gmeta.Get(object, OrmTagForStruct); !ormTag.IsEmpty() {
+		match, _ := gregex.MatchString(
+			fmt.Sprintf(`%s\s*:\s*([^,]+)`, OrmTagForDo),
+			ormTag.String(),
+		)
+		if len(match) > 1 {
+			return gconv.Bool(match[1])
+		}
+	}
+	return false
+}
+
+// getTableNameFromOrmTag retrieves and returns the table name from struct object.
+// Lookup order: the iTableName interface value, then the `table` entry of the
+// `orm` meta tag, then the snake-case form of the struct type name.
+// Note that it panics when the struct type of `object` cannot be resolved.
+func getTableNameFromOrmTag(object interface{}) string {
+	var tableName string
+	// Use the interface value.
+	if r, ok := object.(iTableName); ok {
+		tableName = r.TableName()
+	}
+	// Use the meta data tag "orm".
+	if tableName == "" {
+		if ormTag := gmeta.Get(object, OrmTagForStruct); !ormTag.IsEmpty() {
+			match, _ := gregex.MatchString(
+				fmt.Sprintf(`%s\s*:\s*([^,]+)`, OrmTagForTable),
+				ormTag.String(),
+			)
+			if len(match) > 1 {
+				tableName = match[1]
+			}
+		}
+	}
+	// Use the struct name of snake case.
+	if tableName == "" {
+		if t, err := gstructs.StructType(object); err != nil {
+			panic(err)
+		} else {
+			tableName = gstr.CaseSnakeFirstUpper(
+				gstr.StrEx(t.String(), "."),
+			)
+		}
+	}
+	return tableName
+}
+
+// ListItemValues retrieves and returns the elements of all item struct/map with key `key`.
+// Note that the parameter `list` should be type of slice which contains elements of map or struct,
+// or else it returns an empty slice.
+//
+// The parameter `list` supports types like:
+// []map[string]interface{}
+// []map[string]sub-map
+// []struct
+// []struct:sub-struct
+// Note that the sub-map/sub-struct makes sense only if the optional parameter `subKey` is given.
+// See gutil.ListItemValues.
+func ListItemValues(list interface{}, key interface{}, subKey ...interface{}) (values []interface{}) {
+	return gutil.ListItemValues(list, key, subKey...)
+}
+
+// ListItemValuesUnique retrieves and returns the unique elements of all struct/map with key `key`.
+// Note that the parameter `list` should be type of slice which contains elements of map or struct,
+// or else it returns an empty slice.
+// See gutil.ListItemValuesUnique.
+func ListItemValuesUnique(list interface{}, key string, subKey ...interface{}) []interface{} {
+	return gutil.ListItemValuesUnique(list, key, subKey...)
+}
+
+// GetInsertOperationByOption returns proper insert option with given parameter `option`.
+func GetInsertOperationByOption(option InsertOption) string {
+	var operator string
+	switch option {
+	case InsertOptionReplace:
+		operator = InsertOperationReplace
+	case InsertOptionIgnore:
+		operator = InsertOperationIgnore
+	default:
+		operator = InsertOperationInsert
+	}
+	return operator
+}
+
+// anyValueToMapBeforeToRecord converts `value` to a map in preparation for record
+// conversion, using the orm-first struct tag priority and omitting empty values.
+func anyValueToMapBeforeToRecord(value interface{}) map[string]interface{} {
+	return gconv.Map(value, gconv.MapOption{
+		Tags:      structTagPriority,
+		OmitEmpty: true, // To be compatible with old version from v2.6.0.
+	})
+}
+
+// DaToMapDeep is deprecated, use MapOrStructToMapDeep instead.
+func DaToMapDeep(value interface{}) map[string]interface{} {
+	return MapOrStructToMapDeep(value, true)
+}
+
+// MapOrStructToMapDeep converts `value` to map type recursively(if attribute struct is embedded).
+// The parameter `value` should be type of *map/map/*struct/struct.
+// It supports embedded struct definition for struct.
+func MapOrStructToMapDeep(value interface{}, omitempty bool) map[string]interface{} {
+	m := gconv.Map(value, gconv.MapOption{
+		Tags:      structTagPriority,
+		OmitEmpty: omitempty,
+	})
+	for k, v := range m {
+		switch v.(type) {
+		case time.Time, *time.Time, gtime.Time, *gtime.Time, gjson.Json, *gjson.Json:
+			m[k] = v
+		}
+	}
+	return m
+}
+
+// doQuoteTableName adds prefix string and quote chars for table name. It handles table string like:
+// "user", "user u", "user,user_detail", "user u, user_detail ut", "user as u, user_detail as ut",
+// "user.user u", "`user`.`user` u".
+//
+// Note that, this will automatically check the table prefix whether already added, if true it does
+// nothing to the table name, or else adds the prefix to the table name and returns new table name with prefix.
+func doQuoteTableName(table, prefix, charLeft, charRight string) string {
+	var (
+		index  int
+		chars  = charLeft + charRight
+		array1 = gstr.SplitAndTrim(table, ",")
+	)
+	for k1, v1 := range array1 {
+		array2 := gstr.SplitAndTrim(v1, " ")
+		// Trim the security chars.
+		array2[0] = gstr.Trim(array2[0], chars)
+		// Check whether it has database name.
+		array3 := gstr.Split(gstr.Trim(array2[0]), ".")
+		for k, v := range array3 {
+			array3[k] = gstr.Trim(v, chars)
+		}
+		index = len(array3) - 1
+		// If the table name already has the prefix, skips the prefix adding.
+		if len(array3[index]) <= len(prefix) || array3[index][:len(prefix)] != prefix {
+			array3[index] = prefix + array3[index]
+		}
+		array2[0] = gstr.Join(array3, ".")
+		// Add the security chars.
+		array2[0] = doQuoteString(array2[0], charLeft, charRight)
+		array1[k1] = gstr.Join(array2, " ")
+	}
+	return gstr.Join(array1, ",")
+}
+
+// doQuoteWord checks given string `s` a word, if true quotes it with `charLeft` and `charRight`
+// and returns the quoted string; or else returns `s` without any change.
+func doQuoteWord(s, charLeft, charRight string) string {
+	if quoteWordReg.MatchString(s) && !gstr.ContainsAny(s, charLeft+charRight) {
+		return charLeft + s + charRight
+	}
+	return s
+}
+
+// doQuoteString quotes string with quote chars.
+// For example, if quote char is '`':
+// "null"                             => "NULL"
+// "user"                             => "`user`"
+// "user u"                           => "`user` u"
+// "user,user_detail"                 => "`user`,`user_detail`"
+// "user u, user_detail ut"           => "`user` u,`user_detail` ut"
+// "user.user u, user.user_detail ut" => "`user`.`user` u,`user`.`user_detail` ut"
+// "u.id, u.name, u.age"              => "`u`.`id`,`u`.`name`,`u`.`age`"
+// "u.id asc"                         => "`u`.`id` asc".
+func doQuoteString(s, charLeft, charRight string) string {
+	array1 := gstr.SplitAndTrim(s, ",")
+	for k1, v1 := range array1 {
+		array2 := gstr.SplitAndTrim(v1, " ")
+		array3 := gstr.Split(gstr.Trim(array2[0]), ".")
+		if len(array3) == 1 {
+			if strings.EqualFold(array3[0], "NULL") {
+				array3[0] = doQuoteWord(array3[0], "", "")
+			} else {
+				array3[0] = doQuoteWord(array3[0], charLeft, charRight)
+			}
+		} else if len(array3) >= 2 {
+			array3[0] = doQuoteWord(array3[0], charLeft, charRight)
+			// Note:
+			// mysql: u.uid
+			// mssql double dots: Database..Table
+			array3[len(array3)-1] = doQuoteWord(array3[len(array3)-1], charLeft, charRight)
+		}
+		array2[0] = gstr.Join(array3, ".")
+		array1[k1] = gstr.Join(array2, " ")
+	}
+	return gstr.Join(array1, ",")
+}
+
+func getFieldsFromStructOrMap(structOrMap interface{}) (fields []string) {
+	fields = []string{}
+	if utils.IsStruct(structOrMap) {
+		structFields, _ := gstructs.Fields(gstructs.FieldsInput{
+			Pointer:         structOrMap,
+			RecursiveOption: gstructs.RecursiveOptionEmbeddedNoTag,
+		})
+		var ormTagValue string
+		for _, structField := range structFields {
+			ormTagValue = structField.Tag(OrmTagForStruct)
+			ormTagValue = gstr.Split(gstr.Trim(ormTagValue), ",")[0]
+			if ormTagValue != "" && gregex.IsMatchString(regularFieldNameRegPattern, ormTagValue) {
+				fields = append(fields, ormTagValue)
+			} else {
+				fields = append(fields, structField.Name())
+			}
+		}
+	} else {
+		fields = gutil.Keys(structOrMap)
+	}
+	return
+}
+
+// GetPrimaryKeyCondition returns a new where condition by primary field name.
+// The optional parameter `where` is like follows:
+// 123                             => primary=123
+// []int{1, 2, 3}                  => primary IN(1,2,3)
+// "john"                          => primary='john'
+// []string{"john", "smith"}       => primary IN('john','smith')
+// g.Map{"id": g.Slice{1,2,3}}     => id IN(1,2,3)
+// g.Map{"id": 1, "name": "john"}  => id=1 AND name='john'
+// etc.
+//
+// Note that it returns the given `where` parameter directly if the `primary` is empty
+// or length of `where` > 1.
+func GetPrimaryKeyCondition(primary string, where ...interface{}) (newWhereCondition []interface{}) {
+	if len(where) == 0 {
+		return nil
+	}
+	if primary == "" {
+		return where
+	}
+	if len(where) == 1 {
+		var (
+			rv   = reflect.ValueOf(where[0])
+			kind = rv.Kind()
+		)
+		if kind == reflect.Ptr {
+			rv = rv.Elem()
+			kind = rv.Kind()
+		}
+		switch kind {
+		case reflect.Map, reflect.Struct:
+			// Ignore the parameter `primary`.
+			break
+
+		default:
+			return []interface{}{map[string]interface{}{
+				primary: where[0],
+			}}
+		}
+	}
+	return where
+}
+
+type formatWhereHolderInput struct {
+	WhereHolder
+	OmitNil   bool
+	OmitEmpty bool
+	Schema    string
+	Table     string // Table is used for fields mapping and filtering internally.
+}
+
+func isKeyValueCanBeOmitEmpty(omitEmpty bool, whereType string, key, value interface{}) bool {
+	if !omitEmpty {
+		return false
+	}
+	// Eg:
+	// Where("id", []int{}).All()             -> SELECT xxx FROM xxx WHERE 0=1
+	// Where("name", "").All()                -> SELECT xxx FROM xxx WHERE `name`=''
+	// OmitEmpty().Where("id", []int{}).All() -> SELECT xxx FROM xxx
+	// OmitEmpty().Where("name", "").All()    -> SELECT xxx FROM xxx
+	// OmitEmpty().Where("1").All()           -> SELECT xxx FROM xxx WHERE 1
+	switch whereType {
+	case whereHolderTypeNoArgs:
+		return false
+
+	case whereHolderTypeIn:
+		return gutil.IsEmpty(value)
+
+	default:
+		if gstr.Count(gconv.String(key), "?") == 0 && gutil.IsEmpty(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// formatWhereHolder formats where statement and its arguments for `Where` and `Having` statements.
+func formatWhereHolder(ctx context.Context, db DB, in formatWhereHolderInput) (newWhere string, newArgs []interface{}) {
+	var (
+		buffer      = bytes.NewBuffer(nil)
+		reflectInfo = reflection.OriginValueAndKind(in.Where)
+	)
+	switch reflectInfo.OriginKind {
+	case reflect.Array, reflect.Slice:
+		newArgs = formatWhereInterfaces(db, gconv.Interfaces(in.Where), buffer, newArgs)
+
+	case reflect.Map:
+		for key, value := range MapOrStructToMapDeep(in.Where, true) {
+			if in.OmitNil && empty.IsNil(value) {
+				continue
+			}
+			if in.OmitEmpty && empty.IsEmpty(value) {
+				continue
+			}
+			newArgs = formatWhereKeyValue(formatWhereKeyValueInput{
+				Db:     db,
+				Buffer: buffer,
+				Args:   newArgs,
+				Key:    key,
+				Value:  value,
+				Prefix: in.Prefix,
+				Type:   in.Type,
+			})
+		}
+
+	case reflect.Struct:
+		// If the `where` parameter is `DO` struct, it then adds `OmitNil` option for this condition,
+		// which will filter all nil parameters in `where`.
+		if isDoStruct(in.Where) {
+			in.OmitNil = true
+		}
+		// If `where` struct implements `iIterator` interface,
+		// it then uses its Iterate function to iterate its key-value pairs.
+		// For example, ListMap and TreeMap are ordered map,
+		// which implement `iIterator` interface and are index-friendly for where conditions.
+		if iterator, ok := in.Where.(iIterator); ok {
+			iterator.Iterator(func(key, value interface{}) bool {
+				ketStr := gconv.String(key)
+				if in.OmitNil && empty.IsNil(value) {
+					return true
+				}
+				if in.OmitEmpty && empty.IsEmpty(value) {
+					return true
+				}
+				newArgs = formatWhereKeyValue(formatWhereKeyValueInput{
+					Db:        db,
+					Buffer:    buffer,
+					Args:      newArgs,
+					Key:       ketStr,
+					Value:     value,
+					OmitEmpty: in.OmitEmpty,
+					Prefix:    in.Prefix,
+					Type:      in.Type,
+				})
+				return true
+			})
+			break
+		}
+		// Automatically mapping and filtering the struct attribute.
+		var (
+			reflectType = reflectInfo.OriginValue.Type()
+			structField reflect.StructField
+			data        = MapOrStructToMapDeep(in.Where, true)
+		)
+		// If `Prefix` is given, it checks and retrieves the table name.
+		if in.Prefix != "" {
+			hasTable, _ := db.GetCore().HasTable(in.Prefix)
+			if hasTable {
+				in.Table = in.Prefix
+			} else {
+				ormTagTableName := getTableNameFromOrmTag(in.Where)
+				if ormTagTableName != "" {
+					in.Table = ormTagTableName
+				}
+			}
+		}
+		// Mapping and filtering fields if `Table` is given.
+		if in.Table != "" {
+			data, _ = db.GetCore().mappingAndFilterData(ctx, in.Schema, in.Table, data, true)
+		}
+		// Put the struct attributes in sequence in Where statement.
+		var ormTagValue string
+		for i := 0; i < reflectType.NumField(); i++ {
+			structField = reflectType.Field(i)
+			// Use tag value from `orm` as field name if specified.
+			ormTagValue = structField.Tag.Get(OrmTagForStruct)
+			ormTagValue = gstr.Split(gstr.Trim(ormTagValue), ",")[0]
+			if ormTagValue == "" {
+				ormTagValue = structField.Name
+			}
+			foundKey, foundValue := gutil.MapPossibleItemByKey(data, ormTagValue)
+			if foundKey != "" {
+				if in.OmitNil && empty.IsNil(foundValue) {
+					continue
+				}
+				if in.OmitEmpty && empty.IsEmpty(foundValue) {
+					continue
+				}
+				newArgs = formatWhereKeyValue(formatWhereKeyValueInput{
+					Db:        db,
+					Buffer:    buffer,
+					Args:      newArgs,
+					Key:       foundKey,
+					Value:     foundValue,
+					OmitEmpty: in.OmitEmpty,
+					Prefix:    in.Prefix,
+					Type:      in.Type,
+				})
+			}
+		}
+
+	default:
+		// Where filter.
+		var omitEmptyCheckValue interface{}
+		if len(in.Args) == 1 {
+			omitEmptyCheckValue = in.Args[0]
+		} else {
+			omitEmptyCheckValue = in.Args
+		}
+		if isKeyValueCanBeOmitEmpty(in.OmitEmpty, in.Type, in.Where, omitEmptyCheckValue) {
+			return
+		}
+		// Usually a string.
+		whereStr := gstr.Trim(gconv.String(in.Where))
+		// Is `whereStr` a field name which composed as a key-value condition?
+		// Eg:
+		// Where("id", 1)
+		// Where("id", g.Slice{1,2,3})
+		if gregex.IsMatchString(regularFieldNameWithoutDotRegPattern, whereStr) && len(in.Args) == 1 {
+			newArgs = formatWhereKeyValue(formatWhereKeyValueInput{
+				Db:        db,
+				Buffer:    buffer,
+				Args:      newArgs,
+				Key:       whereStr,
+				Value:     in.Args[0],
+				OmitEmpty: in.OmitEmpty,
+				Prefix:    in.Prefix,
+				Type:      in.Type,
+			})
+			in.Args = in.Args[:0]
+			break
+		}
+		// If the first part is column name, it automatically adds prefix to the column.
+		if in.Prefix != "" {
+			array := gstr.Split(whereStr, " ")
+			if ok, _ := db.GetCore().HasField(ctx, in.Table, array[0]); ok {
+				whereStr = in.Prefix + "." + whereStr
+			}
+		}
+		// Regular string and parameter place holder handling.
+		// Eg:
+		// Where("id in(?) and name=?", g.Slice{1,2,3}, "john")
+		i := 0
+		for {
+			if i >= len(in.Args) {
+				break
+			}
+			// ===============================================================
+			// Sub query, which is always used along with a string condition.
+			// ===============================================================
+			if subModel, ok := in.Args[i].(*Model); ok {
+				index := -1
+				whereStr, _ = gregex.ReplaceStringFunc(`(\?)`, whereStr, func(s string) string {
+					index++
+					if i+len(newArgs) == index {
+						sqlWithHolder, holderArgs := subModel.getHolderAndArgsAsSubModel(ctx)
+						in.Args = gutil.SliceInsertAfter(in.Args, i, holderArgs...)
+						// Automatically adding the brackets.
+						return "(" + sqlWithHolder + ")"
+					}
+					return s
+				})
+				in.Args = gutil.SliceDelete(in.Args, i)
+				continue
+			}
+			i++
+		}
+		buffer.WriteString(whereStr)
+	}
+
+	if buffer.Len() == 0 {
+		return "", in.Args
+	}
+	if len(in.Args) > 0 {
+		newArgs = append(newArgs, in.Args...)
+	}
+	newWhere = buffer.String()
+	if len(newArgs) > 0 {
+		if gstr.Pos(newWhere, "?") == -1 {
+			if gregex.IsMatchString(lastOperatorRegPattern, newWhere) {
+				// Eg: Where/And/Or("uid>=", 1)
+				newWhere += "?"
+			} else if gregex.IsMatchString(regularFieldNameRegPattern, newWhere) {
+				newWhere = db.GetCore().QuoteString(newWhere)
+				if len(newArgs) > 0 {
+					if utils.IsArray(newArgs[0]) {
+						// Eg:
+						// Where("id", []int{1,2,3})
+						// Where("user.id", []int{1,2,3})
+						newWhere += " IN (?)"
+					} else if empty.IsNil(newArgs[0]) {
+						// Eg:
+						// Where("id", nil)
+						// Where("user.id", nil)
+						newWhere += " IS NULL"
+						newArgs = nil
+					} else {
+						// Eg:
+						// Where/And/Or("uid", 1)
+						// Where/And/Or("user.uid", 1)
+						newWhere += "=?"
+					}
+				}
+			}
+		}
+	}
+	return handleArguments(newWhere, newArgs)
+}
+
+// formatWhereInterfaces formats `where` as []interface{}.
+func formatWhereInterfaces(db DB, where []interface{}, buffer *bytes.Buffer, newArgs []interface{}) []interface{} {
+	if len(where) == 0 {
+		return newArgs
+	}
+	if len(where)%2 != 0 {
+		buffer.WriteString(gstr.Join(gconv.Strings(where), ""))
+		return newArgs
+	}
+	var str string
+	for i := 0; i < len(where); i += 2 {
+		str = gconv.String(where[i])
+		if buffer.Len() > 0 {
+			buffer.WriteString(" AND " + db.GetCore().QuoteWord(str) + "=?")
+		} else {
+			buffer.WriteString(db.GetCore().QuoteWord(str) + "=?")
+		}
+		if s, ok := where[i+1].(Raw); ok {
+			buffer.WriteString(gconv.String(s))
+		} else {
+			newArgs = append(newArgs, where[i+1])
+		}
+	}
+	return newArgs
+}
+
+// formatWhereKeyValueInput is the input parameter for function formatWhereKeyValue.
+type formatWhereKeyValueInput struct {
+	Db        DB            // Db is the underlying DB object for current operation.
+	Buffer    *bytes.Buffer // Buffer is the sql statement string without Args for current operation.
+	Args      []interface{} // Args is the full arguments of current operation.
+	Key       string        // The field name, eg: "id", "name", etc.
+	Value     interface{}   // The field value, can be any types.
+	Type      string        // The value in Where type, eg: whereHolderTypeDefault/NoArgs/In.
+	OmitEmpty bool          // Ignores current condition key if `value` is empty.
+	Prefix    string        // Field prefix, eg: "user", "order", etc.
+}
+
+// formatWhereKeyValue handles each key-value pair of the parameter map.
+// It appends the condition for `in.Key`/`in.Value` to `in.Buffer` and returns
+// the (possibly extended) argument slice. Raw values are written into the sql
+// string directly instead of being added as arguments.
+func formatWhereKeyValue(in formatWhereKeyValueInput) (newArgs []interface{}) {
+	var (
+		quotedKey   = in.Db.GetCore().QuoteWord(in.Key)
+		holderCount = gstr.Count(quotedKey, "?")
+	)
+	if isKeyValueCanBeOmitEmpty(in.OmitEmpty, in.Type, quotedKey, in.Value) {
+		// Condition omitted: return the arguments unchanged.
+		return in.Args
+	}
+	if in.Prefix != "" && !gstr.Contains(quotedKey, ".") {
+		quotedKey = in.Prefix + "." + quotedKey
+	}
+	if in.Buffer.Len() > 0 {
+		// Multiple pairs are joined using AND.
+		in.Buffer.WriteString(" AND ")
+	}
+	// If the value is type of slice, and there's only one '?' holder in
+	// the key string, it automatically adds '?' holder chars according to its arguments count
+	// and converts it to "IN" statement.
+	var (
+		reflectValue = reflect.ValueOf(in.Value)
+		reflectKind  = reflectValue.Kind()
+	)
+	switch reflectKind {
+	// Slice argument.
+	case reflect.Slice, reflect.Array:
+		if holderCount == 0 {
+			// No '?' in the key: treat the slice as an IN condition.
+			in.Buffer.WriteString(quotedKey + " IN(?)")
+			in.Args = append(in.Args, in.Value)
+		} else {
+			if holderCount != reflectValue.Len() {
+				// Placeholder count mismatch: pass the whole slice as one argument.
+				in.Buffer.WriteString(quotedKey)
+				in.Args = append(in.Args, in.Value)
+			} else {
+				// One placeholder per element: spread the slice into arguments.
+				in.Buffer.WriteString(quotedKey)
+				in.Args = append(in.Args, gconv.Interfaces(in.Value)...)
+			}
+		}
+
+	default:
+		if in.Value == nil || empty.IsNil(reflectValue) {
+			if gregex.IsMatchString(regularFieldNameRegPattern, in.Key) {
+				// The key is a single field name.
+				in.Buffer.WriteString(quotedKey + " IS NULL")
+			} else {
+				// The key may have operation chars.
+				in.Buffer.WriteString(quotedKey)
+			}
+		} else {
+			// It also supports "LIKE" statement, which we consider it an operator.
+			quotedKey = gstr.Trim(quotedKey)
+			if gstr.Pos(quotedKey, "?") == -1 {
+				like := " LIKE"
+				if len(quotedKey) > len(like) && gstr.Equal(quotedKey[len(quotedKey)-len(like):], like) {
+					// Eg: Where(g.Map{"name like": "john%"})
+					in.Buffer.WriteString(quotedKey + " ?")
+				} else if gregex.IsMatchString(lastOperatorRegPattern, quotedKey) {
+					// Eg: Where(g.Map{"age > ": 16})
+					in.Buffer.WriteString(quotedKey + " ?")
+				} else if gregex.IsMatchString(regularFieldNameRegPattern, in.Key) {
+					// The key is a regular field name.
+					in.Buffer.WriteString(quotedKey + "=?")
+				} else {
+					// The key is not a regular field name.
+					// Eg: Where(g.Map{"age > 16": nil})
+					// Issue: https://github.com/gogf/gf/issues/765
+					if empty.IsEmpty(in.Value) {
+						in.Buffer.WriteString(quotedKey)
+						break
+					} else {
+						in.Buffer.WriteString(quotedKey + "=?")
+					}
+				}
+			} else {
+				// The key already contains '?' holders: use it as-is.
+				in.Buffer.WriteString(quotedKey)
+			}
+			if s, ok := in.Value.(Raw); ok {
+				// Raw value is embedded directly, not parameterized.
+				in.Buffer.WriteString(gconv.String(s))
+			} else {
+				in.Args = append(in.Args, in.Value)
+			}
+		}
+	}
+	return in.Args
+}
+
+// handleArguments is an important function, which handles the sql and all its arguments
+// before committing them to underlying driver.
+// It flattens slice arguments by expanding the matching '?' holder into a
+// comma-separated holder list, converts empty-slice conditions into a false
+// sql ("0=1"), and normalizes common struct argument types (time, gtime).
+func handleArguments(sql string, args []interface{}) (newSql string, newArgs []interface{}) {
+	newSql = sql
+	// insertHolderCount is used to calculate the inserting position for the '?' holder.
+	insertHolderCount := 0
+	// Handles the slice arguments.
+	if len(args) > 0 {
+		for index, arg := range args {
+			reflectInfo := reflection.OriginValueAndKind(arg)
+			switch reflectInfo.OriginKind {
+			case reflect.Slice, reflect.Array:
+				// It does not split the type of []byte.
+				// Eg: table.Where("name = ?", []byte("john"))
+				if _, ok := arg.([]byte); ok {
+					newArgs = append(newArgs, arg)
+					continue
+				}
+
+				if reflectInfo.OriginValue.Len() == 0 {
+					// Empty slice argument, it converts the sql to a false sql.
+					// Eg:
+					// Query("select * from xxx where id in(?)", g.Slice{}) -> select * from xxx where 0=1
+					// Where("id in(?)", g.Slice{}) -> WHERE 0=1
+					if gstr.Contains(newSql, "?") {
+						whereKeyWord := " WHERE "
+						if p := gstr.PosI(newSql, whereKeyWord); p == -1 {
+							return "0=1", []interface{}{}
+						} else {
+							// Keep everything up to and including " WHERE ", then force false.
+							return gstr.SubStr(newSql, 0, p+len(whereKeyWord)) + "0=1", []interface{}{}
+						}
+					}
+				} else {
+					// Spread the slice elements into individual arguments.
+					for i := 0; i < reflectInfo.OriginValue.Len(); i++ {
+						newArgs = append(newArgs, reflectInfo.OriginValue.Index(i).Interface())
+					}
+				}
+
+				// If the '?' holder count equals the length of the slice,
+				// it does not implement the arguments splitting logic.
+				// Eg: db.Query("SELECT ?+?", g.Slice{1, 2})
+				if len(args) == 1 && gstr.Count(newSql, "?") == reflectInfo.OriginValue.Len() {
+					break
+				}
+				// counter is used to finding the inserting position for the '?' holder.
+				var (
+					counter  = 0
+					replaced = false
+				)
+				// Replace only the '?' holder that corresponds to this argument's
+				// position, expanding it into one holder per slice element.
+				newSql, _ = gregex.ReplaceStringFunc(`\?`, newSql, func(s string) string {
+					if replaced {
+						return s
+					}
+					counter++
+					if counter == index+insertHolderCount+1 {
+						replaced = true
+						insertHolderCount += reflectInfo.OriginValue.Len() - 1
+						return "?" + strings.Repeat(",?", reflectInfo.OriginValue.Len()-1)
+					}
+					return s
+				})
+
+			// Special struct handling.
+			case reflect.Struct:
+				switch arg.(type) {
+				// The underlying driver supports time.Time/*time.Time types.
+				case time.Time, *time.Time:
+					newArgs = append(newArgs, arg)
+					continue
+
+				case gtime.Time:
+					newArgs = append(newArgs, arg.(gtime.Time).Time)
+					continue
+
+				case *gtime.Time:
+					newArgs = append(newArgs, arg.(*gtime.Time).Time)
+					continue
+
+				default:
+					// It converts the struct to string in default
+					// if it has implemented the String interface.
+					if v, ok := arg.(iString); ok {
+						newArgs = append(newArgs, v.String())
+						continue
+					}
+				}
+				newArgs = append(newArgs, arg)
+
+			default:
+				newArgs = append(newArgs, arg)
+			}
+		}
+	}
+	return
+}
+
+// FormatSqlWithArgs binds the arguments to the sql string and returns a complete
+// sql string, just for debugging.
+// It recognizes '?', ':vN', '$N' and '@pN' placeholder styles and renders each
+// argument as a quoted literal; the result must NOT be executed as real sql.
+func FormatSqlWithArgs(sql string, args []interface{}) string {
+	index := -1
+	newQuery, _ := gregex.ReplaceStringFunc(
+		`(\?|:v\d+|\$\d+|@p\d+)`,
+		sql,
+		func(s string) string {
+			index++
+			if len(args) > index {
+				if args[index] == nil {
+					return "null"
+				}
+				// Parameters of type Raw do not require special treatment
+				if v, ok := args[index].(Raw); ok {
+					return gconv.String(v)
+				}
+				reflectInfo := reflection.OriginValueAndKind(args[index])
+				if reflectInfo.OriginKind == reflect.Ptr &&
+					(reflectInfo.OriginValue.IsNil() || !reflectInfo.OriginValue.IsValid()) {
+					return "null"
+				}
+				switch reflectInfo.OriginKind {
+				case reflect.String, reflect.Map, reflect.Slice, reflect.Array:
+					// Quote and escape string-like values.
+					return `'` + gstr.QuoteMeta(gconv.String(args[index]), `'`) + `'`
+
+				case reflect.Struct:
+					if t, ok := args[index].(time.Time); ok {
+						return `'` + t.Format(`2006-01-02 15:04:05`) + `'`
+					}
+					return `'` + gstr.QuoteMeta(gconv.String(args[index]), `'`) + `'`
+				}
+				// Numeric and other scalar kinds are rendered unquoted.
+				return gconv.String(args[index])
+			}
+			// More placeholders than arguments: leave the holder untouched.
+			return s
+		})
+	return newQuery
+}

+ 334 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_model.go

@@ -0,0 +1,334 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/gogf/gf/v2/text/gregex"
+	"github.com/gogf/gf/v2/text/gstr"
+	"github.com/gogf/gf/v2/util/gconv"
+)
+
+// Model is core struct implementing the DAO for ORM.
+// A Model is usually created via Core.Model/TXCore.Model and configured via
+// its chaining methods; with `safe` enabled every chaining call clones the
+// model instead of mutating it in place.
+type Model struct {
+	db            DB                // Underlying DB interface.
+	tx            TX                // Underlying TX interface.
+	rawSql        string            // rawSql is the raw SQL string which marks a raw SQL based Model not a table based Model.
+	schema        string            // Custom database schema.
+	linkType      int               // Mark for operation on master or slave.
+	tablesInit    string            // Table names when model initialization.
+	tables        string            // Operation table names, which can be more than one table names and aliases, like: "user", "user u", "user u, user_detail ud".
+	fields        string            // Operation fields, multiple fields joined using char ','.
+	fieldsEx      string            // Excluded operation fields, multiple fields joined using char ','.
+	withArray     []interface{}     // Arguments for With feature.
+	withAll       bool              // Enable model association operations on all objects that have "with" tag in the struct.
+	extraArgs     []interface{}     // Extra custom arguments for sql, which are prepended to the arguments before sql committed to underlying driver.
+	whereBuilder  *WhereBuilder     // Condition builder for where operation.
+	groupBy       string            // Used for "group by" statement.
+	orderBy       string            // Used for "order by" statement.
+	having        []interface{}     // Used for "having..." statement.
+	start         int               // Used for "select ... start, limit ..." statement.
+	limit         int               // Used for "select ... start, limit ..." statement.
+	option        int               // Option for extra operation features.
+	offset        int               // Offset statement for some databases grammar.
+	partition     string            // Partition table partition name.
+	data          interface{}       // Data for operation, which can be type of map/[]map/struct/*struct/string, etc.
+	batch         int               // Batch number for batch Insert/Replace/Save operations.
+	filter        bool              // Filter data and where key-value pairs according to the fields of the table.
+	distinct      string            // Force the query to only return distinct results.
+	lockInfo      string            // Lock for update or in shared lock.
+	cacheEnabled  bool              // Enable sql result cache feature, which is mainly for indicating cache duration(especially 0) usage.
+	cacheOption   CacheOption       // Cache option for query statement.
+	hookHandler   HookHandler       // Hook functions for model hook feature.
+	unscoped      bool              // Disables soft deleting features when select/delete operations.
+	safe          bool              // If true, it clones and returns a new model object whenever operation done; or else it changes the attribute of current model.
+	onDuplicate   interface{}       // onDuplicate is used for ON "DUPLICATE KEY UPDATE" statement.
+	onDuplicateEx interface{}       // onDuplicateEx is used for excluding some columns ON "DUPLICATE KEY UPDATE" statement.
+	tableAliasMap map[string]string // Table alias to true table name, usually used in join statements.
+}
+
+// ModelHandler is a function that handles given Model and returns a new Model that is custom modified.
+type ModelHandler func(m *Model) *Model
+
+// ChunkHandler is a function that is used in function Chunk, which handles given Result and error.
+// It returns true if it wants to continue chunking, or else it returns false to stop chunking.
+type ChunkHandler func(result Result, err error) bool
+
+const (
+	// Master/slave link marks for Model.linkType.
+	linkTypeMaster           = 1
+	linkTypeSlave            = 2
+	defaultFields            = "*"
+	// Operator values for WhereHolder.Operator.
+	whereHolderOperatorWhere = 1
+	whereHolderOperatorAnd   = 2
+	whereHolderOperatorOr    = 3
+	// Type values for WhereHolder.Type.
+	whereHolderTypeDefault   = "Default"
+	whereHolderTypeNoArgs    = "NoArgs"
+	whereHolderTypeIn        = "In"
+)
+
+// Model creates and returns a new ORM model from given schema.
+// The parameter `tableNameQueryOrStruct` can be more than one table names, and also alias name, like:
+//  1. Model names:
+//     db.Model("user")
+//     db.Model("user u")
+//     db.Model("user, user_detail")
+//     db.Model("user u, user_detail ud")
+//  2. Model name with alias:
+//     db.Model("user", "u")
+//  3. Model name with sub-query:
+//     db.Model("? AS a, ? AS b", subQuery1, subQuery2)
+func (c *Core) Model(tableNameQueryOrStruct ...interface{}) *Model {
+	var (
+		ctx       = c.db.GetCtx()
+		tableStr  string
+		tableName string
+		extraArgs []interface{}
+	)
+	// Model creation with sub-query.
+	// Detected by a '?' holder in the first string argument; the remaining
+	// arguments are treated as sub-query arguments.
+	if len(tableNameQueryOrStruct) > 1 {
+		conditionStr := gconv.String(tableNameQueryOrStruct[0])
+		if gstr.Contains(conditionStr, "?") {
+			whereHolder := WhereHolder{
+				Where: conditionStr,
+				Args:  tableNameQueryOrStruct[1:],
+			}
+			tableStr, extraArgs = formatWhereHolder(ctx, c.db, formatWhereHolderInput{
+				WhereHolder: whereHolder,
+				OmitNil:     false,
+				OmitEmpty:   false,
+				Schema:      "",
+				Table:       "",
+			})
+		}
+	}
+	// Normal model creation.
+	if tableStr == "" {
+		tableNames := make([]string, len(tableNameQueryOrStruct))
+		for k, v := range tableNameQueryOrStruct {
+			if s, ok := v.(string); ok {
+				tableNames[k] = s
+			} else if tableName = getTableNameFromOrmTag(v); tableName != "" {
+				// Non-string argument: resolve the table name from the orm tag.
+				tableNames[k] = tableName
+			}
+		}
+		if len(tableNames) > 1 {
+			// Two arguments: table name plus alias, eg: db.Model("user", "u").
+			tableStr = fmt.Sprintf(
+				`%s AS %s`, c.QuotePrefixTableName(tableNames[0]), c.QuoteWord(tableNames[1]),
+			)
+		} else if len(tableNames) == 1 {
+			tableStr = c.QuotePrefixTableName(tableNames[0])
+		}
+	}
+	m := &Model{
+		db:            c.db,
+		schema:        c.schema,
+		tablesInit:    tableStr,
+		tables:        tableStr,
+		fields:        defaultFields,
+		start:         -1,
+		offset:        -1,
+		filter:        true,
+		extraArgs:     extraArgs,
+		tableAliasMap: make(map[string]string),
+	}
+	// The where builder must be created after the model, as it is bound to it.
+	m.whereBuilder = m.Builder()
+	if defaultModelSafe {
+		m.safe = true
+	}
+	return m
+}
+
+// Raw creates and returns a model based on a raw sql not a table.
+// Example:
+//
+//	db.Raw("SELECT * FROM `user` WHERE `name` = ?", "john").Scan(&result)
+func (c *Core) Raw(rawSql string, args ...interface{}) *Model {
+	rawModel := c.Model()
+	rawModel.rawSql = rawSql
+	rawModel.extraArgs = args
+	return rawModel
+}
+
+// Raw sets current model as a raw sql model.
+// Example:
+//
+//	db.Raw("SELECT * FROM `user` WHERE `name` = ?", "john").Scan(&result)
+//
+// See Core.Raw.
+func (m *Model) Raw(rawSql string, args ...interface{}) *Model {
+	rawModel := m.db.Raw(rawSql, args...)
+	// Keep the db and tx of the current model on the new raw model.
+	rawModel.db = m.db
+	rawModel.tx = m.tx
+	return rawModel
+}
+
+// Raw acts like Core.Raw except it operates on transaction.
+// See Core.Raw.
+func (tx *TXCore) Raw(rawSql string, args ...interface{}) *Model {
+	return tx.Model().Raw(rawSql, args...)
+}
+
+// With creates and returns an ORM model based on metadata of given object.
+// The objects are passed through to Model.With on a freshly created model.
+func (c *Core) With(objects ...interface{}) *Model {
+	return c.db.Model().With(objects...)
+}
+
+// Partition sets Partition name.
+// Example:
+// dao.User.Ctx(ctx).Partition("p1","p2","p3").All()
+func (m *Model) Partition(partitions ...string) *Model {
+	clone := m.getModel()
+	// Multiple partition names are stored comma-joined.
+	clone.partition = gstr.Join(partitions, ",")
+	return clone
+}
+
+// Model acts like Core.Model except it operates on transaction.
+// See Core.Model.
+func (tx *TXCore) Model(tableNameQueryOrStruct ...interface{}) *Model {
+	model := tx.db.Model(tableNameQueryOrStruct...)
+	// Bind the model to this transaction so its operations run inside it.
+	model.db = tx.db
+	model.tx = tx
+	return model
+}
+
+// With acts like Core.With except it operates on transaction.
+// See Core.With.
+// NOTE(review): unlike Core.With this accepts a single object rather than a
+// variadic list — confirm whether the signatures should match.
+func (tx *TXCore) With(object interface{}) *Model {
+	return tx.Model().With(object)
+}
+
+// Ctx sets the context for current operation.
+// A nil context is ignored and the model is returned unchanged.
+func (m *Model) Ctx(ctx context.Context) *Model {
+	if ctx == nil {
+		return m
+	}
+	clone := m.getModel()
+	clone.db = clone.db.Ctx(ctx)
+	if m.tx != nil {
+		clone.tx = clone.tx.Ctx(ctx)
+	}
+	return clone
+}
+
+// GetCtx returns the context for current Model.
+// It returns `context.Background()` if there's no context previously set.
+// The transaction context, if any, takes precedence over the db context.
+func (m *Model) GetCtx() context.Context {
+	if m.tx != nil && m.tx.GetCtx() != nil {
+		return m.tx.GetCtx()
+	}
+	return m.db.GetCtx()
+}
+
+// As sets an alias name for current table.
+// For join statements the alias is applied to the last joined table; for a
+// plain table it is appended as " AS <alias>".
+func (m *Model) As(as string) *Model {
+	if m.tables != "" {
+		model := m.getModel()
+		split := " JOIN "
+		if gstr.ContainsI(model.tables, split) {
+			// For join table.
+			array := gstr.Split(model.tables, split)
+			// Insert the alias before the ON clause of the last joined table.
+			array[len(array)-1], _ = gregex.ReplaceString(`(.+) ON`, fmt.Sprintf(`$1 AS %s ON`, as), array[len(array)-1])
+			model.tables = gstr.Join(array, split)
+		} else {
+			// For base table.
+			model.tables = gstr.TrimRight(model.tables) + " AS " + as
+		}
+		return model
+	}
+	return m
+}
+
+// DB sets/changes the db object for current operation.
+func (m *Model) DB(db DB) *Model {
+	clone := m.getModel()
+	clone.db = db
+	return clone
+}
+
+// TX sets/changes the transaction for current operation.
+// It also switches the underlying db to the transaction's DB object.
+func (m *Model) TX(tx TX) *Model {
+	clone := m.getModel()
+	clone.db = tx.GetDB()
+	clone.tx = tx
+	return clone
+}
+
+// Schema sets the schema for current operation.
+func (m *Model) Schema(schema string) *Model {
+	clone := m.getModel()
+	clone.schema = schema
+	return clone
+}
+
+// Clone creates and returns a new model which is a Clone of current model.
+// Note that it uses deep-copy for the Clone.
+// The where builder and the slice attributes are copied explicitly so that
+// the clone does not share mutable state with the original model.
+func (m *Model) Clone() *Model {
+	newModel := (*Model)(nil)
+	// Create via tx when available so the clone stays bound to the transaction.
+	if m.tx != nil {
+		newModel = m.tx.Model(m.tablesInit)
+	} else {
+		newModel = m.db.Model(m.tablesInit)
+	}
+	// Basic attributes copy.
+	*newModel = *m
+	// WhereBuilder copy, note the attribute pointer.
+	newModel.whereBuilder = m.whereBuilder.Clone()
+	newModel.whereBuilder.model = newModel
+	// Shallow copy slice attributes.
+	if n := len(m.extraArgs); n > 0 {
+		newModel.extraArgs = make([]interface{}, n)
+		copy(newModel.extraArgs, m.extraArgs)
+	}
+	if n := len(m.withArray); n > 0 {
+		newModel.withArray = make([]interface{}, n)
+		copy(newModel.withArray, m.withArray)
+	}
+	return newModel
+}
+
+// Master marks the following operation on master node.
+func (m *Model) Master() *Model {
+	clone := m.getModel()
+	clone.linkType = linkTypeMaster
+	return clone
+}
+
+// Slave marks the following operation on slave node.
+// Note that it makes sense only if there's any slave node configured.
+func (m *Model) Slave() *Model {
+	clone := m.getModel()
+	clone.linkType = linkTypeSlave
+	return clone
+}
+
+// Safe marks this model safe or unsafe. If safe is true, it clones and returns a new model object
+// whenever the operation done, or else it changes the attribute of current model.
+// Calling Safe() with no argument enables safe mode.
+func (m *Model) Safe(safe ...bool) *Model {
+	m.safe = true
+	if len(safe) > 0 {
+		m.safe = safe[0]
+	}
+	return m
+}
+
+// Args sets custom arguments for model operation.
+// NOTE(review): `args` is appended as a single []interface{} element rather
+// than spread with `args...` — confirm this is the intended behavior for
+// consumers of extraArgs.
+func (m *Model) Args(args ...interface{}) *Model {
+	model := m.getModel()
+	model.extraArgs = append(model.extraArgs, args)
+	return model
+}
+
+// Handler calls each of `handlers` on current Model and returns a new Model.
+// ModelHandler is a function that handles given Model and returns a new Model that is custom modified.
+func (m *Model) Handler(handlers ...ModelHandler) *Model {
+	clone := m.getModel()
+	for _, apply := range handlers {
+		clone = apply(clone)
+	}
+	return clone
+}

+ 124 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder.go

@@ -0,0 +1,124 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"fmt"
+)
+
+// WhereBuilder holds multiple where conditions in a group.
+// It is bound to one Model and is chain-safe: every builder method returns a
+// cloned builder (see getBuilder/Clone).
+type WhereBuilder struct {
+	model       *Model        // A WhereBuilder should be bound to certain Model.
+	whereHolder []WhereHolder // Condition strings for where operation.
+}
+
+// WhereHolder is the holder for where condition preparing.
+type WhereHolder struct {
+	Type     string        // Type of this holder, eg: whereHolderTypeDefault/NoArgs/In.
+	Operator int           // Operator for this holder, eg: whereHolderOperatorWhere/And/Or.
+	Where    interface{}   // Where parameter, which can commonly be type of string/map/struct.
+	Args     []interface{} // Arguments for where parameter.
+	Prefix   string        // Field prefix, eg: "user.", "order.".
+}
+
+// Builder creates and returns a WhereBuilder. Please note that the builder is chain-safe.
+func (m *Model) Builder() *WhereBuilder {
+	return &WhereBuilder{
+		model:       m,
+		whereHolder: make([]WhereHolder, 0),
+	}
+}
+
+// getBuilder creates and returns a cloned WhereBuilder of current WhereBuilder.
+// It is what makes the builder chain-safe: each chaining method mutates the
+// clone, never the receiver.
+func (b *WhereBuilder) getBuilder() *WhereBuilder {
+	return b.Clone()
+}
+
+// Clone clones and returns a WhereBuilder that is a copy of current one.
+// The holder slice is duplicated so the clone is independent of the receiver.
+func (b *WhereBuilder) Clone() *WhereBuilder {
+	cloned := b.model.Builder()
+	cloned.whereHolder = append(make([]WhereHolder, 0, len(b.whereHolder)), b.whereHolder...)
+	return cloned
+}
+
+// Build builds current WhereBuilder and returns the condition string and parameters.
+// Holders are processed in insertion order; WHERE/AND holders are joined with
+// "AND" and OR holders with "OR", each new part wrapped in parentheses.
+func (b *WhereBuilder) Build() (conditionWhere string, conditionArgs []interface{}) {
+	var (
+		ctx                         = b.model.GetCtx()
+		autoPrefix                  = b.model.getAutoPrefix()
+		tableForMappingAndFiltering = b.model.tables
+	)
+	if len(b.whereHolder) > 0 {
+		for _, holder := range b.whereHolder {
+			if holder.Prefix == "" {
+				holder.Prefix = autoPrefix
+			}
+			switch holder.Operator {
+			case whereHolderOperatorWhere, whereHolderOperatorAnd:
+				newWhere, newArgs := formatWhereHolder(ctx, b.model.db, formatWhereHolderInput{
+					WhereHolder: holder,
+					OmitNil:     b.model.option&optionOmitNilWhere > 0,
+					OmitEmpty:   b.model.option&optionOmitEmptyWhere > 0,
+					Schema:      b.model.schema,
+					Table:       tableForMappingAndFiltering,
+				})
+				if len(newWhere) > 0 {
+					if len(conditionWhere) == 0 {
+						conditionWhere = newWhere
+					} else if conditionWhere[0] == '(' {
+						// Existing condition is already parenthesized.
+						conditionWhere = fmt.Sprintf(`%s AND (%s)`, conditionWhere, newWhere)
+					} else {
+						conditionWhere = fmt.Sprintf(`(%s) AND (%s)`, conditionWhere, newWhere)
+					}
+					conditionArgs = append(conditionArgs, newArgs...)
+				}
+
+			case whereHolderOperatorOr:
+				// Same formatting as above but joined with OR.
+				newWhere, newArgs := formatWhereHolder(ctx, b.model.db, formatWhereHolderInput{
+					WhereHolder: holder,
+					OmitNil:     b.model.option&optionOmitNilWhere > 0,
+					OmitEmpty:   b.model.option&optionOmitEmptyWhere > 0,
+					Schema:      b.model.schema,
+					Table:       tableForMappingAndFiltering,
+				})
+				if len(newWhere) > 0 {
+					if len(conditionWhere) == 0 {
+						conditionWhere = newWhere
+					} else if conditionWhere[0] == '(' {
+						conditionWhere = fmt.Sprintf(`%s OR (%s)`, conditionWhere, newWhere)
+					} else {
+						conditionWhere = fmt.Sprintf(`(%s) OR (%s)`, conditionWhere, newWhere)
+					}
+					conditionArgs = append(conditionArgs, newArgs...)
+				}
+			}
+		}
+	}
+	return
+}
+
+// convertWhereBuilder converts parameter `where` to condition string and parameters if `where` is also a WhereBuilder.
+// For non-builder `where` values the input is returned unchanged.
+func (b *WhereBuilder) convertWhereBuilder(where interface{}, args []interface{}) (newWhere interface{}, newArgs []interface{}) {
+	var builder *WhereBuilder
+	switch v := where.(type) {
+	case WhereBuilder:
+		// Value type: take the address of the local copy.
+		builder = &v
+
+	case *WhereBuilder:
+		builder = v
+	}
+	if builder != nil {
+		conditionWhere, conditionArgs := builder.Build()
+		// Wrap in parentheses unless it is the first single-holder condition.
+		if conditionWhere != "" && (len(b.whereHolder) == 0 || len(builder.whereHolder) > 1) {
+			conditionWhere = "(" + conditionWhere + ")"
+		}
+		return conditionWhere, conditionArgs
+	}
+	return where, args
+}

+ 161 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder_where.go

@@ -0,0 +1,161 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"fmt"
+
+	"github.com/gogf/gf/v2/text/gstr"
+)
+
+// doWhereType sets the condition statement for the model. The parameter `where` can be type of
+// string/map/gmap/slice/struct/*struct, etc. Note that, if it's called more than one times,
+// multiple conditions will be joined into where statement using "AND".
+// An empty `whereType` is resolved from the argument count: no args means a
+// NoArgs holder, otherwise a Default holder.
+func (b *WhereBuilder) doWhereType(whereType string, where interface{}, args ...interface{}) *WhereBuilder {
+	where, args = b.convertWhereBuilder(where, args)
+
+	// Work on a clone to keep the builder chain-safe.
+	builder := b.getBuilder()
+	if builder.whereHolder == nil {
+		builder.whereHolder = make([]WhereHolder, 0)
+	}
+	if whereType == "" {
+		if len(args) == 0 {
+			whereType = whereHolderTypeNoArgs
+		} else {
+			whereType = whereHolderTypeDefault
+		}
+	}
+	builder.whereHolder = append(builder.whereHolder, WhereHolder{
+		Type:     whereType,
+		Operator: whereHolderOperatorWhere,
+		Where:    where,
+		Args:     args,
+	})
+	return builder
+}
+
+// doWherefType builds condition string using fmt.Sprintf and arguments.
+// Note that if the number of `args` is more than the placeholder in `format`,
+// the extra `args` will be used as the where condition arguments of the Model.
+// NOTE(review): if fewer `args` are passed than there are `?` placeholders in
+// `format`, the slice expressions below panic — callers must always supply at
+// least `placeHolderCount` arguments.
+func (b *WhereBuilder) doWherefType(t string, format string, args ...interface{}) *WhereBuilder {
+	var (
+		// The trailing `?` placeholders consume arguments from the end of
+		// `args`; the leading arguments feed the fmt verbs in `format`.
+		placeHolderCount = gstr.Count(format, "?")
+		conditionStr     = fmt.Sprintf(format, args[:len(args)-placeHolderCount]...)
+	)
+	return b.doWhereType(t, conditionStr, args[len(args)-placeHolderCount:]...)
+}
+
+// Where sets the condition statement for the builder. The parameter `where` can be type of
+// string/map/gmap/slice/struct/*struct, etc. Note that, if it's called more than one times,
+// multiple conditions will be joined into where statement using "AND".
+// Eg:
+// Where("uid=10000")
+// Where("uid", 10000)
+// Where("money>? AND name like ?", 99999, "vip_%")
+// Where("uid", 1).Where("name", "john")
+// Where("status IN (?)", g.Slice{1,2,3})
+// Where("age IN(?,?)", 18, 50)
+// Where(User{ Id : 1, UserName : "john"}).
+// The empty type string lets doWhereType resolve the holder type itself.
+func (b *WhereBuilder) Where(where interface{}, args ...interface{}) *WhereBuilder {
+	return b.doWhereType(``, where, args...)
+}
+
+// Wheref builds condition string using fmt.Sprintf and arguments.
+// Note that if the number of `args` is more than the placeholder in `format`,
+// the extra `args` will be used as the where condition arguments of the Model.
+// Eg:
+// Wheref(`amount<? and status=%s`, "paid", 100)  => WHERE `amount`<100 and status='paid'
+// Wheref(`amount<%d and status=%s`, 100, "paid") => WHERE `amount`<100 and status='paid'
+func (b *WhereBuilder) Wheref(format string, args ...interface{}) *WhereBuilder {
+	return b.doWherefType(``, format, args...)
+}
+
+// WherePri does the same logic as Model.Where except that if the parameter `where`
+// is a single condition like int/string/float/slice, it treats the condition as the primary
+// key value. That is, if primary key is "id" and given `where` parameter as "123", the
+// WherePri function treats the condition as "id=123", but Model.Where treats the condition
+// as string "123".
+func (b *WhereBuilder) WherePri(where interface{}, args ...interface{}) *WhereBuilder {
+	// With extra arguments this cannot be a bare primary key value.
+	if len(args) > 0 {
+		return b.Where(where, args...)
+	}
+	newWhere := GetPrimaryKeyCondition(b.model.getPrimaryKey(), where)
+	return b.Where(newWhere[0], newWhere[1:]...)
+}
+
+// WhereLT builds `column < value` statement.
+func (b *WhereBuilder) WhereLT(column string, value interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s < ?`, quotedColumn, value)
+}
+
+// WhereLTE builds `column <= value` statement.
+func (b *WhereBuilder) WhereLTE(column string, value interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s <= ?`, quotedColumn, value)
+}
+
+// WhereGT builds `column > value` statement.
+func (b *WhereBuilder) WhereGT(column string, value interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s > ?`, quotedColumn, value)
+}
+
+// WhereGTE builds `column >= value` statement.
+func (b *WhereBuilder) WhereGTE(column string, value interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s >= ?`, quotedColumn, value)
+}
+
+// WhereBetween builds `column BETWEEN min AND max` statement.
+func (b *WhereBuilder) WhereBetween(column string, min, max interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s BETWEEN ? AND ?`, quotedColumn, min, max)
+}
+
+// WhereLike builds `column LIKE like` statement.
+func (b *WhereBuilder) WhereLike(column string, like string) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s LIKE ?`, quotedColumn, like)
+}
+
+// WhereIn builds `column IN (in)` statement.
+func (b *WhereBuilder) WhereIn(column string, in interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.doWherefType(whereHolderTypeIn, `%s IN (?)`, quotedColumn, in)
+}
+
+// WhereNull builds `columns[0] IS NULL AND columns[1] IS NULL ...` statement.
+func (b *WhereBuilder) WhereNull(columns ...string) *WhereBuilder {
+	result := b
+	for _, col := range columns {
+		result = result.Wheref(`%s IS NULL`, b.model.QuoteWord(col))
+	}
+	return result
+}
+
+// WhereNotBetween builds `column NOT BETWEEN min AND max` statement.
+func (b *WhereBuilder) WhereNotBetween(column string, min, max interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s NOT BETWEEN ? AND ?`, quotedColumn, min, max)
+}
+
+// WhereNotLike builds `column NOT LIKE like` statement.
+func (b *WhereBuilder) WhereNotLike(column string, like interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s NOT LIKE ?`, quotedColumn, like)
+}
+
+// WhereNot builds `column != value` statement.
+func (b *WhereBuilder) WhereNot(column string, value interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.Wheref(`%s != ?`, quotedColumn, value)
+}
+
+// WhereNotIn builds `column NOT IN (in)` statement.
+func (b *WhereBuilder) WhereNotIn(column string, in interface{}) *WhereBuilder {
+	quotedColumn := b.model.QuoteWord(column)
+	return b.doWherefType(whereHolderTypeIn, `%s NOT IN (?)`, quotedColumn, in)
+}
+
+// WhereNotNull builds `columns[0] IS NOT NULL AND columns[1] IS NOT NULL ...` statement.
+func (b *WhereBuilder) WhereNotNull(columns ...string) *WhereBuilder {
+	result := b
+	for _, col := range columns {
+		result = result.Wheref(`%s IS NOT NULL`, b.model.QuoteWord(col))
+	}
+	return result
+}

+ 101 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder_where_prefix.go

@@ -0,0 +1,101 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+// WherePrefix performs as Where, but it adds prefix to each field in where statement.
+// Eg:
+// WherePrefix("order", "status", "paid")                        => WHERE `order`.`status`='paid'
+// WherePrefix("order", struct{Status:"paid", "channel":"bank"}) => WHERE `order`.`status`='paid' AND `order`.`channel`='bank'
+func (b *WhereBuilder) WherePrefix(prefix string, where interface{}, args ...interface{}) *WhereBuilder {
+	where, args = b.convertWhereBuilder(where, args)
+
+	builder := b.getBuilder()
+	if builder.whereHolder == nil {
+		builder.whereHolder = make([]WhereHolder, 0)
+	}
+	builder.whereHolder = append(builder.whereHolder, WhereHolder{
+		Type:     whereHolderTypeDefault,
+		Operator: whereHolderOperatorWhere,
+		Where:    where,
+		Args:     args,
+		Prefix:   prefix,
+	})
+	return builder
+}
+
+// WherePrefixLT builds `prefix.column < value` statement.
+func (b *WhereBuilder) WherePrefixLT(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s < ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WherePrefixLTE builds `prefix.column <= value` statement.
+func (b *WhereBuilder) WherePrefixLTE(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s <= ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WherePrefixGT builds `prefix.column > value` statement.
+func (b *WhereBuilder) WherePrefixGT(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s > ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WherePrefixGTE builds `prefix.column >= value` statement.
+func (b *WhereBuilder) WherePrefixGTE(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s >= ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WherePrefixBetween builds `prefix.column BETWEEN min AND max` statement.
+func (b *WhereBuilder) WherePrefixBetween(prefix string, column string, min, max interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s BETWEEN ? AND ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), min, max)
+}
+
+// WherePrefixLike builds `prefix.column LIKE like` statement.
+func (b *WhereBuilder) WherePrefixLike(prefix string, column string, like interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s LIKE ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), like)
+}
+
+// WherePrefixIn builds `prefix.column IN (in)` statement.
+func (b *WhereBuilder) WherePrefixIn(prefix string, column string, in interface{}) *WhereBuilder {
+	return b.doWherefType(whereHolderTypeIn, `%s.%s IN (?)`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), in)
+}
+
+// WherePrefixNull builds `prefix.columns[0] IS NULL AND prefix.columns[1] IS NULL ...` statement.
+func (b *WhereBuilder) WherePrefixNull(prefix string, columns ...string) *WhereBuilder {
+	builder := b
+	for _, column := range columns {
+		builder = builder.Wheref(`%s.%s IS NULL`, b.model.QuoteWord(prefix), b.model.QuoteWord(column))
+	}
+	return builder
+}
+
+// WherePrefixNotBetween builds `prefix.column NOT BETWEEN min AND max` statement.
+func (b *WhereBuilder) WherePrefixNotBetween(prefix string, column string, min, max interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s NOT BETWEEN ? AND ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), min, max)
+}
+
+// WherePrefixNotLike builds `prefix.column NOT LIKE like` statement.
+func (b *WhereBuilder) WherePrefixNotLike(prefix string, column string, like interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s NOT LIKE ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), like)
+}
+
+// WherePrefixNot builds `prefix.column != value` statement.
+func (b *WhereBuilder) WherePrefixNot(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.Wheref(`%s.%s != ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WherePrefixNotIn builds `prefix.column NOT IN (in)` statement.
+func (b *WhereBuilder) WherePrefixNotIn(prefix string, column string, in interface{}) *WhereBuilder {
+	return b.doWherefType(whereHolderTypeIn, `%s.%s NOT IN (?)`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), in)
+}
+
+// WherePrefixNotNull builds `prefix.columns[0] IS NOT NULL AND prefix.columns[1] IS NOT NULL ...` statement.
+func (b *WhereBuilder) WherePrefixNotNull(prefix string, columns ...string) *WhereBuilder {
+	builder := b
+	for _, column := range columns {
+		builder = builder.Wheref(`%s.%s IS NOT NULL`, b.model.QuoteWord(prefix), b.model.QuoteWord(column))
+	}
+	return builder
+}

+ 125 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder_whereor.go

@@ -0,0 +1,125 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+import (
+	"fmt"
+
+	"github.com/gogf/gf/v2/text/gstr"
+)
+
+// WhereOr adds "OR" condition to the where statement.
+func (b *WhereBuilder) doWhereOrType(t string, where interface{}, args ...interface{}) *WhereBuilder {
+	where, args = b.convertWhereBuilder(where, args)
+
+	builder := b.getBuilder()
+	if builder.whereHolder == nil {
+		builder.whereHolder = make([]WhereHolder, 0)
+	}
+	builder.whereHolder = append(builder.whereHolder, WhereHolder{
+		Type:     t,
+		Operator: whereHolderOperatorOr,
+		Where:    where,
+		Args:     args,
+	})
+	return builder
+}
+
+// WhereOrf builds `OR` condition string using fmt.Sprintf and arguments.
+func (b *WhereBuilder) doWhereOrfType(t string, format string, args ...interface{}) *WhereBuilder {
+	var (
+		placeHolderCount = gstr.Count(format, "?")
+		conditionStr     = fmt.Sprintf(format, args[:len(args)-placeHolderCount]...)
+	)
+	return b.doWhereOrType(t, conditionStr, args[len(args)-placeHolderCount:]...)
+}
+
+// WhereOr adds "OR" condition to the where statement.
+func (b *WhereBuilder) WhereOr(where interface{}, args ...interface{}) *WhereBuilder {
+	return b.doWhereOrType(``, where, args...)
+}
+
+// WhereOrf builds `OR` condition string using fmt.Sprintf and arguments.
+// Eg:
+// WhereOrf(`amount<? and status=%s`, "paid", 100)  => WHERE xxx OR `amount`<100 and status='paid'
+// WhereOrf(`amount<%d and status=%s`, 100, "paid") => WHERE xxx OR `amount`<100 and status='paid'
+func (b *WhereBuilder) WhereOrf(format string, args ...interface{}) *WhereBuilder {
+	return b.doWhereOrfType(``, format, args...)
+}
+
+// WhereOrNot builds `column != value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrNot(column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s != ?`, column, value)
+}
+
+// WhereOrLT builds `column < value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrLT(column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s < ?`, column, value)
+}
+
+// WhereOrLTE builds `column <= value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrLTE(column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s <= ?`, column, value)
+}
+
+// WhereOrGT builds `column > value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrGT(column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s > ?`, column, value)
+}
+
+// WhereOrGTE builds `column >= value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrGTE(column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s >= ?`, column, value)
+}
+
+// WhereOrBetween builds `column BETWEEN min AND max` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrBetween(column string, min, max interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s BETWEEN ? AND ?`, b.model.QuoteWord(column), min, max)
+}
+
+// WhereOrLike builds `column LIKE 'like'` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrLike(column string, like interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s LIKE ?`, b.model.QuoteWord(column), like)
+}
+
+// WhereOrIn builds `column IN (in)` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrIn(column string, in interface{}) *WhereBuilder {
+	return b.doWhereOrfType(whereHolderTypeIn, `%s IN (?)`, b.model.QuoteWord(column), in)
+}
+
+// WhereOrNull builds `columns[0] IS NULL OR columns[1] IS NULL ...` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrNull(columns ...string) *WhereBuilder {
+	var builder *WhereBuilder
+	for _, column := range columns {
+		builder = b.WhereOrf(`%s IS NULL`, b.model.QuoteWord(column))
+	}
+	return builder
+}
+
+// WhereOrNotBetween builds `column NOT BETWEEN min AND max` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrNotBetween(column string, min, max interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s NOT BETWEEN ? AND ?`, b.model.QuoteWord(column), min, max)
+}
+
+// WhereOrNotLike builds `column NOT LIKE like` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrNotLike(column string, like interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s NOT LIKE ?`, b.model.QuoteWord(column), like)
+}
+
+// WhereOrNotIn builds `column NOT IN (in)` statement.
+func (b *WhereBuilder) WhereOrNotIn(column string, in interface{}) *WhereBuilder {
+	return b.doWhereOrfType(whereHolderTypeIn, `%s NOT IN (?)`, b.model.QuoteWord(column), in)
+}
+
+// WhereOrNotNull builds `columns[0] IS NOT NULL OR columns[1] IS NOT NULL ...` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrNotNull(columns ...string) *WhereBuilder {
+	builder := b
+	for _, column := range columns {
+		builder = builder.WhereOrf(`%s IS NOT NULL`, b.model.QuoteWord(column))
+	}
+	return builder
+}

+ 98 - 0
vendor/github.com/gogf/gf/v2/database/gdb/gdb_model_builder_whereor_prefix.go

@@ -0,0 +1,98 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gdb
+
+// WhereOrPrefix performs as WhereOr, but it adds prefix to each field in where statement.
+// Eg:
+// WhereOrPrefix("order", "status", "paid")                        => WHERE xxx OR (`order`.`status`='paid')
+// WhereOrPrefix("order", struct{Status:"paid", "channel":"bank"}) => WHERE xxx OR (`order`.`status`='paid' AND `order`.`channel`='bank')
+func (b *WhereBuilder) WhereOrPrefix(prefix string, where interface{}, args ...interface{}) *WhereBuilder {
+	where, args = b.convertWhereBuilder(where, args)
+
+	builder := b.getBuilder()
+	builder.whereHolder = append(builder.whereHolder, WhereHolder{
+		Type:     whereHolderTypeDefault,
+		Operator: whereHolderOperatorOr,
+		Where:    where,
+		Args:     args,
+		Prefix:   prefix,
+	})
+	return builder
+}
+
+// WhereOrPrefixNot builds `prefix.column != value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixNot(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s != ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WhereOrPrefixLT builds `prefix.column < value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixLT(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s < ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WhereOrPrefixLTE builds `prefix.column <= value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixLTE(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s <= ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WhereOrPrefixGT builds `prefix.column > value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixGT(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s > ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WhereOrPrefixGTE builds `prefix.column >= value` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixGTE(prefix string, column string, value interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s >= ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), value)
+}
+
+// WhereOrPrefixBetween builds `prefix.column BETWEEN min AND max` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixBetween(prefix string, column string, min, max interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s BETWEEN ? AND ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), min, max)
+}
+
+// WhereOrPrefixLike builds `prefix.column LIKE 'like'` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixLike(prefix string, column string, like interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s LIKE ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), like)
+}
+
+// WhereOrPrefixIn builds `prefix.column IN (in)` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixIn(prefix string, column string, in interface{}) *WhereBuilder {
+	return b.doWhereOrfType(whereHolderTypeIn, `%s.%s IN (?)`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), in)
+}
+
+// WhereOrPrefixNull builds `prefix.columns[0] IS NULL OR prefix.columns[1] IS NULL ...` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixNull(prefix string, columns ...string) *WhereBuilder {
+	builder := b
+	for _, column := range columns {
+		builder = builder.WhereOrf(`%s.%s IS NULL`, b.model.QuoteWord(prefix), b.model.QuoteWord(column))
+	}
+	return builder
+}
+
+// WhereOrPrefixNotBetween builds `prefix.column NOT BETWEEN min AND max` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixNotBetween(prefix string, column string, min, max interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s NOT BETWEEN ? AND ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), min, max)
+}
+
+// WhereOrPrefixNotLike builds `prefix.column NOT LIKE 'like'` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixNotLike(prefix string, column string, like interface{}) *WhereBuilder {
+	return b.WhereOrf(`%s.%s NOT LIKE ?`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), like)
+}
+
+// WhereOrPrefixNotIn builds `prefix.column NOT IN (in)` statement.
+func (b *WhereBuilder) WhereOrPrefixNotIn(prefix string, column string, in interface{}) *WhereBuilder {
+	return b.doWhereOrfType(whereHolderTypeIn, `%s.%s NOT IN (?)`, b.model.QuoteWord(prefix), b.model.QuoteWord(column), in)
+}
+
+// WhereOrPrefixNotNull builds `prefix.columns[0] IS NOT NULL OR prefix.columns[1] IS NOT NULL ...` statement in `OR` conditions.
+func (b *WhereBuilder) WhereOrPrefixNotNull(prefix string, columns ...string) *WhereBuilder {
+	builder := b
+	for _, column := range columns {
+		builder = builder.WhereOrf(`%s.%s IS NOT NULL`, b.model.QuoteWord(prefix), b.model.QuoteWord(column))
+	}
+	return builder
+}

Some files were not shown because too many files changed in this diff