瀏覽代碼

first commit

liuxiulin 1 年之前
當前提交
88e62abae9
共有 100 個文件被更改,包括 15528 次插入和 0 次刪除
  1. 8 0
      .idea/.gitignore
  2. 8 0
      .idea/modules.xml
  3. 9 0
      .idea/yx-4g-gateway.iml
  4. 57 0
      address/address.go
  5. 二進制
      bin/linux_amd64/yx-4g-gateway
  6. 12 0
      config/config.toml
  7. 34 0
      go.mod
  8. 92 0
      go.sum
  9. 242 0
      main.go
  10. 68 0
      modbus/crc.go
  11. 184 0
      modbus/modbus.go
  12. 60 0
      schema/schema.go
  13. 477 0
      server/client.go
  14. 87 0
      server/server.go
  15. 2 0
      vendor/github.com/BurntSushi/toml/.gitignore
  16. 1 0
      vendor/github.com/BurntSushi/toml/COMPATIBLE
  17. 21 0
      vendor/github.com/BurntSushi/toml/COPYING
  18. 211 0
      vendor/github.com/BurntSushi/toml/README.md
  19. 561 0
      vendor/github.com/BurntSushi/toml/decode.go
  20. 19 0
      vendor/github.com/BurntSushi/toml/decode_go116.go
  21. 21 0
      vendor/github.com/BurntSushi/toml/deprecated.go
  22. 13 0
      vendor/github.com/BurntSushi/toml/doc.go
  23. 698 0
      vendor/github.com/BurntSushi/toml/encode.go
  24. 229 0
      vendor/github.com/BurntSushi/toml/error.go
  25. 36 0
      vendor/github.com/BurntSushi/toml/internal/tz.go
  26. 1224 0
      vendor/github.com/BurntSushi/toml/lex.go
  27. 120 0
      vendor/github.com/BurntSushi/toml/meta.go
  28. 767 0
      vendor/github.com/BurntSushi/toml/parse.go
  29. 242 0
      vendor/github.com/BurntSushi/toml/type_fields.go
  30. 70 0
      vendor/github.com/BurntSushi/toml/type_toml.go
  31. 4 0
      vendor/github.com/clbanning/mxj/.travis.yml
  32. 55 0
      vendor/github.com/clbanning/mxj/LICENSE
  33. 199 0
      vendor/github.com/clbanning/mxj/anyxml.go
  34. 54 0
      vendor/github.com/clbanning/mxj/atomFeedString.xml
  35. 138 0
      vendor/github.com/clbanning/mxj/doc.go
  36. 54 0
      vendor/github.com/clbanning/mxj/escapechars.go
  37. 9 0
      vendor/github.com/clbanning/mxj/exists.go
  38. 287 0
      vendor/github.com/clbanning/mxj/files.go
  39. 2 0
      vendor/github.com/clbanning/mxj/files_test.badjson
  40. 9 0
      vendor/github.com/clbanning/mxj/files_test.badxml
  41. 2 0
      vendor/github.com/clbanning/mxj/files_test.json
  42. 9 0
      vendor/github.com/clbanning/mxj/files_test.xml
  43. 1 0
      vendor/github.com/clbanning/mxj/files_test_dup.json
  44. 1 0
      vendor/github.com/clbanning/mxj/files_test_dup.xml
  45. 12 0
      vendor/github.com/clbanning/mxj/files_test_indent.json
  46. 8 0
      vendor/github.com/clbanning/mxj/files_test_indent.xml
  47. 35 0
      vendor/github.com/clbanning/mxj/gob.go
  48. 323 0
      vendor/github.com/clbanning/mxj/json.go
  49. 668 0
      vendor/github.com/clbanning/mxj/keyvalues.go
  50. 112 0
      vendor/github.com/clbanning/mxj/leafnode.go
  51. 86 0
      vendor/github.com/clbanning/mxj/misc.go
  52. 128 0
      vendor/github.com/clbanning/mxj/mxj.go
  53. 184 0
      vendor/github.com/clbanning/mxj/newmap.go
  54. 192 0
      vendor/github.com/clbanning/mxj/readme.md
  55. 37 0
      vendor/github.com/clbanning/mxj/remove.go
  56. 61 0
      vendor/github.com/clbanning/mxj/rename.go
  57. 26 0
      vendor/github.com/clbanning/mxj/set.go
  58. 20 0
      vendor/github.com/clbanning/mxj/setfieldsep.go
  59. 29 0
      vendor/github.com/clbanning/mxj/songtext.xml
  60. 30 0
      vendor/github.com/clbanning/mxj/strict.go
  61. 54 0
      vendor/github.com/clbanning/mxj/struct.go
  62. 258 0
      vendor/github.com/clbanning/mxj/updatevalues.go
  63. 1324 0
      vendor/github.com/clbanning/mxj/xml.go
  64. 844 0
      vendor/github.com/clbanning/mxj/xmlseq.go
  65. 18 0
      vendor/github.com/clbanning/mxj/xmlseq2.go
  66. 36 0
      vendor/github.com/eclipse/paho.mqtt.golang/.gitignore
  67. 56 0
      vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md
  68. 15 0
      vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION
  69. 20 0
      vendor/github.com/eclipse/paho.mqtt.golang/LICENSE
  70. 177 0
      vendor/github.com/eclipse/paho.mqtt.golang/README.md
  71. 41 0
      vendor/github.com/eclipse/paho.mqtt.golang/about.html
  72. 1127 0
      vendor/github.com/eclipse/paho.mqtt.golang/client.go
  73. 32 0
      vendor/github.com/eclipse/paho.mqtt.golang/components.go
  74. 15 0
      vendor/github.com/eclipse/paho.mqtt.golang/edl-v10
  75. 70 0
      vendor/github.com/eclipse/paho.mqtt.golang/epl-v10
  76. 257 0
      vendor/github.com/eclipse/paho.mqtt.golang/filestore.go
  77. 138 0
      vendor/github.com/eclipse/paho.mqtt.golang/memstore.go
  78. 127 0
      vendor/github.com/eclipse/paho.mqtt.golang/message.go
  79. 176 0
      vendor/github.com/eclipse/paho.mqtt.golang/messageids.go
  80. 464 0
      vendor/github.com/eclipse/paho.mqtt.golang/net.go
  81. 92 0
      vendor/github.com/eclipse/paho.mqtt.golang/netconn.go
  82. 108 0
      vendor/github.com/eclipse/paho.mqtt.golang/notice.html
  83. 21 0
      vendor/github.com/eclipse/paho.mqtt.golang/oops.go
  84. 403 0
      vendor/github.com/eclipse/paho.mqtt.golang/options.go
  85. 167 0
      vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
  86. 52 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go
  87. 155 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go
  88. 34 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go
  89. 356 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go
  90. 34 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go
  91. 34 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go
  92. 42 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go
  93. 42 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go
  94. 83 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go
  95. 42 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go
  96. 42 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go
  97. 57 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go
  98. 69 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go
  99. 42 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go
  100. 56 0
      vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go

+ 8 - 0
.idea/.gitignore

@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml

+ 8 - 0
.idea/modules.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/yx-4g-gateway.iml" filepath="$PROJECT_DIR$/.idea/yx-4g-gateway.iml" />
+    </modules>
+  </component>
+</project>

+ 9 - 0
.idea/yx-4g-gateway.iml

@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+  <component name="Go" enabled="true" />
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>

+ 57 - 0
address/address.go

@@ -0,0 +1,57 @@
+package address
+
+// Modbus register addresses for the air-conditioning unit.
+const (
+	// Legacy register map (kept commented out for reference):
+	//// UnitOnOff power on/off
+	//UnitOnOff = 2
+	//// AmbientTemp ambient temperature
+	//AmbientTemp = 4
+	//// IDUAddr indoor-unit address
+	//IDUAddr = 5
+	//// SetMode operating mode
+	//SetMode = 17
+	//// SetFanSpeed fan speed
+	//SetFanSpeed = 19
+	//// SetTemp target temperature
+	//SetTemp = 20
+	//// SleepMode sleep mode
+	//SleepMode = 25
+
+	// UnitOnOff power on/off
+	UnitOnOff = 0
+
+	// SetMode operating mode
+	SetMode = 1
+
+	// SetNewFan fresh-air valve mode
+	SetNewFan = 2
+
+	// SleepMode sleep mode
+	SleepMode = 3
+
+	// SetFanSpeed fan speed
+	SetFanSpeed = 4
+
+	// SetValvePower per-valve power state
+	SetValvePower = 5
+
+	// SetFanGateThreeLevel damper 3 opening
+	SetFanGateThreeLevel = 8
+
+	// SetFanGateFourLevel damper 4 opening
+	SetFanGateFourLevel = 9
+
+	// SetFanGateFiveLevel damper 5 opening
+	SetFanGateFiveLevel = 10
+
+	// SetTemp target temperature
+	SetTemp = 14
+
+	// AmbientTemp ambient temperature
+	AmbientTemp = 46
+
+	// OutdoorAmbientTemp outdoor ambient temperature
+	OutdoorAmbientTemp = 49
+	// AmbientTempAtAirReturn return-air ambient temperature
+	AmbientTempAtAirReturn = 50
+	// AmbientTempOfLightBoard light-board ambient temperature
+	AmbientTempOfLightBoard = 51
+)

二進制
bin/linux_amd64/yx-4g-gateway


+ 12 - 0
config/config.toml

@@ -0,0 +1,12 @@
+[Server]
+    Addr = "0.0.0.0"
+    Port = 9000
+    RunMode = "debug"
+[Sparrow]
+    Server = "http://114.115.211.247:8082"
+    ProductKey = "7dca15d460ae28c1a9bd5fd1342cc21710ef31a197f864696ec48a92d2e49b62c0ca2ce8008efb32a9000fae84b15af7"
+    DeviceCode = "YX-4G-Gateway"
+    Debug = true
+
+
+

+ 34 - 0
go.mod

@@ -0,0 +1,34 @@
+module yx-4g-gateway
+
+require (
+	github.com/gogf/gf v1.16.9
+	sparrow-sdk v1.0.0
+)
+
+require (
+	github.com/BurntSushi/toml v1.1.0 // indirect
+	github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28 // indirect
+	github.com/eclipse/paho.mqtt.golang v1.3.5 // indirect
+	github.com/fatih/color v1.13.0 // indirect
+	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-sql-driver/mysql v1.6.0 // indirect
+	github.com/gomodule/redigo v1.8.5 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/grokify/html-strip-tags-go v0.0.1 // indirect
+	github.com/mattn/go-colorable v0.1.9 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/mattn/go-runewidth v0.0.9 // indirect
+	github.com/olekukonko/tablewriter v0.0.5 // indirect
+	go.opentelemetry.io/otel v1.7.0 // indirect
+	go.opentelemetry.io/otel/trace v1.7.0 // indirect
+	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
+	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
+	golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+)
+
+replace sparrow-sdk v1.0.0 => gogs.yehaoji.cn/yongxu/sparrow-sdk.git v1.1.6
+
+go 1.19

+ 92 - 0
go.sum

@@ -0,0 +1,92 @@
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
+github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28 h1:LdXxtjzvZYhhUaonAaAKArG3pyC67kGL3YY+6hGG8G4=
+github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y=
+github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
+github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/gogf/gf v1.16.6/go.mod h1:4LoHfEBl2jbVmZpVx+qk2La3zWr1V315FtF2PVZuyQ8=
+github.com/gogf/gf v1.16.9 h1:Q803UmmRo59+Ws08sMVFOcd8oNpkSWL9vS33hlo/Cyk=
+github.com/gogf/gf v1.16.9/go.mod h1:8Q/kw05nlVRp+4vv7XASBsMe9L1tsVKiGoeP2AHnlkk=
+github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc=
+github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grokify/html-strip-tags-go v0.0.0-20190921062105-daaa06bf1aaf/go.mod h1:2Su6romC5/1VXOQMaWL2yb618ARB8iVo6/DR99A6d78=
+github.com/grokify/html-strip-tags-go v0.0.1 h1:0fThFwLbW7P/kOiTBs03FsJSV9RM2M/Q/MOnCQxKMo0=
+github.com/grokify/html-strip-tags-go v0.0.1/go.mod h1:2Su6romC5/1VXOQMaWL2yb618ARB8iVo6/DR99A6d78=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
+go.opentelemetry.io/otel v1.0.0/go.mod h1:AjRVh9A5/5DE7S+mZtTR6t8vpKKryam+0lREnfmS4cg=
+go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
+go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
+go.opentelemetry.io/otel/oteltest v1.0.0-RC2/go.mod h1:kiQ4tw5tAL4JLTbcOYwK1CWI1HkT5aiLzHovgOVnz/A=
+go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
+go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs=
+go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
+go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
+gogs.yehaoji.cn/yongxu/sparrow-sdk.git v1.1.6 h1:eWYj9ZjaKC2p8mxBgCIZgrpNZXQDMN9R+J1d2cATJdI=
+gogs.yehaoji.cn/yongxu/sparrow-sdk.git v1.1.6/go.mod h1:hWw7D5hrW8f8cOKKdhtlt8HQbdfD2o6PllWMhs0BdQs=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2 h1:GLw7MR8AfAG2GmGcmVgObFOHXYypgGjnGno25RDwn3Y=
+golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

+ 242 - 0
main.go

@@ -0,0 +1,242 @@
+package main
+
+import (
+	"context"
+	"github.com/gogf/gf/encoding/gjson"
+	"github.com/gogf/gf/frame/g"
+	"github.com/gogf/gf/os/glog"
+	"github.com/gogf/gf/os/gproc"
+	"os"
+	"sparrow-sdk/config"
+	"sparrow-sdk/protocal"
+	gatewayV2 "sparrow-sdk/v2"
+	"yx-4g-gateway/schema"
+	"yx-4g-gateway/server"
+)
+
+// main wires the Sparrow cloud gateway to the local device-facing TCP
+// server: it registers and authenticates with the Sparrow platform over
+// MQTT, starts the TCP server, and registers cloud-command handlers that
+// forward each command to the matching sub-device client.
+func main() {
+	ctx := context.Background()
+	// Log level is taken from config (Server.RunMode, e.g. "debug").
+	err := glog.SetLevelStr(g.Cfg().GetString("Server.RunMode"))
+	if err != nil {
+		panic(err)
+	}
+	gw := gatewayV2.NewGateway(&config.Config{
+		SparrowServer: g.Cfg().GetString("Sparrow.Server"),
+		ProductKey:    g.Cfg().GetString("Sparrow.ProductKey"),
+		Protocol:      "mqtt",
+		DeviceCode:    g.Cfg().GetString("Sparrow.DeviceCode"),
+		Version:       "1.0.0",
+		Debug:         g.Cfg().GetBool("Sparrow.Debug"),
+	})
+	if _, err = gw.Register(); err != nil {
+		panic(err)
+	}
+	if _, err = gw.Authentication(); err != nil {
+		panic(err)
+	} // Generic report-command callback (no-op here).
+	gw.SetReportCommandCallback(func(deviceCode, subId string) error {
+		return nil
+	})
+	go gw.Connect()
+
+	// Device-facing TCP server; sub-device clients connect here.
+	srv := server.NewServer(
+		ctx,
+		g.Cfg().GetString("Server.Addr"),
+		g.Cfg().GetInt("Server.Port"),
+		gw,
+	)
+	go func() {
+		if err := srv.Start(); err != nil {
+			panic(err)
+		}
+	}()
+
+	// NOTE(review): this goroutine only blocks until closeReportChan is
+	// closed, and nothing in this function ever closes or sends on it —
+	// it looks like a leftover stub that could be removed.
+	closeReportChan := make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-closeReportChan:
+				return
+			}
+		}
+	}()
+
+	// NOTE(review): every callback below assigns to the outer `err`
+	// variable from the gateway's goroutine; shadowing it with := inside
+	// each callback would avoid the shared write.
+
+	// setPower: power the sub-device on (power == 1) or off (any other value).
+	if err = gw.RegisterCommand("setPower", func(msg protocal.CloudSend) error {
+		var params schema.SetPowerReq
+		j := gjson.New(msg.Data.Params)
+		err = j.Struct(&params)
+		if err != nil {
+			glog.Errorf("错误的指令参数%s", err.Error())
+			return err
+		}
+		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		client := srv.GetClient(msg.SubDeviceId)
+		if client != nil {
+			if params.Power == 1 {
+				if err = client.PowerOn(); err != nil {
+					glog.Errorf("执行命令出错:%s", err.Error())
+					return err
+				}
+			} else {
+				if err = client.PowerOff(); err != nil {
+					glog.Errorf("执行命令出错:%s", err.Error())
+					return err
+				}
+			}
+
+		}
+		return nil
+	}); err != nil {
+		panic(err)
+	}
+
+	// setMode: set the operating mode.
+	if err = gw.RegisterCommand("setMode", func(msg protocal.CloudSend) error {
+		var params schema.SetModeReq
+		j := gjson.New(msg.Data.Params)
+		err = j.Struct(&params)
+		if err != nil {
+			glog.Errorf("错误的指令参数%s", err.Error())
+			return err
+		}
+		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		client := srv.GetClient(msg.SubDeviceId)
+		if client != nil {
+			if err = client.SetMode(params.Mode); err != nil {
+				glog.Errorf("执行命令出错:%s", err.Error())
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		panic(err)
+	}
+
+	// setTemp: set the target temperature.
+	if err = gw.RegisterCommand("setTemp", func(msg protocal.CloudSend) error {
+		var params schema.SetTempReq
+		j := gjson.New(msg.Data.Params)
+		err = j.Struct(&params)
+		if err != nil {
+			glog.Errorf("错误的指令参数%s", err.Error())
+			return err
+		}
+
+		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		client := srv.GetClient(msg.SubDeviceId)
+		if client != nil {
+			if err = client.SetTemp(params.Temp); err != nil {
+				glog.Errorf("执行命令出错:%s", err.Error())
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		panic(err)
+	}
+
+	// setFanLevel: set the fan speed.
+	if err = gw.RegisterCommand("setFanLevel", func(msg protocal.CloudSend) error {
+		var params schema.FanSpeedReq
+		j := gjson.New(msg.Data.Params)
+		err = j.Struct(&params)
+		if err != nil {
+			glog.Errorf("错误的指令参数%s", err.Error())
+			return err
+		}
+		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		client := srv.GetClient(msg.SubDeviceId)
+		if client != nil {
+			if err = client.SetFanSpeed(params.Speed); err != nil {
+				glog.Errorf("执行命令出错:%s", err.Error())
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		panic(err)
+	}
+
+	// setNewFan: set the fresh-air valve mode.
+	if err = gw.RegisterCommand("setNewFan", func(msg protocal.CloudSend) error {
+		var params schema.SetNewFanReq
+		j := gjson.New(msg.Data.Params)
+		err = j.Struct(&params)
+		if err != nil {
+			glog.Errorf("错误的指令参数%s", err.Error())
+			return err
+		}
+
+		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		client := srv.GetClient(msg.SubDeviceId)
+		if client != nil {
+			if err = client.SetNewFan(params.Mode); err != nil {
+				glog.Errorf("执行命令出错:%s", err.Error())
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		panic(err)
+	}
+
+	// setFanValue: set the opening of damper "03", "04" or "05".
+	if err = gw.RegisterCommand("setFanValue", func(msg protocal.CloudSend) error {
+		var params schema.FanValveReq
+		j := gjson.New(msg.Data.Params)
+		err = j.Struct(&params)
+		if err != nil {
+			glog.Errorf("错误的指令参数%s", err.Error())
+			return err
+		}
+
+		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		client := srv.GetClient(msg.SubDeviceId)
+		if client != nil {
+			if params.FanValve == "03" {
+				if err = client.SetFanGateThreeLevel(params.Value); err != nil {
+					glog.Errorf("执行命令出错:%s", err.Error())
+					return err
+				}
+			}
+			if params.FanValve == "04" {
+				if err = client.SetFanGateFourLevel(params.Value); err != nil {
+					glog.Errorf("执行命令出错:%s", err.Error())
+					return err
+				}
+			}
+			if params.FanValve == "05" {
+				if err = client.SetFanGateFiveLevel(params.Value); err != nil {
+					glog.Errorf("执行命令出错:%s", err.Error())
+					return err
+				}
+			}
+
+		}
+		return nil
+	}); err != nil {
+		panic(err)
+	}
+
+	// setFan: set the power state of all five valves at once.
+	if err = gw.RegisterCommand("setFan", func(msg protocal.CloudSend) error {
+		var params schema.SetValvePowerReq
+		j := gjson.New(msg.Data.Params)
+		err = j.Struct(&params)
+		if err != nil {
+			glog.Errorf("错误的指令参数%s", err.Error())
+			return err
+		}
+		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		client := srv.GetClient(msg.SubDeviceId)
+		if client != nil {
+			if err = client.SetValvePower(&params); err != nil {
+				glog.Errorf("执行命令出错:%s", err.Error())
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		panic(err)
+	}
+
+	// Graceful shutdown on OS signal: close the gateway, then the server.
+	gproc.AddSigHandlerShutdown(func(sig os.Signal) {
+		gw.Close()
+		srv.Stop()
+	})
+	gproc.Listen()
+}

+ 68 - 0
modbus/crc.go

@@ -0,0 +1,68 @@
+package modbus
+
+// Table of CRC values for high-order byte
+var crcHighBytes = []byte{
+	0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40,
+	0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41,
+	0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41,
+	0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40,
+	0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41,
+	0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40,
+	0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40,
+	0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41,
+	0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41,
+	0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40,
+	0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40,
+	0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41,
+	0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40,
+	0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41,
+	0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41,
+	0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40,
+}
+
+// Table of CRC values for low-order byte
+var crcLowBytes = []byte{
+	0x00, 0xC0, 0xC1, 0x01, 0xC3, 0x03, 0x02, 0xC2, 0xC6, 0x06, 0x07, 0xC7, 0x05, 0xC5, 0xC4, 0x04,
+	0xCC, 0x0C, 0x0D, 0xCD, 0x0F, 0xCF, 0xCE, 0x0E, 0x0A, 0xCA, 0xCB, 0x0B, 0xC9, 0x09, 0x08, 0xC8,
+	0xD8, 0x18, 0x19, 0xD9, 0x1B, 0xDB, 0xDA, 0x1A, 0x1E, 0xDE, 0xDF, 0x1F, 0xDD, 0x1D, 0x1C, 0xDC,
+	0x14, 0xD4, 0xD5, 0x15, 0xD7, 0x17, 0x16, 0xD6, 0xD2, 0x12, 0x13, 0xD3, 0x11, 0xD1, 0xD0, 0x10,
+	0xF0, 0x30, 0x31, 0xF1, 0x33, 0xF3, 0xF2, 0x32, 0x36, 0xF6, 0xF7, 0x37, 0xF5, 0x35, 0x34, 0xF4,
+	0x3C, 0xFC, 0xFD, 0x3D, 0xFF, 0x3F, 0x3E, 0xFE, 0xFA, 0x3A, 0x3B, 0xFB, 0x39, 0xF9, 0xF8, 0x38,
+	0x28, 0xE8, 0xE9, 0x29, 0xEB, 0x2B, 0x2A, 0xEA, 0xEE, 0x2E, 0x2F, 0xEF, 0x2D, 0xED, 0xEC, 0x2C,
+	0xE4, 0x24, 0x25, 0xE5, 0x27, 0xE7, 0xE6, 0x26, 0x22, 0xE2, 0xE3, 0x23, 0xE1, 0x21, 0x20, 0xE0,
+	0xA0, 0x60, 0x61, 0xA1, 0x63, 0xA3, 0xA2, 0x62, 0x66, 0xA6, 0xA7, 0x67, 0xA5, 0x65, 0x64, 0xA4,
+	0x6C, 0xAC, 0xAD, 0x6D, 0xAF, 0x6F, 0x6E, 0xAE, 0xAA, 0x6A, 0x6B, 0xAB, 0x69, 0xA9, 0xA8, 0x68,
+	0x78, 0xB8, 0xB9, 0x79, 0xBB, 0x7B, 0x7A, 0xBA, 0xBE, 0x7E, 0x7F, 0xBF, 0x7D, 0xBD, 0xBC, 0x7C,
+	0xB4, 0x74, 0x75, 0xB5, 0x77, 0xB7, 0xB6, 0x76, 0x72, 0xB2, 0xB3, 0x73, 0xB1, 0x71, 0x70, 0xB0,
+	0x50, 0x90, 0x91, 0x51, 0x93, 0x53, 0x52, 0x92, 0x96, 0x56, 0x57, 0x97, 0x55, 0x95, 0x94, 0x54,
+	0x9C, 0x5C, 0x5D, 0x9D, 0x5F, 0x9F, 0x9E, 0x5E, 0x5A, 0x9A, 0x9B, 0x5B, 0x99, 0x59, 0x58, 0x98,
+	0x88, 0x48, 0x49, 0x89, 0x4B, 0x8B, 0x8A, 0x4A, 0x4E, 0x8E, 0x8F, 0x4F, 0x8D, 0x4D, 0x4C, 0x8C,
+	0x44, 0x84, 0x85, 0x45, 0x87, 0x47, 0x46, 0x86, 0x82, 0x42, 0x43, 0x83, 0x41, 0x81, 0x80, 0x40,
+}
+
+// Crc holds the running state of a table-driven CRC-16 computation
+// (Cyclical Redundancy Checking) used for Modbus RTU frames.
+// The zero value is unusable; call Reset before the first PushBytes.
+type Crc struct {
+	high byte
+	low  byte
+}
+
+// Reset re-initializes both halves to 0xFF (the CRC-16 seed) and
+// returns the receiver so calls can be chained.
+func (crc *Crc) Reset() *Crc {
+	crc.high = 0xFF
+	crc.low = 0xFF
+	return crc
+}
+
+// PushBytes folds bs into the running CRC one byte at a time via the
+// two lookup tables and returns the receiver for chaining.
+func (crc *Crc) PushBytes(bs []byte) *Crc {
+	var idx, b byte
+
+	for _, b = range bs {
+		idx = crc.low ^ b
+		crc.low = crc.high ^ crcHighBytes[idx]
+		crc.high = crcLowBytes[idx]
+	}
+	return crc
+}
+
+// Value returns the accumulated checksum as high<<8 | low.
+func (crc *Crc) Value() uint16 {
+	return uint16(crc.high)<<8 | uint16(crc.low)
+}

+ 184 - 0
modbus/modbus.go

@@ -0,0 +1,184 @@
+package modbus
+
+import (
+	"encoding/binary"
+	"fmt"
+	"time"
+)
+
+// Modbus function codes (per the Modbus application protocol).
+const (
+	// Bit access
+	FuncCodeReadDiscreteInputs = 2
+	FuncCodeReadCoils          = 1
+	FuncCodeWriteSingleCoil    = 5
+	FuncCodeWriteMultipleCoils = 15
+
+	// 16-bit access
+	FuncCodeReadInputRegisters         = 4
+	FuncCodeReadHoldingRegisters       = 3
+	FuncCodeWriteSingleRegister        = 6
+	FuncCodeWriteMultipleRegisters     = 16
+	FuncCodeReadWriteMultipleRegisters = 23
+	FuncCodeMaskWriteRegister          = 22
+	FuncCodeReadFIFOQueue              = 24
+)
+
+// Modbus exception codes returned by a server in error responses.
+const (
+	ExceptionCodeIllegalFunction                    = 1
+	ExceptionCodeIllegalDataAddress                 = 2
+	ExceptionCodeIllegalDataValue                   = 3
+	ExceptionCodeServerDeviceFailure                = 4
+	ExceptionCodeAcknowledge                        = 5
+	ExceptionCodeServerDeviceBusy                   = 6
+	ExceptionCodeMemoryParityError                  = 8
+	ExceptionCodeGatewayPathUnavailable             = 10
+	ExceptionCodeGatewayTargetDeviceFailedToRespond = 11
+)
+
+// Framing limits and timeouts for the RTU and TCP transports.
+// NOTE(review): only the RTU sizes are used by the code visible in this
+// file; the TCP constants appear unused here.
+const (
+	rtuMinSize = 4
+	rtuMaxSize = 256
+
+	rtuExceptionSize = 5
+
+	tcpProtocolIdentifier uint16 = 0x0000
+
+	// Modbus Application Protocol
+	tcpHeaderSize = 7
+	tcpMaxLength  = 260
+	// Default TCP timeout is not set
+	tcpTimeout     = 10 * time.Second
+	tcpIdleTimeout = 60 * time.Second
+)
+
+// ModbusError implements error interface.
+// It pairs the function code of the failed request with the exception
+// code returned by the server.
+type ModbusError struct {
+	FunctionCode  byte
+	ExceptionCode byte
+}
+
+// Error converts known modbus exception code to error message.
+// Unrecognized codes render as "unknown".
+func (e *ModbusError) Error() string {
+	var name string
+	switch e.ExceptionCode {
+	case ExceptionCodeIllegalFunction:
+		name = "illegal function"
+	case ExceptionCodeIllegalDataAddress:
+		name = "illegal data address"
+	case ExceptionCodeIllegalDataValue:
+		name = "illegal data value"
+	case ExceptionCodeServerDeviceFailure:
+		name = "server device failure"
+	case ExceptionCodeAcknowledge:
+		name = "acknowledge"
+	case ExceptionCodeServerDeviceBusy:
+		name = "server device busy"
+	case ExceptionCodeMemoryParityError:
+		name = "memory parity error"
+	case ExceptionCodeGatewayPathUnavailable:
+		name = "gateway path unavailable"
+	case ExceptionCodeGatewayTargetDeviceFailedToRespond:
+		name = "gateway target device failed to respond"
+	default:
+		name = "unknown"
+	}
+	return fmt.Sprintf("modbus: exception '%v' (%s), function '%v'", e.ExceptionCode, name, e.FunctionCode)
+}
+
+// ProtocolDataUnit (PDU) is independent of underlying communication layers.
+// It carries the function code and its payload before framing.
+type ProtocolDataUnit struct {
+	FunctionCode byte
+	Data         []byte
+}
+
+// Transporter specifies the transport layer.
+// Send transmits a framed request ADU and returns the response ADU.
+type Transporter interface {
+	Send(aduRequest []byte) (aduResponse []byte, err error)
+}
+
+// dataBlock creates a sequence of uint16 data.
+// Each value is written big-endian, yielding 2*len(value) bytes.
+func dataBlock(value ...uint16) []byte {
+	data := make([]byte, 2*len(value))
+	for i, v := range value {
+		binary.BigEndian.PutUint16(data[i*2:], v)
+	}
+	return data
+}
+
+// dataBlockSuffix creates a sequence of uint16 data and append the suffix plus its length.
+// Layout: big-endian values, then one byte holding len(suffix), then the
+// suffix itself — the write-multiple-registers payload shape.
+func dataBlockSuffix(suffix []byte, value ...uint16) []byte {
+	length := 2 * len(value)
+	data := make([]byte, length+1+len(suffix))
+	for i, v := range value {
+		binary.BigEndian.PutUint16(data[i*2:], v)
+	}
+	data[length] = uint8(len(suffix))
+	copy(data[length+1:], suffix)
+	return data
+}
+
+// ReadHoldingRegisters builds an RTU request frame (ADU) for function
+// code 0x03 covering `quantity` registers starting at `address`.
+// NOTE(review): despite the `results` name, this does NOT perform a
+// read — it only returns the encoded request bytes for the caller to
+// transmit. Quantity must be in [1, 125].
+func ReadHoldingRegisters(address, quantity uint16) (results []byte, err error) {
+	if quantity < 1 || quantity > 125 {
+		err = fmt.Errorf("modbus: quantity '%v' must be between '%v' and '%v',", quantity, 1, 125)
+		return
+	}
+	request := ProtocolDataUnit{
+		FunctionCode: FuncCodeReadHoldingRegisters,
+		Data:         dataBlock(address, quantity),
+	}
+	aduRequest, err := Encode(&request)
+	if err != nil {
+		return
+	}
+	return aduRequest, nil
+}
+
+// WriteMultipleRegisters builds an RTU request frame (ADU) for function
+// code 0x10. Like ReadHoldingRegisters, it only encodes the request —
+// nothing is sent here. Quantity must be in [1, 123] and `value` holds
+// the raw register bytes (2 per register).
+// Request:
+//
+//	Function code         : 1 byte (0x10)
+//	Starting address      : 2 bytes
+//	Quantity of outputs   : 2 bytes
+//	Byte count            : 1 byte
+//	Registers value       : N* bytes
+//
+// Response:
+//
+//	Function code         : 1 byte (0x10)
+//	Starting address      : 2 bytes
+//	Quantity of registers : 2 bytes
+func WriteMultipleRegisters(address, quantity uint16, value []byte) (results []byte, err error) {
+	if quantity < 1 || quantity > 123 {
+		err = fmt.Errorf("modbus: quantity '%v' must be between '%v' and '%v',", quantity, 1, 123)
+		return
+	}
+	request := ProtocolDataUnit{
+		FunctionCode: FuncCodeWriteMultipleRegisters,
+		Data:         dataBlockSuffix(value, address, quantity),
+	}
+	aduRequest, err := Encode(&request)
+	if err != nil {
+		return
+	}
+	return aduRequest, nil
+}
+
+// Encode wraps a PDU into a Modbus RTU ADU:
+// [slave address | function code | data | CRC (2 bytes)].
+// NOTE(review): the slave address is hard-coded to 0x02 — consider
+// making it a parameter if more than one slave must ever be addressed.
+func Encode(pdu *ProtocolDataUnit) (adu []byte, err error) {
+	// addr(1) + function(1) + data + CRC(2)
+	length := len(pdu.Data) + 4
+	if length > rtuMaxSize {
+		err = fmt.Errorf("modbus: length of data '%v' must not be bigger than '%v'", length, rtuMaxSize)
+		return
+	}
+	adu = make([]byte, length)
+	adu[0] = 0x02
+	adu[1] = pdu.FunctionCode
+	copy(adu[2:], pdu.Data)
+	// Append Crc
+	var crc Crc
+	crc.Reset().PushBytes(adu[0 : length-2])
+	checksum := crc.Value()
+
+	// Low byte of the checksum goes first (adu[length-2]), high byte last.
+	adu[length-1] = byte(checksum >> 8)
+	adu[length-2] = byte(checksum)
+	return
+}

+ 60 - 0
schema/schema.go

@@ -0,0 +1,60 @@
// Package schema declares the JSON request/response payloads exchanged with
// the sparrow gateway for the 4G air-handling devices.
package schema

// SetPowerReq switches the unit on or off.
type SetPowerReq struct {
	// NOTE(review): the original comment said "1: on, 0: off" but the
	// validation rule accepts 1 or 2 — confirm which value means "off".
	Power int `json:"power" v:"required|in:1,2"` // power command
}

// SetModeReq selects the running mode.
type SetModeReq struct {
	Mode uint16 `json:"mode" v:"required|between:0,4"` // 0: cool, 1: heat, 2: dehumidify, 3: fan, 4: humidify
}

// SetNewFanReq selects the fresh-air valve mode.
type SetNewFanReq struct {
	Mode uint16 `json:"mode" v:"required|between:1,2"` // 1: ventilation mode, 2: fresh-air mode
}

// SetSleepModeReq selects the sleep mode.
type SetSleepModeReq struct {
	Mode uint16 `json:"mode" v:"required|between:0,3"` // 0: none, 1-3: sleep levels 1-3
}

// FanSpeedReq sets the fan speed.
type FanSpeedReq struct {
	// NOTE(review): the json tag is "temp", apparently copy-pasted from
	// SetTempReq. Renaming it would change the wire format for existing
	// callers, so it is left untouched — confirm before fixing.
	Speed uint16 `json:"temp" v:"required|between:0,6"` // 0: auto, 1-5: fixed levels, 6: turbo
}

// FanValveReq sets the opening of one named fan valve.
type FanValveReq struct {
	FanValve string `json:"fan_valve" v:"required"`          // valve identifier
	Value    uint16 `json:"value" v:"required|between:0,90"` // opening (0-90)
}

// SetTempReq sets the target temperature.
type SetTempReq struct {
	Temp uint16 `json:"temp" v:"required|between:16,30"` // target temperature (16-30)
}

// SetValvePowerReq selects which valves to switch on; a field value of 1
// selects the corresponding valve.
type SetValvePowerReq struct {
	Valve1 uint16 `json:"valve1"`
	Valve2 uint16 `json:"valve2"`
	Valve3 uint16 `json:"valve3"`
	Valve4 uint16 `json:"valve4"`
	Valve5 uint16 `json:"valve5"`
}

// StatusResponse is the decoded device status reported to the gateway.
type StatusResponse struct {
	Power             int    `json:"power"`                // power on/off state
	Mode              uint16 `json:"mode"`                 // configured mode
	NewFan            uint16 `json:"new_fan"`              // fresh-air switch
	FanSpeed          uint16 `json:"fan_speed"`            // configured fan speed
	SetTemp           uint16 `json:"set_temp"`             // target temperature
	Temperature       uint16 `json:"temperature"`          // measured temperature
	Humidity          uint16 `json:"humidity"`             // measured humidity
	AirQuality        uint16 `json:"air_quality"`          // PM2.5 reading
	CO2               uint16 `json:"co2"`                  // CO2 reading
	FanGateOne        int    `json:"fan_gate_one"`         // valve 1 state
	FanGateTwo        int    `json:"fan_gate_two"`         // valve 2 state
	FanGateThree      int    `json:"fan_gate_three"`       // valve 3 state
	FanGateFour       int    `json:"fan_gate_four"`        // valve 4 state
	FanGateFive       int    `json:"fan_gate_five"`        // valve 5 state
	FanGateOneLevel   uint16 `json:"fan_gate_one_level"`   // valve 1 opening
	FanGateTwoLevel   uint16 `json:"fan_gate_two_level"`   // valve 2 opening
	FanGateThreeLevel uint16 `json:"fan_gate_three_level"` // valve 3 opening
	FanGateFourLevel  uint16 `json:"fan_gate_four_level"`  // valve 4 opening
	FanGateFiveLevel  uint16 `json:"fan_gate_five_level"`  // valve 5 opening
}

+ 477 - 0
server/client.go

@@ -0,0 +1,477 @@
+package server
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"github.com/gogf/gf/encoding/gbinary"
+	"github.com/gogf/gf/net/gtcp"
+	"github.com/gogf/gf/os/glog"
+	"io"
+	"net"
+	"strings"
+	"syscall"
+	"time"
+	"yx-4g-gateway/address"
+	"yx-4g-gateway/modbus"
+	"yx-4g-gateway/schema"
+)
+
// Bit positions for the valve on/off mask written to the valve power register.
const (
	FAN_1_OPEN = 1 << 0
	FAN_2_OPEN = 1 << 1
	FAN_3_OPEN = 1 << 2
	FAN_4_OPEN = 1 << 3
	FAN_5_OPEN = 1 << 4
)

// Client represents one connected 4G gateway device.
type Client struct {
	Id           string        // device id decoded from the registration packet
	srv          *Server       // owning server, used for status reporting
	conn         *gtcp.Conn    // underlying TCP connection
	sendChan     chan []byte   // frames queued for transmission
	closeChan    chan struct{} // closed when the connection is torn down
	closeHandler func(id string, c *Client) // invoked after the connection closes
	regHandler   func(id string, c *Client) // invoked after registration succeeds
	isReg        bool   // true once the registration packet has been received
	receiveData  []byte // accumulation buffer for partially received frames
}
+
+func NewClient(s *Server, conn *gtcp.Conn) *Client {
+	return &Client{
+		srv:         s,
+		conn:        conn,
+		sendChan:    make(chan []byte),
+		closeChan:   make(chan struct{}),
+		receiveData: make([]byte, 0),
+	}
+}
+
// SetId records the device id decoded from the registration packet.
func (c *Client) SetId(id string) {
	c.Id = id
}
+
// SendLoop is the single writer goroutine for this client: it takes frames
// from sendChan, transmits them, then waits for and processes the device's
// reply before accepting the next frame.
//
// NOTE(review): the outer select has no closeChan case, so this goroutine
// only exits via the inner loop's closeChan case while waiting for a reply —
// confirm it cannot leak while blocked on sendChan.
func (c *Client) SendLoop() {
	for {
		select {
		case buf := <-c.sendChan:
			err := c.send(buf)
			if err != nil {
				glog.Errorf("指令发送失败:%s", err.Error())
				continue
			}
			// Reply-wait loop.
			// NOTE(review): conn.Recv(-1) in the default branch blocks with
			// no deadline, so the 5s timeout case may never be selected; the
			// `break` in the timer case only exits the select, and the
			// unconditional break below ends the loop after a single pass.
			// The timer is also never Stop()ed. Confirm the intended
			// timeout/retry semantics before relying on them.
			timer := time.NewTimer(5 * time.Second)
			for {
				select {
				case <-c.closeChan:
					return
				case <-timer.C:
					glog.Error("接收指令超时")
					break
				default:
					receiveBuf, err := c.conn.Recv(-1)
					if err != nil {
						c.readError(err)
						break
					}
					// The first packet from a device is its plain-text id
					// (registration); everything after that is Modbus data.
					if !c.isReg {
						id := gbinary.DecodeToString(receiveBuf)
						glog.Debugf("收到注册包!id:%s", id)
						c.SetId(id)
						c.isReg = true
						if c.regHandler != nil {
							c.regHandler(c.Id, c)
						}
						continue
					}
					glog.Debugf("收到数据:%2X", receiveBuf)
					// Accumulate bytes and try to carve a complete frame.
					c.receiveData = append(c.receiveData, receiveBuf...)
					//for v := range receiveBuf {
					//	c.receiveQueue.Push(v)
					//	glog.Debugf("队列数据:%2X", c.receiveQueue)
					//}
					//glog.Debugf("receiveQueue长度:%d", c.receiveQueue.Len())
					if err := c.readQueue(); err != nil {
						glog.Debugf("处理数据失败:%s", err.Error())
						break
					}
				}
				break
			}
		}
	}
}
+
+func (c *Client) readQueue() error {
+	if bytes.Equal(c.receiveData[:3], []byte{0x02, 0x03, 0xa8}) {
+		if len(c.receiveData) >= int(c.receiveData[2])+5 {
+			c.receiveData = c.receiveData[:int(c.receiveData[2]+5)]
+			if err := c.decodeAndReport(c.receiveData); err != nil {
+				return err
+			}
+			c.receiveData = c.receiveData[:0]
+		}
+		return nil
+	} else {
+		c.receiveData = c.receiveData[:0]
+		return nil
+	}
+	//for {
+	//	if c.receiveQueue.Len() > 3 {
+	//		glog.Debugf("11111111111111111")
+	//		a := FormatInterfaceToByte(c.receiveQueue.Pop())
+	//		b := FormatInterfaceToByte(c.receiveQueue.Pop())
+	//		d := FormatInterfaceToByte(c.receiveQueue.Pop())
+	//		if a == 0x1 && b == 0x03 && d == 0xa8 && len(c.dataByteArray) == 0 {
+	//			packageBytes = append(packageBytes, a)
+	//			packageBytes = append(packageBytes, b)
+	//			packageBytes = append(packageBytes, d)
+	//
+	//		} else {
+	//			return
+	//		}
+	//	} else {
+	//		return
+	//	}
+	//	for {
+	//		if count == 3 {
+	//			glog.Debugf("2222222222222222222")
+	//			size := int(packageBytes[2]) + 2
+	//			for {
+	//				c.dataByteArray = append(c.dataByteArray, packageBytes...)
+	//				for i := 0; i < size; i++ {
+	//					if c.receiveQueue.Len() > 0 {
+	//						d := c.receiveQueue.Pop()
+	//						c.dataByteArray = append(c.dataByteArray, FormatInterfaceToByte(d))
+	//						if i == size-1 {
+	//							err := c.decodeAndReport(c.dataByteArray)
+	//							glog.Debugf("处理数据失败:%s", err.Error())
+	//							c.dataByteArray = c.dataByteArray[:0]
+	//							return
+	//						}
+	//					}
+	//				}
+	//			}
+	//			count = 0
+	//		}
+	//	}
+	//}
+}
+
// decodeAndReport validates the CRC of a complete status frame, decodes the
// register payload into a StatusResponse and reports it to the gateway.
func (c *Client) decodeAndReport(buf []byte) error {
	length := len(buf)
	// Verify the trailing CRC16 (stored low byte first) over the rest of the
	// frame.
	var crc modbus.Crc
	crc.Reset().PushBytes(buf[0 : length-2])
	checksum := uint16(buf[length-1])<<8 | uint16(buf[length-2])
	if checksum != crc.Value() {
		return errors.New(fmt.Sprintf("modbus: response crc '%v' does not match expected '%v'", checksum, crc.Value()))
	}

	// Strip the addr/func/count header and the CRC, then split the payload
	// into 2-byte register values keyed by register index.
	result := buf[3 : length-2]
	data := make(map[int][]byte)
	var index, dIndex int
	var newBuf []byte
	for _, b := range result {
		index += 1
		newBuf = append(newBuf, b)
		if index%2 == 0 {
			data[dIndex] = newBuf
			dIndex += 1
			index = 0
			newBuf = make([]byte, 0)
		}
	}

	ret := new(schema.StatusResponse)
	// Settings registers (indices from the address package).
	ret.Power = gbinary.BeDecodeToInt(data[address.UnitOnOff])
	ret.Mode = gbinary.BeDecodeToUint16(data[address.SetMode])
	ret.FanSpeed = gbinary.BeDecodeToUint16(data[address.SetFanSpeed])
	ret.SetTemp = gbinary.BeDecodeToUint16(data[address.SetTemp])
	ret.NewFan = gbinary.BeDecodeToUint16(data[address.SetNewFan])

	//ret.FanSpeed = gbinary.BeDecodeToUint16(data[8:9])
	//ret.SetTemp = gbinary.BeDecodeToUint16(data[28:29])
	// Sensor registers. NOTE(review): these indices are hard-coded — confirm
	// against the device register map.
	ret.AirQuality = gbinary.BeDecodeToUint16(data[35])
	ret.CO2 = gbinary.BeDecodeToUint16(data[36])
	ret.Temperature = gbinary.BeDecodeToUint16(data[27])
	ret.Humidity = gbinary.BeDecodeToUint16(data[28])
	//
	// Valve on/off bits from register 5.
	// NOTE(review): these masks test bits 7..3 (0x80..0x08) while the
	// FAN_N_OPEN constants use bits 0..4 — confirm which end the device uses.
	fanStatus := gbinary.BeDecodeToUint16(data[5])
	ret.FanGateOne = int(fanStatus) & 0x80
	ret.FanGateTwo = int(fanStatus) & 0x40
	ret.FanGateThree = int(fanStatus) & 0x20
	ret.FanGateFour = int(fanStatus) & 0x10
	ret.FanGateFive = int(fanStatus) & 0x08
	//
	// Valve openings from registers 7..10.
	// NOTE(review): FanGateTwoLevel is never populated; valve 2 is assumed to
	// track valve 1 below — confirm this is intentional.
	ret.FanGateOneLevel = gbinary.BeDecodeToUint16(data[7])
	ret.FanGateThreeLevel = gbinary.BeDecodeToUint16(data[8])
	ret.FanGateFourLevel = gbinary.BeDecodeToUint16(data[9])
	ret.FanGateFiveLevel = gbinary.BeDecodeToUint16(data[10])

	// Any non-zero opening overrides the bit-mask state to "open".
	if ret.FanGateOneLevel > 0 {
		ret.FanGateOne = 1
		ret.FanGateTwo = 1
	}
	if ret.FanGateThreeLevel > 0 {
		ret.FanGateThree = 1
	}

	if ret.FanGateFourLevel > 0 {
		ret.FanGateFour = 1
	}
	if ret.FanGateFiveLevel > 0 {
		ret.FanGateFive = 1
	}

	if err := c.srv.ReportStatus(c.Id, ret, "status"); err != nil {
		return err
	}
	return nil
}
+
+func (c *Client) readError(err error) {
+	defer c.closeConnection()
+	if err == io.EOF || isErrConnReset(err) {
+		return
+	}
+	glog.Errorf("读取数据发生错误:%s", err.Error())
+}
// closeConnection closes the TCP connection, signals all goroutines watching
// closeChan and notifies the server via closeHandler.
//
// NOTE(review): calling this twice (or concurrently with Server.Stop, which
// also closes closeChan) would panic on the double close — confirm it can
// only run once per client.
func (c *Client) closeConnection() {
	_ = c.conn.Close()
	c.conn = nil
	close(c.closeChan)
	c.isReg = false
	if c.closeHandler != nil {
		c.closeHandler(c.Id, c)
	}
}
+
+// isErrConnReset read: connection reset by peer
+func isErrConnReset(err error) bool {
+	if ne, ok := err.(*net.OpError); ok {
+		return strings.Contains(ne.Err.Error(), syscall.ECONNRESET.Error())
+	}
+	return false
+}
+
// send writes one frame to the device. A nil connection is treated as a
// no-op (returns nil); a write failure tears the connection down and returns
// the error.
func (c *Client) send(buf []byte) error {
	if c.conn == nil {
		return nil
	}
	glog.Debugf("----->%2X", buf)
	err := c.conn.Send(buf)
	if err != nil {
		glog.Error(c.srv.ctx, err)
		c.closeConnection()
		return err
	}
	return nil
}
+
+// GetStatus 获取机组运行状态
+func (c *Client) GetStatus() {
+	//for {
+	//	err := c.send([]byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06})
+	//	err := c.send([]byte{0x01, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x35})
+	//	if err != nil {
+	//		glog.Debugf("处理数据失败:%s", err.Error())
+	//	}
+	//	time.Sleep(10 * time.Second)
+	//}
+	for {
+		c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+		time.Sleep(10 * time.Second)
+	}
+}
+
+func (c *Client) GetSensorStatus() {
+	for {
+		err := c.send([]byte{0x01, 0x03, 0x00, 0x02, 0x00, 0x07, 0x00, 0x00})
+		if err != nil {
+			glog.Debugf("处理数据失败:%s", err.Error())
+		}
+		time.Sleep(10 * time.Second)
+	}
+}
+
+func (c *Client) Read() {
+	for {
+		receiveBuf, err := c.conn.Recv(-1)
+		if err != nil {
+			glog.Debugf("接收数据失败:%s", err.Error())
+		}
+		glog.Debugf("收到数据:%2X", receiveBuf)
+		time.Sleep(1 * time.Second)
+	}
+}
+
+// PowerOn 开机
+func (c *Client) PowerOn() error {
+	result, err := modbus.WriteMultipleRegisters(address.UnitOnOff, 1, []byte{0x00, 0x01})
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// PowerOff 关机
+func (c *Client) PowerOff() error {
+	result, err := modbus.WriteMultipleRegisters(address.UnitOnOff, 1, []byte{0x00, 0x00})
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	return nil
+}
+
+// GetPower 获取开关机状态
+func (c *Client) GetPower() (err error) {
+	result, err := modbus.ReadHoldingRegisters(address.UnitOnOff, 1)
+	if err != nil {
+		return
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SetNewFan 设置新风阀模式
+func (c *Client) SetNewFan(mode uint16) error {
+	result, err := modbus.WriteMultipleRegisters(address.SetNewFan, 1, gbinary.BeEncode(mode))
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SetMode 设置模式
+func (c *Client) SetMode(mode uint16) error {
+	result, err := modbus.WriteMultipleRegisters(address.SetMode, 1, gbinary.BeEncode(mode))
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SetFanSpeed 设置风速
+func (c *Client) SetFanSpeed(speed uint16) error {
+	result, err := modbus.WriteMultipleRegisters(address.SetFanSpeed, 1, gbinary.BeEncode(speed))
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SetTemp 设置温度
+func (c *Client) SetTemp(temp uint16) error {
+	result, err := modbus.WriteMultipleRegisters(address.SetTemp, 1, gbinary.BeEncode(temp))
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SleepMode 睡眠模式
+func (c *Client) SleepMode(mode uint16) error {
+	result, err := modbus.WriteMultipleRegisters(address.SleepMode, 1, gbinary.BeEncode(mode))
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SetFanGateThreeLevel 风阀3开度
+func (c *Client) SetFanGateThreeLevel(value uint16) error {
+	result, err := modbus.WriteMultipleRegisters(address.SetFanGateThreeLevel, 1, gbinary.BeEncode(value))
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SetFanGateFourLevel 风阀4开度
+func (c *Client) SetFanGateFourLevel(value uint16) error {
+	result, err := modbus.WriteMultipleRegisters(address.SetFanGateFourLevel, 1, gbinary.BeEncode(value))
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SetFanGateFiveLevel 风阀5开度
+func (c *Client) SetFanGateFiveLevel(value uint16) error {
+	result, err := modbus.WriteMultipleRegisters(address.SetFanGateFiveLevel, 1, gbinary.BeEncode(value))
+	if err != nil {
+		return err
+	}
+	c.sendChan <- result
+	c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+	return nil
+}
+
+// SetValvePower 控制风阀开关
+func (c *Client) SetValvePower(param *schema.SetValvePowerReq) error {
+	var result []byte
+	var err error
+	if param.Valve1 == 1 {
+		result, err = modbus.WriteMultipleRegisters(address.SetValvePower, 1, gbinary.BeEncode(FAN_1_OPEN))
+		if err != nil {
+			return err
+		}
+
+	}
+	if param.Valve2 == 1 {
+		result, err = modbus.WriteMultipleRegisters(address.SetValvePower, 1, gbinary.BeEncode(FAN_2_OPEN))
+		if err != nil {
+			return err
+		}
+
+	}
+	if param.Valve3 == 1 {
+		result, err = modbus.WriteMultipleRegisters(address.SetValvePower, 1, gbinary.BeEncode(FAN_3_OPEN))
+		if err != nil {
+			return err
+		}
+
+	}
+	if param.Valve4 == 1 {
+		result, err = modbus.WriteMultipleRegisters(address.SetValvePower, 1, gbinary.BeEncode(FAN_4_OPEN))
+		if err != nil {
+			return err
+		}
+
+	}
+	if param.Valve5 == 1 {
+		result, err = modbus.WriteMultipleRegisters(address.SetValvePower, 1, gbinary.BeEncode(FAN_5_OPEN))
+		if err != nil {
+			return err
+		}
+
+	}
+	c.sendChan <- result
+	return nil
+}
+
// FormatInterfaceToByte converts a queue element back to a byte. Any value
// that is not a uint8 yields zero.
func FormatInterfaceToByte(a interface{}) byte {
	if v, ok := a.(uint8); ok {
		return v
	}
	return 0
}

+ 87 - 0
server/server.go

@@ -0,0 +1,87 @@
+package server
+
+import (
+	"context"
+	"fmt"
+	"github.com/gogf/gf/container/gmap"
+	"github.com/gogf/gf/frame/g"
+	"github.com/gogf/gf/net/gtcp"
+	"github.com/gogf/gf/os/glog"
+	gatewayV2 "sparrow-sdk/v2"
+	"time"
+)
+
// Server accepts TCP connections from 4G gateway devices and bridges them to
// the sparrow gateway.
type Server struct {
	closeChan chan struct{}      // reserved shutdown signal (currently unused in this file)
	srv       *gtcp.Server       // underlying TCP listener
	ctx       context.Context    // base context for logging
	addr      string             // listen address
	port      int                // listen port
	gateWay   *gatewayV2.Gateway // upstream sparrow gateway client
	clients   *gmap.Map          // device id -> *Client
}
+
+func NewServer(ctx context.Context, addr string, port int, gw *gatewayV2.Gateway) *Server {
+	return &Server{
+		closeChan: make(chan struct{}),
+		ctx:       ctx,
+		addr:      addr,
+		port:      port,
+		gateWay:   gw,
+		clients:   gmap.New(false),
+	}
+}
+
+func (s *Server) Start() error {
+	glog.Printf("服务端启动[%s:%d]", s.addr, s.port)
+	srv := gtcp.NewServer(fmt.Sprintf("%s:%d", s.addr, s.port), s.onClientConnect)
+	s.srv = srv
+	return s.srv.Run()
+}
+
// Stop closes every connected client's close channel and shuts the listener
// down.
//
// NOTE(review): Client.closeConnection also closes closeChan; if a client
// disconnects while Stop runs, the channel is closed twice and panics —
// confirm shutdown ordering.
func (s *Server) Stop() {
	s.clients.Iterator(func(k interface{}, v interface{}) bool {
		client := v.(*Client)
		close(client.closeChan)
		return true
	})
	_ = s.srv.Close()
}
+
+func (s *Server) onClientConnect(conn *gtcp.Conn) {
+	glog.Debugf("新的设备接入:%s", conn.RemoteAddr())
+	client := NewClient(s, conn)
+	client.closeHandler = func(id string, c *Client) {
+		glog.Debugf("客户端断开:%s", id)
+		if id != "" {
+			_ = s.gateWay.SubDeviceLogout(g.Cfg().GetString("sparrow.DeviceCode"), id)
+			s.clients.Remove(id)
+		}
+	}
+	client.regHandler = func(id string, c *Client) {
+		_ = s.gateWay.SubDeviceLogin(g.Cfg().GetString("Sparrow.DeviceCode"), id)
+		s.clients.Set(id, c)
+	}
+
+	go client.SendLoop()
+
+	time.Sleep(10 * time.Second)
+
+	go client.GetStatus()
+
+	//go client.GetSensorStatus()
+
+	//go client.Read()
+}
+
// ReportStatus forwards a sub-device status payload to the sparrow gateway
// under the given command name.
func (s *Server) ReportStatus(subId string, data interface{}, cmd string) error {
	return s.gateWay.ReportStatus(subId, cmd, data)
}
+
+func (s *Server) GetClient(subId string) *Client {
+	client := s.clients.Get(subId)
+	if client != nil {
+		return client.(*Client)
+	}
+	return nil
+}

+ 2 - 0
vendor/github.com/BurntSushi/toml/.gitignore

@@ -0,0 +1,2 @@
+toml.test
+/toml-test

+ 1 - 0
vendor/github.com/BurntSushi/toml/COMPATIBLE

@@ -0,0 +1 @@
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

+ 21 - 0
vendor/github.com/BurntSushi/toml/COPYING

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 TOML authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 211 - 0
vendor/github.com/BurntSushi/toml/README.md

@@ -0,0 +1,211 @@
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages.
+
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
+
+Documentation: https://godocs.io/github.com/BurntSushi/toml
+
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
+
+This library requires Go 1.13 or newer; install it with:
+
+    % go get github.com/BurntSushi/toml@latest
+
+It also comes with a TOML validator CLI tool:
+
+    % go install github.com/BurntSushi/toml/cmd/tomlv@latest
+    % tomlv some-toml-file.toml
+
+### Testing
+This package passes all tests in [toml-test] for both the decoder and the
+encoder.
+
+[toml-test]: https://github.com/BurntSushi/toml-test
+
+### Examples
+This package works similar to how the Go standard library handles XML and JSON.
+Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys and
+values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+_, err := toml.Decode(tomlData, &conf)
+// handle error
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+    ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that, like most other decoders, **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
+
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+	Name     string
+	Duration duration
+}
+type songs struct {
+	Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+	log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+	time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+	var err error
+	d.Duration, err = time.ParseDuration(string(text))
+	return err
+}
+```
+
+To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
+a similar way.
+
+### More complex usage
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+  "alpha",
+  "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+	Title   string
+	Owner   ownerInfo
+	DB      database `toml:"database"`
+	Servers map[string]server
+	Clients clients
+}
+
+type ownerInfo struct {
+	Name string
+	Org  string `toml:"organization"`
+	Bio  string
+	DOB  time.Time
+}
+
+type database struct {
+	Server  string
+	Ports   []int
+	ConnMax int `toml:"connection_max"`
+	Enabled bool
+}
+
+type server struct {
+	IP string
+	DC string
+}
+
+type clients struct {
+	Data  [][]interface{}
+	Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_example/example.{go,toml}`.

+ 561 - 0
vendor/github.com/BurntSushi/toml/decode.go

@@ -0,0 +1,561 @@
+package toml
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"reflect"
+	"strings"
+)
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+	UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(data []byte, v interface{}) error {
+	_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
+	return err
+}
+
+// Decode the TOML data in to the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+	return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+	fp, err := os.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+//
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use the PrimitiveDecode() function to "manually" decode these values.
+//
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
+//
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection. They can be useful when you don't know the exact type
+// of TOML data until runtime.
+type Primitive struct {
+	undecoded interface{}
+	context   Key
+}
+
+// The significand precision for float32 and float64 is 24 and 53 bits; this is
+// the range a natural number can be stored in a float without loss of data.
+const (
+	maxSafeFloat32Int = 16777215                // 2^24-1
+	maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
+)
+
+// Decoder decodes TOML data.
+//
+// TOML tables correspond to Go structs or maps (dealer's choice – they can be
+// used interchangeably).
+//
+// TOML table arrays correspond to either a slice of structs or a slice of maps.
+//
+// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
+// in the local timezone.
+//
+// All other TOML types (float, string, int, bool and array) correspond to the
+// obvious Go types.
+//
+// An exception to the above rules is if a type implements the TextUnmarshaler
+// interface, in which case any primitive TOML value (floats, strings, integers,
+// booleans, datetimes) will be converted to a []byte and given to the value's
+// UnmarshalText method. See the Unmarshaler example for a demonstration with
+// time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go struct.
+// The special `toml` struct tag can be used to map TOML keys to struct fields
+// that don't match the key name exactly (see the example). A case insensitive
+// match to struct names will be tried if an exact match can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
+//
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+	r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+var (
+	unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+	unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+// Decode TOML data in to the pointer `v`.
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		s := "%q"
+		if reflect.TypeOf(v) == nil {
+			s = "%v"
+		}
+
+		return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v))
+	}
+	if rv.IsNil() {
+		return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v))
+	}
+
+	// Check if this is a supported type: struct, map, interface{}, or something
+	// that implements UnmarshalTOML or UnmarshalText.
+	rv = indirect(rv)
+	rt := rv.Type()
+	if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
+		!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
+		!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
+		return MetaData{}, e("cannot decode to type %s", rt)
+	}
+
+	// TODO: parser should read from io.Reader? Or at the very least, make it
+	// read from []byte rather than string
+	data, err := ioutil.ReadAll(dec.r)
+	if err != nil {
+		return MetaData{}, err
+	}
+
+	p, err := parse(string(data))
+	if err != nil {
+		return MetaData{}, err
+	}
+
+	md := MetaData{
+		mapping: p.mapping,
+		types:   p.types,
+		keys:    p.ordered,
+		decoded: make(map[string]struct{}, len(p.ordered)),
+		context: nil,
+	}
+	return md, md.unify(p.mapping, rv)
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md.context = primValue.context
+	defer func() { md.context = nil }()
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+	// Special case. Look for a `Primitive` value.
+	// TODO: #76 would make this superfluous after implemented.
+	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+		// Save the undecoded data and the key context into the primitive
+		// value.
+		context := make(Key, len(md.context))
+		copy(context, md.context)
+		rv.Set(reflect.ValueOf(Primitive{
+			undecoded: data,
+			context:   context,
+		}))
+		return nil
+	}
+
+	// Special case. Unmarshaler Interface support.
+	if rv.CanAddr() {
+		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+			return v.UnmarshalTOML(data)
+		}
+	}
+
+	// Special case. Look for a value satisfying the TextUnmarshaler interface.
+	if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+		return md.unifyText(data, v)
+	}
+	// TODO:
+	// The behavior here is incorrect whenever a Go type satisfies the
+	// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+	// array. In particular, the unmarshaler should only be applied to primitive
+	// TOML values. But at this point, it will be applied to all kinds of values
+	// and produce an incorrect error whenever those values are hashes or arrays
+	// (including arrays of tables).
+
+	k := rv.Kind()
+
+	// laziness
+	if k >= reflect.Int && k <= reflect.Uint64 {
+		return md.unifyInt(data, rv)
+	}
+	switch k {
+	case reflect.Ptr:
+		elem := reflect.New(rv.Type().Elem())
+		err := md.unify(data, reflect.Indirect(elem))
+		if err != nil {
+			return err
+		}
+		rv.Set(elem)
+		return nil
+	case reflect.Struct:
+		return md.unifyStruct(data, rv)
+	case reflect.Map:
+		return md.unifyMap(data, rv)
+	case reflect.Array:
+		return md.unifyArray(data, rv)
+	case reflect.Slice:
+		return md.unifySlice(data, rv)
+	case reflect.String:
+		return md.unifyString(data, rv)
+	case reflect.Bool:
+		return md.unifyBool(data, rv)
+	case reflect.Interface:
+		// we only support empty interfaces.
+		if rv.NumMethod() > 0 {
+			return e("unsupported type %s", rv.Type())
+		}
+		return md.unifyAnything(data, rv)
+	case reflect.Float32, reflect.Float64:
+		return md.unifyFloat64(data, rv)
+	}
+	return e("unsupported type %s", rv.Kind())
+}
+
+// unifyStruct decodes a TOML table (map[string]interface{}) into the fields
+// of the struct rv. Keys are matched to field names exactly first, then
+// case-insensitively as a fallback, mirroring encoding/json. A nil mapping
+// (e.g. an undecoded Primitive) is a no-op.
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		if mapping == nil {
+			return nil
+		}
+		return e("type mismatch for %s: expected table but found %T",
+			rv.Type().String(), mapping)
+	}
+
+	for key, datum := range tmap {
+		var f *field
+		fields := cachedTypeFields(rv.Type())
+		for i := range fields {
+			ff := &fields[i]
+			if ff.name == key {
+				f = ff
+				break
+			}
+			if f == nil && strings.EqualFold(ff.name, key) {
+				f = ff
+			}
+		}
+		if f != nil {
+			// Walk the field index path; indirect allocates intermediate
+			// nil pointers so decoding can descend into them.
+			subv := rv
+			for _, i := range f.index {
+				subv = indirect(subv.Field(i))
+			}
+
+			if isUnifiable(subv) {
+				md.decoded[md.context.add(key).String()] = struct{}{}
+				md.context = append(md.context, key)
+				err := md.unify(datum, subv)
+				if err != nil {
+					return err
+				}
+				md.context = md.context[0 : len(md.context)-1]
+			} else if f.name != "" {
+				return e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
+			}
+		}
+	}
+	return nil
+}
+
+// unifyMap decodes a TOML table into a Go map. Only maps with string keys
+// are supported. A nil mapping (e.g. an undecoded Primitive) is a no-op.
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+	if k := rv.Type().Key().Kind(); k != reflect.String {
+		return fmt.Errorf(
+			"toml: cannot decode to a map with non-string key type (%s in %q)",
+			k, rv.Type())
+	}
+
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		// Check the original value here: after a failed type assertion
+		// tmap is always the zero value (nil), so testing tmap instead
+		// would silently swallow every type mismatch and make the
+		// badtype branch unreachable.
+		if mapping == nil {
+			return nil
+		}
+		return md.badtype("map", mapping)
+	}
+	if rv.IsNil() {
+		rv.Set(reflect.MakeMap(rv.Type()))
+	}
+	for k, v := range tmap {
+		md.decoded[md.context.add(k).String()] = struct{}{}
+		md.context = append(md.context, k)
+
+		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+		if err := md.unify(v, rvval); err != nil {
+			return err
+		}
+		md.context = md.context[0 : len(md.context)-1]
+
+		rvkey := indirect(reflect.New(rv.Type().Key()))
+		rvkey.SetString(k)
+		rv.SetMapIndex(rvkey, rvval)
+	}
+	return nil
+}
+
+// unifyArray decodes a TOML array into a fixed-size Go array. The TOML
+// array length must match the Go array length exactly; a nil value is a
+// no-op.
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		if !datav.IsValid() {
+			return nil
+		}
+		return md.badtype("slice", data)
+	}
+	if l := datav.Len(); l != rv.Len() {
+		return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
+	}
+	return md.unifySliceArray(datav, rv)
+}
+
+// unifySlice decodes a TOML array into a Go slice, allocating or growing
+// the slice when it is nil or its capacity is too small. A nil value is a
+// no-op.
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		if !datav.IsValid() {
+			return nil
+		}
+		return md.badtype("slice", data)
+	}
+	n := datav.Len()
+	if rv.IsNil() || rv.Cap() < n {
+		rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+	}
+	rv.SetLen(n)
+	return md.unifySliceArray(datav, rv)
+}
+
+// unifySliceArray decodes each element of the TOML array into the
+// corresponding element of rv (a slice or array of matching length).
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+	l := data.Len()
+	for i := 0; i < l; i++ {
+		err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// unifyString decodes a TOML string into a Go string.
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+	if s, ok := data.(string); ok {
+		rv.SetString(s)
+		return nil
+	}
+	return md.badtype("string", data)
+}
+
+// unifyFloat64 decodes a TOML float into a Go float32 or float64. TOML
+// integers are also accepted, provided they fit the maxSafe*Int bounds for
+// the target float type. The caller (unify) guarantees rv is a float kind,
+// hence the "bug" panics on the default branches.
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(float64); ok {
+		switch rv.Kind() {
+		case reflect.Float32:
+			// Range-check for float32, then fall through to the shared
+			// SetFloat in the float64 case.
+			if num < -math.MaxFloat32 || num > math.MaxFloat32 {
+				return e("value %f is out of range for float32", num)
+			}
+			fallthrough
+		case reflect.Float64:
+			rv.SetFloat(num)
+		default:
+			panic("bug")
+		}
+		return nil
+	}
+
+	if num, ok := data.(int64); ok {
+		switch rv.Kind() {
+		case reflect.Float32:
+			if num < -maxSafeFloat32Int || num > maxSafeFloat32Int {
+				return e("value %d is out of range for float32", num)
+			}
+			fallthrough
+		case reflect.Float64:
+			if num < -maxSafeFloat64Int || num > maxSafeFloat64Int {
+				return e("value %d is out of range for float64", num)
+			}
+			rv.SetFloat(float64(num))
+		default:
+			panic("bug")
+		}
+		return nil
+	}
+
+	return md.badtype("float", data)
+}
+
+// unifyInt decodes a TOML integer into any Go integer kind, with bounds
+// checks for the smaller fixed-size kinds. The caller (unify) dispatches
+// here only for Int..Uint64 kinds, hence the "unreachable" panic.
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(int64); ok {
+		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+			switch rv.Kind() {
+			case reflect.Int, reflect.Int64:
+				// No bounds checking necessary.
+			case reflect.Int8:
+				if num < math.MinInt8 || num > math.MaxInt8 {
+					return e("value %d is out of range for int8", num)
+				}
+			case reflect.Int16:
+				if num < math.MinInt16 || num > math.MaxInt16 {
+					return e("value %d is out of range for int16", num)
+				}
+			case reflect.Int32:
+				if num < math.MinInt32 || num > math.MaxInt32 {
+					return e("value %d is out of range for int32", num)
+				}
+			}
+			rv.SetInt(num)
+		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+			// Negative values are caught by the explicit num < 0 checks
+			// below; the uint64 conversion alone would wrap around.
+			unum := uint64(num)
+			switch rv.Kind() {
+			case reflect.Uint, reflect.Uint64:
+				// No bounds checking necessary.
+			case reflect.Uint8:
+				if num < 0 || unum > math.MaxUint8 {
+					return e("value %d is out of range for uint8", num)
+				}
+			case reflect.Uint16:
+				if num < 0 || unum > math.MaxUint16 {
+					return e("value %d is out of range for uint16", num)
+				}
+			case reflect.Uint32:
+				if num < 0 || unum > math.MaxUint32 {
+					return e("value %d is out of range for uint32", num)
+				}
+			}
+			rv.SetUint(unum)
+		} else {
+			panic("unreachable")
+		}
+		return nil
+	}
+	return md.badtype("integer", data)
+}
+
+// unifyBool decodes a TOML boolean into a Go bool.
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+	if b, ok := data.(bool); ok {
+		rv.SetBool(b)
+		return nil
+	}
+	return md.badtype("boolean", data)
+}
+
+// unifyAnything assigns the decoded value as-is; used for empty interface
+// targets, so it can never fail.
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+	rv.Set(reflect.ValueOf(data))
+	return nil
+}
+
+// unifyText decodes a primitive TOML value into a type implementing
+// encoding.TextUnmarshaler by first rendering the value as a string.
+// Non-primitive values (tables, arrays) are rejected via badtype.
+func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
+	var s string
+	switch sdata := data.(type) {
+	case Marshaler:
+		text, err := sdata.MarshalTOML()
+		if err != nil {
+			return err
+		}
+		s = string(text)
+	case TextMarshaler:
+		text, err := sdata.MarshalText()
+		if err != nil {
+			return err
+		}
+		s = string(text)
+	case fmt.Stringer:
+		s = sdata.String()
+	case string:
+		s = sdata
+	case bool:
+		s = fmt.Sprintf("%v", sdata)
+	case int64:
+		s = fmt.Sprintf("%d", sdata)
+	case float64:
+		s = fmt.Sprintf("%f", sdata)
+	default:
+		return md.badtype("primitive (string-like)", data)
+	}
+	if err := v.UnmarshalText([]byte(s)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// badtype returns an error describing a TOML/Go type mismatch at the
+// current key context.
+func (md *MetaData) badtype(dst string, data interface{}) error {
+	return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst)
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+	return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+//
+// Pointers are followed until the value is not a pointer. New values are
+// allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of interest
+// to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr {
+		// Prefer the addressable form when *T implements
+		// encoding.TextUnmarshaler, so unifyText can be used on it.
+		if v.CanSet() {
+			pv := v.Addr()
+			if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
+				return pv
+			}
+		}
+		return v
+	}
+	// Allocate through nil pointers so decoding can descend into them.
+	if v.IsNil() {
+		v.Set(reflect.New(v.Type().Elem()))
+	}
+	return indirect(reflect.Indirect(v))
+}
+
+// isUnifiable reports whether rv can receive a decoded value: either it is
+// settable, or it implements encoding.TextUnmarshaler.
+func isUnifiable(rv reflect.Value) bool {
+	if rv.CanSet() {
+		return true
+	}
+	if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+		return true
+	}
+	return false
+}
+
+// e formats an error with the standard "toml: " prefix.
+func e(format string, args ...interface{}) error {
+	return fmt.Errorf("toml: "+format, args...)
+}

+ 19 - 0
vendor/github.com/BurntSushi/toml/decode_go116.go

@@ -0,0 +1,19 @@
+//go:build go1.16
+// +build go1.16
+
+package toml
+
+import (
+	"io/fs"
+)
+
+// DecodeFS is just like Decode, except it will automatically read the contents
+// of the file at `path` from a fs.FS instance.
+// The file is closed before DecodeFS returns.
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}

+ 21 - 0
vendor/github.com/BurntSushi/toml/deprecated.go

@@ -0,0 +1,21 @@
+package toml
+
+import (
+	"encoding"
+	"io"
+)
+
+// Deprecated: use encoding.TextMarshaler
+type TextMarshaler encoding.TextMarshaler
+
+// Deprecated: use encoding.TextUnmarshaler
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// Deprecated: use MetaData.PrimitiveDecode.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]struct{})}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }

+ 13 - 0
vendor/github.com/BurntSushi/toml/doc.go

@@ -0,0 +1,13 @@
+/*
+Package toml implements decoding and encoding of TOML files.
+
+This package supports TOML v1.0.0, as listed on https://toml.io
+
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
+
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify if TOML document is valid. It can also be used to
+print the type of each key.
+*/
+package toml

+ 698 - 0
vendor/github.com/BurntSushi/toml/encode.go

@@ -0,0 +1,698 @@
+package toml
+
+import (
+	"bufio"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
+)
+
+var dblQuotedReplacer = strings.NewReplacer(
+	"\"", "\\\"",
+	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions.
+//
+// The toml.Marshaler and encoding.TextMarshaler interfaces are supported to
+// encode the value as custom TOML.
+//
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this includes maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+	// String to use for a single indentation level; default is two spaces.
+	Indent string
+
+	w          *bufio.Writer
+	hasWritten bool // written any output to w yet?
+}
+
+// NewEncoder creates a new Encoder writing to w, with two spaces as the
+// default indentation.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
+//
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case: time needs to be in ISO8601 format.
+	//
+	// Special case: if we can marshal the type to text, then we used that. This
+	// prevents the encoder for handling these types as generic structs (or
+	// whatever the underlying type of a TextMarshaler is).
+	switch t := rv.Interface().(type) {
+	case time.Time, encoding.TextMarshaler, Marshaler:
+		enc.writeKeyValue(key, rv, false)
+		return
+	// TODO: #76 would make this superfluous after implemented.
+	case Primitive:
+		enc.encode(key, reflect.ValueOf(t.undecoded))
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.writeKeyValue(key, rv, false)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.writeKeyValue(key, rv, false)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element.
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+		format := time.RFC3339Nano
+		switch v.Location() {
+		case internal.LocalDatetime:
+			format = "2006-01-02T15:04:05.999999999"
+		case internal.LocalDate:
+			format = "2006-01-02"
+		case internal.LocalTime:
+			format = "15:04:05.999999999"
+		}
+		switch v.Location() {
+		default:
+			enc.wf(v.Format(format))
+		case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+			enc.wf(v.In(time.UTC).Format(format))
+		}
+		return
+	case Marshaler:
+		s, err := v.MarshalTOML()
+		if err != nil {
+			encPanic(err)
+		}
+		enc.w.Write(s)
+		return
+	case encoding.TextMarshaler:
+		s, err := v.MarshalText()
+		if err != nil {
+			encPanic(err)
+		}
+		enc.writeQuoted(string(s))
+		return
+	}
+
+	switch rv.Kind() {
+	case reflect.String:
+		enc.writeQuoted(rv.String())
+	case reflect.Bool:
+		enc.wf(strconv.FormatBool(rv.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		enc.wf(strconv.FormatInt(rv.Int(), 10))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		enc.wf(strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Float32:
+		f := rv.Float()
+		if math.IsNaN(f) {
+			enc.wf("nan")
+		} else if math.IsInf(f, 0) {
+			enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+		} else {
+			enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
+		}
+	case reflect.Float64:
+		f := rv.Float()
+		if math.IsNaN(f) {
+			enc.wf("nan")
+		} else if math.IsInf(f, 0) {
+			enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+		} else {
+			enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
+		}
+	case reflect.Array, reflect.Slice:
+		enc.eArrayOrSliceElement(rv)
+	case reflect.Struct:
+		enc.eStruct(nil, rv, true)
+	case reflect.Map:
+		enc.eMap(nil, rv, true)
+	case reflect.Interface:
+		enc.eElement(rv.Elem())
+	default:
+		encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
+	}
+}
+
+// By the TOML spec, all floats must have a decimal with at least one number on
+// either side.
+func floatAddDecimal(fstr string) string {
+	if !strings.Contains(fstr, ".") {
+		return fstr + ".0"
+	}
+	return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+	enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+	length := rv.Len()
+	enc.wf("[")
+	for i := 0; i < length; i++ {
+		elem := rv.Index(i)
+		enc.eElement(elem)
+		if i != length-1 {
+			enc.wf(", ")
+		}
+	}
+	enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	for i := 0; i < rv.Len(); i++ {
+		trv := rv.Index(i)
+		if isNil(trv) {
+			continue
+		}
+		enc.newline()
+		enc.wf("%s[[%s]]", enc.indentStr(key), key)
+		enc.newline()
+		enc.eMapOrStruct(key, trv, false)
+	}
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+	if len(key) == 1 {
+		// Output an extra newline between top-level tables.
+		// (The newline isn't written if nothing else has been written though.)
+		enc.newline()
+	}
+	if len(key) > 0 {
+		enc.wf("%s[%s]", enc.indentStr(key), key)
+		enc.newline()
+	}
+	enc.eMapOrStruct(key, rv, false)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
+	switch rv := eindirect(rv); rv.Kind() {
+	case reflect.Map:
+		enc.eMap(key, rv, inline)
+	case reflect.Struct:
+		enc.eStruct(key, rv, inline)
+	default:
+		// Should never happen?
+		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+	}
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
+	rt := rv.Type()
+	if rt.Key().Kind() != reflect.String {
+		encPanic(errNonString)
+	}
+
+	// Sort keys so that we have deterministic output. And write keys directly
+	// underneath this key first, before writing sub-structs or sub-maps.
+	var mapKeysDirect, mapKeysSub []string
+	for _, mapKey := range rv.MapKeys() {
+		k := mapKey.String()
+		if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+			mapKeysSub = append(mapKeysSub, k)
+		} else {
+			mapKeysDirect = append(mapKeysDirect, k)
+		}
+	}
+
+	var writeMapKeys = func(mapKeys []string, trailC bool) {
+		sort.Strings(mapKeys)
+		for i, mapKey := range mapKeys {
+			val := rv.MapIndex(reflect.ValueOf(mapKey))
+			if isNil(val) {
+				continue
+			}
+
+			if inline {
+				enc.writeKeyValue(Key{mapKey}, val, true)
+				if trailC || i != len(mapKeys)-1 {
+					enc.wf(", ")
+				}
+			} else {
+				enc.encode(key.add(mapKey), val)
+			}
+		}
+	}
+
+	if inline {
+		enc.wf("{")
+	}
+	writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+	writeMapKeys(mapKeysSub, false)
+	if inline {
+		enc.wf("}")
+	}
+}
+
+const is32Bit = (32 << (^uint(0) >> 63)) == 32
+
+// eStruct encodes a struct either as a set of key/value pairs under the
+// current table, or as an inline table ("{k = v, ...}") when inline is set.
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
+	// Write keys for fields directly under this key first, because if we write
+	// a field that creates a new table then all keys under it will be in that
+	// table (not the one we're writing here).
+	//
+	// Fields is a [][]int: for fieldsDirect this always has one entry (the
+	// struct index). For fieldsSub it contains two entries: the parent field
+	// index from tv, and the field indexes for the fields of the sub.
+	var (
+		rt                      = rv.Type()
+		fieldsDirect, fieldsSub [][]int
+		addFields               func(rt reflect.Type, rv reflect.Value, start []int)
+	)
+	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
+				continue
+			}
+			opts := getOptions(f.Tag)
+			if opts.skip {
+				continue
+			}
+
+			frv := rv.Field(i)
+
+			// Treat anonymous struct fields with tag names as though they are
+			// not anonymous, like encoding/json does.
+			//
+			// Non-struct anonymous fields use the normal encoding logic.
+			if f.Anonymous {
+				t := f.Type
+				switch t.Kind() {
+				case reflect.Struct:
+					if getOptions(f.Tag).name == "" {
+						addFields(t, frv, append(start, f.Index...))
+						continue
+					}
+				case reflect.Ptr:
+					if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
+						if !frv.IsNil() {
+							addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
+						}
+						continue
+					}
+				}
+			}
+
+			if typeIsTable(tomlTypeOfGo(frv)) {
+				fieldsSub = append(fieldsSub, append(start, f.Index...))
+			} else {
+				// Copy so it works correct on 32bit archs; not clear why this
+				// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
+				// This also works fine on 64bit, but 32bit archs are somewhat
+				// rare and this is a wee bit faster.
+				if is32Bit {
+					copyStart := make([]int, len(start))
+					copy(copyStart, start)
+					fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
+				} else {
+					fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+				}
+			}
+		}
+	}
+	addFields(rt, rv, nil)
+
+	// wroteAny tracks, across both writeFields calls, whether an inline
+	// key/value has been written yet so that ", " separates only items that
+	// are actually emitted. The previous check (fieldIndex[0] against
+	// len(fields)-1) compared a struct field index with a loop bound, which
+	// breaks for embedded structs and for fields skipped below via
+	// nil/omitempty/omitzero, yielding missing or trailing commas.
+	var wroteAny bool
+	writeFields := func(fields [][]int) {
+		for _, fieldIndex := range fields {
+			fieldType := rt.FieldByIndex(fieldIndex)
+			fieldVal := rv.FieldByIndex(fieldIndex)
+
+			if isNil(fieldVal) { /// Don't write anything for nil fields.
+				continue
+			}
+
+			opts := getOptions(fieldType.Tag)
+			if opts.skip {
+				continue
+			}
+			keyName := fieldType.Name
+			if opts.name != "" {
+				keyName = opts.name
+			}
+			if opts.omitempty && isEmpty(fieldVal) {
+				continue
+			}
+			if opts.omitzero && isZero(fieldVal) {
+				continue
+			}
+
+			if inline {
+				if wroteAny {
+					enc.wf(", ")
+				}
+				enc.writeKeyValue(Key{keyName}, fieldVal, true)
+				wroteAny = true
+			} else {
+				enc.encode(key.add(keyName), fieldVal)
+			}
+		}
+	}
+
+	if inline {
+		enc.wf("{")
+	}
+	writeFields(fieldsDirect)
+	writeFields(fieldsSub)
+	if inline {
+		enc.wf("}")
+	}
+}
+
+// tomlTypeOfGo returns the TOML type name of the Go value's type.
+//
+// It is used to determine whether the types of array elements are mixed (which
+// is forbidden). If the Go value is nil, then it is illegal for it to be an
+// array element, and valueIsNil is returned as true.
+//
+// The type may be `nil`, which means no concrete TOML type could be found.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		}
+		return tomlArray
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		if _, ok := rv.Interface().(time.Time); ok {
+			return tomlDatetime
+		}
+		if isMarshaler(rv) {
+			return tomlString
+		}
+		return tomlHash
+	default:
+		if isMarshaler(rv) {
+			return tomlString
+		}
+
+		encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+		panic("unreachable")
+	}
+}
+
+func isMarshaler(rv reflect.Value) bool {
+	switch rv.Interface().(type) {
+	case encoding.TextMarshaler:
+		return true
+	case Marshaler:
+		return true
+	}
+
+	// Someone used a pointer receiver: we can make it work for pointer values.
+	if rv.CanAddr() {
+		if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok {
+			return true
+		}
+		if _, ok := rv.Addr().Interface().(Marshaler); ok {
+			return true
+		}
+	}
+	return false
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+
+	/// Don't allow nil.
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		if tomlTypeOfGo(rv.Index(i)) == nil {
+			encPanic(errArrayNilElement)
+		}
+	}
+
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+	return firstType
+}
+
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
+
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Bool:
+		return !rv.Bool()
+	}
+	return false
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// Write a key/value pair:
+//
+//   key = <any value>
+//
+// This is also used for "k = v" in inline tables; so something like this will
+// be written in three calls:
+//
+//     ┌────────────────────┐
+//     │      ┌───┐  ┌─────┐│
+//     v      v   v  v     vv
+//     key = {k = v, k2 = v2}
+//
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	if !inline {
+		enc.newline()
+	}
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	_, err := fmt.Fprintf(enc.w, format, v...)
+	if err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return eindirect(v.Elem())
+	default:
+		return v
+	}
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}

+ 229 - 0
vendor/github.com/BurntSushi/toml/error.go

@@ -0,0 +1,229 @@
+package toml
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ParseError is returned when there is an error parsing the TOML syntax.
+//
+// For example invalid syntax, duplicate keys, etc.
+//
+// In addition to the error message itself, you can also print detailed location
+// information with context by using ErrorWithPosition():
+//
+//     toml: error: Key 'fruit' was already created and cannot be used as an array.
+//
+//     At line 4, column 2-7:
+//
+//           2 | fruit = []
+//           3 |
+//           4 | [[fruit]] # Not allowed
+//                 ^^^^^
+//
+// Furthermore, the ErrorWithUsage() can be used to print the above with some
+// more detailed usage guidance:
+//
+//    toml: error: newlines not allowed within inline tables
+//
+//    At line 1, column 18:
+//
+//          1 | x = [{ key = 42 #
+//                               ^
+//
+//    Error help:
+//
+//      Inline tables must always be on a single line:
+//
+//          table = {key = 42, second = 43}
+//
+//      It is invalid to split them over multiple lines like so:
+//
+//          # INVALID
+//          table = {
+//              key    = 42,
+//              second = 43
+//          }
+//
+//      Use regular for this:
+//
+//          [table]
+//          key    = 42
+//          second = 43
+type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error
+	LastKey  string   // Last parsed key, may be blank.
+	Line     int      // Line the error occurred. Deprecated: use Position.
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition returns the error with detailed location context.
+//
+// See the documentation on ParseError.
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case.
+		return pe.Error()
+	}
+
+	var (
+		lines = strings.Split(pe.input, "\n")
+		col   = pe.column(lines)
+		b     = new(strings.Builder)
+	)
+
+	msg := pe.Message
+	if msg == "" {
+		msg = pe.err.Error()
+	}
+
+	// TODO: don't show control characters as literals? This may not show up
+	// well everywhere.
+
+	if pe.Position.Len == 1 {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
+			msg, pe.Position.Line, col+1)
+	} else {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
+			msg, pe.Position.Line, col, col+pe.Position.Len)
+	}
+	// Show up to two lines of preceding context above the offending line,
+	// then a caret marker under the error span.
+	if pe.Position.Line > 2 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+	}
+	if pe.Position.Line > 1 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+	}
+	fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
+	fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+	return b.String()
+}
+
+// ErrorWithUsage() returns the error with detailed location context and usage
+// guidance.
+//
+// See the documentation on ParseError.
+func (pe ParseError) ErrorWithUsage() string {
+	m := pe.ErrorWithPosition()
+	if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
+		return m + "Error help:\n\n    " +
+			strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n    ") +
+			"\n"
+	}
+	return m
+}
+
+func (pe ParseError) column(lines []string) int {
+	var pos, col int
+	for i := range lines {
+		ll := len(lines[i]) + 1 // +1 for the removed newline
+		if pos+ll >= pe.Position.Start {
+			col = pe.Position.Start - pos
+			if col < 0 { // Should never happen, but just in case.
+				col = 0
+			}
+			break
+		}
+		pos += ll
+	}
+
+	return col
+}
+
+type (
+	errLexControl       struct{ r rune }
+	errLexEscape        struct{ r rune }
+	errLexUTF8          struct{ b byte }
+	errLexInvalidNum    struct{ v string }
+	errLexInvalidDate   struct{ v string }
+	errLexInlineTableNL struct{}
+	errLexStringNL      struct{}
+)
+
+func (e errLexControl) Error() string {
+	return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
+}
+func (e errLexControl) Usage() string { return "" }
+
+func (e errLexEscape) Error() string        { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
+func (e errLexEscape) Usage() string        { return usageEscape }
+func (e errLexUTF8) Error() string          { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
+func (e errLexUTF8) Usage() string          { return "" }
+func (e errLexInvalidNum) Error() string    { return fmt.Sprintf("invalid number: %q", e.v) }
+func (e errLexInvalidNum) Usage() string    { return "" }
+func (e errLexInvalidDate) Error() string   { return fmt.Sprintf("invalid date: %q", e.v) }
+func (e errLexInvalidDate) Usage() string   { return "" }
+func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
+func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
+func (e errLexStringNL) Error() string      { return "strings cannot contain newlines" }
+func (e errLexStringNL) Usage() string      { return usageStringNewline }
+
+const usageEscape = `
+A '\' inside a "-delimited string is interpreted as an escape character.
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- write two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+    table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+    # INVALID
+    table = {
+        key    = 42,
+        second = 43
+    }
+
+Use regular for this:
+
+    [table]
+    key    = 42
+    second = 43
+`
+
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+    # INVALID
+    string = "Hello,
+    world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+    string = """Hello,
+    world!"""
+`

+ 36 - 0
vendor/github.com/BurntSushi/toml/internal/tz.go

@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// defaults to current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is because they're referred from in e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var (
+	// localOffset is the machine's current UTC offset in seconds, captured
+	// once at package initialization.
+	localOffset   = func() int { _, o := time.Now().Zone(); return o }()
+	LocalDatetime = time.FixedZone("datetime-local", localOffset)
+	LocalDate     = time.FixedZone("date-local", localOffset)
+	LocalTime     = time.FixedZone("time-local", localOffset)
+)

+ 1224 - 0
vendor/github.com/BurntSushi/toml/lex.go

@@ -0,0 +1,1224 @@
+package toml
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// itemType identifies the kind of token the lexer emits to the parser.
+type itemType int
+
+const (
+	itemError itemType = iota
+	itemNIL            // used in the parser to indicate no type
+	itemEOF
+	itemText
+	itemString
+	itemRawString
+	itemMultilineString
+	itemRawMultilineString
+	itemBool
+	itemInteger
+	itemFloat
+	itemDatetime
+	itemArray // the start of an array
+	itemArrayEnd
+	itemTableStart
+	itemTableEnd
+	itemArrayTableStart
+	itemArrayTableEnd
+	itemKeyStart
+	itemKeyEnd
+	itemCommentStart
+	itemInlineTableStart
+	itemInlineTableEnd
+)
+
+// eof is the sentinel rune (value 0) returned by next at end of input.
+const eof = 0
+
+// stateFn is one step of the lexer state machine; it returns the next state.
+type stateFn func(lx *lexer) stateFn
+
+// String renders a Position for use in error messages.
+func (p Position) String() string {
+	return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len)
+}
+
+// lexer holds the state of the TOML lexer. Tokens are handed out as item
+// values through the buffered items channel.
+type lexer struct {
+	// input is the entire document; start/pos delimit the token currently
+	// being built, and line is the current (1-based) line number.
+	input string
+	start int
+	pos   int
+	line  int
+	state stateFn
+	items chan item
+
+	// Allow for backing up up to 4 runes. This is necessary because TOML
+	// contains 3-rune tokens (""" and ''').
+	prevWidths [4]int
+	nprev      int  // how many of prevWidths are in use
+	atEOF      bool // If we emit an eof, we can still back up, but it is not OK to call next again.
+
+	// A stack of state functions used to maintain context.
+	//
+	// The idea is to reuse parts of the state machine in various places. For
+	// example, values can appear at the top level or within arbitrarily nested
+	// arrays. The last state on the stack is used after a value has been lexed.
+	// Similarly for comments.
+	stack []stateFn
+}
+
+// item is a single lexed token (or error) together with its source position.
+type item struct {
+	typ itemType
+	val string
+	err error
+	pos Position
+}
+
+// nextItem returns the next lexed item, running the state machine on the
+// caller's goroutine until one is available on the channel; there is no
+// separate lexing goroutine.
+func (lx *lexer) nextItem() item {
+	for {
+		select {
+		case item := <-lx.items:
+			return item
+		default:
+			lx.state = lx.state(lx)
+			//fmt.Printf("     STATE %-24s  current: %-10q	stack: %s\n", lx.state, lx.current(), lx.stack)
+		}
+	}
+}
+
+// lex creates a new lexer for input, starting in the lexTop state.
+func lex(input string) *lexer {
+	lx := &lexer{
+		input: input,
+		state: lexTop,
+		items: make(chan item, 10),
+		stack: make([]stateFn, 0, 10),
+		line:  1,
+	}
+	return lx
+}
+
+// push saves a state on the return stack, to be resumed later by pop.
+func (lx *lexer) push(state stateFn) {
+	lx.stack = append(lx.stack, state)
+}
+
+// pop removes and returns the most recently pushed state; popping an empty
+// stack is a lexer bug and aborts with an error item.
+func (lx *lexer) pop() stateFn {
+	if len(lx.stack) == 0 {
+		return lx.errorf("BUG in lexer: no states to pop")
+	}
+	last := lx.stack[len(lx.stack)-1]
+	lx.stack = lx.stack[0 : len(lx.stack)-1]
+	return last
+}
+
+// current returns the text of the token being built: input[start:pos].
+func (lx *lexer) current() string {
+	return lx.input[lx.start:lx.pos]
+}
+
+// getPos reports the position of the current token; Len is clamped to a
+// minimum of 1 so errors always highlight at least one character.
+func (lx lexer) getPos() Position {
+	p := Position{
+		Line:  lx.line,
+		Start: lx.start,
+		Len:   lx.pos - lx.start,
+	}
+	if p.Len <= 0 {
+		p.Len = 1
+	}
+	return p
+}
+
+// emit sends the current token to the parser and starts a new token.
+func (lx *lexer) emit(typ itemType) {
+	// Needed for multiline strings ending with an incomplete UTF-8 sequence.
+	if lx.start > lx.pos {
+		lx.error(errLexUTF8{lx.input[lx.pos]})
+		return
+	}
+	lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
+	lx.start = lx.pos
+}
+
+// emitTrim is like emit, but trims surrounding whitespace from the value.
+func (lx *lexer) emitTrim(typ itemType) {
+	lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())}
+	lx.start = lx.pos
+}
+
+// next consumes and returns the next rune, or eof at the end of input.
+// Invalid UTF-8 and bare control characters are reported through lx.error
+// and utf8.RuneError is returned.
+func (lx *lexer) next() (r rune) {
+	if lx.atEOF {
+		panic("BUG in lexer: next called after EOF")
+	}
+	if lx.pos >= len(lx.input) {
+		lx.atEOF = true
+		return eof
+	}
+
+	if lx.input[lx.pos] == '\n' {
+		lx.line++
+	}
+	lx.prevWidths[3] = lx.prevWidths[2]
+	lx.prevWidths[2] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[0]
+	if lx.nprev < 4 {
+		lx.nprev++
+	}
+
+	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+	if r == utf8.RuneError {
+		lx.error(errLexUTF8{lx.input[lx.pos]})
+		return utf8.RuneError
+	}
+
+	// Note: don't use peek() here, as this calls next().
+	// A '\r' is only allowed when immediately followed by '\n' (CRLF).
+	if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) {
+		lx.errorControlChar(r)
+		return utf8.RuneError
+	}
+
+	lx.prevWidths[0] = w
+	lx.pos += w
+	return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+	lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called 4 times between calls to next.
+func (lx *lexer) backup() {
+	if lx.atEOF {
+		lx.atEOF = false
+		return
+	}
+	if lx.nprev < 1 {
+		panic("BUG in lexer: backed up too far")
+	}
+	w := lx.prevWidths[0]
+	lx.prevWidths[0] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[2]
+	lx.prevWidths[2] = lx.prevWidths[3]
+	lx.nprev--
+
+	lx.pos -= w
+	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+		lx.line--
+	}
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+	if lx.next() == valid {
+		return true
+	}
+	lx.backup()
+	return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+	r := lx.next()
+	lx.backup()
+	return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+	for {
+		r := lx.next()
+		if pred(r) {
+			continue
+		}
+		lx.backup()
+		lx.ignore()
+		return
+	}
+}
+
+// error stops all lexing by emitting an error and returning `nil`.
+//
+// Note that any value that is a character is escaped if it's a special
+// character (newlines, tabs, etc.).
+//
+// At EOF the position is moved back to the previous line (errorPrevLine) so
+// the error doesn't point at a blank line.
+func (lx *lexer) error(err error) stateFn {
+	if lx.atEOF {
+		return lx.errorPrevLine(err)
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
+	return nil
+}
+
+// errorfPrevline is like error(), but sets the position to the last column of
+// the previous line.
+//
+// This is so that unexpected EOF or NL errors don't show on a new blank line.
+func (lx *lexer) errorPrevLine(err error) stateFn {
+	pos := lx.getPos()
+	pos.Line--
+	pos.Len = 1
+	pos.Start = lx.pos - 1
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorPos is like error(), but allows explicitly setting the position.
+func (lx *lexer) errorPos(start, length int, err error) stateFn {
+	pos := lx.getPos()
+	pos.Start = start
+	pos.Len = length
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorf is like error, and creates a new error.
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+	if lx.atEOF {
+		pos := lx.getPos()
+		pos.Line--
+		pos.Len = 1
+		pos.Start = lx.pos - 1
+		lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
+		return nil
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)}
+	return nil
+}
+
+// errorControlChar reports a disallowed control character at the rune just
+// consumed.
+func (lx *lexer) errorControlChar(cc rune) stateFn {
+	return lx.errorPos(lx.pos-1, 1, errLexControl{cc})
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+	switch r {
+	case '#':
+		lx.push(lexTop)
+		return lexCommentStart
+	case '[':
+		return lexTableStart
+	case eof:
+		// A partially consumed token at EOF is an error.
+		if lx.pos > lx.start {
+			return lx.errorf("unexpected EOF")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '#':
+		// a comment will read to a newline for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		lx.emit(itemEOF)
+		return nil
+	}
+	return lx.errorf(
+		"expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
+		r)
+}
+
+// lexTable lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == '[' {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+// lexTableEnd emits the end of a standard table header.
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+// lexArrayTableEnd consumes the second ']' of an array-of-tables header.
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != ']' {
+		return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+// lexTableNameStart lexes one dotted part of a table name, dispatching to the
+// quoted or bare name lexer.
+func lexTableNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == ']' || r == eof:
+		return lx.errorf("unexpected end of table name (table names cannot be empty)")
+	case r == '.':
+		return lx.errorf("unexpected table separator (table names cannot be empty)")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexTableNameEnd)
+		return lexBareName
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == '.':
+		lx.ignore()
+		return lexTableNameStart
+	case r == ']':
+		return lx.pop()
+	default:
+		return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
+	}
+}
+
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		// NOTE(review): callers skip whitespace before dispatching here, so
+		// this branch looks unreachable in practice — confirm before relying
+		// on it.
+		return lexSkip(lx, lexValue)
+	case r == '"':
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == '\'':
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
+	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		fallthrough
+	default: // Bare key
+		lx.emit(itemKeyStart)
+		return lexKeyNameStart
+	}
+}
+
+// lexKeyNameStart lexes one dotted part of a key name.
+func lexKeyNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '='")
+	case r == '.':
+		return lx.errorf("unexpected '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexKeyEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexKeyEnd)
+		return lexBareName
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected key separator '='")
+	case r == '.':
+		lx.ignore()
+		return lexKeyNameStart
+	case r == '=':
+		lx.emit(itemKeyEnd)
+		return lexSkip(lx, lexValue)
+	default:
+		return lx.errorf("expected '.' or '=', but got %q instead", r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the next is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	}
+	switch r {
+	case '[':
+		lx.ignore()
+		lx.emit(itemArray)
+		return lexArrayValue
+	case '{':
+		lx.ignore()
+		lx.emit(itemInlineTableStart)
+		return lexInlineTableValue
+	case '"':
+		if lx.accept('"') {
+			if lx.accept('"') {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the '"'
+		return lexString
+	case '\'':
+		if lx.accept('\'') {
+			if lx.accept('\'') {
+				lx.ignore() // Ignore '''
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case '.': // special error case, be kind to users
+		return lx.errorf("floats must start with a digit, not '.'")
+	case 'i', 'n':
+		// Unsigned "inf" and "nan" special float values.
+		if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
+			lx.emit(itemFloat)
+			return lx.pop()
+		}
+	case '-', '+':
+		return lexDecimalNumberStart
+	}
+	if unicode.IsLetter(r) {
+		// Be permissive here; lexBool will give a nice error if the
+		// user wrote something like
+		//   x = foo
+		// (i.e. not 'true' or 'false' but is something else word-like.)
+		lx.backup()
+		return lexBool
+	}
+	if r == eof {
+		return lx.errorf("unexpected EOF; expected value")
+	}
+	return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValue)
+	case r == '#':
+		lx.push(lexArrayValue)
+		return lexCommentStart
+	case r == ',':
+		return lx.errorf("unexpected comma")
+	case r == ']':
+		return lexArrayEnd
+	}
+
+	lx.backup()
+	lx.push(lexArrayValueEnd)
+	return lexValue
+}
+
+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValueEnd)
+	case r == '#':
+		lx.push(lexArrayValueEnd)
+		return lexCommentStart
+	case r == ',':
+		lx.ignore()
+		return lexArrayValue // move on to the next value
+	case r == ']':
+		return lexArrayEnd
+	default:
+		return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r))
+	}
+}
+
+// lexArrayEnd finishes the lexing of an array.
+// It assumes that a ']' has just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemArrayEnd)
+	return lx.pop()
+}
+
+// lexInlineTableValue consumes one key/value pair in an inline table.
+// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
+func lexInlineTableValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexInlineTableValue)
+	case isNL(r):
+		return lx.errorPrevLine(errLexInlineTableNL{})
+	case r == '#':
+		lx.push(lexInlineTableValue)
+		return lexCommentStart
+	case r == ',':
+		return lx.errorf("unexpected comma")
+	case r == '}':
+		return lexInlineTableEnd
+	}
+	lx.backup()
+	lx.push(lexInlineTableValueEnd)
+	return lexKeyStart
+}
+
+// lexInlineTableValueEnd consumes everything between the end of an inline table
+// key/value pair and the next pair (or the end of the table):
+// it ignores whitespace and expects either a ',' or a '}'.
+func lexInlineTableValueEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexSkip(lx, lexInlineTableValueEnd)
+	case isNL(r):
+		return lx.errorPrevLine(errLexInlineTableNL{})
+	case r == '#':
+		lx.push(lexInlineTableValueEnd)
+		return lexCommentStart
+	case r == ',':
+		lx.ignore()
+		lx.skip(isWhitespace)
+		if lx.peek() == '}' {
+			return lx.errorf("trailing comma not allowed in inline tables")
+		}
+		return lexInlineTableValue
+	case r == '}':
+		return lexInlineTableEnd
+	default:
+		return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r))
+	}
+}
+
+// runeOrEOF formats r for error messages, mapping eof to "end of file".
+func runeOrEOF(r rune) string {
+	if r == eof {
+		return "end of file"
+	}
+	return "'" + string(r) + "'"
+}
+
+// lexInlineTableEnd finishes the lexing of an inline table.
+// It assumes that a '}' has just been consumed.
+func lexInlineTableEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemInlineTableEnd)
+	return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == eof:
+		return lx.errorf(`unexpected EOF; expected '"'`)
+	case isNL(r):
+		return lx.errorPrevLine(errLexStringNL{})
+	case r == '\\':
+		lx.push(lexString)
+		return lexStringEscape
+	case r == '"':
+		lx.backup()
+		lx.emit(itemString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	default:
+		return lexMultilineString
+	case eof:
+		return lx.errorf(`unexpected EOF; expected '"""'`)
+	case '\\':
+		return lexMultilineStringEscape
+	case '"':
+		/// Found " → try to read two more "".
+		if lx.accept('"') {
+			if lx.accept('"') {
+				/// Peek ahead: the string can contain " and "", including at the
+				/// end: """str"""""
+				/// 6 or more at the end, however, is an error.
+				if lx.peek() == '"' {
+					/// Check if we already lexed 5 's; if so we have 6 now, and
+					/// that's just too many man!
+					if strings.HasSuffix(lx.current(), `"""""`) {
+						return lx.errorf(`unexpected '""""""'`)
+					}
+					lx.backup()
+					lx.backup()
+					return lexMultilineString
+				}
+
+				lx.backup() /// backup: don't include the """ in the item.
+				lx.backup()
+				lx.backup()
+				lx.emit(itemMultilineString)
+				lx.next() /// Read over """ again and discard it.
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+		return lexMultilineString
+	}
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	default:
+		return lexRawString
+	case r == eof:
+		return lx.errorf(`unexpected EOF; expected "'"`)
+	case isNL(r):
+		return lx.errorPrevLine(errLexStringNL{})
+	case r == '\'':
+		lx.backup()
+		lx.emit(itemRawString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	default:
+		return lexMultilineRawString
+	case eof:
+		return lx.errorf(`unexpected EOF; expected "'''"`)
+	case '\'':
+		/// Found ' → try to read two more ''.
+		if lx.accept('\'') {
+			if lx.accept('\'') {
+				/// Peek ahead: the string can contain ' and '', including at the
+				/// end: '''str'''''
+				/// 6 or more at the end, however, is an error.
+				if lx.peek() == '\'' {
+					/// Check if we already lexed 5 's; if so we have 6 now, and
+					/// that's just too many man!
+					if strings.HasSuffix(lx.current(), "'''''") {
+						return lx.errorf(`unexpected "''''''"`)
+					}
+					lx.backup()
+					lx.backup()
+					return lexMultilineRawString
+				}
+
+				lx.backup() /// backup: don't include the ''' in the item.
+				lx.backup()
+				lx.backup()
+				lx.emit(itemRawMultilineString)
+				lx.next() /// Read over ''' again and discard it.
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+		return lexMultilineRawString
+	}
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+	// Handle the special case first: an escaped newline ("line-ending
+	// backslash") is simply skipped.
+	if isNL(lx.next()) {
+		return lexMultilineString
+	}
+	lx.backup()
+	lx.push(lexMultilineString)
+	return lexStringEscape(lx)
+}
+
+// lexStringEscape validates a single escape sequence; the leading '\' has
+// already been consumed.
+func lexStringEscape(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case 'b':
+		fallthrough
+	case 't':
+		fallthrough
+	case 'n':
+		fallthrough
+	case 'f':
+		fallthrough
+	case 'r':
+		fallthrough
+	case '"':
+		fallthrough
+	case ' ', '\t':
+		// Inside """ .. """ strings you can use \ to escape newlines, and any
+		// amount of whitespace can be between the \ and \n.
+		fallthrough
+	case '\\':
+		return lx.pop()
+	case 'u':
+		return lexShortUnicodeEscape
+	case 'U':
+		return lexLongUnicodeEscape
+	}
+	return lx.error(errLexEscape{r})
+}
+
+// lexShortUnicodeEscape consumes the four hex digits of a \uXXXX escape.
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 4; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf(
+				`expected four hexadecimal digits after '\u', but got %q instead`,
+				lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexLongUnicodeEscape consumes the eight hex digits of a \UXXXXXXXX escape.
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 8; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf(
+				`expected eight hexadecimal digits after '\U', but got %q instead`,
+				lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexNumberOrDateStart processes the first character of a value which begins
+// with a digit. It exists to catch values starting with '0', so that
+// lexBaseNumberOrDate can differentiate base prefixed integers from other
+// types.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case '0':
+		return lexBaseNumberOrDate
+	}
+
+	if !isDigit(r) {
+		// The only way to reach this state is if the value starts
+		// with a digit, so specifically treat anything else as an
+		// error.
+		return lx.errorf("expected a digit but got %q", r)
+	}
+
+	return lexNumberOrDate
+}
+
+// lexNumberOrDate consumes either an integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '-', ':':
+		return lexDatetime
+	case '_':
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDatetime
+	}
+	switch r {
+	case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
+		return lexDatetime
+	}
+
+	lx.backup()
+	lx.emitTrim(itemDatetime)
+	return lx.pop()
+}
+
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isHexadecimal(r) {
+		return lexHexInteger
+	}
+	switch r {
+	case '_':
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isOctal(r) {
+		return lexOctalInteger
+	}
+	switch r {
+	case '_':
+		return lexOctalInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isBinary(r) {
+		return lexBinaryInteger
+	}
+	switch r {
+	case '_':
+		return lexBinaryInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+	switch r {
+	case '.', 'e', 'E':
+		return lexFloat
+	case '_':
+		return lexDecimalNumber
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumber consumes the first digit of a number beginning with a sign.
+// It assumes the sign has already been consumed. Values which start with a sign
+// are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+	r := lx.next()
+
+	// Special error cases to give users better error messages
+	switch r {
+	case 'i':
+		if !lx.accept('n') || !lx.accept('f') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case 'n':
+		if !lx.accept('a') || !lx.accept('n') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case '0':
+		p := lx.peek()
+		switch p {
+		case 'b', 'o', 'x':
+			return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+		}
+	case '.':
+		return lx.errorf("floats must start with a digit, not '.'")
+	}
+
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+
+	return lx.errorf("expected a digit but got %q", r)
+}
+
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			// NOTE(review): the stateFn returned by errorf is discarded here
+			// (and in the 'o'/'x' cases below). The error item is still
+			// queued on lx.items, so nextItem delivers it before any token
+			// emitted by the continued state.
+			lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	default:
+		return lexComment
+	}
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+	lx.ignore()
+	return nextState
+}
+
+// String returns a short human-readable name of the state function, used for
+// debugging output.
+func (s stateFn) String() string {
+	name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+	if i := strings.LastIndexByte(name, '.'); i > -1 {
+		name = name[i+1:]
+	}
+	if s == nil {
+		name = "<nil>"
+	}
+	return name + "()"
+}
+
+// String returns a human-readable name for the item type; unknown values
+// indicate a lexer bug and panic.
+func (itype itemType) String() string {
+	switch itype {
+	case itemError:
+		return "Error"
+	case itemNIL:
+		return "NIL"
+	case itemEOF:
+		return "EOF"
+	case itemText:
+		return "Text"
+	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+		return "String"
+	case itemBool:
+		return "Bool"
+	case itemInteger:
+		return "Integer"
+	case itemFloat:
+		return "Float"
+	case itemDatetime:
+		return "DateTime"
+	case itemTableStart:
+		return "TableStart"
+	case itemTableEnd:
+		return "TableEnd"
+	case itemKeyStart:
+		return "KeyStart"
+	case itemKeyEnd:
+		return "KeyEnd"
+	case itemArray:
+		return "Array"
+	case itemArrayEnd:
+		return "ArrayEnd"
+	case itemCommentStart:
+		return "CommentStart"
+	case itemInlineTableStart:
+		return "InlineTableStart"
+	case itemInlineTableEnd:
+		return "InlineTableEnd"
+	}
+	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+// String renders an item as "(Type, value)" for debugging.
+func (item item) String() string {
+	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
+
+// Character-class predicates used throughout the lexer.
+func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
+func isNL(r rune) bool         { return r == '\n' || r == '\r' }
+func isControl(r rune) bool { // Control characters except \t, \r, \n
+	switch r {
+	case '\t', '\r', '\n':
+		return false
+	default:
+		return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+	}
+}
+func isDigit(r rune) bool  { return r >= '0' && r <= '9' }
+func isBinary(r rune) bool { return r == '0' || r == '1' }
+func isOctal(r rune) bool  { return r >= '0' && r <= '7' }
+func isHexadecimal(r rune) bool {
+	return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
+}
+func isBareKeyChar(r rune) bool {
+	return (r >= 'A' && r <= 'Z') ||
+		(r >= 'a' && r <= 'z') ||
+		(r >= '0' && r <= '9') ||
+		r == '_' || r == '-'
+}

+ 120 - 0
vendor/github.com/BurntSushi/toml/meta.go

@@ -0,0 +1,120 @@
+package toml
+
+import (
+	"strings"
+)
+
// MetaData allows access to meta information about TOML data that's not
// accessible otherwise.
//
// It allows checking if a key is defined in the TOML data, whether any keys
// were undecoded, and the TOML type of a key.
type MetaData struct {
	context Key // Used only during decoding.

	mapping map[string]interface{} // Decoded key/value tree.
	types   map[string]tomlType    // Dotted key name → TOML type of that key.
	keys    []Key                  // All keys, in document order.
	decoded map[string]struct{}    // Set of dotted key names that were decoded.
}
+
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
+//
+// Returns false for an empty key.
+func (md *MetaData) IsDefined(key ...string) bool {
+	if len(key) == 0 {
+		return false
+	}
+
+	var (
+		hash      map[string]interface{}
+		ok        bool
+		hashOrVal interface{} = md.mapping
+	)
+	for _, k := range key {
+		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+			return false
+		}
+		if hashOrVal, ok = hash[k]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that does
// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
	// Key.String() quotes non-bare parts, matching how types were recorded
	// by the parser's setType.
	if typ, ok := md.types[Key(key).String()]; ok {
		return typ.typeString()
	}
	return ""
}
+
// Keys returns a slice of every key in the TOML data, including key groups.
//
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific. The list will have the same
// order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
	// Returned slice is the internal one; callers are expected not to mutate it.
	return md.keys
}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+	undecoded := make([]Key, 0, len(md.keys))
+	for _, key := range md.keys {
+		if _, ok := md.decoded[key.String()]; !ok {
+			undecoded = append(undecoded, key)
+		}
+	}
+	return undecoded
+}
+
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// values of this type.
type Key []string
+
+func (k Key) String() string {
+	ss := make([]string, len(k))
+	for i := range k {
+		ss[i] = k.maybeQuoted(i)
+	}
+	return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+	if k[i] == "" {
+		return `""`
+	}
+	for _, c := range k[i] {
+		if !isBareKeyChar(c) {
+			return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
+		}
+	}
+	return k[i]
+}
+
+func (k Key) add(piece string) Key {
+	newKey := make(Key, len(k)+1)
+	copy(newKey, k)
+	newKey[len(k)] = piece
+	return newKey
+}

+ 767 - 0
vendor/github.com/BurntSushi/toml/parse.go

@@ -0,0 +1,767 @@
+package toml
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
// parser holds all state for a single TOML parse: the lexer it pulls items
// from, the current table context, and the accumulated mapping/type tables.
type parser struct {
	lx         *lexer
	context    Key      // Full key for the current hash in scope.
	currentKey string   // Base key name for everything except hashes.
	pos        Position // Current position in the TOML file.

	ordered   []Key                  // List of keys in the order that they appear in the TOML data.
	mapping   map[string]interface{} // Map keyname → key value.
	types     map[string]tomlType    // Map keyname → TOML type.
	implicits map[string]struct{}    // Record implicit keys (e.g. "key.group.names").
}
+
// parse runs the lexer over data and builds the key/value mapping, ordered
// key list and type table. Errors inside the parser are raised as ParseError
// panics and converted back to ordinary errors here.
func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			if pErr, ok := r.(ParseError); ok {
				pErr.input = data
				err = pErr
				return
			}
			// Anything that is not a ParseError is a genuine bug: re-raise.
			panic(r)
		}
	}()

	// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
	// which mangles stuff.
	if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
		data = data[2:]
	}

	// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
	// file (second byte in surrogate pair being NULL). Again, do this here to
	// avoid having to deal with UTF-8/16 stuff in the lexer.
	ex := 6
	if len(data) < 6 {
		ex = len(data)
	}
	if i := strings.IndexRune(data[:ex], 0); i > -1 {
		return nil, ParseError{
			Message:  "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
			Position: Position{Line: 1, Start: i, Len: 1},
			Line:     1,
			input:    data,
		}
	}

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]struct{}),
	}
	// Drain the lexer one top-level item at a time until EOF.
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}
+
+func (p *parser) panicItemf(it item, format string, v ...interface{}) {
+	panic(ParseError{
+		Message:  fmt.Sprintf(format, v...),
+		Position: it.pos,
+		Line:     it.pos.Len,
+		LastKey:  p.current(),
+	})
+}
+
// panicf raises a ParseError at the parser's current position (use
// panicItemf to position the error at a specific lexer item instead).
func (p *parser) panicf(format string, v ...interface{}) {
	panic(ParseError{
		Message:  fmt.Sprintf(format, v...),
		Position: p.pos,
		Line:     p.pos.Line,
		LastKey:  p.current(),
	})
}
+
// next pulls the next item from the lexer, converting lexer errors into
// ParseError panics; callers never see an itemError.
func (p *parser) next() item {
	it := p.lx.nextItem()
	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
	if it.typ == itemError {
		if it.err != nil {
			// Structured lexer error: propagate it as-is.
			panic(ParseError{
				Position: it.pos,
				Line:     it.pos.Line,
				LastKey:  p.current(),
				err:      it.err,
			})
		}

		// Plain-text lexer error: wrap the message.
		p.panicItemf(it, "%s", it.val)
	}
	return it
}
+
// nextPos is like next, but also records the item's position as the parser's
// current position (used by panicf for error reporting).
func (p *parser) nextPos() item {
	it := p.next()
	p.pos = it.pos
	return it
}
+
// bug panics with a plain string (not a ParseError): these conditions are
// internal invariant violations, never user-input errors, so parse() will
// re-raise them instead of returning them.
func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
+
// expect consumes the next lexer item and asserts that it has the given
// type, returning it.
func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}
+
// assertEqual reports a parser bug when the received item type differs from
// the expected one.
func (p *parser) assertEqual(expected, got itemType) {
	if expected != got {
		p.bug("Expected '%s' but got '%s'.", expected, got)
	}
}
+
// topLevel dispatches one top-level item: a comment, a [table] header, an
// [[array-of-tables]] header, or a key = value assignment.
func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart: // # ..
		p.expect(itemText)
	case itemTableStart: // [ .. ]
		name := p.nextPos()

		// Collect all dotted parts of the table name until the closing ].
		var key Key
		for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
			key = append(key, p.keyString(name))
		}
		p.assertEqual(itemTableEnd, name.typ)

		p.addContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart: // [[ .. ]]
		name := p.nextPos()

		var key Key
		for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
			key = append(key, p.keyString(name))
		}
		p.assertEqual(itemArrayTableEnd, name.typ)

		p.addContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart: // key = ..
		outerContext := p.context
		/// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
		k := p.nextPos()
		var key Key
		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
			key = append(key, p.keyString(k))
		}
		p.assertEqual(itemKeyEnd, k.typ)

		/// The current key is the last part.
		p.currentKey = key[len(key)-1]

		/// All the other parts (if any) are the context; need to set each part
		/// as implicit.
		context := key[:len(key)-1]
		for i := range context {
			p.addImplicitContext(append(p.context, context[i:i+1]...))
		}

		/// Set value.
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))

		/// Remove the context we added (preserving any context from [tbl] lines).
		p.context = outerContext
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}
+
// Gets a string for a key (or part of a key in a table name).
// Bare keys are used verbatim; quoted keys go through the normal string
// value pipeline so their escapes are expanded.
func (p *parser) keyString(it item) string {
	switch it.typ {
	case itemText:
		return it.val
	case itemString, itemMultilineString,
		itemRawString, itemRawMultilineString:
		s, _ := p.value(it, false)
		return s.(string)
	default:
		p.bug("Unexpected key type: %s", it.typ)
	}
	panic("unreachable")
}
+
// datetimeRepl normalizes the lowercase/space variants that TOML permits in
// datetimes to the strict RFC 3339 separators understood by time.Parse.
var datetimeRepl = strings.NewReplacer(
	"z", "Z",
	"t", "T",
	" ", "T")
+
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
//
// parentIsArray is threaded down to inline tables, which need to know
// whether they live directly inside an array.
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
	switch it.typ {
	case itemString:
		return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
	case itemMultilineString:
		return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
	case itemRawString:
		// Literal strings: no escape processing at all.
		return it.val, p.typeOfPrimitive(it)
	case itemRawMultilineString:
		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
	case itemInteger:
		return p.valueInteger(it)
	case itemFloat:
		return p.valueFloat(it)
	case itemBool:
		switch it.val {
		case "true":
			return true, p.typeOfPrimitive(it)
		case "false":
			return false, p.typeOfPrimitive(it)
		default:
			p.bug("Expected boolean value, but got '%s'.", it.val)
		}
	case itemDatetime:
		return p.valueDatetime(it)
	case itemArray:
		return p.valueArray(it)
	case itemInlineTableStart:
		return p.valueInlineTable(it, parentIsArray)
	default:
		p.bug("Unexpected value type: %s", it.typ)
	}
	panic("unreachable")
}
+
// valueInteger converts an integer lexer item into an int64, enforcing the
// TOML rules on underscores and leading zeroes that strconv does not check.
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
	if !numUnderscoresOK(it.val) {
		p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
	}
	if numHasLeadingZero(it.val) {
		p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
	}

	// Base 0 lets strconv handle the 0b/0o/0x prefixes.
	num, err := strconv.ParseInt(it.val, 0, 64)
	if err != nil {
		// Distinguish integer values. Normally, it'd be a bug if the lexer
		// provides an invalid integer, but it's possible that the number is
		// out of range of valid values (which the lexer cannot determine).
		// So mark the former as a bug but the latter as a legitimate user
		// error.
		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
			p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val)
		} else {
			p.bug("Expected integer value, but got '%s'.", it.val)
		}
	}
	return num, p.typeOfPrimitive(it)
}
+
// valueFloat converts a float lexer item into a float64, enforcing TOML's
// underscore, leading-zero and fractional-part rules before delegating to
// strconv.
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
	// Check underscore placement per component (integer part, fraction,
	// exponent), since '_' may legally neighbour '.'/'e' boundaries only
	// through digits.
	parts := strings.FieldsFunc(it.val, func(r rune) bool {
		switch r {
		case '.', 'e', 'E':
			return true
		}
		return false
	})
	for _, part := range parts {
		if !numUnderscoresOK(part) {
			p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
		}
	}
	if len(parts) > 0 && numHasLeadingZero(parts[0]) {
		p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
	}
	if !numPeriodsOK(it.val) {
		// As a special case, numbers like '123.' or '1.e2',
		// which are valid as far as Go/strconv are concerned,
		// must be rejected because TOML says that a fractional
		// part consists of '.' followed by 1+ digits.
		p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
	}
	val := strings.Replace(it.val, "_", "", -1)
	if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
		val = "nan"
	}
	num, err := strconv.ParseFloat(val, 64)
	if err != nil {
		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
			p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
		} else {
			p.panicItemf(it, "Invalid float value: %q", it.val)
		}
	}
	return num, p.typeOfPrimitive(it)
}
+
// dtTypes lists the accepted datetime layouts, most specific first; the zone
// distinguishes full RFC 3339 datetimes from TOML's local date/time forms.
var dtTypes = []struct {
	fmt  string
	zone *time.Location
}{
	{time.RFC3339Nano, time.Local},
	{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
	{"2006-01-02", internal.LocalDate},
	{"15:04:05.999999999", internal.LocalTime},
}
+
// valueDatetime parses a datetime lexer item against each supported layout
// in dtTypes, after normalizing separators via datetimeRepl.
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
	it.val = datetimeRepl.Replace(it.val)
	var (
		t   time.Time
		ok  bool
		err error
	)
	for _, dt := range dtTypes {
		t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
		if err == nil {
			ok = true
			break
		}
	}
	if !ok {
		p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
	}
	return t, p.typeOfPrimitive(it)
}
+
// valueArray parses an array value ([...]) into a []interface{}, skipping
// embedded comments.
func (p *parser) valueArray(it item) (interface{}, tomlType) {
	p.setType(p.currentKey, tomlArray)

	// p.setType(p.currentKey, typ)
	var (
		types []tomlType

		// Initialize to a non-nil empty slice. This makes it consistent with
		// how S = [] decodes into a non-nil slice inside something like struct
		// { S []string }. See #338
		array = []interface{}{}
	)
	for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
		if it.typ == itemCommentStart {
			p.expect(itemText)
			continue
		}

		val, typ := p.value(it, true)
		array = append(array, val)
		types = append(types, typ)

		// XXX: types isn't used here, we need it to record the accurate type
		// information.
		//
		// Not entirely sure how to best store this; could use "key[0]",
		// "key[1]" notation, or maybe store it on the Array type?
	}
	return array, tomlArray
}
+
// valueInlineTable parses an inline table ({k = v, ...}) into a
// map[string]interface{}, temporarily pushing the table's key onto the
// parser context and restoring it afterwards.
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
	var (
		hash         = make(map[string]interface{})
		outerContext = p.context
		outerKey     = p.currentKey
	)

	p.context = append(p.context, p.currentKey)
	prevContext := p.context
	p.currentKey = ""

	p.addImplicit(p.context)
	p.addContext(p.context, parentIsArray)

	/// Loop over all table key/value pairs.
	for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
		if it.typ == itemCommentStart {
			p.expect(itemText)
			continue
		}

		/// Read all key parts.
		k := p.nextPos()
		var key Key
		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
			key = append(key, p.keyString(k))
		}
		p.assertEqual(itemKeyEnd, k.typ)

		/// The current key is the last part.
		p.currentKey = key[len(key)-1]

		/// All the other parts (if any) are the context; need to set each part
		/// as implicit.
		context := key[:len(key)-1]
		for i := range context {
			p.addImplicitContext(append(p.context, context[i:i+1]...))
		}

		/// Set the value.
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		hash[p.currentKey] = val

		/// Restore context.
		p.context = prevContext
	}
	p.context = outerContext
	p.currentKey = outerKey
	return hash, tomlHash
}
+
// numHasLeadingZero reports whether the number literal s has a forbidden
// leading zero. A lone '0', the base prefixes 0b/0o/0x, and signed single
// digits are all allowed.
func numHasLeadingZero(s string) bool {
	switch {
	case len(s) > 1 && s[0] == '0':
		// Allow base prefixes such as 0b101, 0o17 and 0xff.
		return s[1] != 'b' && s[1] != 'o' && s[1] != 'x'
	case len(s) > 2 && (s[0] == '-' || s[0] == '+'):
		return s[1] == '0'
	}
	return false
}
+
// numUnderscoresOK reports whether every underscore in the number literal s
// is surrounded by digit-like characters; hex digits are the widest class
// permitted on either side. The nan/inf spellings contain no digits and are
// special-cased as valid.
func numUnderscoresOK(s string) bool {
	switch s {
	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
		return true
	}
	prevOK := false // was the previous rune a digit-like character?
	for _, r := range s {
		if r == '_' && !prevOK {
			return false
		}
		// Hex digits are a superset of all the characters that may
		// legally surround an underscore (isHexadecimal, inlined).
		prevOK = (r >= '0' && r <= '9') ||
			(r >= 'a' && r <= 'f') ||
			(r >= 'A' && r <= 'F')
	}
	// The literal must also end on a digit (no trailing underscore).
	return prevOK
}
+
// numPeriodsOK reports whether every '.' in the float literal s is
// immediately followed by a decimal digit (so "1." and "1.e2" are rejected).
func numPeriodsOK(s string) bool {
	afterPeriod := false
	for _, r := range s {
		// isDigit, inlined: the character right after '.' must be 0-9.
		if afterPeriod && !(r >= '0' && r <= '9') {
			return false
		}
		afterPeriod = r == '.'
	}
	// A trailing '.' is also invalid.
	return !afterPeriod
}
+
// Set the current context of the parser, where the context is either a hash or
// an array of hashes, depending on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) addContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 4)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as an array.", key)
		}
	} else {
		// Plain table: setValue performs the duplicate-key checks.
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	p.context = append(p.context, key[len(key)-1])
}
+
// set calls setValue and setType, recording both the value and the TOML type
// of key in the current context.
func (p *parser) set(key string, val interface{}, typ tomlType) {
	p.setValue(key, val)
	p.setType(key, typ)
}
+
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var (
		tmpHash    interface{}
		ok         bool
		hash       = p.mapping
		keyContext Key
	)
	// Walk to the table named by p.context; it must already exist.
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.panicf("Key '%s' has already been defined.", keyContext)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Normally redefining keys isn't allowed, but the key could have been
		// defined implicitly and it's allowed to be redefined concretely. (See
		// the `valid/implicit-and-explicit-after.toml` in toml-test)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isArray(keyContext) {
			p.removeImplicit(keyContext)
			hash[key] = value
			return
		}
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}

	hash[key] = value
}
+
// setType sets the type of a particular value at a given key. It should be
// called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
	keyContext := make(Key, 0, len(p.context)+1)
	keyContext = append(keyContext, p.context...)
	if len(key) > 0 { // allow type setting for hashes
		keyContext = append(keyContext, key)
	}
	// Special case to make empty keys ("" = 1) work.
	// Without it it will set "" rather than `""`.
	// TODO: why is this needed? And why is this only needed here?
	if len(keyContext) == 0 {
		keyContext = Key{""}
	}
	p.types[keyContext.String()] = typ
}
+
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
// The implicit set is keyed by the dotted string form of the key.
func (p *parser) addImplicit(key Key)     { p.implicits[key.String()] = struct{}{} }
func (p *parser) removeImplicit(key Key)  { delete(p.implicits, key.String()) }
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
func (p *parser) isArray(key Key) bool    { return p.types[key.String()] == tomlArray }

// addImplicitContext records key as implicit and establishes it as the
// current (non-array) table context in one step.
func (p *parser) addImplicitContext(key Key) {
	p.addImplicit(key)
	p.addContext(key, false)
}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+	if len(p.currentKey) == 0 {
+		return p.context.String()
+	}
+	if len(p.context) == 0 {
+		return p.currentKey
+	}
+	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
// stripFirstNewline removes a single leading LF or CRLF from s: TOML ignores
// a newline immediately following the opening quotes of a multiline string.
func stripFirstNewline(s string) string {
	switch {
	case strings.HasPrefix(s, "\n"):
		return s[1:]
	case strings.HasPrefix(s, "\r\n"):
		return s[2:]
	}
	return s
}
+
// Remove newlines inside triple-quoted strings if a line ends with "\".
// A trailing backslash (with an odd number of backslashes) joins the next
// line, discarding its leading whitespace; an even count is a literal
// backslash and the newline is kept.
func (p *parser) stripEscapedNewlines(s string) string {
	split := strings.Split(s, "\n")
	if len(split) < 1 {
		return s
	}

	escNL := false // Keep track of the last non-blank line was escaped.
	for i, line := range split {
		line = strings.TrimRight(line, " \t\r")

		if len(line) == 0 || line[len(line)-1] != '\\' {
			// No trailing backslash: keep this line (plus its newline,
			// unless a previous escape is still consuming whitespace).
			split[i] = strings.TrimRight(split[i], "\r")
			if !escNL && i != len(split)-1 {
				split[i] += "\n"
			}
			continue
		}

		// Count trailing backslashes to decide whether the last one
		// actually escapes the newline (odd count) or is itself escaped.
		escBS := true
		for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
			escBS = !escBS
		}
		if escNL {
			line = strings.TrimLeft(line, " \t\r")
		}
		escNL = !escBS

		if escBS {
			split[i] += "\n"
			continue
		}

		if i == len(split)-1 {
			p.panicf("invalid escape: '\\ '")
		}

		split[i] = line[:len(line)-1] // Remove \
		if len(split)-1 > i {
			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
		}
	}
	return strings.Join(split, "")
}
+
// replaceEscapes expands every backslash escape sequence in a basic string
// to its rune value. The lexer has already validated the escapes, so any
// malformed sequence reaching this point is reported as a bug.
func (p *parser) replaceEscapes(it item, str string) string {
	replaced := make([]rune, 0, len(str))
	s := []byte(str)
	r := 0
	for r < len(s) {
		if s[r] != '\\' {
			// Ordinary character: decode one UTF-8 rune and copy it.
			c, size := utf8.DecodeRune(s[r:])
			r += size
			replaced = append(replaced, c)
			continue
		}
		r += 1
		if r >= len(s) {
			p.bug("Escape sequence at end of string.")
			return ""
		}
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case ' ', '\t':
			p.panicItemf(it, "invalid escape: '\\%c'", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
		case 't':
			replaced = append(replaced, rune(0x0009))
			r += 1
		case 'n':
			replaced = append(replaced, rune(0x000A))
			r += 1
		case 'f':
			replaced = append(replaced, rune(0x000C))
			r += 1
		case 'r':
			replaced = append(replaced, rune(0x000D))
			r += 1
		case '"':
			replaced = append(replaced, rune(0x0022))
			r += 1
		case '\\':
			replaced = append(replaced, rune(0x005C))
			r += 1
		case 'u':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
			replaced = append(replaced, escaped)
			r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
			replaced = append(replaced, escaped)
			r += 9
		}
	}
	return string(replaced)
}
+
// asciiEscapeToUnicode converts the 4 (\u) or 8 (\U) hex digits in bs to a
// rune, rejecting code points that are not valid UTF-8.
func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
	s := string(bs)
	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
	if err != nil {
		p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
	}
	if !utf8.ValidRune(rune(hex)) {
		p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
	}
	return rune(hex)
}

+ 242 - 0
vendor/github.com/BurntSushi/toml/type_fields.go

@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
// A field represents a single field found in a struct.
type field struct {
	name  string       // the name of the field (`toml` tag included)
	tag   bool         // whether field has a `toml` tag
	index []int        // represents the depth of an anonymous field
	typ   reflect.Type // the type of the field
}
+
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	if len(x[i].index) != len(x[j].index) {
		// Shallower fields dominate deeper embedded ones.
		return len(x[i].index) < len(x[j].index)
	}
	if x[i].tag != x[j].tag {
		// Tagged fields sort before untagged ones at the same depth.
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}
+
// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	// Lexicographic comparison of the index paths; a prefix sorts first.
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}
+
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
// (Adapted from encoding/json; Go's embedded-field visibility rules apply,
// with `toml`-tagged fields promoted over untagged ones.)
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	var count map[reflect.Type]int
	var nextCount map[reflect.Type]int

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				opts := getOptions(sf.Tag)
				if opts.skip {
					continue
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := opts.name != ""
					name := opts.name
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
+
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
+
// fieldCache memoizes typeFields results per struct type; the RWMutex makes
// it safe for concurrent decoders.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
+
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		// Store a non-nil sentinel so the nil check above means "miss".
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}

+ 70 - 0
vendor/github.com/BurntSushi/toml/type_toml.go

@@ -0,0 +1,70 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be militating
+// toward adding real composite types.
+type tomlType interface {
+	typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+// A nil on either side compares unequal, even against another nil.
+func typeEqual(t1, t2 tomlType) bool {
+	if t1 == nil || t2 == nil {
+		return false
+	}
+	return t1.typeString() == t2.typeString()
+}
+
+// typeIsTable reports whether t is a TOML table or array-of-tables type.
+func typeIsTable(t tomlType) bool {
+	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+// tomlBaseType is a primitive TOML type identified purely by its name.
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+	return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+	return btype.typeString()
+}
+
+// The fixed set of base TOML types.
+var (
+	tomlInteger   tomlBaseType = "Integer"
+	tomlFloat     tomlBaseType = "Float"
+	tomlDatetime  tomlBaseType = "Datetime"
+	tomlString    tomlBaseType = "String"
+	tomlBool      tomlBaseType = "Bool"
+	tomlArray     tomlBaseType = "Array"
+	tomlHash      tomlBaseType = "Hash"
+	tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+	switch lexItem.typ {
+	case itemInteger:
+		return tomlInteger
+	case itemFloat:
+		return tomlFloat
+	case itemDatetime:
+		return tomlDatetime
+	// All four string lexer variants map to the single TOML String type.
+	case itemString:
+		return tomlString
+	case itemMultilineString:
+		return tomlString
+	case itemRawString:
+		return tomlString
+	case itemRawMultilineString:
+		return tomlString
+	case itemBool:
+		return tomlBool
+	}
+	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+	panic("unreachable")
+}

+ 4 - 0
vendor/github.com/clbanning/mxj/.travis.yml

@@ -0,0 +1,4 @@
+language: go
+
+go:
+- 1.x

+ 55 - 0
vendor/github.com/clbanning/mxj/LICENSE

@@ -0,0 +1,55 @@
+Copyright (c) 2012-2019 Charles Banning <clbanning@gmail.com>.  All rights reserved.
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+===================== for some Go code used in test case ======================
+
+Go Language Copyright & License - 
+
+Copyright 2009 The Go Authors. All rights reserved.
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 199 - 0
vendor/github.com/clbanning/mxj/anyxml.go

@@ -0,0 +1,199 @@
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"reflect"
+)
+
+// DefaultElementTag is the tag used for anonymous list members when no
+// alternative element tag is supplied to AnyXml/AnyXmlIndent.
+const (
+	DefaultElementTag = "element"
+)
+
+// Encode arbitrary value as XML.
+//
+// Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/mxj"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := mxj.AnyXmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+		  <somekey>somevalue</somekey>
+		  <element>string</element>
+		  <element>3.14159265</element>
+		  <element>true</element>
+		</mydoc>
+*/
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXml( v, myRootTag, myElementTag).
+func AnyXml(v interface{}, tags ...string) ([]byte, error) {
+	// Resolve optional root tag (tags[0]) and element tag (tags[1]).
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	// nil encodes as an empty root element.
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte("<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte("<" + rt + "/>"), nil
+	}
+	// Structs are delegated to the standard library marshaler.
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.Marshal(v)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">"); err != nil {
+			return nil, err
+		}
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					// Single-key map: the key itself becomes the element tag.
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(false, s, tag, val, p)
+					}
+				} else {
+					// Multi-key maps are wrapped in the generic element tag.
+					err = marshalMapToXmlIndent(false, s, et, vv, p)
+				}
+			default:
+				err = marshalMapToXmlIndent(false, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString("</" + rt + ">"); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.Xml(rt)
+	default:
+		// Scalars: encode the value under the root tag.
+		err = marshalMapToXmlIndent(false, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}
+
+// Encode an arbitrary value as a pretty XML string.
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXmlIndent( v, "", "  ", myRootTag, myElementTag).
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) {
+	// Resolve optional root tag (tags[0]) and element tag (tags[1]).
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	// nil encodes as an empty root element.
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte(prefix + "<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte(prefix + "<" + rt + "/>"), nil
+	}
+	// Structs are delegated to the standard library marshaler.
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.MarshalIndent(v, prefix, indent)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">\n"); err != nil {
+			return nil, err
+		}
+		p.Indent()
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					// Single-key map: the key itself becomes the element tag.
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(true, s, tag, val, p)
+					}
+				} else {
+					p.start = 1 // we 1 tag in
+					err = marshalMapToXmlIndent(true, s, et, vv, p)
+					// NOTE(review): the marshal error above is overwritten by
+					// the WriteString assignment below, so it can be silently
+					// lost when WriteString succeeds — confirm upstream intent.
+					if _, err = s.WriteString("\n"); err != nil {
+						return nil, err
+					}
+				}
+			default:
+				p.start = 0 // in case trailing p.start = 1
+				err = marshalMapToXmlIndent(true, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString(`</` + rt + `>`); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.XmlIndent(prefix, indent, rt)
+	default:
+		// Scalars: encode the value under the root tag.
+		err = marshalMapToXmlIndent(true, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}

+ 54 - 0
vendor/github.com/clbanning/mxj/atomFeedString.xml

@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
+</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
+  An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+  1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
+     feeds that will be pubsubhubbubbed.
+  2. every time one of those feeds changes, tell the hub
+     with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&amp;#39;t quite get the server to work, but I think the bug
+is not in my code.  I think that the server expects to be
+able to grab the feed and see the feed&amp;#39;s actual URL in
+the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+</summary></entry><entry><title>rietveld: correct tab handling
+</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
+  This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&amp;#39;t know where to put the tab stops.  Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites.  I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+</summary></entry></feed> 	   `
+

+ 138 - 0
vendor/github.com/clbanning/mxj/doc.go

@@ -0,0 +1,138 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2019, Charles Banning. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file
+
+/*
+Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package.  The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them.
+
+Note: this library was designed for processing ad hoc anonymous messages.  Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly.
+
+Related Packages:
+	checkxml: github.com/clbanning/checkxml provides functions for validating XML data.
+
+Notes:
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>].
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+
+SUMMARY
+
+   type Map map[string]interface{}
+
+   Create a Map value, 'mv', from any map[string]interface{} value, 'v':
+      mv := Map(v)
+
+   Unmarshal / marshal XML as a Map value, 'mv':
+      mv, err := NewMapXml(xmlValue) // unmarshal
+      xmlValue, err := mv.Xml()      // marshal
+
+   Unmarshal XML from an io.Reader as a Map value, 'mv':
+      mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+      mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
+
+   Marshal Map value, 'mv', to an XML Writer (io.Writer):
+      err := mv.XmlWriter(xmlWriter)
+      raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
+
+   Also, for prettified output:
+      xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+      err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+      raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
+
+   Bulk process XML with error handling (note: handlers must return a boolean value):
+      err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+      err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
+
+   Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader.
+
+   There are comparable functions and methods for JSON processing.
+
+   Arbitrary structure values can be decoded to / encoded from Map values:
+      mv, err := NewMapStruct(structVal)
+      err := mv.Struct(structPointer)
+
+   To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+   or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then:
+      paths := mv.PathsForKey(key)
+      path := mv.PathForKeyShortest(key)
+      values, err := mv.ValuesForKey(key, subkeys)
+      values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays.
+      count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
+
+   Get everything at once, irrespective of path depth:
+      leafnodes := mv.LeafNodes()
+      leafvalues := mv.LeafValues()
+
+   A new Map with whatever keys are desired can be created from the current Map and then encoded in XML
+   or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.)
+      newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+      newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+      newXml, err := newMap.Xml()   // for example
+      newJson, err := newMap.Json() // ditto
+
+XML PARSING CONVENTIONS
+
+   Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+   Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved: 
+      - <ns:key>something</ns.key> parses to map["ns:key"]interface{}{"something"}
+      - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"}
+
+   Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+XML ENCODING CONVENTIONS
+   
+   - 'nil' Map values, which may represent 'null' JSON values, are encoded as "<tag/>".
+     NOTE: the operation is not symmetric as "<tag/>" elements are decoded as 'tag:""' Map values,
+           which, then, encode in JSON as '"tag":""' values..
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+*/
+package mxj

+ 54 - 0
vendor/github.com/clbanning/mxj/escapechars.go

@@ -0,0 +1,54 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+)
+
+// xmlEscapeChars, when set via XMLEscapeChars(true), turns on entity
+// escaping of attribute and element values during XML encoding.
+var xmlEscapeChars bool
+
+// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values.
+// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is
+// then '&amp;' will be re-escaped as '&amp;amp;'.
+/*
+	The values are:
+	"   &quot;
+	'   &apos;
+	<   &lt;
+	>   &gt;
+	&   &amp;
+*/
+func XMLEscapeChars(b bool) {
+	xmlEscapeChars = b
+}
+
+// Scan for '&' first, since 's' may contain "&amp;" that is parsed to "&amp;amp;"
+// - or "&lt;" that is parsed to "&amp;lt;". Order matters: replacing '&' last
+// would double-escape the entities produced by the other replacements.
+var escapechars = [][2][]byte{
+	{[]byte(`&`), []byte(`&amp;`)},
+	{[]byte(`<`), []byte(`&lt;`)},
+	{[]byte(`>`), []byte(`&gt;`)},
+	{[]byte(`"`), []byte(`&quot;`)},
+	{[]byte(`'`), []byte(`&apos;`)},
+}
+
+// escapeChars returns s with XML special characters replaced by their
+// entity references; an empty string is returned unchanged.
+func escapeChars(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+
+	b := []byte(s)
+	for _, v := range escapechars {
+		n := bytes.Count(b, v[0])
+		if n == 0 {
+			continue
+		}
+		b = bytes.Replace(b, v[0], v[1], n)
+	}
+	return string(b)
+}
+

+ 9 - 0
vendor/github.com/clbanning/mxj/exists.go

@@ -0,0 +1,9 @@
+package mxj
+
+// Exists checks whether the path exists in the Map. If err != nil then
+// 'false' is returned along with the error encountered parsing either
+// the "path" or "subkeys" argument.
+func (mv Map) Exists(path string, subkeys ...string) (bool, error) {
+	v, err := mv.ValuesForPath(path, subkeys...)
+	return (err == nil && len(v) > 0), err
+}

+ 287 - 0
vendor/github.com/clbanning/mxj/files.go

@@ -0,0 +1,287 @@
+package mxj
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+// Maps is a collection of Map values, typically built from a file that
+// holds multiple JSON or XML documents.
+type Maps []Map
+
+// NewMaps returns an empty, non-nil Maps collection.
+func NewMaps() Maps {
+	return make(Maps, 0)
+}
+
+// MapRaw couples a decoded Map (M) with the raw bytes it was decoded from (R).
+type MapRaw struct {
+	M Map
+	R []byte
+}
+
+// NewMapsFromJsonFile - creates an array (Maps) from a file of JSON values.
+// The file may hold a stream of concatenated JSON documents; each non-empty
+// decoded document becomes one Map entry.
+func NewMapsFromJsonFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			// Include the raw bytes that failed to decode for debugging.
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values,
+// preserving the raw bytes for each decoded document.
+func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFile - creates an array (Maps) from a file of XML values.
+// The file may hold a stream of concatenated XML documents; each non-empty
+// decoded document becomes one Map entry.
+func NewMapsFromXmlFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			// Include the raw bytes that failed to decode for debugging.
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values.
+// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw().
+// It is slow at parsing a file from disk and is intended for relatively small utility files.
+func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// ------------------------ Maps writing -------------------------
+// These are handy-dandy methods for dumping configuration data, etc.
+
+// JsonString - analogous to mv.Json(); concatenates the JSON encoding
+// of every Map in the collection.
+// NOTE(review): the safeEncoding argument is accepted but never forwarded
+// to v.Json() below, so safe encoding cannot actually be enabled here —
+// looks like an upstream oversight; confirm before relying on it.
+func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) {
+	var s string
+	for _, v := range mvs {
+		j, err := v.Json()
+		if err != nil {
+			return s, err
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// JsonStringIndent - analogous to mv.JsonIndent(); documents are separated
+// by newlines.
+// NOTE(review): safeEncoding is likewise not forwarded to v.JsonIndent().
+func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) {
+	var s string
+	var haveFirst bool
+	for _, v := range mvs {
+		j, err := v.JsonIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		// Newline-separate documents after the first one.
+		if haveFirst {
+			s += "\n"
+		} else {
+			haveFirst = true
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// XmlString - analogous to mv.Xml(); concatenates the XML encoding of
+// every Map in the collection.
+func (mvs Maps) XmlString() (string, error) {
+	var s string
+	for _, v := range mvs {
+		x, err := v.Xml()
+		if err != nil {
+			return s, err
+		}
+		s += string(x)
+	}
+	return s, nil
+}
+
+// XmlStringIndent - analogous to mv.XmlIndent().
+func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) {
+	var s string
+	for _, v := range mvs {
+		x, err := v.XmlIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		s += string(x)
+	}
+	return s, nil
+}
+
+// JsonFile - write Maps to named file as JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonWriter method.
+// NOTE(review): the fh.WriteString error is ignored here (and in the three
+// sibling methods below), so a short/failed write goes unreported.
+func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonString(encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// JsonFileIndent - write Maps to named file as pretty JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonIndentWriter method.
+func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonStringIndent(prefix, indent, encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// XmlFile - write Maps to named file as XML
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlWriter method.
+func (mvs Maps) XmlFile(file string) error {
+	s, err := mvs.XmlString()
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// XmlFileIndent - write Maps to named file as pretty XML
+// Note: the file will be created,if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlIndentWriter method.
+func (mvs Maps) XmlFileIndent(file, prefix, indent string) error {
+	s, err := mvs.XmlStringIndent(prefix, indent)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}

+ 2 - 0
vendor/github.com/clbanning/mxj/files_test.badjson

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"some", "bad":JSON, "in":"it" }

+ 9 - 0
vendor/github.com/clbanning/mxj/files_test.badxml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</other>
+	<for>test case</for>
+</msg>

+ 2 - 0
vendor/github.com/clbanning/mxj/files_test.json

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"just", "two":2, "JSON":"values", "true":true }

+ 9 - 0
vendor/github.com/clbanning/mxj/files_test.xml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</another>
+	<for>test case</for>
+</msg>

+ 1 - 0
vendor/github.com/clbanning/mxj/files_test_dup.json

@@ -0,0 +1 @@
+{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"}

+ 1 - 0
vendor/github.com/clbanning/mxj/files_test_dup.xml

@@ -0,0 +1 @@
+<doc><data>for files.go</data><some>test</some></doc><msg><another>doc</another><for>test case</for><just>some</just></msg>

+ 12 - 0
vendor/github.com/clbanning/mxj/files_test_indent.json

@@ -0,0 +1,12 @@
+{
+  "a": "test",
+  "file": "for",
+  "files_test.go": "case",
+  "this": "is"
+}
+{
+  "JSON": "values",
+  "true": true,
+  "two": 2,
+  "with": "just"
+}

+ 8 - 0
vendor/github.com/clbanning/mxj/files_test_indent.xml

@@ -0,0 +1,8 @@
+<doc>
+  <data>for files.go</data>
+  <some>test</some>
+</doc><msg>
+  <another>doc</another>
+  <for>test case</for>
+  <just>some</just>
+</msg>

+ 35 - 0
vendor/github.com/clbanning/mxj/gob.go

@@ -0,0 +1,35 @@
+// gob.go - Encode/Decode a Map into a gob object.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/gob"
+)
+
+// NewMapGob returns a Map value for a gob object that has been
+// encoded from a map[string]interface{} (or compatible type) value.
+// It is intended to provide symmetric handling of Maps that have
+// been encoded using mv.Gob.
+func NewMapGob(gobj []byte) (Map, error) {
+	m := make(map[string]interface{}, 0)
+	// Empty input decodes to an empty (non-nil) Map, no error.
+	if len(gobj) == 0 {
+		return m, nil
+	}
+	r := bytes.NewReader(gobj)
+	dec := gob.NewDecoder(r)
+	if err := dec.Decode(&m); err != nil {
+		return m, err
+	}
+	return m, nil
+}
+
+// Gob returns a gob-encoded value for the Map 'mv'.
+func (mv Map) Gob() ([]byte, error) {
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	// Convert to the plain map type so the gob stream is symmetric
+	// with what NewMapGob decodes.
+	if err := enc.Encode(map[string]interface{}(mv)); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}

+ 323 - 0
vendor/github.com/clbanning/mxj/json.go

@@ -0,0 +1,323 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+)
+
+// ------------------------------ write JSON -----------------------
+
+// Just a wrapper on json.Marshal.
+// If option safeEncoding is 'true' then safe encoding of '<', '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) Json(safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.Marshal(mv)
+
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// Just a wrapper on json.MarshalIndent.
+// If option safeEncoding is 'true' then safe encoding of '<', '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.MarshalIndent(mv, prefix, indent)
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// The following implementation is provided for symmetry with NewMapJsonReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// Writes the Map as pretty JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// --------------------------- read JSON -----------------------------
+
+// Decode numeric values as json.Number type Map values - see encoding/json#Number.
+// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(), 
+// etc.; it does not affect NewMapXml(), etc.  The XML encoders mv.Xml() and mv.XmlIndent()
+// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number
+// value types and the resulting Map can be correctly encoded into a XML object.
+var JsonUseNumber bool
+
+// Just a wrapper on json.Unmarshal
+//	Converting JSON to XML is as simple as:
+//		...
+//		mapVal, merr := mxj.NewMapJson(jsonVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		xmlVal, xerr := mapVal.Xml()
+//		if xerr != nil {
+//			// handle error
+//		}
+// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}],
+// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map.
+// See mxj/j2x/j2x_test.go.
+func NewMapJson(jsonVal []byte) (Map, error) {
+	// empty or nil begets empty
+	if len(jsonVal) == 0 {
+		m := make(map[string]interface{}, 0)
+		return m, nil
+	}
+	// handle a goofy case ...
+	if jsonVal[0] == '[' {
+		jsonVal = []byte(`{"object":` + string(jsonVal) + `}`)
+	}
+	m := make(map[string]interface{})
+	// err := json.Unmarshal(jsonVal, &m)
+	buf := bytes.NewReader(jsonVal)
+	dec := json.NewDecoder(buf)
+	if JsonUseNumber {
+		dec.UseNumber()
+	}
+	err := dec.Decode(&m)
+	return m, err
+}
+
+// Retrieve a Map value from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object.
+func NewMapJsonReader(jsonReader io.Reader) (Map, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	return NewMapJson(*jb)
+}
+
+// Retrieve a Map value and raw JSON - []byte - from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object and retrieve the raw JSON in a single call.
+func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, *jb, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	m, merr := NewMapJson(*jb)
+	return m, *jb, merr
+}
+
+// Pull the next JSON string off the stream: just read from first '{' to its closing '}'.
+// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package.
+func getJson(rdr io.Reader) (*[]byte, error) {
+	bval := make([]byte, 1)
+	jb := make([]byte, 0)
+	var inQuote, inJson bool
+	var parenCnt int
+	var previous byte
+
+	// scan the input for a matched set of {...}
+	// json.Unmarshal will handle syntax checking.
+	for {
+		_, err := rdr.Read(bval)
+		if err != nil {
+			if err == io.EOF && inJson && parenCnt > 0 {
+				return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb))
+			}
+			return &jb, err
+		}
+		switch bval[0] {
+		case '{':
+			if !inQuote {
+				parenCnt++
+				inJson = true
+			}
+		case '}':
+			if !inQuote {
+				parenCnt--
+			}
+			if parenCnt < 0 {
+				return nil, fmt.Errorf("closing } without opening {: %s", string(jb))
+			}
+		case '"':
+			if inQuote {
+				if previous == '\\' {
+					break
+				}
+				inQuote = false
+			} else {
+				inQuote = true
+			}
+		case '\n', '\r', '\t', ' ':
+			if !inQuote {
+				continue
+			}
+		}
+		if inJson {
+			jb = append(jb, bval[0])
+			if parenCnt == 0 {
+				break
+			}
+		}
+		previous = bval[0]
+	}
+
+	return &jb, nil
+}
+
+// ------------------------------- JSON Reader handler via Map values  -----------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for input.
+var jhandlerPollInterval = time.Duration(1e6)
+
+// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader().
+// This avoids treating one or other as a special case and discussing the underlying stdlib logic.
+
+// Bulk process JSON using handlers that process a Map value.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader  processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapJsonReader(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// Bulk process JSON using handlers that process a Map value and the raw JSON.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapJsonReaderRaw(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}

+ 668 - 0
vendor/github.com/clbanning/mxj/keyvalues.go

@@ -0,0 +1,668 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+//	keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters.
+
+package mxj
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ----------------------------- get everything FOR a single key -------------------------
+
+const (
+	minArraySize = 32
+)
+
+var defaultArraySize int = minArraySize
+
+// SetArraySize adjust the buffers for expected number of values to return from ValuesForKey() and ValuesForPath().
+// This can have the effect of significantly reducing memory allocation-copy functions for large data sets.
+// Returns the initial buffer size.
+func SetArraySize(size int) int {
+	if size > minArraySize {
+		defaultArraySize = size
+	} else {
+		defaultArraySize = minArraySize
+	}
+	return defaultArraySize
+}
+
+// ValuesForKey return all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match.
+// On error, the returned slice is 'nil'. NOTE: 'key' can be wildcard, "*".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - If the 'key' refers to a list, then "key:value" could select a list member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion criteria - e.g., "!author:William T. Gaddis".
+//             - If val contains ":" symbol, use SetFieldSeparator to an unused symbol, perhaps "|".
+func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ret := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	hasKey(m, key, &ret, &cnt, subKeyMap)
+	return ret[:cnt], nil
+}
+
+var KeyNotExistError = errors.New("Key does not exist")
+
+// ValueForKey is a wrapper on ValuesForKey.  It returns the first member of []interface{}, if any.
+// If there is no value, "nil, nil" is returned.
+func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) {
+	vals, err := mv.ValuesForKey(key, subkeys...)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, KeyNotExistError
+	}
+	return vals[0], nil
+}
+
+// hasKey - if the map 'key' exists append it to array
+//          if it doesn't do nothing except scan array and map values
+func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) {
+	// func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		// see if the current value is of interest
+		if v, ok := vv[key]; ok {
+			switch v.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(v, subkeys) {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			case []interface{}:
+				for _, av := range v.([]interface{}) {
+					if hasSubKeys(av, subkeys) {
+						*ret = append(*ret, av)
+						*cnt++
+					}
+				}
+			default:
+				if len(subkeys) == 0 {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			}
+		}
+
+		// wildcard case
+		if key == "*" {
+			for _, v := range vv {
+				switch v.(type) {
+				case map[string]interface{}:
+					if hasSubKeys(v, subkeys) {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				case []interface{}:
+					for _, av := range v.([]interface{}) {
+						if hasSubKeys(av, subkeys) {
+							*ret = append(*ret, av)
+							*cnt++
+						}
+					}
+				default:
+					if len(subkeys) == 0 {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				}
+			}
+		}
+
+		// scan the rest
+		for _, v := range vv {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	case []interface{}:
+		for _, v := range iv.([]interface{}) {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	}
+}
+
+// -----------------------  get everything for a node in the Map ---------------------------
+
+// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.)
+// 2014.04.28 - implementation note.
+// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion
+// of wildcards and unindexed arrays.  Embedding such logic into valuesForKeyPath() would have made the
+// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead.
+
+// ValuesForPath retrieves all values for a path from the Map.  If len(returned_values) == 0, then no match.
+// On error, the returned array is 'nil'.
+//   'path' is a dot-separated path of key values.
+//          - If a node in the path is '*', then everything beyond is walked.
+//          - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" -
+//            even "*[2].*[0].field".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - If the 'path' refers to a list, then "tag:value" would return member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion criteria - e.g., "!author:William T. Gaddis".
+//             - If val contains ":" symbol, use SetFieldSeparator to an unused symbol, perhaps "|".
+func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	// If there are no array indexes in path, use legacy ValuesForPath() logic.
+	if strings.Index(path, "[") < 0 {
+		return mv.oldValuesForPath(path, subkeys...)
+	}
+
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys, kerr := parsePath(path)
+	if kerr != nil {
+		return nil, kerr
+	}
+
+	vals, verr := valuesForArray(keys, mv)
+	if verr != nil {
+		return nil, verr // Vals may be nil, but return empty array.
+	}
+
+	// Need to handle subkeys ... only return members of vals that satisfy conditions.
+	retvals := make([]interface{}, 0)
+	for _, v := range vals {
+		if hasSubKeys(v, subKeyMap) {
+			retvals = append(retvals, v)
+		}
+	}
+	return retvals, nil
+}
+
+func valuesForArray(keys []*key, m Map) ([]interface{}, error) {
+	var tmppath string
+	var haveFirst bool
+	var vals []interface{}
+	var verr error
+
+	lastkey := len(keys) - 1
+	for i := 0; i <= lastkey; i++ {
+		if !haveFirst {
+			tmppath = keys[i].name
+			haveFirst = true
+		} else {
+			tmppath += "." + keys[i].name
+		}
+
+		// Look-ahead: explode wildcards and unindexed arrays.
+		// Need to handle un-indexed list recursively:
+		// e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]".
+		// Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ...
+		if !keys[i].isArray && i < lastkey && keys[i+1].isArray {
+			// Can't pass subkeys because we may not be at literal end of path.
+			vv, vverr := m.oldValuesForPath(tmppath)
+			if vverr != nil {
+				return nil, vverr
+			}
+			for _, v := range vv {
+				// See if we can walk the value.
+				am, ok := v.(map[string]interface{})
+				if !ok {
+					continue
+				}
+				// Work the backend.
+				nvals, nvalserr := valuesForArray(keys[i+1:], Map(am))
+				if nvalserr != nil {
+					return nil, nvalserr
+				}
+				vals = append(vals, nvals...)
+			}
+			break // have recursed the whole path - return
+		}
+
+		if keys[i].isArray || i == lastkey {
+			// Don't pass subkeys because may not be at literal end of path.
+			vals, verr = m.oldValuesForPath(tmppath)
+		} else {
+			continue
+		}
+		if verr != nil {
+			return nil, verr
+		}
+
+		if i == lastkey && !keys[i].isArray {
+			break
+		}
+
+		// Now we're looking at an array - supposedly.
+		// Is index in range of vals?
+		if len(vals) <= keys[i].position {
+			vals = nil
+			break
+		}
+
+		// Return the array member of interest, if at end of path.
+		if i == lastkey {
+			vals = vals[keys[i].position:(keys[i].position + 1)]
+			break
+		}
+
+		// Extract the array member of interest.
+		am := vals[keys[i].position:(keys[i].position + 1)]
+
+		// must be a map[string]interface{} value so we can keep walking the path
+		amm, ok := am[0].(map[string]interface{})
+		if !ok {
+			vals = nil
+			break
+		}
+
+		m = Map(amm)
+		haveFirst = false
+	}
+
+	return vals, nil
+}
+
+type key struct {
+	name     string
+	isArray  bool
+	position int
+}
+
+func parsePath(s string) ([]*key, error) {
+	keys := strings.Split(s, ".")
+
+	ret := make([]*key, 0)
+
+	for i := 0; i < len(keys); i++ {
+		if keys[i] == "" {
+			continue
+		}
+
+		newkey := new(key)
+		if strings.Index(keys[i], "[") < 0 {
+			newkey.name = keys[i]
+			ret = append(ret, newkey)
+			continue
+		}
+
+		p := strings.Split(keys[i], "[")
+		newkey.name = p[0]
+		p = strings.Split(p[1], "]")
+		if p[0] == "" { // no right bracket
+			return nil, fmt.Errorf("no right bracket on key index: %s", keys[i])
+		}
+		// convert p[0] to a int value
+		pos, nerr := strconv.ParseInt(p[0], 10, 32)
+		if nerr != nil {
+			return nil, fmt.Errorf("cannot convert index to int value: %s", p[0])
+		}
+		newkey.position = int(pos)
+		newkey.isArray = true
+		ret = append(ret, newkey)
+	}
+
+	return ret, nil
+}
+
+// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'.
+func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys := strings.Split(path, ".")
+	if keys[len(keys)-1] == "" {
+		keys = keys[:len(keys)-1]
+	}
+	ivals := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap)
+	return ivals[:cnt], nil
+}
+
+func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) {
+	lenKeys := len(keys)
+
+	// load 'm' values into 'ret'
+	// expand any lists
+	if lenKeys == 0 {
+		switch m.(type) {
+		case map[string]interface{}:
+			if subkeys != nil {
+				if ok := hasSubKeys(m, subkeys); !ok {
+					return
+				}
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		case []interface{}:
+			for i, v := range m.([]interface{}) {
+				if subkeys != nil {
+					if ok := hasSubKeys(v, subkeys); !ok {
+						continue // only load list members with subkeys
+					}
+				}
+				*ret = append(*ret, (m.([]interface{}))[i])
+				*cnt++
+			}
+		default:
+			if subkeys != nil {
+				return // must be map[string]interface{} if there are subkeys
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		}
+		return
+	}
+
+	// key of interest
+	key := keys[0]
+	switch key {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				default:
+					// valuesForKeyPath(ret, v, keys[1:], subkeys)
+					valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[key]; ok {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[key]; ok {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				}
+			}
+		}
+	}
+}
+
+// hasSubKeys() - interface{} equality works for string, float64, bool
+// 'v' must be a map[string]interface{} value to have subkeys
+// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard.
+func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool {
+	if len(subkeys) == 0 {
+		return true
+	}
+
+	switch v.(type) {
+	case map[string]interface{}:
+		// do all subKey name:value pairs match?
+		mv := v.(map[string]interface{})
+		for skey, sval := range subkeys {
+			isNotKey := false
+			if skey[:1] == "!" { // a NOT-key
+				skey = skey[1:]
+				isNotKey = true
+			}
+			vv, ok := mv[skey]
+			if !ok { // key doesn't exist
+				if isNotKey { // key not there, but that's what we want
+					if kv, ok := sval.(string); ok && kv == "*" {
+						continue
+					}
+				}
+				return false
+			}
+			// wildcard check
+			if kv, ok := sval.(string); ok && kv == "*" {
+				if isNotKey { // key is there, and we don't want it
+					return false
+				}
+				continue
+			}
+			switch sval.(type) {
+			case string:
+				if s, ok := vv.(string); ok && s == sval.(string) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case bool:
+				if b, ok := vv.(bool); ok && b == sval.(bool) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case float64:
+				if f, ok := vv.(float64); ok && f == sval.(float64) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			}
+			// key there but didn't match subkey value
+			if isNotKey { // that's what we want
+				continue
+			}
+			return false
+		}
+		// all subkeys matched
+		return true
+	}
+
+	// not a map[string]interface{} value, can't have subkeys
+	return false
+}
+
+// Generate map of key:value entries as map[string]string.
+//	'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'.
+//	If len(kv) == 0, the return is (nil, nil).
+func getSubKeyMap(kv ...string) (map[string]interface{}, error) {
+	if len(kv) == 0 {
+		return nil, nil
+	}
+	m := make(map[string]interface{}, 0)
+	for _, v := range kv {
+		vv := strings.Split(v, fieldSep)
+		switch len(vv) {
+		case 2:
+			m[vv[0]] = interface{}(vv[1])
+		case 3:
+			switch vv[2] {
+			case "string", "char", "text":
+				m[vv[0]] = interface{}(vv[1])
+			case "bool", "boolean":
+				// ParseBool treats "1"==true & "0"==false
+				b, err := strconv.ParseBool(vv[1])
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(b)
+			case "float", "float64", "num", "number", "numeric":
+				f, err := strconv.ParseFloat(vv[1], 64)
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(f)
+			default:
+				return nil, fmt.Errorf("unknown subkey conversion spec: %s", v)
+			}
+		default:
+			return nil, fmt.Errorf("unknown subkey spec: %s", v)
+		}
+	}
+	return m, nil
+}
+
+// -------------------------------  END of valuesFor ... ----------------------------
+
+// ----------------------- locate where a key value is in the tree -------------------
+
+//----------------------------- find all paths to a key --------------------------------
+
+// PathsForKey returns all paths through Map, 'mv', (in dot-notation) that terminate with the specified key.
+// Results can be used with ValuesForPath.
+func (mv Map) PathsForKey(key string) []string {
+	m := map[string]interface{}(mv)
+	breadbasket := make(map[string]bool, 0)
+	breadcrumbs := ""
+
+	hasKeyPath(breadcrumbs, m, key, breadbasket)
+	if len(breadbasket) == 0 {
+		return nil
+	}
+
+	// unpack map keys to return
+	res := make([]string, len(breadbasket))
+	var i int
+	for k := range breadbasket {
+		res[i] = k
+		i++
+	}
+
+	return res
+}
+
+// PathForKeyShortest extracts the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'..
+// Paths are strings using dot-notation.
+func (mv Map) PathForKeyShortest(key string) string {
+	paths := mv.PathsForKey(key)
+
+	lp := len(paths)
+	if lp == 0 {
+		return ""
+	}
+	if lp == 1 {
+		return paths[0]
+	}
+
+	shortest := paths[0]
+	shortestLen := len(strings.Split(shortest, "."))
+
+	for i := 1; i < len(paths); i++ {
+		vlen := len(strings.Split(paths[i], "."))
+		if vlen < shortestLen {
+			shortest = paths[i]
+			shortestLen = vlen
+		}
+	}
+
+	return shortest
+}
+
+// hasKeyPath - if the map 'key' exists append it to KeyPath.path and increment KeyPath.depth
+// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'.
+func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		if _, ok := vv[key]; ok {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = key
+			} else {
+				nbc = crumbs + "." + key
+			}
+			basket[nbc] = true
+		}
+		// walk on down the path, key could occur again at deeper node
+		for k, v := range vv {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = k
+			} else {
+				nbc = crumbs + "." + k
+			}
+			hasKeyPath(nbc, v, key, basket)
+		}
+	case []interface{}:
+		// crumb-trail doesn't change, pass it on
+		for _, v := range iv.([]interface{}) {
+			hasKeyPath(crumbs, v, key, basket)
+		}
+	}
+}
+
+var PathNotExistError = errors.New("Path does not exist")
+
+// ValueForPath wraps ValuesFor Path and returns the first value returned.
+// If no value is found it returns 'nil' and PathNotExistError.
+func (mv Map) ValueForPath(path string) (interface{}, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, PathNotExistError
+	}
+	return vals[0], nil
+}
+
+// ValuesForPathString returns the first found value for the path as a string.
+func (mv Map) ValueForPathString(path string) (string, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return "", err
+	}
+	if len(vals) == 0 {
+		return "", errors.New("ValueForPath: path not found")
+	}
+	val := vals[0]
+	return fmt.Sprintf("%v", val), nil
+}
+
+// ValueOrEmptyForPathString returns the first found value for the path as a string.
+// If the path is not found then it returns an empty string.
+func (mv Map) ValueOrEmptyForPathString(path string) string {
+	str, _ := mv.ValueForPathString(path)
+	return str
+}

+ 112 - 0
vendor/github.com/clbanning/mxj/leafnode.go

@@ -0,0 +1,112 @@
+package mxj
+
+// leafnode.go - return leaf nodes with paths and values for the Map
+// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw
+
+import (
+	"strconv"
+	"strings"
+)
+
const (
	// NoAttributes is a convenience flag for LeafNodes/LeafPaths/LeafValues:
	// passing it suppresses LeafNode values that are attributes.
	NoAttributes = true // suppress LeafNode values that are attributes
)

// LeafNode - a terminal path value in a Map.
// For XML Map values it represents an attribute or simple element value - of type
// string unless Map was created using Cast flag. For JSON Map values it represents
// a string, numeric, boolean, or null value.
type LeafNode struct {
	Path  string      // a dot-notation representation of the path with array subscripting
	Value interface{} // the value at the path termination
}
+
+// LeafNodes - returns an array of all LeafNode values for the Map.
+// The option no_attr argument suppresses attribute values (keys with prepended hyphen, '-')
+// as well as the "#text" key for the associated simple element value.
+//
+// PrependAttrWithHypen(false) will result in attributes having .attr-name as 
+// terminal node in 'path' while the path for the element value, itself, will be 
+// the base path w/o "#text". 
+//
+// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax
+// rather than "[N]" syntax.
+func (mv Map) LeafNodes(no_attr ...bool) []LeafNode {
+	var a bool
+	if len(no_attr) == 1 {
+		a = no_attr[0]
+	}
+
+	l := make([]LeafNode, 0)
+	getLeafNodes("", "", map[string]interface{}(mv), &l, a)
+	return l
+}
+
// getLeafNodes recursively walks 'mv', appending a LeafNode to 'l' for every
// terminal (non-map, non-slice) value.  'path' is the dot-notation trail built
// so far and 'node' is the key (or "[N]" subscript) currently being visited.
// When 'noattr' is true, attribute keys (attrPrefix-prefixed) and the "#text"
// key are excluded.
func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) {
	// if stripping attributes, then also strip "#text" key from the path;
	// a "[N]" subscript is appended without a "." separator.
	// NOTE: on the initial call node == "" - node[:1] is safe because
	// path == "" short-circuits the condition.
	if !noattr || node != "#text" {
		if path != "" && node[:1] != "[" {
			path += "."
		}
		path += node
	}
	switch mv.(type) {
	case map[string]interface{}:
		for k, v := range mv.(map[string]interface{}) {
			// if noattr && k[:1] == "-" {
			// skip attribute keys entirely when noattr is set
			if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
				continue
			}
			getLeafNodes(path, k, v, l, noattr)
		}
	case []interface{}:
		// list members: subscript style is controlled by LeafUseDotNotation
		for i, v := range mv.([]interface{}) {
			if useDotNotation {
				getLeafNodes(path, strconv.Itoa(i), v, l, noattr)
			} else {
				getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr)
			}
		}
	default:
		// can't walk any further, so create leaf
		n := LeafNode{path, mv}
		*l = append(*l, n)
	}
}
+
+// LeafPaths - all paths that terminate in LeafNode values.
+func (mv Map) LeafPaths(no_attr ...bool) []string {
+	ln := mv.LeafNodes()
+	ss := make([]string, len(ln))
+	for i := 0; i < len(ln); i++ {
+		ss[i] = ln[i].Path
+	}
+	return ss
+}
+
+// LeafValues - all terminal values in the Map.
+func (mv Map) LeafValues(no_attr ...bool) []interface{} {
+	ln := mv.LeafNodes()
+	vv := make([]interface{}, len(ln))
+	for i := 0; i < len(ln); i++ {
+		vv[i] = ln[i].Value
+	}
+	return vv
+}
+
// ====================== utilities ======================

// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I
var useDotNotation bool

// LeafUseDotNotation sets a flag that list members in LeafNode paths
// should be identified using ".N" syntax rather than the default "[N]"
// syntax.  Called with no arguments it toggles the flag on/off; otherwise
// the first argument sets the flag 'true'/'false'.
func LeafUseDotNotation(b ...bool) {
	if len(b) > 0 {
		useDotNotation = b[0]
		return
	}
	useDotNotation = !useDotNotation
}

+ 86 - 0
vendor/github.com/clbanning/mxj/misc.go

@@ -0,0 +1,86 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// misc.go - mimic functions (+others) called out in:
+//          https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ
+// Primarily these methods let you retrieve XML structure information.
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Return the root element of the Map. If there is not a single key in Map,
+// then an error is returned.
+func (mv Map) Root() (string, error) {
+	mm := map[string]interface{}(mv)
+	if len(mm) != 1 {
+		return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm))
+	}
+	for k, _ := range mm {
+		return k, nil
+	}
+	return "", nil
+}
+
+// If the path is an element with sub-elements, return a list of the sub-element
+// keys.  (The list is alphabeticly sorted.)  NOTE: Map keys that are prefixed with
+// '-', a hyphen, are considered attributes; see m.Attributes(path).
+func (mv Map) Elements(path string) ([]string, error) {
+	e, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch e.(type) {
+	case map[string]interface{}:
+		ee := e.(map[string]interface{})
+		elems := make([]string, len(ee))
+		var i int
+		for k, _ := range ee {
+			if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
+				continue // skip attributes
+			}
+			elems[i] = k
+			i++
+		}
+		elems = elems[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(elems)
+		return elems, nil
+	}
+	return nil, fmt.Errorf("no elements for path: %s", path)
+}
+
+// If the path is an element with attributes, return a list of the attribute
+// keys.  (The list is alphabeticly sorted.)  NOTE: Map keys that are not prefixed with
+// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the
+// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then
+// there are no identifiable attributes.
+func (mv Map) Attributes(path string) ([]string, error) {
+	a, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch a.(type) {
+	case map[string]interface{}:
+		aa := a.(map[string]interface{})
+		attrs := make([]string, len(aa))
+		var i int
+		for k, _ := range aa {
+			if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 {
+				continue // skip non-attributes
+			}
+			attrs[i] = k[len(attrPrefix):]
+			i++
+		}
+		attrs = attrs[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(attrs)
+		return attrs, nil
+	}
+	return nil, fmt.Errorf("no attributes for path: %s", path)
+}

+ 128 - 0
vendor/github.com/clbanning/mxj/mxj.go

@@ -0,0 +1,128 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+)
+
const (
	Cast         = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast)
	SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding)
)

// Map is the package's core type: a map[string]interface{} with the
// XML/JSON helper methods attached.
type Map map[string]interface{}

// New allocates an empty Map.
func New() Map {
	return Map(make(map[string]interface{}))
}

// Old casts a Map back to a plain map[string]interface{}.
func (mv Map) Old() map[string]interface{} {
	return map[string]interface{}(mv)
}
+
+// Return a copy of mv as a newly allocated Map.  If the Map only contains string,
+// numeric, map[string]interface{}, and []interface{} values, then it can be thought
+// of as a "deep copy."  Copying a structure (or structure reference) value is subject
+// to the noted restrictions.
+//	NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags
+//	      then only public fields of the structure are in the new Map - and with
+//	      keys that conform to any encoding tag instructions. The structure itself will
+//	      be represented as a map[string]interface{} value.
+func (mv Map) Copy() (Map, error) {
+	// this is the poor-man's deep copy
+	// not efficient, but it works
+	j, jerr := mv.Json()
+	// must handle, we don't know how mv got built
+	if jerr != nil {
+		return nil, jerr
+	}
+	return NewMapJson(j)
+}
+
// --------------- StringIndent ... from x2j.WriteMap -------------

// StringIndent pretty-prints a Map, including value type information.
// 'offset' is the initial indentation count (default 0).
func (mv Map) StringIndent(offset ...int) string {
	return writeMap(map[string]interface{}(mv), true, true, offset...)
}

// StringIndentNoTypeInfo pretty-prints a Map without the value type
// information - just key:value entries.
func (mv Map) StringIndentNoTypeInfo(offset ...int) string {
	return writeMap(map[string]interface{}(mv), false, true, offset...)
}
+
// writeMap - dumps the map[string]interface{} for examination.
// 'typeInfo' causes value type to be printed.
// 'root' is true only at the top level; it suppresses the newline that
// otherwise precedes each map entry.
//	'offset' is initial indentation count; typically: Write(m).
func writeMap(m interface{}, typeInfo, root bool, offset ...int) string {
	var indent int
	if len(offset) == 1 {
		indent = offset[0]
	}

	var s string
	switch v := m.(type) {
	case []interface{}:
		if typeInfo {
			s += "[[]interface{}]"
		}
		for _, member := range v {
			s += "\n"
			for i := 0; i < indent; i++ {
				s += "  "
			}
			s += writeMap(member, typeInfo, false, indent+1)
		}
	case map[string]interface{}:
		// render entries sorted by key so the output is deterministic
		list := make([][2]string, len(v))
		var n int
		for k, val := range v {
			list[n][0] = k
			list[n][1] = writeMap(val, typeInfo, false, indent+1)
			n++
		}
		sort.Sort(mapList(list))
		for _, entry := range list {
			if root {
				root = false
			} else {
				s += "\n"
			}
			for i := 0; i < indent; i++ {
				s += "  "
			}
			s += entry[0] + " : " + entry[1]
		}
	default:
		if typeInfo {
			s += fmt.Sprintf("[%T] %+v", m, m)
		} else {
			s += fmt.Sprintf("%+v", m)
		}
	}
	return s
}

// ======================== utility ===============

// mapList sorts [key, renderedValue] pairs by key for writeMap.
type mapList [][2]string

func (ml mapList) Len() int {
	return len(ml)
}

func (ml mapList) Swap(i, j int) {
	ml[i], ml[j] = ml[j], ml[i]
}

// Less uses strict '<': sort.Interface requires a strict ordering.
// (BUG FIX: the original '<=' violated that contract; it was latent only
// because map keys - and hence list keys - are unique.)
func (ml mapList) Less(i, j int) bool {
	return ml[i][0] < ml[j][0]
}

+ 184 - 0
vendor/github.com/clbanning/mxj/newmap.go

@@ -0,0 +1,184 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014, 2018 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// remap.go - build a new Map from the current Map based on keyOld:keyNew mappings
+//            keys can use dot-notation, keyOld can use wildcard, '*'
+//
+// Computational strategy -
+// Using the key path - []string - traverse a new map[string]interface{} and
+// insert the oldVal as the newVal when we arrive at the end of the path.
+// If the type at the end is nil, then that is newVal
+// If the type at the end is a singleton (string, float64, bool) an array is created.
+// If the type at the end is an array, newVal is just appended.
+// If the type at the end is a map, it is inserted if possible or the map value
+//    is converted into an array if necessary.
+
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// (Map)NewMap - create a new Map from data in the current Map.
+//	'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey'
+//	should be the value for 'newKey' in the returned Map.
+//		- 'oldKey' supports dot-notation as described for (Map)ValuesForPath()
+//		- 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays
+//		- "oldKey" is shorthand for the keypair value "oldKey:oldKey"
+//		- "oldKey:" and ":newKey" are invalid keypair values
+//		- if 'oldKey' does not exist in the current Map, it is not written to the new Map.
+//		  "null" is not supported unless it is the current Map.
+//		- see newmap_test.go for several syntax examples
+// 	- mv.NewMap() == mxj.New()
+//
+//	NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc.
+func (mv Map) NewMap(keypairs ...string) (Map, error) {
+	n := make(map[string]interface{}, 0)
+	if len(keypairs) == 0 {
+		return n, nil
+	}
+
+	// loop through the pairs
+	var oldKey, newKey string
+	var path []string
+	for _, v := range keypairs {
+		if len(v) == 0 {
+			continue // just skip over empty keypair arguments
+		}
+
+		// initialize oldKey, newKey and check
+		vv := strings.Split(v, ":")
+		if len(vv) > 2 {
+			return n, errors.New("oldKey:newKey keypair value not valid - " + v)
+		}
+		if len(vv) == 1 {
+			oldKey, newKey = vv[0], vv[0]
+		} else {
+			oldKey, newKey = vv[0], vv[1]
+		}
+		strings.TrimSpace(oldKey)
+		strings.TrimSpace(newKey)
+		if i := strings.Index(newKey, "*"); i > -1 {
+			return n, errors.New("newKey value cannot contain wildcard character - " + v)
+		}
+		if i := strings.Index(newKey, "["); i > -1 {
+			return n, errors.New("newKey value cannot contain indexed arrays - " + v)
+		}
+		if oldKey == "" || newKey == "" {
+			return n, errors.New("oldKey or newKey is not specified - " + v)
+		}
+
+		// get oldKey value
+		oldVal, err := mv.ValuesForPath(oldKey)
+		if err != nil {
+			return n, err
+		}
+		if len(oldVal) == 0 {
+			continue // oldKey has no value, may not exist in mv
+		}
+
+		// break down path
+		path = strings.Split(newKey, ".")
+		if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec
+			path = path[:len(path)-1]
+		}
+
+		addNewVal(&n, path, oldVal)
+	}
+
+	return n, nil
+}
+
// addNewVal navigates 'n' to the end of 'path' - creating intermediate maps as
// needed - and inserts 'val' there.
//   - a single-element 'val' is inserted as a scalar; otherwise as an array
//   - a nil terminal slot receives newVal directly
//   - an existing array terminal has newVal appended
//   - any other existing terminal value is converted to a two-element array
func addNewVal(n *map[string]interface{}, path []string, val []interface{}) {
	// newVal - either singleton or array
	var newVal interface{}
	if len(val) == 1 {
		newVal = val[0] // is type interface{}
	} else {
		newVal = interface{}(val)
	}

	// walk to the position of interest, create it if necessary
	m := (*n)           // initialize map walker
	var k string        // key for m
	lp := len(path) - 1 // when to stop looking
	for i := 0; i < len(path); i++ {
		k = path[i]
		if i == lp {
			break
		}
		var nm map[string]interface{} // holds position of next-map
		switch m[k].(type) {
		case nil: // need a map for next node in path, so go there
			nm = make(map[string]interface{}, 0)
			m[k] = interface{}(nm)
			m = m[k].(map[string]interface{})
		case map[string]interface{}:
			// OK - got somewhere to walk to, go there
			m = m[k].(map[string]interface{})
		case []interface{}:
			// add a map and nm points to new map unless there's already
			// a map in the array, then nm points there
			// The placement of the next value in the array is dependent
			// on the sequence of members - could land on a map or a nil
			// value first.  TODO: how to test this.
			a := make([]interface{}, 0)
			var foundmap bool
			for _, vv := range m[k].([]interface{}) {
				switch vv.(type) {
				case nil: // doesn't appear that this occurs, need a test case
					if foundmap { // use the first one in array
						a = append(a, vv)
						continue
					}
					nm = make(map[string]interface{}, 0)
					a = append(a, interface{}(nm))
					foundmap = true
				case map[string]interface{}:
					if foundmap { // use the first one in array
						a = append(a, vv)
						continue
					}
					nm = vv.(map[string]interface{})
					a = append(a, vv)
					foundmap = true
				default:
					a = append(a, vv)
				}
			}
			// no map found in array
			if !foundmap {
				nm = make(map[string]interface{}, 0)
				a = append(a, interface{}(nm))
			}
			m[k] = interface{}(a) // must insert in map
			m = nm
		default: // it's a string, float, bool, etc.
			// scalar in the middle of the path: promote it to a two-member
			// array [scalar, newMap] and continue walking into the new map
			aa := make([]interface{}, 0)
			nm = make(map[string]interface{}, 0)
			aa = append(aa, m[k], nm)
			m[k] = interface{}(aa)
			m = nm
		}
	}

	// value is nil, array or a singleton of some kind
	// initially m.(type) == map[string]interface{}
	v := m[k]
	switch v.(type) {
	case nil: // initialized
		m[k] = newVal
	case []interface{}:
		a := m[k].([]interface{})
		a = append(a, newVal)
		m[k] = interface{}(a)
	default: // v exists:string, float64, bool, map[string]interface, etc.
		a := make([]interface{}, 0)
		a = append(a, v, newVal)
		m[k] = interface{}(a)
	}
}

+ 192 - 0
vendor/github.com/clbanning/mxj/readme.md

@@ -0,0 +1,192 @@
+<h2>mxj - to/from maps, XML and JSON</h2>
+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use mxj/x2j and mxj/j2x packages.
+
+<h4>Related Packages</h4>
+
+https://github.com/clbanning/checkxml provides functions for validating XML data.
+
+<h4>Refactor Encoder - 2020.05.01</h4>
+Issue #70 highlighted that encoding large maps does not scale well, since the original logic used string appends operations. Using bytes.Buffer results in linear scaling for very large XML docs. (Metrics based on MacBook Pro i7 w/ 16 GB.)
+
+	Nodes      m.XML() time
+	54809       12.53708ms
+	109780      32.403183ms
+	164678      59.826412ms
+	482598     109.358007ms
+
+<h4>Refactor Decoder - 2015.11.15</h4>
+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant.  I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that could be deployed on a Raspberry Pi.  Now the decoder is comparable to the stdlib JSON-to-map[string]interface{} decoder in terms of its additional processing overhead relative to decoding to a structure value.  As shown by:
+
+	BenchmarkNewMapXml-4         	  100000	     18043 ns/op
+	BenchmarkNewStructXml-4      	  100000	     14892 ns/op
+	BenchmarkNewMapJson-4        	  300000	      4633 ns/op
+	BenchmarkNewStructJson-4     	  300000	      3427 ns/op
+	BenchmarkNewMapXmlBooks-4    	   20000	     82850 ns/op
+	BenchmarkNewStructXmlBooks-4 	   20000	     67822 ns/op
+	BenchmarkNewMapJsonBooks-4   	  100000	     17222 ns/op
+	BenchmarkNewStructJsonBooks-4	  100000	     15309 ns/op
+
+<h4>Notices</h4>
+
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>]
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq()
+	            and mv.XmlSeq() / mv.XmlSeqIndent().
+	2015-05-20: New: mv.StringIndentNoTypeInfo().
+	            Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+	            mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+	2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+	            (NOTE: PreserveXmlList() is similar and will be here soon.)
+	2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+	2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.
+
+<h4>Basic Unmarshal XML to map[string]interface{}</h4>
+<pre>type Map map[string]interface{}</pre>
+
+Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v':
+<pre>mv := Map(v)</pre>
+
+Unmarshal / marshal XML as a `Map` value, 'mv':
+<pre>mv, err := NewMapXml(xmlValue) // unmarshal
+xmlValue, err := mv.Xml()      // marshal</pre>
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'mv':
+<pre>mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded</pre>
+
+Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`):
+<pre>err := mv.XmlWriter(xmlWriter)
+raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter</pre>
+   
+Also, for prettified output:
+<pre>xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)</pre>
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+<pre>err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))</pre>
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+<pre>mv, err := NewMapStruct(structVal)
+err := mv.Struct(structPointer)</pre>
+
+<h4>Extract / modify Map values</h4>
+To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv', then:
+<pre>paths := mv.PathsForKey(key)
+path := mv.PathForKeyShortest(key)
+values, err := mv.ValuesForKey(key, subkeys)
+values, err := mv.ValuesForPath(path, subkeys)
+count, err := mv.UpdateValuesForPath(newVal, path, subkeys)</pre>
+
+Get everything at once, irrespective of path depth:
+<pre>leafnodes := mv.LeafNodes()
+leafvalues := mv.LeafValues()</pre>
+
+A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML
+or JSON. (Note: keys can use dot-notation.)
+<pre>newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+newXml, err := newMap.Xml()   // for example
+newJson, err := newMap.Json() // ditto</pre>
+
+<h4>Usage</h4>
+
+The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj).
+
+Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions.
+
+<h4>XML parsing conventions</h4>
+
+Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved: 
+      - `<ns:key>something</ns.key>` parses to `map["ns:key"]interface{}{"something"}`
+      - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}`
+
+Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+<h4>XML encoding conventions</h4>
+
+   - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+     NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+           which, then, encode in JSON as `"tag":""` values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+<h4>Running "go test"</h4>
+
+Because there are no guarantees on the sequence map elements are retrieved, the tests have been 
+written for visual verification in most cases.  One advantage is that you can easily use the 
+output from running "go test" as examples of calling the various functions and methods.
+
+<h4>Motivation</h4>
+
+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values.  This is easily done using `json.Unmarshal` from the
+standard Go libraries.  Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages and parses it into a `map[string]interface{}` value and then reuse
+all the JSON-based code.  The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values.  As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary value.  Thus, everything was refactored into the mxj package.
+

+ 37 - 0
vendor/github.com/clbanning/mxj/remove.go

@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Removes the path.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+func remove(m interface{}, path string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	lastKey := lastKey(path)
+	delete(val, lastKey)
+
+	return nil
+}
+
// lastKey returns the final key of a dot-notation path.
// lastKey("a.b.c") returns "c".
func lastKey(path string) string {
	if i := strings.LastIndex(path, "."); i >= 0 {
		return path[i+1:]
	}
	return path
}
+
// parentPath returns the path without its final key.
// parentPath("a.b.c") returns "a.b"; a path with no dot returns "".
func parentPath(path string) string {
	if i := strings.LastIndex(path, "."); i >= 0 {
		return path[:i]
	}
	return ""
}

+ 61 - 0
vendor/github.com/clbanning/mxj/rename.go

@@ -0,0 +1,61 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps. 
+// It doesn't work for cases when the key is in a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	var v bool
+	var err error
+	if v, err = mv.Exists(path); err == nil && !v {
+		return errors.New("RenameKey: path not found: " + path)
+	} else if err != nil {
+		return err
+	}
+	if v, err = mv.Exists(parentPath(path) + "." + newName); err == nil && v {
+		return errors.New("RenameKey: key already exists: " + newName)
+	} else if err != nil {
+		return err
+	}
+
+	m := map[string]interface{}(mv)
+	return renameKey(m, path, newName)
+}
+
+func renameKey(m interface{}, path string, newName string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	oldName := lastKey(path)
+	val[newName] = val[oldName]
+	delete(val, oldName)
+
+	return nil
+}
+
// prevValueByPath returns the map that contains the last key in the path.
// For example: prevValueByPath({a:{b:{c: 3}}}, "a.b.c") returns {c: 3}.
// (Perf fix: the original scanned every map entry linearly to find the key;
// a direct map lookup is O(1) and behaviorally identical.)
func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) {
	keys := strings.Split(path, ".")

	if mValue, ok := m.(map[string]interface{}); ok {
		if value, found := mValue[keys[0]]; found {
			if len(keys) == 1 {
				return mValue, nil
			}
			// keep looking for the full path to the key
			return prevValueByPath(value, strings.Join(keys[1:], "."))
		}
	}
	return nil, errors.New("prevValueByPath: didn't find path – " + path)
}

+ 26 - 0
vendor/github.com/clbanning/mxj/set.go

@@ -0,0 +1,26 @@
+package mxj
+
+import (
+	"strings"
+)
+
+// Sets the value for the path
+func (mv Map) SetValueForPath(value interface{}, path string) error {
+	pathAry := strings.Split(path, ".")
+	parentPathAry := pathAry[0 : len(pathAry)-1]
+	parentPath := strings.Join(parentPathAry, ".")
+
+	val, err := mv.ValueForPath(parentPath)
+	if err != nil {
+		return err
+	}
+	if val == nil {
+		return nil // we just ignore the request if there's no val
+	}
+
+	key := pathAry[len(pathAry)-1]
+	cVal := val.(map[string]interface{})
+	cVal[key] = value
+
+	return nil
+}

+ 20 - 0
vendor/github.com/clbanning/mxj/setfieldsep.go

@@ -0,0 +1,20 @@
+package mxj
+
// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862
var fieldSep string = ":"

// SetFieldSeparator changes the default field separator, ":", used for the
// newVal argument in mv.UpdateValuesForPath and the optional 'subkey'
// arguments in mv.ValuesForKey and mv.ValuesForPath.
//
// E.g., if the newVal value is "http://blah/blah", setting the field separator
// to "|" allows the newVal specification "<key>|http://blah/blah" to parse
// properly.  Called with no argument or an empty string value, it restores
// the default, ":".
func SetFieldSeparator(s ...string) {
	if len(s) > 0 && s[0] != "" {
		fieldSep = s[0]
		return
	}
	fieldSep = ":" // the default
}

+ 29 - 0
vendor/github.com/clbanning/mxj/songtext.xml

@@ -0,0 +1,29 @@
+<msg mtype="alert" mpriority="1">
+	<text>help me!</text>
+	<song title="A Long Time" author="Mayer Hawthorne">
+		<verses>
+			<verse name="verse 1" no="1">
+				<line no="1">Henry was a renegade</line>
+				<line no="2">Didn't like to play it safe</line>
+				<line no="3">One component at a time</line>
+				<line no="4">There's got to be a better way</line>
+				<line no="5">Oh, people came from miles around</line>
+				<line no="6">Searching for a steady job</line>
+				<line no="7">Welcome to the Motor Town</line>
+				<line no="8">Booming like an atom bomb</line>
+			</verse>
+			<verse name="verse 2" no="2">
+				<line no="1">Oh, Henry was the end of the story</line>
+				<line no="2">Then everything went wrong</line>
+				<line no="3">And we'll return it to its former glory</line>
+				<line no="4">But it just takes so long</line>
+			</verse>
+		</verses>
+		<chorus>
+			<line no="1">It's going to take a long time</line>
+			<line no="2">It's going to take it, but we'll make it one day</line>
+			<line no="3">It's going to take a long time</line>
+			<line no="4">It's going to take it, but we'll make it one day</line>
+		</chorus>
+	</song>
+</msg>

+ 30 - 0
vendor/github.com/clbanning/mxj/strict.go

@@ -0,0 +1,30 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// strict.go actually addresses setting xml.Decoder attribute
+// values.  This'll let you parse non-standard XML.
+
+package mxj
+
+import (
+	"encoding/xml"
+)
+
+// CustomDecoder can be used to specify xml.Decoder attribute
+// values, e.g., Strict:false, to be used.  By default CustomDecoder
+// is nil.  If CustomeDecoder != nil, then mxj.XmlCharsetReader variable is
+// ignored and must be set as part of the CustomDecoder value, if needed.
+//	Usage:
+//		mxj.CustomDecoder = &xml.Decoder{Strict:false}
+var CustomDecoder *xml.Decoder
+
+// useCustomDecoder copy over public attributes from customDecoder
+func useCustomDecoder(d *xml.Decoder) {
+	d.Strict = CustomDecoder.Strict
+	d.AutoClose = CustomDecoder.AutoClose
+	d.Entity = CustomDecoder.Entity
+	d.CharsetReader = CustomDecoder.CharsetReader
+	d.DefaultSpace = CustomDecoder.DefaultSpace
+}
+

+ 54 - 0
vendor/github.com/clbanning/mxj/struct.go

@@ -0,0 +1,54 @@
+// Copyright 2012-2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+
+	// "github.com/fatih/structs"
+)
+
+// Create a new Map value from a structure.  Error returned if argument is not a structure.
+// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map
+// for handling of "structs" tags.
+
+// DEPRECATED - import github.com/fatih/structs and cast result of structs.Map to mxj.Map.
+//	import "github.com/fatih/structs"
+//	...
+//	   sm, err := structs.Map(<some struct>)
+//	   if err != nil {
+//	      // handle error
+//	   }
+//	   m := mxj.Map(sm)
+// Alernatively uncomment the old source and import in struct.go.
+func NewMapStruct(structVal interface{}) (Map, error) {
+	return nil, errors.New("deprecated - see package documentation")
+	/*
+		if !structs.IsStruct(structVal) {
+			return nil, errors.New("NewMapStruct() error: argument is not type Struct")
+		}
+		return structs.Map(structVal), nil
+	*/
+}
+
+// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned
+// if argument is not a pointer or if json.Unmarshal returns an error.
+//	json.Unmarshal structure encoding rules are followed to encode public structure fields.
+func (mv Map) Struct(structPtr interface{}) error {
+	// should check that we're getting a pointer.
+	if reflect.ValueOf(structPtr).Kind() != reflect.Ptr {
+		return errors.New("mv.Struct() error: argument is not type Ptr")
+	}
+
+	m := map[string]interface{}(mv)
+	j, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(j, structPtr)
+}

+ 258 - 0
vendor/github.com/clbanning/mxj/updatevalues.go

@@ -0,0 +1,258 @@
+// Copyright 2012-2014, 2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// updatevalues.go - modify a value based on path and possibly sub-keys
+// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values.
+
+package mxj
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Update value based on path and possible sub-key values.
+// A count of the number of values changed and any error are returned.
+// If the count == 0, then no path (and subkeys) matched.
+//	'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified
+//	             or a string value "key:value[:type]" where type is "bool" or "num" to cast the value.
+//	'path' is dot-notation list of keys to traverse; last key in path can be newVal key
+//	       NOTE: 'path' spec does not currently support indexed array references.
+//	'subkeys' are "key:value[:type]" entries that must match for path node
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an
+//	              exclusion critera - e.g., "!author:William T. Gaddis".
+//
+//	NOTES:
+//		1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value.
+//		2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key.
+//		3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to an unused symbol,
+//	      perhaps "|".
+func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) {
+	m := map[string]interface{}(mv)
+
+	// extract the subkeys
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// extract key and value from newVal
+	var key string
+	var val interface{}
+	switch newVal.(type) {
+	case map[string]interface{}, Map:
+		switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec)
+		case Map:
+			newVal = newVal.(Map).Old()
+		}
+		if len(newVal.(map[string]interface{})) != 1 {
+			return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal)
+		}
+		for key, val = range newVal.(map[string]interface{}) {
+		}
+	case string: // split it as a key:value pair
+		ss := strings.Split(newVal.(string), fieldSep)
+		n := len(ss)
+		if n < 2 || n > 3 {
+			return 0, fmt.Errorf("unknown newVal spec - %+v", newVal)
+		}
+		key = ss[0]
+		if n == 2 {
+			val = interface{}(ss[1])
+		} else if n == 3 {
+			switch ss[2] {
+			case "bool", "boolean":
+				nv, err := strconv.ParseBool(ss[1])
+				if err != nil {
+					return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal)
+				}
+				val = interface{}(nv)
+			case "num", "numeric", "float", "int":
+				nv, err := strconv.ParseFloat(ss[1], 64)
+				if err != nil {
+					return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal)
+				}
+				val = interface{}(nv)
+			default:
+				return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal)
+			}
+		}
+	default:
+		return 0, fmt.Errorf("invalid newVal type - %+v", newVal)
+	}
+
+	// parse path
+	keys := strings.Split(path, ".")
+
+	var count int
+	updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count)
+
+	return count, nil
+}
+
+// navigate the path
+func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) {
+	// ----- at end node: looking at possible node to get 'key' ----
+	if len(keys) == 1 {
+		updateValue(key, value, m, keys[0], subkeys, cnt)
+		return
+	}
+
+	// ----- here we are navigating the path thru the penultimate node --------
+	// key of interest is keys[0] - the next in the path
+	switch keys[0] {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				default:
+					updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[keys[0]]; ok {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[keys[0]]; ok {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				}
+			}
+		}
+	}
+}
+
+// change value if key and subkeys are present
+func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) {
+	// there are two possible options for the value of 'keys0': map[string]interface, []interface{}
+	// and 'key' is a key in the map or is a key in a map in a list.
+	switch m.(type) {
+	case map[string]interface{}: // gotta have the last key
+		if keys0 == "*" {
+			for k := range m.(map[string]interface{}) {
+				updateValue(key, value, m, k, subkeys, cnt)
+			}
+			return
+		}
+		endVal, _ := m.(map[string]interface{})[keys0]
+
+		// if newV key is the end of path, replace the value for path-end
+		// may be []interface{} - means replace just an entry w/ subkeys
+		// otherwise replace the keys0 value if subkeys are there
+		// NOTE: this will replace the subkeys, also
+		if key == keys0 {
+			switch endVal.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+				}
+			case []interface{}:
+				// without subkeys can't select list member to modify
+				// so key:value spec is it ...
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+					break
+				}
+				nv := make([]interface{}, 0)
+				var valmodified bool
+				for _, v := range endVal.([]interface{}) {
+					// check entry subkeys
+					if hasSubKeys(v, subkeys) {
+						// replace v with value
+						nv = append(nv, value)
+						valmodified = true
+						(*cnt)++
+						continue
+					}
+					nv = append(nv, v)
+				}
+				if valmodified {
+					(m.(map[string]interface{}))[keys0] = interface{}(nv)
+				}
+			default: // anything else is a strict replacement
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+				}
+			}
+			return
+		}
+
+		// so value is for an element of endVal
+		// if endVal is a map then 'key' must be there w/ subkeys
+		// if endVal is a list then 'key' must be in a list member w/ subkeys
+		switch endVal.(type) {
+		case map[string]interface{}:
+			if !hasSubKeys(endVal, subkeys) {
+				return
+			}
+			if _, ok := (endVal.(map[string]interface{}))[key]; ok {
+				(endVal.(map[string]interface{}))[key] = value
+				(*cnt)++
+			}
+		case []interface{}: // keys0 points to a list, check subkeys
+			for _, v := range endVal.([]interface{}) {
+				// got to be a map so we can replace value for 'key'
+				vv, vok := v.(map[string]interface{})
+				if !vok {
+					continue
+				}
+				if _, ok := vv[key]; !ok {
+					continue
+				}
+				if !hasSubKeys(vv, subkeys) {
+					continue
+				}
+				vv[key] = value
+				(*cnt)++
+			}
+		}
+	case []interface{}: // key may be in a list member
+		// don't need to handle keys0 == "*"; we're looking at everything, anyway.
+		for _, v := range m.([]interface{}) {
+			// only map values - we're looking for 'key'
+			mm, ok := v.(map[string]interface{})
+			if !ok {
+				continue
+			}
+			if _, ok := mm[key]; !ok {
+				continue
+			}
+			if !hasSubKeys(mm, subkeys) {
+				continue
+			}
+			mm[key] = value
+			(*cnt)++
+		}
+	}
+
+	// return
+}

+ 1324 - 0
vendor/github.com/clbanning/mxj/xml.go

@@ -0,0 +1,1324 @@
+// Copyright 2012-2016, 2018-2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xml.go - basically the core of X2j for map[string]interface{} values.
+//          NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter
+// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ------------------- NewMapXml & NewMapXmlReader ... -------------------------
+
+// If XmlCharsetReader != nil, it will be used to decode the XML, if required.
+// Note: if CustomDecoder != nil, then XmlCharsetReader is ignored;
+// set the CustomDecoder attribute instead.
+//   import (
+//	     charset "code.google.com/p/go-charset/charset"
+//	     github.com/clbanning/mxj
+//	 )
+//   ...
+//   mxj.XmlCharsetReader = charset.NewReader
+//   m, merr := mxj.NewMapXml(xmlValue)
+var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+// NewMapXml - convert a XML doc into a Map
+// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().)
+//	If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+//
+//	Converting XML to JSON is a simple as:
+//		...
+//		mapVal, merr := mxj.NewMapXml(xmlVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		jsonVal, jerr := mapVal.Json()
+//		if jerr != nil {
+//			// handle error
+//		}
+//
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlToMap(xmlVal, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlReaderToMap(xmlReader, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value and slice with the raw XML.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	   3. The 'raw' return value may be larger than the XML text value.
+//	   4. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   5. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   6. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb) // see code at EOF
+
+	m, err := xmlReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	if err != nil {
+		return nil, b, err
+	}
+
+	return m, b, nil
+}
+
+// xmlReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// xmlToMap - convert a XML doc into map[string]interface{} value
+func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// PrependAttrWithHyphen. Prepend attribute tags with a hyphen.
+// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.)
+//	Note:
+//		If 'false', unmarshaling and marshaling is not symmetric. Attributes will be
+//		marshal'd as <attr_tag>attr</attr_tag> and may be part of a list.
+func PrependAttrWithHyphen(v bool) {
+	if v {
+		attrPrefix = "-"
+		lenAttrPrefix = len(attrPrefix)
+		return
+	}
+	attrPrefix = ""
+	lenAttrPrefix = len(attrPrefix)
+}
+
+// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com.
+var includeTagSeqNum bool
+
+// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting
+// its position when parsed. This is of limited usefulness, since list values cannot
+// be tagged with "_seq" without changing their depth in the Map.
+// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what
+// you get.
+/*
+		<Obj c="la" x="dee" h="da">
+			<IntObj id="3"/>
+			<IntObj1 id="1"/>
+			<IntObj id="2"/>
+			<StrObj>hello</StrObj>
+		</Obj>
+
+	parses as:
+
+		{
+		Obj:{
+			"-c":"la",
+			"-h":"da",
+			"-x":"dee",
+			"intObj":[
+				{
+					"-id"="3",
+					"_seq":"0" // if mxj.Cast is passed, then: "_seq":0
+				},
+				{
+					"-id"="2",
+					"_seq":"2"
+				}],
+			"intObj1":{
+				"-id":"1",
+				"_seq":"1"
+				},
+			"StrObj":{
+				"#text":"hello", // simple element value gets "#text" tag
+				"_seq":"3"
+				}
+			}
+		}
+*/
+func IncludeTagSeqNum(b bool) {
+	includeTagSeqNum = b
+}
+
+// all keys will be "lower case"
+var lowerCase bool
+
+// Coerce all tag values to keys in lower case.  This is useful if you've got sources with variable
+// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec
+// in lower case.
+//	CoerceKeysToLower() will toggle the coercion flag true|false - on|off
+//	CoerceKeysToLower(true|false) will set the coercion flag on|off
+//
+//	NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as
+//	      the associated HandleXmlReader and HandleXmlReaderRaw.
+func CoerceKeysToLower(b ...bool) {
+	if len(b) == 0 {
+		lowerCase = !lowerCase
+	} else if len(b) == 1 {
+		lowerCase = b[0]
+	}
+}
+
+// 25jun16: Allow user to specify the "prefix" character for XML attribute key labels.
+// We do this by replacing '`' constant with attrPrefix var, replacing useHyphen with attrPrefix = "",
+// and adding a SetAttrPrefix(s string) function.
+
+var attrPrefix string = `-` // the default
+var lenAttrPrefix int = 1   // the default
+
+// SetAttrPrefix changes the default, "-", to the specified value, s.
+// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false).
+// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.)
+func SetAttrPrefix(s string) {
+	attrPrefix = s
+	lenAttrPrefix = len(attrPrefix)
+}
+
+// 18jan17: Allows user to specify if the map keys should be in snake case instead
+// of the default hyphenated notation.
+var snakeCaseKeys bool
+
+// CoerceKeysToSnakeCase changes the default, false, to the specified value, b.
+// Note: the attribute prefix will be a hyphen, '-', or what ever string value has
+// been specified using SetAttrPrefix.
+func CoerceKeysToSnakeCase(b ...bool) {
+	if len(b) == 0 {
+		snakeCaseKeys = !snakeCaseKeys
+	} else if len(b) == 1 {
+		snakeCaseKeys = b[0]
+	}
+}
+
+// 10jan19: use of pull request #57 should be conditional - legacy code assumes
+// numeric values are float64.
+var castToInt bool
+
+// CastValuesToInt tries to coerce numeric valus to int64 or uint64 instead of the
+// default float64. Repeated calls with no argument will toggle this on/off, or this
+// handling will be set with the value of 'b'.
+func CastValuesToInt(b ...bool) {
+	if len(b) == 0 {
+		castToInt = !castToInt
+	} else if len(b) == 1 {
+		castToInt = b[0]
+	}
+}
+
+// 05feb17: support processing XMPP streams (issue #36)
+var handleXMPPStreamTag bool
+
+// HandleXMPPStreamTag causes decoder to parse XMPP <stream:stream> elements.
+// If called with no argument, XMPP stream element handling is toggled on/off.
+// (See xmppStream_test.go for example.)
+//	If called with NewMapXml, NewMapXmlReader, New MapXmlReaderRaw the "stream"
+//	element will be  returned as:
+//		map["stream"]interface{}{map[-<attrs>]interface{}}.
+//	If called with NewMapSeq, NewMapSeqReader, NewMapSeqReaderRaw the "stream"
+//	element will be returned as:
+//		map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}}
+//		where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.)
+func HandleXMPPStreamTag(b ...bool) {
+	if len(b) == 0 {
+		handleXMPPStreamTag = !handleXMPPStreamTag
+	} else if len(b) == 1 {
+		handleXMPPStreamTag = b[0]
+	}
+}
+
+// 21jan18 - decode all values as map["#text":value] (issue #56)
+var decodeSimpleValuesAsMap bool
+
+// DecodeSimpleValuesAsMap forces all values to be decoded as map["#text":<value>].
+// If called with no argument, the decoding is toggled on/off.
+//
+// By default the NewMapXml functions decode simple values without attributes as
+// map[<tag>:<value>]. This function causes simple values without attributes to be
+// decoded the same as simple values with attributes - map[<tag>:map["#text":<value>]].
+func DecodeSimpleValuesAsMap(b ...bool) {
+	if len(b) == 0 {
+		decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap
+	} else if len(b) == 1 {
+		decodeSimpleValuesAsMap = b[0]
+	}
+}
+
+// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly.
+// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one.
+// We've removed the intermediate *node tree with the allocation and subsequent rescanning.
+func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
+	if lowerCase {
+		skey = strings.ToLower(skey)
+	}
+	if snakeCaseKeys {
+		skey = strings.Replace(skey, "-", "_", -1)
+	}
+
+	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
+	// Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value.
+	var n, na map[string]interface{}
+	var seq int // for includeTagSeqNum
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	//       to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	//       where we begin allocating map[string]interface{} values 'n' and 'na'.
+	if skey != "" {
+		n = make(map[string]interface{})  // old n
+		na = make(map[string]interface{}) // old n.nodes
+		if len(a) > 0 {
+			for _, v := range a {
+				if snakeCaseKeys {
+					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
+				}
+				var key string
+				key = attrPrefix + v.Name.Local
+				if lowerCase {
+					key = strings.ToLower(key)
+				}
+				na[key] = cast(v.Value, r, key)
+			}
+		}
+	}
+	// Return XMPP <stream:stream> message.
+	if handleXMPPStreamTag && skey == "stream" {
+		n[skey] = na
+		return n, nil
+	}
+
+	for {
+		t, err := p.Token()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
+			}
+			return nil, err
+		}
+		switch t.(type) {
+		case xml.StartElement:
+			tt := t.(xml.StartElement)
+
+			// First call to xmlToMapParser() doesn't pass xml.StartElement - the map key.
+			// So when the loop is first entered, the first token is the root tag along
+			// with any attributes, which we process here.
+			//
+			// Subsequent calls to xmlToMapParser() will pass in tag+attributes for
+			// processing before getting the next token which is the element value,
+			// which is done above.
+			if skey == "" {
+				return xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
+			}
+
+			// If not initializing the map, parse the element.
+			// len(nn) == 1, necessarily - it is just an 'n'.
+			nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
+			if err != nil {
+				return nil, err
+			}
+
+			// The nn map[string]interface{} value is a na[nn_key] value.
+			// We need to see if nn_key already exists - means we're parsing a list.
+			// This may require converting na[nn_key] value into []interface{} type.
+			// First, extract the key:val for the map - it's a singleton.
+			// Note:
+			// * if CoerceKeysToLower() called, then key will be lower case.
+			// * if CoerceKeysToSnakeCase() called, then key will be converted to snake case.
+			var key string
+			var val interface{}
+			for key, val = range nn {
+				break
+			}
+
+			// IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element.
+			// In theory, we don't need this if len(na) == 1. But, we don't know what might
+			// come next - we're only parsing forward.  So if you ask for 'includeTagSeqNum' you
+			// get it on every element. (Personally, I never liked this, but I added it on request
+			// and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!)
+			if includeTagSeqNum {
+				switch val.(type) {
+				case []interface{}:
+					// noop - There's no clean way to handle this w/o changing message structure.
+				case map[string]interface{}:
+					val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag
+					seq++
+				case interface{}: // a non-nil simple element: string, float64, bool
+					v := map[string]interface{}{"#text": val}
+					v["_seq"] = seq
+					seq++
+					val = v
+				}
+			}
+
+			// 'na' holding sub-elements of n.
+			// See if 'key' already exists.
+			// If 'key' exists, then this is a list, if not just add key:val to na.
+			if v, ok := na[key]; ok {
+				var a []interface{}
+				switch v.(type) {
+				case []interface{}:
+					a = v.([]interface{})
+				default: // anything else - note: v.(type) != nil
+					a = []interface{}{v}
+				}
+				a = append(a, val)
+				na[key] = a
+			} else {
+				na[key] = val // save it as a singleton
+			}
+		case xml.EndElement:
+			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
+			if len(n) == 0 {
+				// If len(na)==0 we have an empty element == "";
+				// it has no xml.Attr nor xml.CharData.
+				// Note: in original node-tree parser, val defaulted to "";
+				// so we always had the default if len(node.nodes) == 0.
+				if len(na) > 0 {
+					n[skey] = na
+				} else {
+					n[skey] = "" // empty element
+				}
+			}
+			return n, nil
+		case xml.CharData:
+			// clean up possible noise
+			tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
+			if len(tt) > 0 {
+				if len(na) > 0 || decodeSimpleValuesAsMap {
+					na["#text"] = cast(tt, r, "#text")
+				} else if skey != "" {
+					n[skey] = cast(tt, r, skey)
+				} else {
+					// per Adrian (http://www.adrianlungu.com/) catch stray text
+					// in decoder stream -
+					// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+					// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+					// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+					continue
+				}
+			}
+		default:
+			// noop
+		}
+	}
+}
+
+var castNanInf bool
+
+// Cast "Nan", "Inf", "-Inf" XML values to 'float64'.
+// By default, these values will be decoded as 'string'.
+func CastNanInf(b bool) {
+	castNanInf = b
+}
+
+// cast - try to cast string values to bool or float64
+// 't' is the tag key that can be checked for 'not-casting'
+func cast(s string, r bool, t string) interface{} {
+	if checkTagToSkip != nil && t != "" && checkTagToSkip(t) {
+		// call the check-function here with 't[0]'
+		// if 'true' return s
+		return s
+	}
+
+	if r {
+		// handle nan and inf
+		if !castNanInf {
+			switch strings.ToLower(s) {
+			case "nan", "inf", "-inf":
+				return s
+			}
+		}
+
+		// handle numeric strings ahead of boolean
+		if castToInt {
+			if f, err := strconv.ParseInt(s, 10, 64); err == nil {
+				return f
+			}
+			if f, err := strconv.ParseUint(s, 10, 64); err == nil {
+				return f
+			}
+		}
+
+		if castToFloat {
+			if f, err := strconv.ParseFloat(s, 64); err == nil {
+				return f
+			}
+		}
+
+		// ParseBool treats "1"==true & "0"==false, we've already scanned those
+		// values as float64. See if value has 't' or 'f' as initial screen to
+		// minimize calls to ParseBool; also, see if len(s) < 6.
+		if castToBool {
+			if len(s) > 0 && len(s) < 6 {
+				switch s[:1] {
+				case "t", "T", "f", "F":
+					if b, err := strconv.ParseBool(s); err == nil {
+						return b
+					}
+				}
+			}
+		}
+	}
+	return s
+}
+
+// pull request, #59
+var castToFloat = true
+
+// CastValuesToFloat can be used to skip casting to float64 when
+// "cast" argument is 'true' in NewMapXml, etc.
+// Default is true.
+func CastValuesToFloat(b bool) {
+	castToFloat = b
+}
+
+var castToBool = true
+
+// CastValuesToBool can be used to skip casting to bool when
+// "cast" argument is 'true' in NewMapXml, etc.
+// Default is true.
+func CastValuesToBool(b bool) {
+	castToBool = b
+}
+
+// checkTagToSkip - switch to address Issue #58
+
+var checkTagToSkip func(string) bool
+
+// SetCheckTagToSkipFunc registers function to test whether the value
+// for a tag should be cast to bool or float64 when "cast" argument is 'true'.
+// (Dot tag path notation is not supported.)
+// NOTE: key may be "#text" if it's a simple element with attributes
+//       or "decodeSimpleValuesAsMap == true".
+// NOTE: does not apply to NewMapXmlSeq... functions.
+func SetCheckTagToSkipFunc(fn func(string) bool) {
+	checkTagToSkip = fn
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------
+
+const (
+	DefaultRootTag = "doc"
+)
+
+var useGoXmlEmptyElemSyntax bool
+
+// XmlGoEmptyElemSyntax() - <tag ...></tag> rather than <tag .../>.
+//	Go's encoding/xml package marshals empty XML elements as <tag ...></tag>.  By default this package
+//	encodes empty elements as <tag .../>.  If you're marshaling Map values that include structures
+//	(which are passed to xml.Marshal for encoding), this will let you conform to the standard package.
+func XmlGoEmptyElemSyntax() {
+	useGoXmlEmptyElemSyntax = true
+}
+
+// XmlDefaultEmptyElemSyntax() - <tag .../> rather than <tag ...></tag>.
+// Return XML encoding for empty elements to the default package setting.
+// Reverses effect of XmlGoEmptyElemSyntax().
+func XmlDefaultEmptyElemSyntax() {
+	useGoXmlEmptyElemSyntax = false
+}
+
+// Encode a Map as XML.  The companion of NewMapXml().
+// The following rules apply.
+//    - The key label "#text" is treated as the value for a simple element with attributes.
+//    - Map keys that begin with a hyphen, '-', are interpreted as attributes.
+//      It is an error if the attribute doesn't have a []byte, string, number, or boolean value.
+//    - Map value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formating
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements with only attribute values or are null are terminated using "/>".
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+//    - To encode empty elements in a syntax consistent with encoding/xml call UseGoXmlEmptyElementSyntax().
+// The attributes tag=value pairs are alphabetized by "tag".  Also, when encoding map[string]interface{} values -
+// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted.
+func (mv Map) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	b := new(bytes.Buffer)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = marshalMapToXmlIndent(false, b, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = marshalMapToXmlIndent(false, b, rootTag[0], m, p)
+	} else {
+		err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+	}
+done:
+	return b.Bytes(), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as  XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as  XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// Writes the Map as pretty XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// --------------  Handle XML stream by processing Map value --------------------
+
// Default poll delay to keep Handler from spinning on an open stream
// like sitting on os.Stdin waiting for input.
var xhandlerPollInterval = time.Millisecond
+
+// Bulk process XML using handlers that process a Map value.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapXmlReader(xmlReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			time.Sleep(xhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// Bulk process XML using handlers that process a Map value and the raw XML.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+//	See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader.
+func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapXmlReaderRaw(xmlReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			time.Sleep(xhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// ----------------- END: Handle XML stream by processing Map value --------------
+
+// --------  a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ----------
+
+// This is a clone of io.TeeReader with the additional method t.ReadByte().
+// Thus, this TeeReader is also an io.ByteReader.
+// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written
+// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte()..
+// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with
+// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic.
+
+type teeReader struct {
+	r io.Reader
+	w io.Writer
+	b []byte
+}
+
+func myTeeReader(r io.Reader, w io.Writer) io.Reader {
+	b := make([]byte, 1)
+	return &teeReader{r, w, b}
+}
+
+// need for io.Reader - but we don't use it ...
+func (t *teeReader) Read(p []byte) (int, error) {
+	return 0, nil
+}
+
+func (t *teeReader) ReadByte() (byte, error) {
+	n, err := t.r.Read(t.b)
+	if n > 0 {
+		if _, err := t.w.Write(t.b[:1]); err != nil {
+			return t.b[0], err
+		}
+	}
+	return t.b[0], err
+}
+
+// For use with NewMapXmlReader & NewMapXmlSeqReader.
+type byteReader struct {
+	r io.Reader
+	b []byte
+}
+
+func myByteReader(r io.Reader) io.Reader {
+	b := make([]byte, 1)
+	return &byteReader{r, b}
+}
+
+// Need for io.Reader interface ...
+// Needed if reading a malformed http.Request.Body - issue #38.
+func (b *byteReader) Read(p []byte) (int, error) {
+	return b.r.Read(p)
+}
+
+func (b *byteReader) ReadByte() (byte, error) {
+	_, err := b.r.Read(b.b)
+	if len(b.b) > 0 {
+		return b.b[0], nil
+	}
+	var c byte
+	return c, err
+}
+
+// ----------------------- END: io.TeeReader hack -----------------------------------
+
+// ---------------------- XmlIndent - from j2x package ----------------------------
+
+// Encode a map[string]interface{} as a pretty XML string.
+// See Xml for encoding rules.
+func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	b := new(bytes.Buffer)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p)
+			} else {
+				err = marshalMapToXmlIndent(true, b, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = marshalMapToXmlIndent(true, b, rootTag[0], m, p)
+	} else {
+		err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p)
+	}
+	return b.Bytes(), err
+}
+
// pretty tracks the indentation state used while emitting indented XML.
// NOTE: field order matters - marshalMapToXmlIndent copies it with a
// positional composite literal.
type pretty struct {
	indent   string // string added per nesting level
	cnt      int    // current nesting level
	padding  string // accumulated leading whitespace (prefix + indents)
	mapDepth int
	start    int
}

// Indent descends one level: widen the padding and bump the level count.
func (p *pretty) Indent() {
	p.padding += p.indent
	p.cnt++
}

// Outdent ascends one level, if not already at the top.
func (p *pretty) Outdent() {
	if p.cnt == 0 {
		return
	}
	p.padding = p.padding[:len(p.padding)-len(p.indent)]
	p.cnt--
}
+
// marshalMapToXmlIndent is the core encoder behind Map.Xml, Map.XmlIndent and
// Map.MarshalXml.  It writes 'value' to buffer 'b' as the XML element 'key',
// recursing for nested maps and lists.
//	doIndent - emit pretty (indented) XML using the state in 'pp'
//	b        - destination buffer
//	key      - tag name for this element
//	value    - element content: map, list, simple value, or anything else
//	           (handed to xml.Marshal)
//	pp       - current indentation state; copied locally so the caller's
//	           state is never mutated by this call
// where the work actually happens
// returns an error if an attribute is not atomic
// NOTE: 01may20 - replaces mapToXmlIndent(); uses bytes.Buffer instead for string appends.
func marshalMapToXmlIndent(doIndent bool, b *bytes.Buffer, key string, value interface{}, pp *pretty) error {
	var err error
	var endTag bool   // true when a closing </key> (or "/>") must be emitted
	var isSimple bool // true for simple (non-container) content
	var elen int      // length of emitted element content; 0 => empty element
	// local positional copy of the caller's indentation state
	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}

	// per issue #48, 18apr18 - try and coerce maps to map[string]interface{}
	// Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq().
	if reflect.ValueOf(value).Kind() == reflect.Map {
		switch value.(type) {
		case map[string]interface{}:
		default:
			val := make(map[string]interface{})
			vv := reflect.ValueOf(value)
			keys := vv.MapKeys()
			for _, k := range keys {
				val[fmt.Sprint(k)] = vv.MapIndex(k).Interface()
			}
			value = val
		}
	}

	// 14jul20.  The following block of code has become something of a catch all for odd stuff
	// that might be passed in as a result of casting an arbitrary map[<T>]<T> to an mxj.Map
	// value and then call m.Xml or m.XmlIndent. See issue #71 (and #73) for such edge cases.
	switch value.(type) {
	// these types are handled during encoding
	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number:
	case []map[string]interface{}, []string, []float64, []bool, []int, []int32, []int64, []float32, []json.Number:
	case []interface{}:
	default:
		// coerce eveything else into a string value
		value = fmt.Sprint(value)
	}

	// start the XML tag with required indentaton and padding
	if doIndent {
		if _, err = b.WriteString(p.padding); err != nil {
			return err
		}
	}
	// lists open their tags per-element in the []interface{} case below
	switch value.(type) {
	case []interface{}:
	default:
		if _, err = b.WriteString(`<` + key); err != nil {
			return err
		}
	}

	switch value.(type) {
	case map[string]interface{}:
		vv := value.(map[string]interface{})
		lenvv := len(vv)
		// scan out attributes - attribute keys have prepended attrPrefix
		attrlist := make([][2]string, len(vv))
		var n int
		var ss string
		for k, v := range vv {
			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
				switch v.(type) {
				case string:
					if xmlEscapeChars {
						ss = escapeChars(v.(string))
					} else {
						ss = v.(string)
					}
					attrlist[n][0] = k[lenAttrPrefix:]
					attrlist[n][1] = ss
				case float64, bool, int, int32, int64, float32, json.Number:
					attrlist[n][0] = k[lenAttrPrefix:]
					attrlist[n][1] = fmt.Sprintf("%v", v)
				case []byte:
					if xmlEscapeChars {
						ss = escapeChars(string(v.([]byte)))
					} else {
						ss = string(v.([]byte))
					}
					attrlist[n][0] = k[lenAttrPrefix:]
					attrlist[n][1] = ss
				default:
					return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v)
				}
				n++
			}
		}
		if n > 0 {
			// emit attributes sorted by name for deterministic output
			attrlist = attrlist[:n]
			sort.Sort(attrList(attrlist))
			for _, v := range attrlist {
				if _, err = b.WriteString(` ` + v[0] + `="` + v[1] + `"`); err != nil {
					return err
				}
			}
		}
		// only attributes?
		if n == lenvv {
			if useGoXmlEmptyElemSyntax {
				if _, err = b.WriteString(`</` + key + ">"); err != nil {
					return err
				}
			} else {
				if _, err = b.WriteString(`/>`); err != nil {
					return err
				}
			}
			break
		}

		// simple element? Note: '#text' is an invalid XML tag.
		if v, ok := vv["#text"]; ok && n+1 == lenvv {
			switch v.(type) {
			case string:
				if xmlEscapeChars {
					v = escapeChars(v.(string))
				} else {
					v = v.(string)
				}
			case []byte:
				if xmlEscapeChars {
					v = escapeChars(string(v.([]byte)))
				}
			}
			if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil {
				return err
			}
			endTag = true
			elen = 1
			isSimple = true
			break
		} else if ok {
			// Handle edge case where simple element with attributes
			// is unmarshal'd using NewMapXml() where attribute prefix
			// has been set to "".
			// TODO(clb): should probably scan all keys for invalid chars.
			return fmt.Errorf("invalid attribute key label: #text - due to attributes not being prefixed")
		}

		// close tag with possible attributes
		if _, err = b.WriteString(">"); err != nil {
			return err
		}
		if doIndent {
			// *s += "\n"
			if _, err = b.WriteString("\n"); err != nil {
				return err
			}
		}
		// something more complex
		p.mapDepth++
		// extract the map k:v pairs and sort on key
		elemlist := make([][2]interface{}, len(vv))
		n = 0
		for k, v := range vv {
			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
				continue
			}
			elemlist[n][0] = k
			elemlist[n][1] = v
			n++
		}
		elemlist = elemlist[:n]
		sort.Sort(elemList(elemlist))
		var i int
		for _, v := range elemlist {
			switch v[1].(type) {
			case []interface{}:
			default:
				if i == 0 && doIndent {
					p.Indent()
				}
			}
			i++
			if err := marshalMapToXmlIndent(doIndent, b, v[0].(string), v[1], p); err != nil {
				return err
			}
			switch v[1].(type) {
			case []interface{}: // handled in []interface{} case
			default:
				if doIndent {
					p.Outdent()
				}
			}
			i--
		}
		p.mapDepth--
		endTag = true
		elen = 1 // we do have some content ...
	case []interface{}:
		// special case - found during implementing Issue #23
		if len(value.([]interface{})) == 0 {
			if doIndent {
				if _, err = b.WriteString(p.padding + p.indent); err != nil {
					return err
				}
			}
			if _, err = b.WriteString("<" + key); err != nil {
				return err
			}
			elen = 0
			endTag = true
			break
		}
		// each list element is encoded as its own <key>...</key> element
		for _, v := range value.([]interface{}) {
			if doIndent {
				p.Indent()
			}
			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
				return err
			}
			if doIndent {
				p.Outdent()
			}
		}
		return nil
	case []string:
		// This was added by https://github.com/slotix ... not a type that
		// would be encountered if mv generated from NewMapXml, NewMapJson.
		// Could be encountered in AnyXml(), so we'll let it stay, though
		// it should be merged with case []interface{}, above.
		//quick fix for []string type
		//[]string should be treated exaclty as []interface{}
		if len(value.([]string)) == 0 {
			if doIndent {
				if _, err = b.WriteString(p.padding + p.indent); err != nil {
					return err
				}
			}
			if _, err = b.WriteString("<" + key); err != nil {
				return err
			}
			elen = 0
			endTag = true
			break
		}
		for _, v := range value.([]string) {
			if doIndent {
				p.Indent()
			}
			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
				return err
			}
			if doIndent {
				p.Outdent()
			}
		}
		return nil
	case nil:
		// terminate the tag
		if doIndent {
			// *s += p.padding
			if _, err = b.WriteString(p.padding); err != nil {
				return err
			}
		}
		if _, err = b.WriteString("<" + key); err != nil {
			return err
		}
		endTag, isSimple = true, true
		break
	default: // handle anything - even goofy stuff
		elen = 0
		switch value.(type) {
		case string:
			v := value.(string)
			if xmlEscapeChars {
				v = escapeChars(v)
			}
			elen = len(v)
			if elen > 0 {
				// *s += ">" + v
				if _, err = b.WriteString(">" + v); err != nil {
					return err
				}
			}
		case float64, bool, int, int32, int64, float32, json.Number:
			v := fmt.Sprintf("%v", value)
			elen = len(v) // always > 0
			if _, err = b.WriteString(">" + v); err != nil {
				return err
			}
		case []byte: // NOTE: byte is just an alias for uint8
			// similar to how xml.Marshal handles []byte structure members
			v := string(value.([]byte))
			if xmlEscapeChars {
				v = escapeChars(v)
			}
			elen = len(v)
			if elen > 0 {
				// *s += ">" + v
				if _, err = b.WriteString(">" + v); err != nil {
					return err
				}
			}
		default:
			// anything else is delegated to encoding/xml
			if _, err = b.WriteString(">"); err != nil {
				return err
			}
			var v []byte
			var err error
			if doIndent {
				v, err = xml.MarshalIndent(value, p.padding, p.indent)
			} else {
				v, err = xml.Marshal(value)
			}
			if err != nil {
				// NOTE(review): the buffer already holds ">" from above, so
				// this emits ">>UNKNOWN" - quirk preserved as-is.
				if _, err = b.WriteString(">UNKNOWN"); err != nil {
					return err
				}
			} else {
				elen = len(v)
				if elen > 0 {
					if _, err = b.Write(v); err != nil {
						return err
					}
				}
			}
		}
		isSimple = true
		endTag = true
	}
	// emit the closing tag: </key> when there was content (or Go-style empty
	// elements are requested), "/>" otherwise
	if endTag {
		if doIndent {
			if !isSimple {
				if _, err = b.WriteString(p.padding); err != nil {
					return err
				}
			}
		}
		if elen > 0 || useGoXmlEmptyElemSyntax {
			if elen == 0 {
				if _, err = b.WriteString(">"); err != nil {
					return err
				}
			}
			if _, err = b.WriteString(`</` + key + ">"); err != nil {
				return err
			}
		} else {
			if _, err = b.WriteString(`/>`); err != nil {
				return err
			}
		}
	}
	if doIndent {
		if p.cnt > p.start {
			if _, err = b.WriteString("\n"); err != nil {
				return err
			}
		}
		p.Outdent()
	}

	return nil
}
+
+// ============================ sort interface implementation =================
+
+type attrList [][2]string
+
+func (a attrList) Len() int {
+	return len(a)
+}
+
+func (a attrList) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a attrList) Less(i, j int) bool {
+	return a[i][0] <= a[j][0]
+}
+
+type elemList [][2]interface{}
+
+func (e elemList) Len() int {
+	return len(e)
+}
+
+func (e elemList) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemList) Less(i, j int) bool {
+	return e[i][0].(string) <= e[j][0].(string)
+}
+
+// ======================== newMapToXmlIndent
+
+func (mv Map) MarshalXml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	// s := new(string)
+	// b := new(strings.Builder)
+	b := new(bytes.Buffer)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = marshalMapToXmlIndent(false, b, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = marshalMapToXmlIndent(false, b, rootTag[0], m, p)
+	} else {
+		err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+	}
+done:
+	return b.Bytes(), err
+}

+ 844 - 0
vendor/github.com/clbanning/mxj/xmlseq.go

@@ -0,0 +1,844 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+)
+
// MapSeq is like Map but contains sequencing indices to allow recovering the original order of
// the XML elements when the map[string]interface{} is marshaled. Element attributes are
// stored as a map["#attr"]map[<attr_key>]map[string]interface{}{"#text":"<value>", "#seq":<attr_index>}
// value instead of denoting the keys with a prefix character.  Also, comments, directives and
// process instructions are preserved.
type MapSeq map[string]interface{}

// NoRoot is returned by NewXmlSeq, etc., when a comment, directive or procinstr element is parsed
// in the XML data stream and the element is not contained in an XML object with a root element.
var NoRoot = errors.New("no root key")
var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// NewMapXmlSeq converts a XML doc into a MapSeq value with elements id'd with decoding sequence key represented
+// as map["#seq"]<int value>.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with msv.Xml() / msv.XmlIndent().
+//	• attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<aval>, "#seq":<num>}
+//	• all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+//	• lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//	  include a "#seq" k:v pair based on sequence they are decoded.  Thus, XML like:
+//	      <doc>
+//	         <ltag>value 1</ltag>
+//	         <newtag>value 2</newtag>
+//	         <ltag>value 3</ltag>
+//	      </doc>
+//	  is decoded as:
+//	    doc :
+//	      ltag :[[]interface{}]
+//	        [item: 0]
+//	          #seq :[int] 0
+//	          #text :[string] value 1
+//	        [item: 1]
+//	          #seq :[int] 2
+//	          #text :[string] value 3
+//	      newtag :
+//	        #seq :[int] 1
+//	        #text :[string] value 2
+//	  It will encode in proper sequence even though the MapSeq representation merges all "ltag" elements in an array.
+//	• comments - "<!--comment-->" -  are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+//	• directives - "<!text>" - are decoded as map["#directive"]map[#text"]"directive_text" with a "#seq" k:v pair.
+//	• process instructions  - "<?instr?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//	  is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+//	• comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//	  map[string]interface{} and the error value 'NoRoot'.
+//	• note: "<![CDATA[" syntax is lost in xml.Decode parser - and is not handled here, either.
+//	   and: "\r\n" is converted to "\n"
+//
+//	NOTES:
+//	   1. The 'xmlVal' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+//	NAME SPACES:
+//	   1. Keys in the MapSeq value that are parsed from a <name space prefix>:<local name> tag preserve the
+//	      "<prefix>:" notation rather than stripping it as with NewMapXml().
+//	   2. Attribute keys for name space prefix declarations preserve "xmlns:<prefix>" notation.
+// 
+//	ERRORS:
+//	   1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment", 
+//	      "#directive" or #procinst" key.
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
+
// NewMapXmlSeqReader returns next XML doc from an io.Reader as a MapSeq value.
//	NOTES:
//	   1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
//	      re-encode the message in its original structure.
//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
// 
//	ERRORS:
//	   1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment", 
//	      "#directive" or #procinst" key.
func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (MapSeq, error) {
	var r bool
	if len(cast) == 1 {
		r = cast[0]
	}

	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
	// will wrap it in a bufio.Reader and seek on the file beyond where the
	// xml.Decoder parses!
	if _, ok := xmlReader.(io.ByteReader); !ok {
		xmlReader = myByteReader(xmlReader) // see code at EOF
	}

	// build the map
	return xmlSeqReaderToMap(xmlReader, r)
}
+
+// NewMapXmlSeqReaderRaw returns the  next XML doc from  an io.Reader as a MapSeq value.
+// Returns MapSeq value, slice with the raw XML, and any error.
+//	NOTES:
+//	   1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	    2. The 'raw' return value may be larger than the XML text value.
+//	    3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	       extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	    4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	       re-encode the message in its original structure.
+//	    5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+// 
+//	ERRORS:
+//	    1. If a NoRoot error, "no root key," is returned, check if the initial map key is "#comment", 
+//	       "#directive" or #procinst" key.
+func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (MapSeq, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb)
+
+	m, err := xmlSeqReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	// err may be NoRoot
+	return m, b, err
+}
+
+// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// xmlSeqToMap - convert a XML doc into map[string]interface{} value
+func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly.
+// Add #seq tag value for each element decoded - to be used for Encoding later.
+func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
+	if snakeCaseKeys {
+		skey = strings.Replace(skey, "-", "_", -1)
+	}
+
+	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
+	var n, na map[string]interface{}
+	var seq int // for including seq num when decoding
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	//       to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	//       where we begin allocating map[string]interface{} values 'n' and 'na'.
+	if skey != "" {
+		// 'n' only needs one slot - save call to runtime•hashGrow()
+		// 'na' we don't know
+		n = make(map[string]interface{}, 1)
+		na = make(map[string]interface{})
+		if len(a) > 0 {
+			// xml.Attr is decoded into: map["#attr"]map[<attr_label>]interface{}
+			// where interface{} is map[string]interface{}{"#text":<attr_val>, "#seq":<attr_seq>}
+			aa := make(map[string]interface{}, len(a))
+			for i, v := range a {
+				if snakeCaseKeys {
+					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
+				}
+				if len(v.Name.Space) > 0 {
+					aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i}
+				} else {
+					aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i}
+				}
+			}
+			na["#attr"] = aa
+		}
+	}
+
+	// Return XMPP <stream:stream> message.
+	if handleXMPPStreamTag && skey == "stream:stream" {
+		n[skey] = na
+		return n, nil
+	}
+
+	for {
+		t, err := p.RawToken()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
+			}
+			return nil, err
+		}
+		switch t.(type) {
+		case xml.StartElement:
+			tt := t.(xml.StartElement)
+
+			// First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key.
+			// So when the loop is first entered, the first token is the root tag along
+			// with any attributes, which we process here.
+			//
+			// Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for
+			// processing before getting the next token which is the element value,
+			// which is done above.
+			if skey == "" {
+				if len(tt.Name.Space) > 0 {
+					return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+				} else {
+					return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+				}
+			}
+
+			// If not initializing the map, parse the element.
+			// len(nn) == 1, necessarily - it is just an 'n'.
+			var nn map[string]interface{}
+			if len(tt.Name.Space) > 0 {
+				nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+			} else {
+				nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			// The nn map[string]interface{} value is a na[nn_key] value.
+			// We need to see if nn_key already exists - means we're parsing a list.
+			// This may require converting na[nn_key] value into []interface{} type.
+			// First, extract the key:val for the map - it's a singleton.
+			var key string
+			var val interface{}
+			for key, val = range nn {
+				break
+			}
+
+			// add "#seq" k:v pair -
+			// Sequence number included even in list elements - this should allow us
+			// to properly resequence even something goofy like:
+			//     <list>item 1</list>
+			//     <subelement>item 2</subelement>
+			//     <list>item 3</list>
+			// where all the "list" subelements are decoded into an array.
+			switch val.(type) {
+			case map[string]interface{}:
+				val.(map[string]interface{})["#seq"] = seq
+				seq++
+			case interface{}: // a non-nil simple element: string, float64, bool
+				v := map[string]interface{}{"#text": val, "#seq": seq}
+				seq++
+				val = v
+			}
+
+			// 'na' holding sub-elements of n.
+			// See if 'key' already exists.
+			// If 'key' exists, then this is a list, if not just add key:val to na.
+			if v, ok := na[key]; ok {
+				var a []interface{}
+				switch v.(type) {
+				case []interface{}:
+					a = v.([]interface{})
+				default: // anything else - note: v.(type) != nil
+					a = []interface{}{v}
+				}
+				a = append(a, val)
+				na[key] = a
+			} else {
+				na[key] = val // save it as a singleton
+			}
+		case xml.EndElement:
+			if skey != "" {
+				tt := t.(xml.EndElement)
+				if snakeCaseKeys {
+					tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1)
+				}
+				var name string
+				if len(tt.Name.Space) > 0 {
+					name = tt.Name.Space + `:` + tt.Name.Local
+				} else {
+					name = tt.Name.Local
+				}
+				if skey != name {
+					return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d",
+						skey, name, p.InputOffset())
+				}
+			}
+			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
+			if len(n) == 0 {
+				// If len(na)==0 we have an empty element == "";
+				// it has no xml.Attr nor xml.CharData.
+				// Empty element content will be  map["etag"]map["#text"]""
+				// after #seq injection - map["etag"]map["#seq"]seq - after return.
+				if len(na) > 0 {
+					n[skey] = na
+				} else {
+					n[skey] = "" // empty element
+				}
+			}
+			return n, nil
+		case xml.CharData:
+			// clean up possible noise
+			tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
+			if skey == "" {
+				// per Adrian (http://www.adrianlungu.com/) catch stray text
+				// in decoder stream -
+				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+				continue
+			}
+			if len(tt) > 0 {
+				// every simple element is a #text and has #seq associated with it
+				na["#text"] = cast(tt, r, "")
+				na["#seq"] = seq
+				seq++
+			}
+		case xml.Comment:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#comment": string(t.(xml.Comment))}
+				return n, NoRoot
+			}
+			cm := make(map[string]interface{}, 2)
+			cm["#text"] = string(t.(xml.Comment))
+			cm["#seq"] = seq
+			seq++
+			na["#comment"] = cm
+		case xml.Directive:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#directive": string(t.(xml.Directive))}
+				return n, NoRoot
+			}
+			dm := make(map[string]interface{}, 2)
+			dm["#text"] = string(t.(xml.Directive))
+			dm["#seq"] = seq
+			seq++
+			na["#directive"] = dm
+		case xml.ProcInst:
+			if n == nil {
+				na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)}
+				n = map[string]interface{}{"#procinst": na}
+				return n, NoRoot
+			}
+			pm := make(map[string]interface{}, 3)
+			pm["#target"] = t.(xml.ProcInst).Target
+			pm["#inst"] = string(t.(xml.ProcInst).Inst)
+			pm["#seq"] = seq
+			seq++
+			na["#procinst"] = pm
+		default:
+			// noop - shouldn't ever get here, now, since we handle all token types
+		}
+	}
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// --------------------- mv.XmlSeq & mv.XmlSeqWriter -------------------------
+
+// Xml encodes a MapSeq as XML with elements sorted on #seq.  The companion of NewMapXmlSeq().
+// The following rules apply.
+//    - The "#seq" key value is used to seqence the subelements or attributes only.
+//    - The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+//    - The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+//    - The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+//    - The "#procinst" map key identifies a process instruction in the value "#target" and "#inst"
+//      map entries - <?target inst?>.
+//    - Value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formating
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements with only attribute values or are null are terminated using "/>" unless XmlGoEmptyElemSystax() called.
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+func (mv MapSeq) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlSeqIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	return []byte(*s), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// XmlWriter Writes the MapSeq value as  XML on the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlWriteRaw writes the MapSeq value as XML on the Writer. []byte is the raw XML that was written.
+// See Map.XmlSeq() for encoding rules.
+/*
+func (mv MapSeq) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// XmlIndentWriter writes the MapSeq value as pretty XML on the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlIndentWriterRaw writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Map.XmlSeq() for encoding rules.
+/*
+func (mv MapSeq) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// ---------------------- XmlSeqIndent ----------------------------
+
+// XmlIndent encodes a map[string]interface{} as a pretty XML string.
+// See MapSeq.XmlSeq() for encoding rules.
+func (mv MapSeq) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	return []byte(*s), err
+}
+
// mapToXmlSeqIndent is where the work actually happens: it recursively encodes
// 'value' as the content of XML element 'key', appending the marshaled text to *s.
// 'doIndent' selects pretty-printing; 'pp' carries the current indentation state.
// Subelements and attributes are written in "#seq" order (see elemListSeq).
// Returns an error if an attribute value is not an atomic type.
func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
	var endTag bool   // a closing </key> tag is required
	var isSimple bool // element has no subelements (affects indentation of the close tag)
	var noEndTag bool // comments/directives/procinsts carry their own delimiters - no end tag
	var elen int      // non-zero when the element has text/child content
	var ss string
	// Work on a copy of the pretty-printer state so recursive calls for
	// siblings don't see each other's indentation changes.
	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}

	// Open the start tag for everything that renders as an element;
	// "#comment", "#directive" and "#procinst" are written with their own
	// delimiters in the second switch below.
	switch value.(type) {
	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
		if doIndent {
			*s += p.padding
		}
		if key != "#comment" && key != "#directive" && key != "#procinst" {
			*s += `<` + key
		}
	}
	switch value.(type) {
	case map[string]interface{}:
		val := value.(map[string]interface{})

		// <!--comment-->
		if key == "#comment" {
			*s += `<!--` + val["#text"].(string) + `-->`
			noEndTag = true
			break
		}

		// <!directive>
		if key == "#directive" {
			*s += `<!` + val["#text"].(string) + `>`
			noEndTag = true
			break
		}

		// <?target inst?>
		if key == "#procinst" {
			*s += `<?` + val["#target"].(string) + ` ` + val["#inst"].(string) + `?>`
			noEndTag = true
			break
		}

		haveAttrs := false
		// process attributes first
		if v, ok := val["#attr"].(map[string]interface{}); ok {
			// First, unroll the map[string]interface{} into a []keyval array.
			// Then sequence it.
			kv := make([]keyval, len(v))
			n := 0
			for ak, av := range v {
				kv[n] = keyval{ak, av}
				n++
			}
			sort.Sort(elemListSeq(kv))
			// Now encode the attributes in original decoding sequence, using keyval array.
			// Each attribute value is map{"#text": <val>, "#seq": <n>}; only
			// atomic #text types are legal.
			for _, a := range kv {
				vv := a.v.(map[string]interface{})
				switch vv["#text"].(type) {
				case string:
					if xmlEscapeChars {
						ss = escapeChars(vv["#text"].(string))
					} else {
						ss = vv["#text"].(string)
					}
					*s += ` ` + a.k + `="` + ss + `"`
				case float64, bool, int, int32, int64, float32:
					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"`
				case []byte:
					if xmlEscapeChars {
						ss = escapeChars(string(vv["#text"].([]byte)))
					} else {
						ss = string(vv["#text"].([]byte))
					}
					*s += ` ` + a.k + `="` + ss + `"`
				default:
					return fmt.Errorf("invalid attribute value for: %s", a.k)
				}
			}
			haveAttrs = true
		}

		// simple element?
		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
		_, seqOK := val["#seq"] // have key
		if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
			// only #text (+ optional #attr, #seq): emit text content inline
			if stmp, ok := v.(string); ok && stmp != "" {
				if xmlEscapeChars {
					stmp = escapeChars(stmp)
				}
				*s += ">" + stmp
				endTag = true
				elen = 1
			}
			isSimple = true
			break
		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
			// here no #text but have #seq or #seq+#attr
			// empty element: endTag == false routes to the "/>" branch below
			endTag = false
			break
		}

		// we now need to sequence everything except attributes
		// 'kv' will hold everything that needs to be written
		kv := make([]keyval, 0)
		for k, v := range val {
			if k == "#attr" { // already processed
				continue
			}
			if k == "#seq" { // ignore - just for sorting
				continue
			}
			switch v.(type) {
			case []interface{}:
				// unwind the array as separate entries
				for _, vv := range v.([]interface{}) {
					kv = append(kv, keyval{k, vv})
				}
			default:
				kv = append(kv, keyval{k, v})
			}
		}

		// close tag with possible attributes
		*s += ">"
		if doIndent {
			*s += "\n"
		}
		// something more complex
		p.mapDepth++
		// write subelements in #seq order
		sort.Sort(elemListSeq(kv))
		i := 0
		for _, v := range kv {
			switch v.v.(type) {
			case []interface{}:
			default:
				// indent once before the first non-list child; list values
				// indent themselves in the []interface{} case below
				if i == 0 && doIndent {
					p.Indent()
				}
			}
			i++
			if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil {
				return err
			}
			switch v.v.(type) {
			case []interface{}: // handled in []interface{} case
			default:
				if doIndent {
					p.Outdent()
				}
			}
			i--
		}
		p.mapDepth--
		endTag = true
		elen = 1 // we do have some content other than attrs
	case []interface{}:
		// a list: emit each member as a separate <key>...</key> element
		for _, v := range value.([]interface{}) {
			if doIndent {
				p.Indent()
			}
			if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil {
				return err
			}
			if doIndent {
				p.Outdent()
			}
		}
		return nil
	case nil:
		// terminate the tag
		if doIndent {
			*s += p.padding
		}
		*s += "<" + key
		endTag, isSimple = true, true
		break
	default: // handle anything - even goofy stuff
		elen = 0
		switch value.(type) {
		case string:
			if xmlEscapeChars {
				ss = escapeChars(value.(string))
			} else {
				ss = value.(string)
			}
			elen = len(ss)
			if elen > 0 {
				*s += ">" + ss
			}
		case float64, bool, int, int32, int64, float32:
			v := fmt.Sprintf("%v", value)
			elen = len(v)
			if elen > 0 {
				*s += ">" + v
			}
		case []byte: // NOTE: byte is just an alias for uint8
			// similar to how xml.Marshal handles []byte structure members
			if xmlEscapeChars {
				ss = escapeChars(string(value.([]byte)))
			} else {
				ss = string(value.([]byte))
			}
			elen = len(ss)
			if elen > 0 {
				*s += ">" + ss
			}
		default:
			// anything else: defer to encoding/xml; on failure emit "UNKNOWN"
			var v []byte
			var err error
			if doIndent {
				v, err = xml.MarshalIndent(value, p.padding, p.indent)
			} else {
				v, err = xml.Marshal(value)
			}
			if err != nil {
				*s += ">UNKNOWN"
			} else {
				elen = len(v)
				if elen > 0 {
					*s += string(v)
				}
			}
		}
		isSimple = true
		endTag = true
	}
	// Close the element: </key> when there was content (or Go-style empty
	// element syntax was requested), otherwise self-close with "/>".
	if endTag && !noEndTag {
		if doIndent {
			if !isSimple {
				*s += p.padding
			}
		}
		switch value.(type) {
		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
			if elen > 0 || useGoXmlEmptyElemSyntax {
				if elen == 0 {
					*s += ">"
				}
				*s += `</` + key + ">"
			} else {
				*s += `/>`
			}
		}
	} else if !noEndTag {
		if useGoXmlEmptyElemSyntax {
			*s += `</` + key + ">"
			// *s += "></" + key + ">"
		} else {
			*s += "/>"
		}
	}
	if doIndent {
		// p.cnt/p.start track nesting progress - presumably newline bookkeeping
		// for the pretty-printer; see the pretty type for details (declared elsewhere).
		if p.cnt > p.start {
			*s += "\n"
		}
		p.Outdent()
	}

	return nil
}
+
+// the element sort implementation
+
+type keyval struct {
+	k string
+	v interface{}
+}
+type elemListSeq []keyval
+
+func (e elemListSeq) Len() int {
+	return len(e)
+}
+
+func (e elemListSeq) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemListSeq) Less(i, j int) bool {
+	var iseq, jseq int
+	var fiseq, fjseq float64
+	var ok bool
+	if iseq, ok = e[i].v.(map[string]interface{})["#seq"].(int); !ok {
+		if fiseq, ok = e[i].v.(map[string]interface{})["#seq"].(float64); ok {
+			iseq = int(fiseq)
+		} else {
+			iseq = 9999999
+		}
+	}
+
+	if jseq, ok = e[j].v.(map[string]interface{})["#seq"].(int); !ok {
+		if fjseq, ok = e[j].v.(map[string]interface{})["#seq"].(float64); ok {
+			jseq = int(fjseq)
+		} else {
+			jseq = 9999999
+		}
+	}
+
+	return iseq <= jseq
+}
+
+// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio
+
+// BeautifyXml (re)formats an XML doc similar to Map.XmlIndent().
+// It preserves comments, directives and process instructions, 
+func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) {
+	x, err := NewMapXmlSeq(b)
+	if err != nil {
+		return nil, err
+	}
+	return x.XmlIndent(prefix, indent)
+}

+ 18 - 0
vendor/github.com/clbanning/mxj/xmlseq2.go

@@ -0,0 +1,18 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+// ---------------- expose Map methods to MapSeq type ---------------------------
+
+// Pretty print a Map.
+func (msv MapSeq) StringIndent(offset ...int) string {
+	return writeMap(map[string]interface{}(msv), true, true, offset...)
+}
+
+// Pretty print a Map without the value type information - just key:value entries.
+func (msv MapSeq) StringIndentNoTypeInfo(offset ...int) string {
+	return writeMap(map[string]interface{}(msv), false, true, offset...)
+}
+

+ 36 - 0
vendor/github.com/eclipse/paho.mqtt.golang/.gitignore

@@ -0,0 +1,36 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+*.msg
+*.lok
+
+samples/trivial
+samples/trivial2
+samples/sample
+samples/reconnect
+samples/ssl
+samples/custom_store
+samples/simple
+samples/stdinpub
+samples/stdoutsub
+samples/routing

+ 56 - 0
vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md

@@ -0,0 +1,56 @@
+Contributing to Paho
+====================
+
+Thanks for your interest in this project.
+
+Project description:
+--------------------
+
+The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT).
+Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community.
+
+- https://projects.eclipse.org/projects/technology.paho
+
+Developer resources:
+--------------------
+
+Information regarding source code management, builds, coding standards, and more.
+
+- https://projects.eclipse.org/projects/technology.paho/developer
+
+Contributor License Agreement:
+------------------------------
+
+Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA).
+
+- http://www.eclipse.org/legal/CLA.php
+
+Contributing Code:
+------------------
+
+The Go client is developed in Github, see their documentation on the process of forking and pull requests; https://help.github.com/categories/collaborating-on-projects-using-pull-requests/
+
+Git commit messages should follow the style described here;
+
+http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+
+Contact:
+--------
+
+Contact the project developers via the project's "dev" list.
+
+- https://dev.eclipse.org/mailman/listinfo/paho-dev
+
+Search for bugs:
+----------------
+
+This project uses Github issues to track ongoing development and issues.
+
+- https://github.com/eclipse/paho.mqtt.golang/issues
+
+Create a new bug:
+-----------------
+
+Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome!
+
+- https://github.com/eclipse/paho.mqtt.golang/issues

+ 15 - 0
vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION

@@ -0,0 +1,15 @@
+
+
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+    Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 20 - 0
vendor/github.com/eclipse/paho.mqtt.golang/LICENSE

@@ -0,0 +1,20 @@
+This project is dual licensed under the Eclipse Public License 1.0 and the
+Eclipse Distribution License 1.0 as described in the epl-v10 and edl-v10 files.
+
+The EDL is copied below in order to pass the pkg.go.dev license check (https://pkg.go.dev/license-policy).
+
+****
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+    Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+

+ 177 - 0
vendor/github.com/eclipse/paho.mqtt.golang/README.md

@@ -0,0 +1,177 @@
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/eclipse/paho.mqtt.golang)](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang)
+[![Go Report Card](https://goreportcard.com/badge/github.com/eclipse/paho.mqtt.golang)](https://goreportcard.com/report/github.com/eclipse/paho.mqtt.golang)
+
+Eclipse Paho MQTT Go client
+===========================
+
+
+This repository contains the source code for the [Eclipse Paho](https://eclipse.org/paho) MQTT 3.1/3.11 Go client library. 
+
+This code builds a library which enables applications to connect to an [MQTT](https://mqtt.org) broker to publish 
+messages, and to subscribe to topics and receive published messages.
+
+This library supports a fully asynchronous mode of operation.
+
+A client supporting MQTT V5 is [also available](https://github.com/eclipse/paho.golang).
+
+Installation and Build
+----------------------
+
+The process depends upon whether you are using [modules](https://golang.org/ref/mod) (recommended) or `GOPATH`. 
+
+#### Modules
+
+If you are using [modules](https://blog.golang.org/using-go-modules) then `import "github.com/eclipse/paho.mqtt.golang"` 
+and start using it. The necessary packages will be downloaded automatically when you run `go build`. 
+
+Note that the latest release will be downloaded and changes may have been made since the release. If you have 
+encountered an issue, or wish to try the latest code for another reason, then run 
+`go get github.com/eclipse/paho.mqtt.golang@master` to get the latest commit.
+
+#### GOPATH
+
+Installation is as easy as:
+
+```
+go get github.com/eclipse/paho.mqtt.golang
+```
+
+The client depends on Google's [proxy](https://godoc.org/golang.org/x/net/proxy) package and the 
+[websockets](https://godoc.org/github.com/gorilla/websocket) package, also easily installed with the commands:
+
+```
+go get github.com/gorilla/websocket
+go get golang.org/x/net/proxy
+```
+
+
+Usage and API
+-------------
+
+Detailed API documentation is available by using the godoc tool, or can be browsed online
+using the [pkg.go.dev](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang) service.
+
+Samples are available in the `cmd` directory for reference.
+
+Note:
+
+The library also supports using MQTT over websockets by using the `ws://` (unsecure) or `wss://` (secure) prefix in the
+URI. If the client is running behind a corporate http/https proxy then the following environment variables `HTTP_PROXY`,
+`HTTPS_PROXY` and `NO_PROXY` are taken into account when establishing the connection.
+
+Troubleshooting
+---------------
+
+If you are new to MQTT and your application is not working as expected reviewing the
+[MQTT specification](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html), which this library implements,
+is a good first step. [MQTT.org](https://mqtt.org) has some [good resources](https://mqtt.org/getting-started/) that answer many 
+common questions.
+
+### Error Handling
+
+The asynchronous nature of this library makes it easy to forget to check for errors. Consider using a go routine to 
+log these: 
+
+```go
+t := client.Publish("topic", qos, retained, msg)
+go func() {
+    _ = t.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
+    if t.Error() != nil {
+        log.Error(t.Error()) // Use your preferred logging technique (or just fmt.Printf)
+    }
+}()
+```
+
+### Logging
+
+If you are encountering issues then enabling logging, both within this library and on your broker, is a good way to
+begin troubleshooting. This library can produce various levels of log by assigning the logging endpoints, ERROR, 
+CRITICAL, WARN and DEBUG. For example:
+
+```go
+func main() {
+	mqtt.ERROR = log.New(os.Stdout, "[ERROR] ", 0)
+	mqtt.CRITICAL = log.New(os.Stdout, "[CRIT] ", 0)
+	mqtt.WARN = log.New(os.Stdout, "[WARN]  ", 0)
+	mqtt.DEBUG = log.New(os.Stdout, "[DEBUG] ", 0)
+
+	// Connect, Subscribe, Publish etc..
+}
+```
+
+### Common Problems
+
+* Seemingly random disconnections may be caused by another client connecting to the broker with the same client 
+identifier; this is as per the [spec](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc384800405).
+* Unless ordered delivery of messages is essential (and you have configured your broker to support this e.g. 
+  `max_inflight_messages=1` in mosquitto) then set `ClientOptions.SetOrderMatters(false)`. Doing so will avoid the 
+  below issue (deadlocks due to blocking message handlers).
+* A `MessageHandler` (called when a new message is received) must not block (unless 
+  `ClientOptions.SetOrderMatters(false)` set). If you wish to perform a long-running task, or publish a message, then 
+  please use a go routine (blocking in the handler is a common cause of unexpected `pingresp 
+not received, disconnecting` errors). 
+* When QOS1+ subscriptions have been created previously and you connect with `CleanSession` set to false it is possible that the broker will deliver retained 
+messages before `Subscribe` can be called. To process these messages either configure a handler with `AddRoute` or
+set a `DefaultPublishHandler`.
+* Loss of network connectivity may not be detected immediately. If this is an issue then consider setting 
+`ClientOptions.KeepAlive` (sends regular messages to check the link is active). 
+* Brokers offer many configuration options; some settings may lead to unexpected results. If using Mosquitto check
+`max_inflight_messages`, `max_queued_messages`, `persistence` (the defaults may not be what you expect).
+
+Reporting bugs
+--------------
+
+Please report bugs by raising issues for this project in github https://github.com/eclipse/paho.mqtt.golang/issues
+
+*A limited number of contributors monitor the issues section so if you have a general question please consider the 
+resources in the [more information](#more-information) section (your question will be seen by more people, and you are 
+likely to receive an answer more quickly).*
+
+We welcome bug reports, but it is important they are actionable. A significant percentage of issues reported are not 
+resolved due to a lack of information. If we cannot replicate the problem then it is unlikely we will be able to fix it. 
+The information required will vary from issue to issue but consider including:  
+
+* Which version of the package you are using (tag or commit - this should be in your go.mod file)
+* A [Minimal, Reproducible Example](https://stackoverflow.com/help/minimal-reproducible-example). Providing an example 
+is the best way to demonstrate the issue you are facing; it is important this includes all relevant information
+(including broker configuration). Docker (see `cmd/docker`) makes it relatively simple to provide a working end-to-end 
+example.
+* A full, clear, description of the problem (detail what you are expecting vs what actually happens).
+* Details of your attempts to resolve the issue (what have you tried, what worked, what did not).
+* [Application Logs](#logging) covering the period the issue occurred. Unless you have isolated the root cause of the issue please include a link to a full log (including data from well before the problem arose).
+* Broker Logs covering the period the issue occurred.
+
+It is important to remember that this library does not stand alone; it communicates with a broker and any issues you are 
+seeing may be due to:
+
+* Bugs in your code.
+* Bugs in this library.
+* The broker configuration.
+* Bugs in the broker.
+* Issues with whatever you are communicating with.
+
+When submitting an issue, please ensure that you provide sufficient details to enable us to eliminate causes outside of
+this library.
+
+Contributing
+------------
+
+We welcome pull requests but before your contribution can be accepted by the project, you need to create and 
+electronically sign the Eclipse Contributor Agreement (ECA) and sign off on the Eclipse Foundation Certificate of Origin. 
+
+More information is available in the 
+[Eclipse Development Resources](http://wiki.eclipse.org/Development_Resources/Contributing_via_Git); please take special 
+note of the requirement that the commit record contain a "Signed-off-by" entry.
+
+More information
+----------------
+
+Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
+
+General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
+
+There is much more information available via the [MQTT community site](http://mqtt.org).
+
+[Stack Overflow](https://stackoverflow.com/questions/tagged/mqtt+go) has a range questions covering a range of common 
+issues (both relating to use of this library and MQTT in general).

+ 41 - 0
vendor/github.com/eclipse/paho.mqtt.golang/about.html

@@ -0,0 +1,41 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml"><head>
+<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
+<title>About</title>
+</head>
+<body lang="EN-US">
+<h2>About This Content</h2>
+ 
+<p><em>December 9, 2013</em></p>	
+<h3>License</h3>
+
+<p>The Eclipse Foundation makes available all content in this plug-in ("Content").  Unless otherwise 
+indicated below, the Content is provided to you under the terms and conditions of the
+Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
+A copy of the EPL is available at 
+<a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a> 
+and a copy of the EDL is available at 
+<a href="http://www.eclipse.org/org/documents/edl-v10.php">http://www.eclipse.org/org/documents/edl-v10.php</a>. 
+For purposes of the EPL, "Program" will mean the Content.</p>
+
+<p>If you did not receive this Content directly from the Eclipse Foundation, the Content is 
+being redistributed by another party ("Redistributor") and different terms and conditions may
+apply to your use of any object code in the Content.  Check the Redistributor's license that was 
+provided with the Content.  If no such license exists, contact the Redistributor.  Unless otherwise
+indicated below, the terms and conditions of the EPL still apply to any source code in the Content
+and such source code may be obtained at <a href="http://www.eclipse.org/">http://www.eclipse.org</a>.</p>
+
+		
+		<h3>Third Party Content</h3>
+		<p>The Content includes items that have been sourced from third parties as set out below. If you 
+		did not receive this Content directly from the Eclipse Foundation, the following is provided 
+		for informational purposes only, and you should look to the Redistributor's license for 
+		terms and conditions of use.</p>
+		<p><em>
+		<strong>None</strong> <br><br>
+		<br><br> 
+		</em></p>
+
+
+
+</body></html>

+ 1127 - 0
vendor/github.com/eclipse/paho.mqtt.golang/client.go

@@ -0,0 +1,1127 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+// Portions copyright © 2018 TIBCO Software Inc.
+
+// Package mqtt provides an MQTT v3.1.1 client library.
+package mqtt
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const (
+	disconnected uint32 = iota
+	connecting
+	reconnecting
+	connected
+)
+
+// Client is the interface definition for a Client as used by this
+// library, the interface is primarily to allow mocking tests.
+//
+// It is an MQTT v3.1.1 client for communicating
+// with an MQTT server using non-blocking methods that allow work
+// to be done in the background.
+// An application may connect to an MQTT server using:
+//   A plain TCP socket
+//   A secure SSL/TLS socket
+//   A websocket
+// To enable ensured message delivery at Quality of Service (QoS) levels
+// described in the MQTT spec, a message persistence mechanism must be
+// used. This is done by providing a type which implements the Store
+// interface. For convenience, FileStore and MemoryStore are provided
+// implementations that should be sufficient for most use cases. More
+// information can be found in their respective documentation.
+// Numerous connection options may be specified by configuring a
+// and then supplying a ClientOptions type.
+// Implementations of Client must be safe for concurrent use by multiple
+// goroutines
+type Client interface {
+	// IsConnected returns a bool signifying whether
+	// the client is connected or not.
+	IsConnected() bool
+	// IsConnectionOpen return a bool signifying whether the client has an active
+	// connection to mqtt broker, i.e not in disconnected or reconnect mode
+	IsConnectionOpen() bool
+	// Connect will create a connection to the message broker, by default
+	// it will attempt to connect at v3.1.1 and auto retry at v3.1 if that
+	// fails
+	Connect() Token
+	// Disconnect will end the connection with the server, but not before waiting
+	// the specified number of milliseconds to wait for existing work to be
+	// completed.
+	Disconnect(quiesce uint)
+	// Publish will publish a message with the specified QoS and content
+	// to the specified topic.
+	// Returns a token to track delivery of the message to the broker
+	Publish(topic string, qos byte, retained bool, payload interface{}) Token
+	// Subscribe starts a new subscription. Provide a MessageHandler to be executed when
+	// a message is published on the topic provided, or nil for the default handler.
+	//
+	// If options.OrderMatters is true (the default) then callback must not block or
+	// call functions within this package that may block (e.g. Publish) other than in
+	// a new go routine.
+	// callback must be safe for concurrent use by multiple goroutines.
+	Subscribe(topic string, qos byte, callback MessageHandler) Token
+	// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
+	// be executed when a message is published on one of the topics provided, or nil for the
+	// default handler.
+	//
+	// If options.OrderMatters is true (the default) then callback must not block or
+	// call functions within this package that may block (e.g. Publish) other than in
+	// a new go routine.
+	// callback must be safe for concurrent use by multiple goroutines.
+	SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token
+	// Unsubscribe will end the subscription from each of the topics provided.
+	// Messages published to those topics from other clients will no longer be
+	// received.
+	Unsubscribe(topics ...string) Token
+	// AddRoute allows you to add a handler for messages on a specific topic
+	// without making a subscription. For example having a different handler
+	// for parts of a wildcard subscription or for receiving retained messages
+	// upon connection (before Sub scribe can be processed).
+	//
+	// If options.OrderMatters is true (the default) then callback must not block or
+	// call functions within this package that may block (e.g. Publish) other than in
+	// a new go routine.
+	// callback must be safe for concurrent use by multiple goroutines.
+	AddRoute(topic string, callback MessageHandler)
+	// OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions
+	// in use by the client.
+	OptionsReader() ClientOptionsReader
+}
+
+// client implements the Client interface
+// clients are safe for concurrent use by multiple
+// goroutines
+type client struct {
+	lastSent        atomic.Value // time.Time - the last time a packet was successfully sent to network
+	lastReceived    atomic.Value // time.Time - the last time a packet was successfully received from network
+	pingOutstanding int32        // set to 1 if a ping has been sent but response not ret received
+
+	status       uint32 // see const definitions at top of file for possible values
+	sync.RWMutex        // Protects the above two variables (note: atomic writes are also used somewhat inconsistently)
+
+	messageIds // effectively a map from message id to token completor
+
+	obound    chan *PacketAndToken // outgoing publish packet
+	oboundP   chan *PacketAndToken // outgoing 'priority' packet (anything other than publish)
+	msgRouter *router              // routes topics to handlers
+	persist   Store
+	options   ClientOptions
+	optionsMu sync.Mutex // Protects the options in a few limited cases where needed for testing
+
+	conn   net.Conn   // the network connection, must only be set with connMu locked (only used when starting/stopping workers)
+	connMu sync.Mutex // mutex for the connection (again only used in two functions)
+
+	stop         chan struct{}  // Closed to request that workers stop
+	workers      sync.WaitGroup // used to wait for workers to complete (ping, keepalive, errwatch, resume)
+	commsStopped chan struct{}  // closed when the comms routines have stopped (kept running until after workers have closed to avoid deadlocks)
+}
+
+// NewClient will create an MQTT v3.1.1 client with all of the options specified
+// in the provided ClientOptions. The client must have the Connect method called
+// on it before it may be used. This is to make sure resources (such as a net
+// connection) are created before the application is actually ready.
+func NewClient(o *ClientOptions) Client {
+	c := &client{}
+	c.options = *o
+
+	if c.options.Store == nil {
+		c.options.Store = NewMemoryStore()
+	}
+	switch c.options.ProtocolVersion {
+	case 3, 4:
+		c.options.protocolVersionExplicit = true
+	case 0x83, 0x84:
+		c.options.protocolVersionExplicit = true
+	default:
+		c.options.ProtocolVersion = 4
+		c.options.protocolVersionExplicit = false
+	}
+	c.persist = c.options.Store
+	c.status = disconnected
+	c.messageIds = messageIds{index: make(map[uint16]tokenCompletor)}
+	c.msgRouter = newRouter()
+	c.msgRouter.setDefaultHandler(c.options.DefaultPublishHandler)
+	c.obound = make(chan *PacketAndToken)
+	c.oboundP = make(chan *PacketAndToken)
+	return c
+}
+
+// AddRoute allows you to add a handler for messages on a specific topic
+// without making a subscription. For example having a different handler
+// for parts of a wildcard subscription
+//
+// If options.OrderMatters is true (the default) then callback must not block or
+// call functions within this package that may block (e.g. Publish) other than in
+// a new go routine.
+// callback must be safe for concurrent use by multiple goroutines.
+func (c *client) AddRoute(topic string, callback MessageHandler) {
+	if callback != nil {
+		c.msgRouter.addRoute(topic, callback)
+	}
+}
+
+// IsConnected returns a bool signifying whether
+// the client is connected or not.
+// connected means that the connection is up now OR it will
+// be established/reestablished automatically when possible
+func (c *client) IsConnected() bool {
+	c.RLock()
+	defer c.RUnlock()
+	status := atomic.LoadUint32(&c.status)
+	switch {
+	case status == connected:
+		return true
+	case c.options.AutoReconnect && status > connecting:
+		return true
+	case c.options.ConnectRetry && status == connecting:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsConnectionOpen return a bool signifying whether the client has an active
+// connection to mqtt broker, i.e not in disconnected or reconnect mode
+func (c *client) IsConnectionOpen() bool {
+	c.RLock()
+	defer c.RUnlock()
+	status := atomic.LoadUint32(&c.status)
+	switch {
+	case status == connected:
+		return true
+	default:
+		return false
+	}
+}
+
+func (c *client) connectionStatus() uint32 {
+	c.RLock()
+	defer c.RUnlock()
+	status := atomic.LoadUint32(&c.status)
+	return status
+}
+
+func (c *client) setConnected(status uint32) {
+	c.Lock()
+	defer c.Unlock()
+	atomic.StoreUint32(&c.status, status)
+}
+
+// ErrNotConnected is the error returned from function calls that are
+// made when the client is not connected to a broker
+var ErrNotConnected = errors.New("not Connected")
+
+// Connect will create a connection to the message broker, by default
+// it will attempt to connect at v3.1.1 and auto retry at v3.1 if that
+// fails
+// Note: If using QOS1+ and CleanSession=false it is advisable to add
+// routes (or a DefaultPublishHandler) prior to calling Connect()
+// because queued messages may be delivered immediately post connection
+func (c *client) Connect() Token {
+	t := newToken(packets.Connect).(*ConnectToken)
+	DEBUG.Println(CLI, "Connect()")
+
+	if c.options.ConnectRetry && atomic.LoadUint32(&c.status) != disconnected {
+		// if in any state other than disconnected and ConnectRetry is
+		// enabled then the connection will come up automatically
+		// client can assume connection is up
+		WARN.Println(CLI, "Connect() called but not disconnected")
+		t.returnCode = packets.Accepted
+		t.flowComplete()
+		return t
+	}
+
+	c.persist.Open()
+	if c.options.ConnectRetry {
+		c.reserveStoredPublishIDs() // Reserve IDs to allow publish before connect complete
+	}
+	c.setConnected(connecting)
+
+	go func() {
+		if len(c.options.Servers) == 0 {
+			t.setError(fmt.Errorf("no servers defined to connect to"))
+			return
+		}
+
+	RETRYCONN:
+		var conn net.Conn
+		var rc byte
+		var err error
+		conn, rc, t.sessionPresent, err = c.attemptConnection()
+		if err != nil {
+			if c.options.ConnectRetry {
+				DEBUG.Println(CLI, "Connect failed, sleeping for", int(c.options.ConnectRetryInterval.Seconds()), "seconds and will then retry")
+				time.Sleep(c.options.ConnectRetryInterval)
+
+				if atomic.LoadUint32(&c.status) == connecting {
+					goto RETRYCONN
+				}
+			}
+			ERROR.Println(CLI, "Failed to connect to a broker")
+			c.setConnected(disconnected)
+			c.persist.Close()
+			t.returnCode = rc
+			t.setError(err)
+			return
+		}
+		inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing
+		if c.startCommsWorkers(conn, inboundFromStore) {
+			// Take care of any messages in the store
+			if !c.options.CleanSession {
+				c.resume(c.options.ResumeSubs, inboundFromStore)
+			} else {
+				c.persist.Reset()
+			}
+		} else {
+			WARN.Println(CLI, "Connect() called but connection established in another goroutine")
+		}
+
+		close(inboundFromStore)
+		t.flowComplete()
+		DEBUG.Println(CLI, "exit startClient")
+	}()
+	return t
+}
+
+// internal function used to reconnect the client when it loses its connection
+func (c *client) reconnect() {
+	DEBUG.Println(CLI, "enter reconnect")
+	var (
+		sleep = 1 * time.Second
+		conn  net.Conn
+	)
+
+	for {
+		if nil != c.options.OnReconnecting {
+			c.options.OnReconnecting(c, &c.options)
+		}
+		var err error
+		conn, _, _, err = c.attemptConnection()
+		if err == nil {
+			break
+		}
+		DEBUG.Println(CLI, "Reconnect failed, sleeping for", int(sleep.Seconds()), "seconds:", err)
+		time.Sleep(sleep)
+		if sleep < c.options.MaxReconnectInterval {
+			sleep *= 2
+		}
+
+		if sleep > c.options.MaxReconnectInterval {
+			sleep = c.options.MaxReconnectInterval
+		}
+		// Disconnect may have been called
+		if atomic.LoadUint32(&c.status) == disconnected {
+			break
+		}
+	}
+
+	// Disconnect() must have been called while we were trying to reconnect.
+	if c.connectionStatus() == disconnected {
+		if conn != nil {
+			conn.Close()
+		}
+		DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect")
+		return
+	}
+
+	inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing
+	if c.startCommsWorkers(conn, inboundFromStore) {
+		c.resume(c.options.ResumeSubs, inboundFromStore)
+	}
+	close(inboundFromStore)
+}
+
+// attemptConnection makes a single attempt to connect to each of the brokers
+// the protocol version to use is passed in (as c.options.ProtocolVersion)
+// Note: Does not set c.conn in order to minimise race conditions
+// Returns:
+// net.Conn - Connected network connection
+// byte - Return code (packets.Accepted indicates a successful connection).
+// bool - SessionPresent flag from the connect ack (only valid if packets.Accepted)
+// err - Error (err != nil guarantees that conn has been set to active connection).
+func (c *client) attemptConnection() (net.Conn, byte, bool, error) {
+	protocolVersion := c.options.ProtocolVersion
+	var (
+		sessionPresent bool
+		conn           net.Conn
+		err            error
+		rc             byte
+	)
+
+	c.optionsMu.Lock() // Protect c.options.Servers so that servers can be added in test cases
+	brokers := c.options.Servers
+	c.optionsMu.Unlock()
+	for _, broker := range brokers {
+		cm := newConnectMsgFromOptions(&c.options, broker)
+		DEBUG.Println(CLI, "about to write new connect msg")
+	CONN:
+		tlsCfg := c.options.TLSConfig
+		if c.options.OnConnectAttempt != nil {
+			DEBUG.Println(CLI, "using custom onConnectAttempt handler...")
+			tlsCfg = c.options.OnConnectAttempt(broker, c.options.TLSConfig)
+		}
+		// Start by opening the network connection (tcp, tls, ws) etc
+		conn, err = openConnection(broker, tlsCfg, c.options.ConnectTimeout, c.options.HTTPHeaders, c.options.WebsocketOptions)
+		if err != nil {
+			ERROR.Println(CLI, err.Error())
+			WARN.Println(CLI, "failed to connect to broker, trying next")
+			rc = packets.ErrNetworkError
+			continue
+		}
+		DEBUG.Println(CLI, "socket connected to broker")
+
+		// Now we send the perform the MQTT connection handshake
+		rc, sessionPresent, err = connectMQTT(conn, cm, protocolVersion)
+		if rc == packets.Accepted {
+			break // successfully connected
+		}
+
+		// We may be have to attempt the connection with MQTT 3.1
+		if conn != nil {
+			_ = conn.Close()
+		}
+		if !c.options.protocolVersionExplicit && protocolVersion == 4 { // try falling back to 3.1?
+			DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol")
+			protocolVersion = 3
+			goto CONN
+		}
+		if c.options.protocolVersionExplicit { // to maintain logging from previous version
+			ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc])
+		}
+	}
+	// If the connection was successful we set member variable and lock in the protocol version for future connection attempts (and users)
+	if rc == packets.Accepted {
+		c.options.ProtocolVersion = protocolVersion
+		c.options.protocolVersionExplicit = true
+	} else {
+		// Maintain same error format as used previously
+		if rc != packets.ErrNetworkError { // mqtt error
+			err = packets.ConnErrors[rc]
+		} else { // network error (if this occurred in ConnectMQTT then err will be nil)
+			err = fmt.Errorf("%s : %s", packets.ConnErrors[rc], err)
+		}
+	}
+	return conn, rc, sessionPresent, err
+}
+
+// Disconnect will end the connection with the server, but not before waiting
+// the specified number of milliseconds to wait for existing work to be
+// completed.
+func (c *client) Disconnect(quiesce uint) {
+	status := atomic.LoadUint32(&c.status)
+	if status == connected {
+		DEBUG.Println(CLI, "disconnecting")
+		c.setConnected(disconnected)
+
+		dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
+		dt := newToken(packets.Disconnect)
+		disconnectSent := false
+		select {
+		case c.oboundP <- &PacketAndToken{p: dm, t: dt}:
+			disconnectSent = true
+		case <-c.commsStopped:
+			WARN.Println("Disconnect packet could not be sent because comms stopped")
+		case <-time.After(time.Duration(quiesce) * time.Millisecond):
+			WARN.Println("Disconnect packet not sent due to timeout")
+		}
+
+		// wait for work to finish, or quiesce time consumed
+		if disconnectSent {
+			DEBUG.Println(CLI, "calling WaitTimeout")
+			dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond)
+			DEBUG.Println(CLI, "WaitTimeout done")
+		}
+	} else {
+		WARN.Println(CLI, "Disconnect() called but not connected (disconnected/reconnecting)")
+		c.setConnected(disconnected)
+	}
+
+	c.disconnect()
+}
+
+// forceDisconnect will end the connection with the mqtt broker immediately (used for tests only)
+func (c *client) forceDisconnect() {
+	if !c.IsConnected() {
+		WARN.Println(CLI, "already disconnected")
+		return
+	}
+	c.setConnected(disconnected)
+	DEBUG.Println(CLI, "forcefully disconnecting")
+	c.disconnect()
+}
+
+// disconnect cleans up after a final disconnection (user requested so no auto reconnection)
+func (c *client) disconnect() {
+	done := c.stopCommsWorkers()
+	if done != nil {
+		<-done // Wait until the disconnect is complete (to limit chance that another connection will be started)
+		DEBUG.Println(CLI, "forcefully disconnecting")
+		c.messageIds.cleanUp()
+		DEBUG.Println(CLI, "disconnected")
+		c.persist.Close()
+	}
+}
+
+// internalConnLost cleanup when connection is lost or an error occurs
+// Note: This function will not block
+func (c *client) internalConnLost(err error) {
+	// It is possible that internalConnLost will be called multiple times simultaneously
+	// (including after sending a DisconnectPacket) as such we only do cleanup etc if the
+	// routines were actually running and are not being disconnected at users request
+	DEBUG.Println(CLI, "internalConnLost called")
+	stopDone := c.stopCommsWorkers()
+	if stopDone != nil { // stopDone will be nil if workers already in the process of stopping or stopped
+		go func() {
+			DEBUG.Println(CLI, "internalConnLost waiting on workers")
+			<-stopDone
+			DEBUG.Println(CLI, "internalConnLost workers stopped")
+			// It is possible that Disconnect was called which led to this error so reconnection depends upon status
+			reconnect := c.options.AutoReconnect && c.connectionStatus() > connecting
+
+			if c.options.CleanSession && !reconnect {
+				c.messageIds.cleanUp()
+			}
+			if reconnect {
+				c.setConnected(reconnecting)
+				go c.reconnect()
+			} else {
+				c.setConnected(disconnected)
+			}
+			if c.options.OnConnectionLost != nil {
+				go c.options.OnConnectionLost(c, err)
+			}
+			DEBUG.Println(CLI, "internalConnLost complete")
+		}()
+	}
+}
+
+// startCommsWorkers is called when the connection is up.
+// It starts off all of the routines needed to process incoming and outgoing messages.
+// Returns true if the comms workers were started (i.e. they were not already running)
+func (c *client) startCommsWorkers(conn net.Conn, inboundFromStore <-chan packets.ControlPacket) bool {
+	DEBUG.Println(CLI, "startCommsWorkers called")
+	c.connMu.Lock()
+	defer c.connMu.Unlock()
+	if c.conn != nil {
+		WARN.Println(CLI, "startCommsWorkers called when commsworkers already running")
+		conn.Close() // No use for the new network connection
+		return false
+	}
+	c.conn = conn // Store the connection
+
+	c.stop = make(chan struct{})
+	if c.options.KeepAlive != 0 {
+		atomic.StoreInt32(&c.pingOutstanding, 0)
+		c.lastReceived.Store(time.Now())
+		c.lastSent.Store(time.Now())
+		c.workers.Add(1)
+		go keepalive(c, conn)
+	}
+
+	// matchAndDispatch will process messages received from the network. It may generate acknowledgements
+	// It will complete when incomingPubChan is closed and will close ackOut prior to exiting
+	incomingPubChan := make(chan *packets.PublishPacket)
+	c.workers.Add(1) // Done will be called when ackOut is closed
+	ackOut := c.msgRouter.matchAndDispatch(incomingPubChan, c.options.Order, c)
+
+	c.setConnected(connected)
+	DEBUG.Println(CLI, "client is connected/reconnected")
+	if c.options.OnConnect != nil {
+		go c.options.OnConnect(c)
+	}
+
+	// c.oboundP and c.obound need to stay active for the life of the client because, depending upon the options,
+	// messages may be published while the client is disconnected (they will block unless in a goroutine). However
+	// to keep the comms routines clean we want to shutdown the input messages it uses so create out own channels
+	// and copy data across.
+	commsobound := make(chan *PacketAndToken)  // outgoing publish packets
+	commsoboundP := make(chan *PacketAndToken) // outgoing 'priority' packet
+	c.workers.Add(1)
+	go func() {
+		defer c.workers.Done()
+		for {
+			select {
+			case msg := <-c.oboundP:
+				commsoboundP <- msg
+			case msg := <-c.obound:
+				commsobound <- msg
+			case msg, ok := <-ackOut:
+				if !ok {
+					ackOut = nil     // ignore channel going forward
+					c.workers.Done() // matchAndDispatch has completed
+					continue         // await next message
+				}
+				commsoboundP <- msg
+			case <-c.stop:
+				// Attempt to transmit any outstanding acknowledgements (this may well fail but should work if this is a clean disconnect)
+				if ackOut != nil {
+					for msg := range ackOut {
+						commsoboundP <- msg
+					}
+					c.workers.Done() // matchAndDispatch has completed
+				}
+				close(commsoboundP) // Nothing sending to these channels anymore so close them and allow comms routines to exit
+				close(commsobound)
+				DEBUG.Println(CLI, "startCommsWorkers output redirector finished")
+				return
+			}
+		}
+	}()
+
+	commsIncomingPub, commsErrors := startComms(c.conn, c, inboundFromStore, commsoboundP, commsobound)
+	c.commsStopped = make(chan struct{})
+	go func() {
+		for {
+			if commsIncomingPub == nil && commsErrors == nil {
+				break
+			}
+			select {
+			case pub, ok := <-commsIncomingPub:
+				if !ok {
+					// Incoming comms has shutdown
+					close(incomingPubChan) // stop the router
+					commsIncomingPub = nil
+					continue
+				}
+				// Care is needed here because an error elsewhere could trigger a deadlock
+			sendPubLoop:
+				for {
+					select {
+					case incomingPubChan <- pub:
+						break sendPubLoop
+					case err, ok := <-commsErrors:
+						if !ok { // commsErrors has been closed so we can ignore it
+							commsErrors = nil
+							continue
+						}
+						ERROR.Println(CLI, "Connect comms goroutine - error triggered during send Pub", err)
+						c.internalConnLost(err) // no harm in calling this if the connection is already down (or shutdown is in progress)
+						continue
+					}
+				}
+			case err, ok := <-commsErrors:
+				if !ok {
+					commsErrors = nil
+					continue
+				}
+				ERROR.Println(CLI, "Connect comms goroutine - error triggered", err)
+				c.internalConnLost(err) // no harm in calling this if the connection is already down (or shutdown is in progress)
+				continue
+			}
+		}
+		DEBUG.Println(CLI, "incoming comms goroutine done")
+		close(c.commsStopped)
+	}()
+	DEBUG.Println(CLI, "startCommsWorkers done")
+	return true
+}
+
+// stopWorkersAndComms - Cleanly shuts down worker go routines (including the comms routines) and waits until everything has stopped
+// Returns nil it workers did not need to be stopped; otherwise returns a channel which will be closed when the stop is complete
+// Note: This may block so run as a go routine if calling from any of the comms routines
+func (c *client) stopCommsWorkers() chan struct{} {
+	DEBUG.Println(CLI, "stopCommsWorkers called")
+	// It is possible that this function will be called multiple times simultaneously due to the way things get shutdown
+	c.connMu.Lock()
+	if c.conn == nil {
+		DEBUG.Println(CLI, "stopCommsWorkers done (not running)")
+		c.connMu.Unlock()
+		return nil
+	}
+
+	// It is important that everything is stopped in the correct order to avoid deadlocks. The main issue here is
+	// the router because it both receives incoming publish messages and also sends outgoing acknowledgements. To
+	// avoid issues we signal the workers to stop and close the connection (it is probably already closed but
+	// there is no harm in being sure). We can then wait for the workers to finnish before closing outbound comms
+	// channels which will allow the comms routines to exit.
+
+	// We stop all non-comms related workers first (ping, keepalive, errwatch, resume etc) so they don't get blocked waiting on comms
+	close(c.stop)     // Signal for workers to stop
+	c.conn.Close()    // Possible that this is already closed but no harm in closing again
+	c.conn = nil      // Important that this is the only place that this is set to nil
+	c.connMu.Unlock() // As the connection is now nil we can unlock the mu (allowing subsequent calls to exit immediately)
+
+	doneChan := make(chan struct{})
+
+	go func() {
+		DEBUG.Println(CLI, "stopCommsWorkers waiting for workers")
+		c.workers.Wait()
+
+		// Stopping the workers will allow the comms routines to exit; we wait for these to complete
+		DEBUG.Println(CLI, "stopCommsWorkers waiting for comms")
+		<-c.commsStopped // wait for comms routine to stop
+
+		DEBUG.Println(CLI, "stopCommsWorkers done")
+		close(doneChan)
+	}()
+	return doneChan
+}
+
+// Publish will publish a message with the specified QoS and content
+// to the specified topic.
+// Returns a token to track delivery of the message to the broker
+func (c *client) Publish(topic string, qos byte, retained bool, payload interface{}) Token {
+	token := newToken(packets.Publish).(*PublishToken)
+	DEBUG.Println(CLI, "enter Publish")
+	switch {
+	case !c.IsConnected():
+		token.setError(ErrNotConnected)
+		return token
+	case c.connectionStatus() == reconnecting && qos == 0:
+		token.flowComplete()
+		return token
+	}
+	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
+	pub.Qos = qos
+	pub.TopicName = topic
+	pub.Retain = retained
+	switch p := payload.(type) {
+	case string:
+		pub.Payload = []byte(p)
+	case []byte:
+		pub.Payload = p
+	case bytes.Buffer:
+		pub.Payload = p.Bytes()
+	default:
+		token.setError(fmt.Errorf("unknown payload type"))
+		return token
+	}
+
+	if pub.Qos != 0 && pub.MessageID == 0 {
+		mID := c.getID(token)
+		if mID == 0 {
+			token.setError(fmt.Errorf("no message IDs available"))
+			return token
+		}
+		pub.MessageID = mID
+		token.messageID = mID
+	}
+	persistOutbound(c.persist, pub)
+	switch c.connectionStatus() {
+	case connecting:
+		DEBUG.Println(CLI, "storing publish message (connecting), topic:", topic)
+	case reconnecting:
+		DEBUG.Println(CLI, "storing publish message (reconnecting), topic:", topic)
+	default:
+		DEBUG.Println(CLI, "sending publish message, topic:", topic)
+		publishWaitTimeout := c.options.WriteTimeout
+		if publishWaitTimeout == 0 {
+			publishWaitTimeout = time.Second * 30
+		}
+		select {
+		case c.obound <- &PacketAndToken{p: pub, t: token}:
+		case <-time.After(publishWaitTimeout):
+			token.setError(errors.New("publish was broken by timeout"))
+		}
+	}
+	return token
+}
+
+// Subscribe starts a new subscription. Provide a MessageHandler to be executed when
+// a message is published on the topic provided.
+//
+// If options.OrderMatters is true (the default) then callback must not block or
+// call functions within this package that may block (e.g. Publish) other than in
+// a new go routine.
+// callback must be safe for concurrent use by multiple goroutines.
+func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Token {
+	token := newToken(packets.Subscribe).(*SubscribeToken)
+	DEBUG.Println(CLI, "enter Subscribe")
+	if !c.IsConnected() {
+		token.setError(ErrNotConnected)
+		return token
+	}
+	// Connected but the network link is not currently open: the subscribe can
+	// only be stored for later delivery if it will survive the reconnect.
+	if !c.IsConnectionOpen() {
+		switch {
+		case !c.options.ResumeSubs:
+			// if not connected and resumesubs not set this sub will be thrown away
+			token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
+			return token
+		case c.options.CleanSession && c.connectionStatus() == reconnecting:
+			// if reconnecting and cleansession is true this sub will be thrown away
+			token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
+			return token
+		}
+	}
+	sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
+	if err := validateTopicAndQos(topic, qos); err != nil {
+		token.setError(err)
+		return token
+	}
+	// The packet carries the topic as supplied by the caller (including any
+	// shared-subscription prefix); only the local routing key is rewritten below.
+	sub.Topics = append(sub.Topics, topic)
+	sub.Qoss = append(sub.Qoss, qos)
+
+	// Shared subscription "$share/<group>/<topic>": incoming publishes arrive on
+	// the bare topic, so register the route with the prefix and group stripped.
+	if strings.HasPrefix(topic, "$share/") {
+		topic = strings.Join(strings.Split(topic, "/")[2:], "/")
+	}
+
+	// "$queue/<topic>" (queue-style shared subscription) — likewise strip the
+	// prefix before registering the route.
+	if strings.HasPrefix(topic, "$queue/") {
+		topic = strings.TrimPrefix(topic, "$queue/")
+	}
+
+	if callback != nil {
+		c.msgRouter.addRoute(topic, callback)
+	}
+
+	token.subs = append(token.subs, topic)
+
+	// Claim a message ID so the broker's SUBACK can be matched back to this token.
+	if sub.MessageID == 0 {
+		mID := c.getID(token)
+		if mID == 0 {
+			token.setError(fmt.Errorf("no message IDs available"))
+			return token
+		}
+		sub.MessageID = mID
+		token.messageID = mID
+	}
+	DEBUG.Println(CLI, sub.String())
+
+	// Persist before (possibly) sending so resume() can replay the subscribe
+	// after a reconnect or restart.
+	persistOutbound(c.persist, sub)
+	switch c.connectionStatus() {
+	case connecting:
+		DEBUG.Println(CLI, "storing subscribe message (connecting), topic:", topic)
+	case reconnecting:
+		DEBUG.Println(CLI, "storing subscribe message (reconnecting), topic:", topic)
+	default:
+		DEBUG.Println(CLI, "sending subscribe message, topic:", topic)
+		subscribeWaitTimeout := c.options.WriteTimeout
+		if subscribeWaitTimeout == 0 {
+			subscribeWaitTimeout = time.Second * 30
+		}
+		// Hand the packet to the outbound goroutine, giving up after the
+		// write timeout so a stalled connection cannot block the caller forever.
+		select {
+		case c.oboundP <- &PacketAndToken{p: sub, t: token}:
+		case <-time.After(subscribeWaitTimeout):
+			token.setError(errors.New("subscribe was broken by timeout"))
+		}
+	}
+	DEBUG.Println(CLI, "exit Subscribe")
+	return token
+}
+
+// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
+// be executed when a message is published on one of the topics provided.
+//
+// If options.OrderMatters is true (the default) then callback must not block or
+// call functions within this package that may block (e.g. Publish) other than in
+// a new go routine.
+// callback must be safe for concurrent use by multiple goroutines.
+func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token {
+	var err error
+	token := newToken(packets.Subscribe).(*SubscribeToken)
+	DEBUG.Println(CLI, "enter SubscribeMultiple")
+	if !c.IsConnected() {
+		token.setError(ErrNotConnected)
+		return token
+	}
+	// Connected but link down: only usable if the subscribe survives reconnect
+	// (same policy as Subscribe).
+	if !c.IsConnectionOpen() {
+		switch {
+		case !c.options.ResumeSubs:
+			// if not connected and resumesubs not set this sub will be thrown away
+			token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
+			return token
+		case c.options.CleanSession && c.connectionStatus() == reconnecting:
+			// if reconnecting and cleansession is true this sub will be thrown away
+			token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
+			return token
+		}
+	}
+	sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
+	if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil {
+		token.setError(err)
+		return token
+	}
+
+	// One shared callback is routed for every filter in the map.
+	if callback != nil {
+		for topic := range filters {
+			c.msgRouter.addRoute(topic, callback)
+		}
+	}
+	token.subs = make([]string, len(sub.Topics))
+	copy(token.subs, sub.Topics)
+
+	// Claim a message ID so the broker's SUBACK can be matched back to this token.
+	if sub.MessageID == 0 {
+		mID := c.getID(token)
+		if mID == 0 {
+			token.setError(fmt.Errorf("no message IDs available"))
+			return token
+		}
+		sub.MessageID = mID
+		token.messageID = mID
+	}
+	// Persist before (possibly) sending so resume() can replay the subscribe.
+	persistOutbound(c.persist, sub)
+	switch c.connectionStatus() {
+	case connecting:
+		DEBUG.Println(CLI, "storing subscribe message (connecting), topics:", sub.Topics)
+	case reconnecting:
+		DEBUG.Println(CLI, "storing subscribe message (reconnecting), topics:", sub.Topics)
+	default:
+		DEBUG.Println(CLI, "sending subscribe message, topics:", sub.Topics)
+		subscribeWaitTimeout := c.options.WriteTimeout
+		if subscribeWaitTimeout == 0 {
+			subscribeWaitTimeout = time.Second * 30
+		}
+		select {
+		case c.oboundP <- &PacketAndToken{p: sub, t: token}:
+		case <-time.After(subscribeWaitTimeout):
+			token.setError(errors.New("subscribe was broken by timeout"))
+		}
+	}
+	DEBUG.Println(CLI, "exit SubscribeMultiple")
+	return token
+}
+
+// reserveStoredPublishIDs reserves the ids for publish packets in the persistent store to ensure these are not duplicated
+func (c *client) reserveStoredPublishIDs() {
+	// The resume function sets the stored id for publish packets only (some other packets
+	// will get new ids in net code). This means that the only keys we need to ensure are
+	// unique are the publish ones (and these will be completed/replaced in resume() )
+	if !c.options.CleanSession {
+		storedKeys := c.persist.All()
+		for _, key := range storedKeys {
+			packet := c.persist.Get(key)
+			if packet == nil {
+				continue
+			}
+			switch packet.(type) {
+			case *packets.PublishPacket:
+				// A PlaceHolderToken simply parks the id in the id table so a
+				// new outbound packet cannot be assigned the same MessageID.
+				details := packet.Details()
+				token := &PlaceHolderToken{id: details.MessageID}
+				c.claimID(token, details.MessageID)
+			}
+		}
+	}
+}
+
+// Load all stored messages and resend them
+// Call this to ensure QoS 1/2 delivery guarantees are honoured even after an application crash
+// Note: This function will exit if c.stop is closed (this allows the shutdown to proceed avoiding a potential deadlock)
+//
+func (c *client) resume(subscription bool, ibound chan packets.ControlPacket) {
+	DEBUG.Println(STR, "enter Resume")
+
+	storedKeys := c.persist.All()
+	for _, key := range storedKeys {
+		packet := c.persist.Get(key)
+		if packet == nil {
+			DEBUG.Println(STR, fmt.Sprintf("resume found NIL packet (%s)", key))
+			continue
+		}
+		details := packet.Details()
+		// Outbound keys are replayed towards the broker; inbound keys are fed
+		// back into the incoming-packet channel.
+		if isKeyOutbound(key) {
+			switch p := packet.(type) {
+			case *packets.SubscribePacket:
+				if subscription {
+					DEBUG.Println(STR, fmt.Sprintf("loaded pending subscribe (%d)", details.MessageID))
+					subPacket := packet.(*packets.SubscribePacket)
+					token := newToken(packets.Subscribe).(*SubscribeToken)
+					token.messageID = details.MessageID
+					token.subs = append(token.subs, subPacket.Topics...)
+					c.claimID(token, details.MessageID)
+					select {
+					case c.oboundP <- &PacketAndToken{p: packet, t: token}:
+					case <-c.stop:
+						DEBUG.Println(STR, "resume exiting due to stop")
+						return
+					}
+				} else {
+					c.persist.Del(key) // Subscribe packets should not be retained following a reconnect
+				}
+			case *packets.UnsubscribePacket:
+				if subscription {
+					DEBUG.Println(STR, fmt.Sprintf("loaded pending unsubscribe (%d)", details.MessageID))
+					token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
+					select {
+					case c.oboundP <- &PacketAndToken{p: packet, t: token}:
+					case <-c.stop:
+						DEBUG.Println(STR, "resume exiting due to stop")
+						return
+					}
+				} else {
+					c.persist.Del(key) // Unsubscribe packets should not be retained following a reconnect
+				}
+			case *packets.PubrelPacket:
+				DEBUG.Println(STR, fmt.Sprintf("loaded pending pubrel (%d)", details.MessageID))
+				select {
+				case c.oboundP <- &PacketAndToken{p: packet, t: nil}:
+				case <-c.stop:
+					DEBUG.Println(STR, "resume exiting due to stop")
+					return
+				}
+			case *packets.PublishPacket:
+				// spec: If the DUP flag is set to 0, it indicates that this is the first occasion that the Client or
+				// Server has attempted to send this MQTT PUBLISH Packet. If the DUP flag is set to 1, it indicates that
+				// this might be re-delivery of an earlier attempt to send the Packet.
+				//
+				// If the message is in the store then an attempt at delivery has been made (note that the message may
+				// never have made it onto the wire but tracking that would be complicated!).
+				if p.Qos != 0 { // spec: The DUP flag MUST be set to 0 for all QoS 0 messages
+					p.Dup = true
+				}
+				token := newToken(packets.Publish).(*PublishToken)
+				token.messageID = details.MessageID
+				c.claimID(token, details.MessageID)
+				DEBUG.Println(STR, fmt.Sprintf("loaded pending publish (%d)", details.MessageID))
+				DEBUG.Println(STR, details)
+				select {
+				case c.obound <- &PacketAndToken{p: p, t: token}:
+				case <-c.stop:
+					DEBUG.Println(STR, "resume exiting due to stop")
+					return
+				}
+			default:
+				ERROR.Println(STR, "invalid message type in store (discarded)")
+				c.persist.Del(key)
+			}
+		} else {
+			switch packet.(type) {
+			case *packets.PubrelPacket:
+				DEBUG.Println(STR, fmt.Sprintf("loaded pending incomming (%d)", details.MessageID))
+				select {
+				case ibound <- packet:
+				case <-c.stop:
+					DEBUG.Println(STR, "resume exiting due to stop (ibound <- packet)")
+					return
+				}
+			default:
+				ERROR.Println(STR, "invalid message type in store (discarded)")
+				c.persist.Del(key)
+			}
+		}
+	}
+	DEBUG.Println(STR, "exit resume")
+}
+
+// Unsubscribe will end the subscription from each of the topics provided.
+// Messages published to those topics from other clients will no longer be
+// received.
+func (c *client) Unsubscribe(topics ...string) Token {
+	token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
+	DEBUG.Println(CLI, "enter Unsubscribe")
+	if !c.IsConnected() {
+		token.setError(ErrNotConnected)
+		return token
+	}
+	// Same connection-state policy as Subscribe: only store the packet if it
+	// will survive the reconnect.
+	if !c.IsConnectionOpen() {
+		switch {
+		case !c.options.ResumeSubs:
+			// if not connected and resumesubs not set this unsub will be thrown away
+			token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
+			return token
+		case c.options.CleanSession && c.connectionStatus() == reconnecting:
+			// if reconnecting and cleansession is true this unsub will be thrown away
+			token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
+			return token
+		}
+	}
+	unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
+	unsub.Topics = make([]string, len(topics))
+	copy(unsub.Topics, topics)
+
+	// Claim a message ID so the broker's UNSUBACK can be matched back to this token.
+	if unsub.MessageID == 0 {
+		mID := c.getID(token)
+		if mID == 0 {
+			token.setError(fmt.Errorf("no message IDs available"))
+			return token
+		}
+		unsub.MessageID = mID
+		token.messageID = mID
+	}
+
+	// Persist before (possibly) sending so resume() can replay the unsubscribe.
+	persistOutbound(c.persist, unsub)
+
+	switch c.connectionStatus() {
+	case connecting:
+		DEBUG.Println(CLI, "storing unsubscribe message (connecting), topics:", topics)
+	case reconnecting:
+		DEBUG.Println(CLI, "storing unsubscribe message (reconnecting), topics:", topics)
+	default:
+		DEBUG.Println(CLI, "sending unsubscribe message, topics:", topics)
+		subscribeWaitTimeout := c.options.WriteTimeout
+		if subscribeWaitTimeout == 0 {
+			subscribeWaitTimeout = time.Second * 30
+		}
+		select {
+		case c.oboundP <- &PacketAndToken{p: unsub, t: token}:
+			// Routes are only removed once the packet has actually been queued.
+			for _, topic := range topics {
+				c.msgRouter.deleteRoute(topic)
+			}
+		case <-time.After(subscribeWaitTimeout):
+			token.setError(errors.New("unsubscribe was broken by timeout"))
+		}
+	}
+
+	DEBUG.Println(CLI, "exit Unsubscribe")
+	return token
+}
+
+// OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions
+// in use by the client.
+func (c *client) OptionsReader() ClientOptionsReader {
+	// The reader holds a pointer to the client's options; callers get read-only
+	// access through the ClientOptionsReader API.
+	r := ClientOptionsReader{options: &c.options}
+	return r
+}
+
+// DefaultConnectionLostHandler is a definition of a function that simply
+// reports to the DEBUG log the reason for the client losing a connection.
+func DefaultConnectionLostHandler(client Client, reason error) {
+	DEBUG.Println("Connection lost:", reason.Error())
+}
+
+// UpdateLastReceived - Will be called whenever a packet is received off the network
+// This is used by the keepalive routine to track activity on the connection.
+func (c *client) UpdateLastReceived() {
+	// Timestamps are only tracked when keepalive is enabled.
+	if c.options.KeepAlive != 0 {
+		c.lastReceived.Store(time.Now())
+	}
+}
+
+// UpdateLastSent - Will be called whenever a packet is successfully transmitted to the network
+func (c *client) UpdateLastSent() {
+	if c.options.KeepAlive != 0 {
+		c.lastSent.Store(time.Now())
+	}
+}
+
+// getWriteTimeOut returns the writetimeout (duration to wait when writing to the connection) or 0 if none
+func (c *client) getWriteTimeOut() time.Duration {
+	return c.options.WriteTimeout
+}
+
+// persistOutbound adds the packet to the outbound store
+func (c *client) persistOutbound(m packets.ControlPacket) {
+	persistOutbound(c.persist, m)
+}
+
+// persistInbound adds the packet to the inbound store
+func (c *client) persistInbound(m packets.ControlPacket) {
+	persistInbound(c.persist, m)
+}
+
+// pingRespReceived will be called by the network routines when a ping response is received
+func (c *client) pingRespReceived() {
+	// Clears the outstanding-ping flag so the keepalive logic knows the broker answered.
+	atomic.StoreInt32(&c.pingOutstanding, 0)
+}

+ 32 - 0
vendor/github.com/eclipse/paho.mqtt.golang/components.go

@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+// component is a short tag identifying which part of the library produced a log line.
+type component string
+
+// Component names for debug output
+// (padded to a fixed width so log columns line up).
+const (
+	NET component = "[net]     "
+	PNG component = "[pinger]  "
+	CLI component = "[client]  "
+	DEC component = "[decode]  "
+	MES component = "[message] "
+	STR component = "[store]   "
+	MID component = "[msgids]  "
+	TST component = "[test]    "
+	STA component = "[state]   "
+	ERR component = "[error]   "
+	ROU component = "[router]  "
+)

+ 15 - 0
vendor/github.com/eclipse/paho.mqtt.golang/edl-v10

@@ -0,0 +1,15 @@
+
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+    Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+

+ 70 - 0
vendor/github.com/eclipse/paho.mqtt.golang/epl-v10

@@ -0,0 +1,70 @@
+Eclipse Public License - v 1.0
+
+THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+1. DEFINITIONS
+
+"Contribution" means:
+
+a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
+b) in the case of each subsequent Contributor:
+i) changes to the Program, and
+ii) additions to the Program;
+where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
+"Contributor" means any person or entity that distributes the Program.
+
+"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
+
+"Program" means the Contributions distributed in accordance with this Agreement.
+
+"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
+
+2. GRANT OF RIGHTS
+
+a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
+b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
+c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
+d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
+3. REQUIREMENTS
+
+A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
+
+a) it complies with the terms and conditions of this Agreement; and
+b) its license agreement:
+i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
+ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
+iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
+iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
+When the Program is made available in source code form:
+
+a) it must be made available under this Agreement; and
+b) a copy of this Agreement must be included with each copy of the Program.
+Contributors may not remove or alter any copyright notices contained within the Program.
+
+Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
+
+4. COMMERCIAL DISTRIBUTION
+
+Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
+
+For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
+
+5. NO WARRANTY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
+
+6. DISCLAIMER OF LIABILITY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+7. GENERAL
+
+If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
+
+If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
+
+All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
+
+Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
+
+This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.

+ 257 - 0
vendor/github.com/eclipse/paho.mqtt.golang/filestore.go

@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"sort"
+	"sync"
+
+	"github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// File extensions used by FileStore: live messages (.msg), in-progress
+// writes (.tmp), and unreadable files that have been quarantined (.CORRUPT).
+const (
+	msgExt     = ".msg"
+	tmpExt     = ".tmp"
+	corruptExt = ".CORRUPT"
+)
+
+// FileStore implements the store interface using the filesystem to provide
+// true persistence, even across client failure. This is designed to use a
+// single directory per running client. If you are running multiple clients
+// on the same filesystem, you will need to be careful to specify unique
+// store directories for each.
+type FileStore struct {
+	sync.RWMutex        // guards directory/opened and serializes file access
+	directory string    // directory in which message files are kept
+	opened    bool      // true between Open() and Close(); operations are no-ops otherwise
+}
+
+// NewFileStore will create a new FileStore which stores its messages in the
+// directory provided. The store is unusable until Open() is called.
+func NewFileStore(directory string) *FileStore {
+	store := &FileStore{
+		directory: directory,
+		opened:    false,
+	}
+	return store
+}
+
+// Open will allow the FileStore to be used.
+// It creates the store directory (0770) if it does not already exist.
+func (store *FileStore) Open() {
+	store.Lock()
+	defer store.Unlock()
+	// if no store directory was specified in ClientOpts, by default use the
+	// current working directory
+	if store.directory == "" {
+		store.directory, _ = os.Getwd()
+	}
+
+	// if store dir exists, great, otherwise, create it
+	if !exists(store.directory) {
+		perms := os.FileMode(0770)
+		merr := os.MkdirAll(store.directory, perms)
+		chkerr(merr)
+	}
+	store.opened = true
+	DEBUG.Println(STR, "store is opened at", store.directory)
+}
+
+// Close will disallow the FileStore from being used.
+// Files on disk are left in place; only the opened flag is cleared.
+func (store *FileStore) Close() {
+	store.Lock()
+	defer store.Unlock()
+	store.opened = false
+	DEBUG.Println(STR, "store is closed")
+}
+
+// Put will put a message into the store, associated with the provided
+// key value. The packet is serialized to <directory>/<key>.msg.
+func (store *FileStore) Put(key string, m packets.ControlPacket) {
+	store.Lock()
+	defer store.Unlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to use file store, but not open")
+		return
+	}
+	full := fullpath(store.directory, key)
+	write(store.directory, key, m)
+	// Sanity check: write() should have renamed the tmp file into place.
+	if !exists(full) {
+		ERROR.Println(STR, "file not created:", full)
+	}
+}
+
+// Get will retrieve a message from the store, the one associated with
+// the provided key value. Returns nil if the key has no file or the
+// file cannot be parsed as an MQTT control packet.
+func (store *FileStore) Get(key string) packets.ControlPacket {
+	store.RLock()
+	defer store.RUnlock()
+	if !store.opened {
+		ERROR.Println(STR, "trying to use file store, but not open")
+		return nil
+	}
+	filepath := fullpath(store.directory, key)
+	if !exists(filepath) {
+		return nil
+	}
+	mfile, oerr := os.Open(filepath)
+	chkerr(oerr)
+	msg, rerr := packets.ReadPacket(mfile)
+	chkerr(mfile.Close())
+
+	// Message was unreadable, return nil.
+	// The file is renamed (not deleted) so it can be inspected later.
+	if rerr != nil {
+		newpath := corruptpath(store.directory, key)
+		WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath)
+		if err := os.Rename(filepath, newpath); err != nil {
+			ERROR.Println(STR, err)
+		}
+		return nil
+	}
+	return msg
+}
+
+// All will provide a list of all of the keys associated with messages
+// currently residing in the FileStore.
+func (store *FileStore) All() []string {
+	store.RLock()
+	defer store.RUnlock()
+	return store.all()
+}
+
+// Del will remove the persisted message associated with the provided
+// key from the FileStore.
+func (store *FileStore) Del(key string) {
+	store.Lock()
+	defer store.Unlock()
+	store.del(key)
+}
+
+// Reset will remove all persisted messages from the FileStore.
+func (store *FileStore) Reset() {
+	store.Lock()
+	defer store.Unlock()
+	WARN.Println(STR, "FileStore Reset")
+	for _, key := range store.all() {
+		store.del(key)
+	}
+}
+
+// all returns the keys of every *.msg file in the store directory,
+// ordered by file modification time (oldest first).
+// lockless: caller must hold at least a read lock.
+func (store *FileStore) all() []string {
+	var err error
+	var keys []string
+	var files fileInfos
+
+	if !store.opened {
+		ERROR.Println(STR, "trying to use file store, but not open")
+		return nil
+	}
+
+	files, err = ioutil.ReadDir(store.directory)
+	chkerr(err)
+	sort.Sort(files)
+	for _, f := range files {
+		DEBUG.Println(STR, "file in All():", f.Name())
+		name := f.Name()
+		// NOTE(review): slicing the last 4 bytes assumes len(name) >= 4;
+		// a shorter filename in the store directory would panic — confirm upstream.
+		if name[len(name)-4:] != msgExt {
+			DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name)
+			continue
+		}
+		key := name[0 : len(name)-4] // remove file extension
+		keys = append(keys, key)
+	}
+	return keys
+}
+
+// del removes the file backing the given key, logging (but not failing)
+// if the file does not exist.
+// lockless: caller must hold the write lock.
+func (store *FileStore) del(key string) {
+	if !store.opened {
+		ERROR.Println(STR, "trying to use file store, but not open")
+		return
+	}
+	DEBUG.Println(STR, "store del filepath:", store.directory)
+	DEBUG.Println(STR, "store delete key:", key)
+	filepath := fullpath(store.directory, key)
+	DEBUG.Println(STR, "path of deletion:", filepath)
+	if !exists(filepath) {
+		WARN.Println(STR, "store could not delete key:", key)
+		return
+	}
+	rerr := os.Remove(filepath)
+	chkerr(rerr)
+	DEBUG.Println(STR, "del msg:", key)
+	// Sanity check that the remove actually took effect.
+	if exists(filepath) {
+		ERROR.Println(STR, "file not deleted:", filepath)
+	}
+}
+
+// fullpath returns the on-disk path of a stored message: <store>/<key>.msg.
+func fullpath(store string, key string) string {
+	p := path.Join(store, key+msgExt)
+	return p
+}
+
+// tmppath returns the scratch path used while writing: <store>/<key>.tmp.
+func tmppath(store string, key string) string {
+	p := path.Join(store, key+tmpExt)
+	return p
+}
+
+// corruptpath returns the quarantine path for an unreadable message:
+// <store>/<key>.CORRUPT.
+func corruptpath(store string, key string) string {
+	p := path.Join(store, key+corruptExt)
+	return p
+}
+
+// create file called "X.[messageid].tmp" located in the store
+// the contents of the file is the bytes of the message, then
+// rename it to "X.[messageid].msg", overwriting any existing
+// message with the same id
+// X will be 'i' for inbound messages, and O for outbound messages
+// (the write-then-rename sequence means readers never see a partially
+// written .msg file)
+func write(store, key string, m packets.ControlPacket) {
+	temppath := tmppath(store, key)
+	f, err := os.Create(temppath)
+	chkerr(err)
+	werr := m.Write(f)
+	chkerr(werr)
+	cerr := f.Close()
+	chkerr(cerr)
+	rerr := os.Rename(temppath, fullpath(store, key))
+	chkerr(rerr)
+}
+
+// exists reports whether the named file (or directory) is present.
+// Any stat error other than "not exist" is treated as fatal via chkerr.
+func exists(file string) bool {
+	if _, err := os.Stat(file); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+		chkerr(err)
+	}
+	return true
+}
+
+// fileInfos implements sort.Interface over os.FileInfo, ordering files by
+// modification time (oldest first); used by FileStore.all().
+type fileInfos []os.FileInfo
+
+func (f fileInfos) Len() int {
+	return len(f)
+}
+
+func (f fileInfos) Swap(i, j int) {
+	f[i], f[j] = f[j], f[i]
+}
+
+func (f fileInfos) Less(i, j int) bool {
+	return f[i].ModTime().Before(f[j].ModTime())
+}

+ 138 - 0
vendor/github.com/eclipse/paho.mqtt.golang/memstore.go

@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"sync"
+
+	"github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// MemoryStore implements the store interface to provide a "persistence"
+// mechanism wholly stored in memory. This is only useful for
+// as long as the client instance exists.
+type MemoryStore struct {
+	sync.RWMutex
+	messages map[string]packets.ControlPacket // in-flight packets; keys embed the message id (see mIDFromKey)
+	opened   bool                             // guards against use before Open or after Close
+}
+
+// NewMemoryStore constructs an empty MemoryStore. The returned store is
+// not ready for use until Open() has been called on it.
+func NewMemoryStore() *MemoryStore {
+	return &MemoryStore{
+		messages: make(map[string]packets.ControlPacket),
+	}
+}
+
+// Open initializes a MemoryStore instance, marking it usable by the
+// other store methods.
+func (store *MemoryStore) Open() {
+	store.Lock()
+	defer store.Unlock()
+	store.opened = true
+	DEBUG.Println(STR, "memorystore initialized")
+}
+
+// Put takes a key and a pointer to a Message and stores the
+// message. Any message already stored under key is overwritten.
+func (store *MemoryStore) Put(key string, message packets.ControlPacket) {
+	store.Lock()
+	defer store.Unlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to use memory store, but not open")
+		return
+	}
+	store.messages[key] = message
+}
+
+// Get takes a key and looks in the store for a matching Message
+// returning either the Message pointer or nil.
+func (store *MemoryStore) Get(key string) packets.ControlPacket {
+	store.RLock()
+	defer store.RUnlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to use memory store, but not open")
+		return nil
+	}
+	// mid is extracted from the key purely for the log lines below.
+	mid := mIDFromKey(key)
+	m := store.messages[key]
+	if m == nil {
+		CRITICAL.Println(STR, "memorystore get: message", mid, "not found")
+	} else {
+		DEBUG.Println(STR, "memorystore get: message", mid, "found")
+	}
+	return m
+}
+
+// All returns the keys of every message currently held in the
+// MemoryStore, or nil if the store is empty or has not been opened.
+func (store *MemoryStore) All() []string {
+	store.RLock()
+	defer store.RUnlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to use memory store, but not open")
+		return nil
+	}
+	var keys []string
+	for key := range store.messages {
+		keys = append(keys, key)
+	}
+	return keys
+}
+
+// Del takes a key, searches the MemoryStore and if the key is found
+// deletes the Message pointer associated with it.
+func (store *MemoryStore) Del(key string) {
+	store.Lock()
+	defer store.Unlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to use memory store, but not open")
+		return
+	}
+	mid := mIDFromKey(key)
+	m := store.messages[key]
+	if m == nil {
+		// Deleting a missing key is only worth a warning, not an error.
+		WARN.Println(STR, "memorystore del: message", mid, "not found")
+	} else {
+		delete(store.messages, key)
+		DEBUG.Println(STR, "memorystore del: message", mid, "was deleted")
+	}
+}
+
+// Close will disallow modifications to the state of the store. The
+// stored messages themselves are retained (compare Reset).
+func (store *MemoryStore) Close() {
+	store.Lock()
+	defer store.Unlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to close memory store, but not open")
+		return
+	}
+	store.opened = false
+	DEBUG.Println(STR, "memorystore closed")
+}
+
+// Reset eliminates all persisted message data in the store.
+func (store *MemoryStore) Reset() {
+	store.Lock()
+	defer store.Unlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to reset memory store, but not open")
+	}
+	// NOTE(review): unlike Put/Get/Del/Close there is no early return after
+	// the not-open check, so the map is wiped even when the store is closed.
+	// Confirm this is intentional before changing vendored behaviour.
+	store.messages = make(map[string]packets.ControlPacket)
+	WARN.Println(STR, "memorystore wiped")
+}

+ 127 - 0
vendor/github.com/eclipse/paho.mqtt.golang/message.go

@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"net/url"
+	"sync"
+
+	"github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// Message defines the externals that a message implementation must support
+// these are received messages that are passed to the callbacks, not internal
+// messages
+type Message interface {
+	Duplicate() bool   // DUP flag: the message may have been delivered before
+	Qos() byte         // quality-of-service level (0, 1 or 2)
+	Retained() bool    // whether the broker flagged this as a retained message
+	Topic() string     // topic the message was published on
+	MessageID() uint16 // MQTT packet identifier
+	Payload() []byte   // application payload bytes
+	Ack()              // acknowledge receipt (at most once; see message.Ack)
+}
+
+// message is the concrete Message implementation built from a received
+// PUBLISH packet (see messageFromPublish).
+type message struct {
+	duplicate bool
+	qos       byte
+	retained  bool
+	topic     string
+	messageID uint16
+	payload   []byte
+	once      sync.Once // ensures ack runs at most once
+	ack       func()    // acknowledgement callback supplied by the router
+}
+
+// Duplicate reports whether the DUP flag was set on the packet.
+func (m *message) Duplicate() bool {
+	return m.duplicate
+}
+
+// Qos returns the message's quality-of-service level.
+func (m *message) Qos() byte {
+	return m.qos
+}
+
+// Retained reports whether the packet's retain flag was set.
+func (m *message) Retained() bool {
+	return m.retained
+}
+
+// Topic returns the topic the message was published on.
+func (m *message) Topic() string {
+	return m.topic
+}
+
+// MessageID returns the MQTT packet identifier.
+func (m *message) MessageID() uint16 {
+	return m.messageID
+}
+
+// Payload returns the application payload bytes.
+func (m *message) Payload() []byte {
+	return m.payload
+}
+
+// Ack runs the acknowledgement callback; sync.Once guarantees it fires
+// at most once no matter how many times Ack is called.
+func (m *message) Ack() {
+	m.once.Do(m.ack)
+}
+
+// messageFromPublish converts a received PUBLISH packet into the Message
+// passed to user callbacks, wiring in the supplied ack callback.
+func messageFromPublish(p *packets.PublishPacket, ack func()) Message {
+	return &message{
+		duplicate: p.Dup,
+		qos:       p.Qos,
+		retained:  p.Retain,
+		topic:     p.TopicName,
+		messageID: p.MessageID,
+		payload:   p.Payload,
+		ack:       ack,
+	}
+}
+
+// newConnectMsgFromOptions builds a CONNECT packet from the client options
+// and the broker URL. Credentials are resolved in increasing precedence:
+// options, then userinfo embedded in the broker URL, then the
+// CredentialsProvider callback (if set).
+func newConnectMsgFromOptions(options *ClientOptions, broker *url.URL) *packets.ConnectPacket {
+	m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
+
+	m.CleanSession = options.CleanSession
+	m.WillFlag = options.WillEnabled
+	m.WillRetain = options.WillRetained
+	m.ClientIdentifier = options.ClientID
+
+	if options.WillEnabled {
+		m.WillQos = options.WillQos
+		m.WillTopic = options.WillTopic
+		m.WillMessage = options.WillPayload
+	}
+
+	username := options.Username
+	password := options.Password
+	if broker.User != nil {
+		username = broker.User.Username()
+		if pwd, ok := broker.User.Password(); ok {
+			password = pwd
+		}
+	}
+	if options.CredentialsProvider != nil {
+		username, password = options.CredentialsProvider()
+	}
+
+	if username != "" {
+		m.UsernameFlag = true
+		m.Username = username
+		// mustn't have password without user as well
+		if password != "" {
+			m.PasswordFlag = true
+			m.Password = []byte(password)
+		}
+	}
+
+	m.Keepalive = uint16(options.KeepAlive)
+
+	return m
+}

+ 176 - 0
vendor/github.com/eclipse/paho.mqtt.golang/messageids.go

@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+// MId is 16 bit message id as specified by the MQTT spec.
+// In general, these values should not be depended upon by
+// the client application.
+type MId uint16
+
+// messageIds tracks the tokens associated with in-flight message ids.
+type messageIds struct {
+	sync.RWMutex
+	index map[uint16]tokenCompletor
+
+	lastIssuedID uint16 // The most recently issued ID. Used so we cycle through ids rather than immediately reusing them (can make debugging easier)
+}
+
+// Valid packet identifiers span 1..65535; 0 is reserved by the spec.
+const (
+	midMin uint16 = 1
+	midMax uint16 = 65535
+)
+
+// cleanUp fails every outstanding token with a connection-lost error,
+// completes it, and replaces the index with a fresh empty map. Called
+// when the network connection is lost.
+func (mids *messageIds) cleanUp() {
+	mids.Lock()
+	for _, token := range mids.index {
+		switch token.(type) {
+		case *PublishToken:
+			token.setError(fmt.Errorf("connection lost before Publish completed"))
+		case *SubscribeToken:
+			token.setError(fmt.Errorf("connection lost before Subscribe completed"))
+		case *UnsubscribeToken:
+			token.setError(fmt.Errorf("connection lost before Unsubscribe completed"))
+		case nil:
+			// nil entries carry nothing to complete; skip them.
+			continue
+		}
+		token.flowComplete()
+	}
+	mids.index = make(map[uint16]tokenCompletor)
+	mids.Unlock()
+	DEBUG.Println(MID, "cleaned up")
+}
+
+// freeID releases the given message id so it can be issued again.
+func (mids *messageIds) freeID(id uint16) {
+	mids.Lock()
+	defer mids.Unlock()
+	delete(mids.index, id)
+}
+
+// claimID registers token under the given message id. If another token
+// already holds the id it is completed before being replaced.
+// lastIssuedID is advanced so getID continues past the claimed value.
+func (mids *messageIds) claimID(token tokenCompletor, id uint16) {
+	mids.Lock()
+	defer mids.Unlock()
+	if prev, ok := mids.index[id]; ok {
+		prev.flowComplete()
+	}
+	mids.index[id] = token
+	if id > mids.lastIssuedID {
+		mids.lastIssuedID = id
+	}
+}
+
+// getID will return an available id or 0 if none available
+// The id will generally be the previous id + 1 (because this makes tracing messages a bit simpler)
+func (mids *messageIds) getID(t tokenCompletor) uint16 {
+	mids.Lock()
+	defer mids.Unlock()
+	i := mids.lastIssuedID // note: the only situation where lastIssuedID is 0 the map will be empty
+	looped := false        // uint16 will loop from 65535->0
+	for {
+		i++
+		if i == 0 { // skip 0 because its not a valid id (Control Packets MUST contain a non-zero 16-bit Packet Identifier [MQTT-2.3.1-1])
+			i++
+			looped = true
+		}
+		if _, ok := mids.index[i]; !ok {
+			mids.index[i] = t
+			mids.lastIssuedID = i
+			return i
+		}
+		// Exhaustion check: we have wrapped all the way back to where we
+		// started (or scanned 1..midMax on a fresh instance).
+		if (looped && i == mids.lastIssuedID) || (mids.lastIssuedID == 0 && i == midMax) { // lastIssuedID will be 0 at startup
+			return 0 // no free ids
+		}
+	}
+}
+
+// getToken looks up the token registered for id; when no token is
+// registered a DummyToken carrying the id is returned instead.
+func (mids *messageIds) getToken(id uint16) tokenCompletor {
+	mids.RLock()
+	defer mids.RUnlock()
+	t, ok := mids.index[id]
+	if !ok {
+		return &DummyToken{id: id}
+	}
+	return t
+}
+
+// DummyToken is handed out when no real token exists for a message id.
+// All waits complete immediately; flowComplete logs an error because a
+// dummy should never reach completion in normal operation.
+type DummyToken struct {
+	id uint16
+}
+
+// Wait implements the Token Wait method.
+func (d *DummyToken) Wait() bool {
+	return true
+}
+
+// WaitTimeout implements the Token WaitTimeout method.
+func (d *DummyToken) WaitTimeout(t time.Duration) bool {
+	return true
+}
+
+// Done implements the Token Done method; the returned channel is
+// already closed so callers never block.
+func (d *DummyToken) Done() <-chan struct{} {
+	ch := make(chan struct{})
+	close(ch)
+	return ch
+}
+
+// flowComplete logs that a lookup unexpectedly resolved to a dummy.
+func (d *DummyToken) flowComplete() {
+	ERROR.Printf("A lookup for token %d returned nil\n", d.id)
+}
+
+// Error always returns nil.
+func (d *DummyToken) Error() error {
+	return nil
+}
+
+// setError is a no-op; a dummy has no error state to set.
+func (d *DummyToken) setError(e error) {}
+
+// PlaceHolderToken does nothing and was implemented to allow a messageid to be reserved
+// it differs from DummyToken in that calling flowComplete does not generate an error (it
+// is expected that flowComplete will be called when the token is overwritten with a real token)
+type PlaceHolderToken struct {
+	id uint16
+}
+
+// Wait implements the Token Wait method.
+func (p *PlaceHolderToken) Wait() bool {
+	return true
+}
+
+// WaitTimeout implements the Token WaitTimeout method.
+func (p *PlaceHolderToken) WaitTimeout(t time.Duration) bool {
+	return true
+}
+
+// Done implements the Token Done method; the returned channel is
+// already closed so callers never block.
+func (p *PlaceHolderToken) Done() <-chan struct{} {
+	ch := make(chan struct{})
+	close(ch)
+	return ch
+}
+
+// flowComplete is intentionally a silent no-op (see type comment).
+func (p *PlaceHolderToken) flowComplete() {
+}
+
+// Error always returns nil.
+func (p *PlaceHolderToken) Error() error {
+	return nil
+}
+
+// setError is a no-op; a placeholder has no error state to set.
+func (p *PlaceHolderToken) setError(e error) {}

+ 464 - 0
vendor/github.com/eclipse/paho.mqtt.golang/net.go

@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"errors"
+	"io"
+	"net"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const closedNetConnErrorText = "use of closed network connection" // error string for closed conn (https://golang.org/src/net/error_test.go)
+
+// ConnectMQTT takes a connected net.Conn and performs the initial MQTT handshake. Parameters are:
+// conn - Connected net.Conn
+// cm - Connect Packet with everything other than the protocol name/version populated (historical reasons)
+// protocolVersion - The protocol version to attempt to connect with
+//
+// Note that, for backward compatibility, ConnectMQTT() suppresses the actual connection error (compare to connectMQTT()).
+// Callers that need the underlying error should use the unexported connectMQTT instead.
+func ConnectMQTT(conn net.Conn, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool) {
+	rc, sessionPresent, _ := connectMQTT(conn, cm, protocolVersion)
+	return rc, sessionPresent
+}
+
+// connectMQTT stamps the requested protocol name/version onto cm, writes the
+// CONNECT packet to conn and waits for the broker's CONNACK. It returns the
+// CONNACK return code, the session-present flag and any error encountered.
+func connectMQTT(conn io.ReadWriter, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool, error) {
+	switch protocolVersion {
+	case 3:
+		DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
+		cm.ProtocolName = "MQIsdp"
+		cm.ProtocolVersion = 3
+	case 0x83:
+		DEBUG.Println(CLI, "Using MQTT 3.1b protocol")
+		cm.ProtocolName = "MQIsdp"
+		cm.ProtocolVersion = 0x83
+	case 0x84:
+		DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol")
+		cm.ProtocolName = "MQTT"
+		cm.ProtocolVersion = 0x84
+	default:
+		// Any other value falls back to standard MQTT 3.1.1.
+		DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
+		cm.ProtocolName = "MQTT"
+		cm.ProtocolVersion = 4
+	}
+
+	if err := cm.Write(conn); err != nil {
+		ERROR.Println(CLI, err)
+		return packets.ErrNetworkError, false, err
+	}
+
+	rc, sessionPresent, err := verifyCONNACK(conn)
+	return rc, sessionPresent, err
+}
+
+// This function is only used for receiving a connack
+// when the connection is first started.
+// This prevents receiving incoming data while resume
+// is in progress if clean session is false.
+// Returns the CONNACK return code, the session-present flag and an error
+// if the first packet read is missing or not a CONNACK.
+func verifyCONNACK(conn io.Reader) (byte, bool, error) {
+	DEBUG.Println(NET, "connect started")
+
+	ca, err := packets.ReadPacket(conn)
+	if err != nil {
+		ERROR.Println(NET, "connect got error", err)
+		return packets.ErrNetworkError, false, err
+	}
+
+	if ca == nil {
+		ERROR.Println(NET, "received nil packet")
+		return packets.ErrNetworkError, false, errors.New("nil CONNACK packet")
+	}
+
+	msg, ok := ca.(*packets.ConnackPacket)
+	if !ok {
+		// Spec requires CONNACK to be the first packet from the broker.
+		ERROR.Println(NET, "received msg that was not CONNACK")
+		return packets.ErrNetworkError, false, errors.New("non-CONNACK first packet received")
+	}
+
+	DEBUG.Println(NET, "received connack")
+	return msg.ReturnCode, msg.SessionPresent, nil
+}
+
+// inbound encapsulates the output from startIncoming.
+// err  - If != nil then an error has occurred
+// cp - A control packet received over the network link
+type inbound struct {
+	err error
+	cp  packets.ControlPacket
+}
+
+// startIncoming initiates a goroutine that reads incoming messages off the wire and sends them to the channel (returned).
+// If there are any issues with the network connection then the returned channel will be closed and the goroutine will exit
+// (so closing the connection will terminate the goroutine)
+func startIncoming(conn io.Reader) <-chan inbound {
+	var err error
+	var cp packets.ControlPacket
+	ibound := make(chan inbound)
+
+	DEBUG.Println(NET, "incoming started")
+
+	go func() {
+		for {
+			if cp, err = packets.ReadPacket(conn); err != nil {
+				// We do not want to log the error if it is due to the network connection having been closed
+				// elsewhere (i.e. after sending DisconnectPacket). Detecting this situation is the subject of
+				// https://github.com/golang/go/issues/4373
+				if !strings.Contains(err.Error(), closedNetConnErrorText) {
+					ibound <- inbound{err: err}
+				}
+				close(ibound)
+				DEBUG.Println(NET, "incoming complete")
+				return
+			}
+			DEBUG.Println(NET, "startIncoming Received Message")
+			ibound <- inbound{cp: cp}
+		}
+	}()
+
+	return ibound
+}
+
+// incomingComms encapsulates the possible output of the incomingComms routine. If err != nil then an error has occurred and
+// the routine will have terminated; otherwise one of the other members should be non-nil
+type incomingComms struct {
+	err         error                  // If non-nil then there has been an error (ignore everything else)
+	outbound    *PacketAndToken        // Packet (with token) than needs to be sent out (e.g. an acknowledgement)
+	incomingPub *packets.PublishPacket // A new publish has been received; this will need to be passed on to our user
+}
+
+// startIncomingComms initiates incoming communications; this includes starting a goroutine to process incoming
+// messages.
+// Accepts a channel of inbound messages from the store (persisted messages); note this must be closed as soon as the
+// everything in the store has been sent.
+// Returns a channel that will be passed any received packets; this will be closed on a network error (and inboundFromStore closed)
+func startIncomingComms(conn io.Reader,
+	c commsFns,
+	inboundFromStore <-chan packets.ControlPacket,
+) <-chan incomingComms {
+	ibound := startIncoming(conn) // Start goroutine that reads from network connection
+	output := make(chan incomingComms)
+
+	DEBUG.Println(NET, "startIncomingComms started")
+	go func() {
+		for {
+			// Both sources drained/closed means there is nothing left to do.
+			if inboundFromStore == nil && ibound == nil {
+				close(output)
+				DEBUG.Println(NET, "startIncomingComms goroutine complete")
+				return // As soon as ibound is closed we can exit (should have already processed an error)
+			}
+			DEBUG.Println(NET, "logic waiting for msg on ibound")
+
+			var msg packets.ControlPacket
+			var ok bool
+			select {
+			case msg, ok = <-inboundFromStore:
+				if !ok {
+					DEBUG.Println(NET, "startIncomingComms: inboundFromStore complete")
+					inboundFromStore = nil // should happen quickly as this is only for persisted messages
+					continue
+				}
+				DEBUG.Println(NET, "startIncomingComms: got msg from store")
+			case ibMsg, ok := <-ibound:
+				if !ok {
+					DEBUG.Println(NET, "startIncomingComms: ibound complete")
+					ibound = nil
+					continue
+				}
+				DEBUG.Println(NET, "startIncomingComms: got msg on ibound")
+				// If the inbound comms routine encounters any issues it will send us an error.
+				if ibMsg.err != nil {
+					output <- incomingComms{err: ibMsg.err}
+					continue // Usually the channel will be closed immediately after sending an error but safer that we do not assume this
+				}
+				msg = ibMsg.cp
+
+				// Only network-sourced packets are persisted and refresh the
+				// keepalive timer; store-sourced ones were already persisted.
+				c.persistInbound(msg)
+				c.UpdateLastReceived() // Notify keepalive logic that we recently received a packet
+			}
+
+			// Dispatch by packet type per the MQTT 3.1.1 QoS flows.
+			switch m := msg.(type) {
+			case *packets.PingrespPacket:
+				DEBUG.Println(NET, "startIncomingComms: received pingresp")
+				c.pingRespReceived()
+			case *packets.SubackPacket:
+				DEBUG.Println(NET, "startIncomingComms: received suback, id:", m.MessageID)
+				token := c.getToken(m.MessageID)
+
+				if t, ok := token.(*SubscribeToken); ok {
+					DEBUG.Println(NET, "startIncomingComms: granted qoss", m.ReturnCodes)
+					for i, qos := range m.ReturnCodes {
+						t.subResult[t.subs[i]] = qos
+					}
+				}
+
+				token.flowComplete()
+				c.freeID(m.MessageID)
+			case *packets.UnsubackPacket:
+				DEBUG.Println(NET, "startIncomingComms: received unsuback, id:", m.MessageID)
+				c.getToken(m.MessageID).flowComplete()
+				c.freeID(m.MessageID)
+			case *packets.PublishPacket:
+				DEBUG.Println(NET, "startIncomingComms: received publish, msgId:", m.MessageID)
+				output <- incomingComms{incomingPub: m}
+			case *packets.PubackPacket:
+				DEBUG.Println(NET, "startIncomingComms: received puback, id:", m.MessageID)
+				c.getToken(m.MessageID).flowComplete()
+				c.freeID(m.MessageID)
+			case *packets.PubrecPacket:
+				DEBUG.Println(NET, "startIncomingComms: received pubrec, id:", m.MessageID)
+				prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
+				prel.MessageID = m.MessageID
+				output <- incomingComms{outbound: &PacketAndToken{p: prel, t: nil}}
+			case *packets.PubrelPacket:
+				DEBUG.Println(NET, "startIncomingComms: received pubrel, id:", m.MessageID)
+				pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
+				pc.MessageID = m.MessageID
+				c.persistOutbound(pc)
+				output <- incomingComms{outbound: &PacketAndToken{p: pc, t: nil}}
+			case *packets.PubcompPacket:
+				DEBUG.Println(NET, "startIncomingComms: received pubcomp, id:", m.MessageID)
+				c.getToken(m.MessageID).flowComplete()
+				c.freeID(m.MessageID)
+			}
+		}
+	}()
+	return output
+}
+
+// startOutgoingComms initiates a go routine to transmit outgoing packets.
+// Pass in an open network connection and channels for outbound messages (including those triggered
+// directly from incoming comms).
+// Returns a channel that will receive details of any errors (closed when the goroutine exits)
+// This function wil only terminate when all input channels are closed
+func startOutgoingComms(conn net.Conn,
+	c commsFns,
+	oboundp <-chan *PacketAndToken,
+	obound <-chan *PacketAndToken,
+	oboundFromIncoming <-chan *PacketAndToken,
+) <-chan error {
+	errChan := make(chan error)
+	DEBUG.Println(NET, "outgoing started")
+
+	go func() {
+		for {
+			DEBUG.Println(NET, "outgoing waiting for an outbound message")
+
+			// This goroutine will only exits when all of the input channels we receive on have been closed. This approach is taken to avoid any
+			// deadlocks (if the connection goes down there are limited options as to what we can do with anything waiting on us and
+			// throwing away the packets seems the best option)
+			if oboundp == nil && obound == nil && oboundFromIncoming == nil {
+				DEBUG.Println(NET, "outgoing comms stopping")
+				close(errChan)
+				return
+			}
+
+			select {
+			case pub, ok := <-obound:
+				if !ok {
+					obound = nil
+					continue
+				}
+				msg := pub.p.(*packets.PublishPacket)
+				DEBUG.Println(NET, "obound msg to write", msg.MessageID)
+
+				// A write deadline is only applied while actively writing;
+				// it is cleared again after a successful write below.
+				writeTimeout := c.getWriteTimeOut()
+				if writeTimeout > 0 {
+					if err := conn.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil {
+						ERROR.Println(NET, "SetWriteDeadline ", err)
+					}
+				}
+
+				if err := msg.Write(conn); err != nil {
+					ERROR.Println(NET, "outgoing obound reporting error ", err)
+					pub.t.setError(err)
+					// report error if it's not due to the connection being closed elsewhere
+					if !strings.Contains(err.Error(), closedNetConnErrorText) {
+						errChan <- err
+					}
+					continue
+				}
+
+				if writeTimeout > 0 {
+					// If we successfully wrote, we don't want the timeout to happen during an idle period
+					// so we reset it to infinite.
+					if err := conn.SetWriteDeadline(time.Time{}); err != nil {
+						ERROR.Println(NET, "SetWriteDeadline to 0 ", err)
+					}
+				}
+
+				// QoS 0 has no acknowledgement flow, so the token completes
+				// as soon as the packet is on the wire.
+				if msg.Qos == 0 {
+					pub.t.flowComplete()
+				}
+				DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID)
+			case msg, ok := <-oboundp:
+				if !ok {
+					oboundp = nil
+					continue
+				}
+				DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p))
+				if err := msg.p.Write(conn); err != nil {
+					ERROR.Println(NET, "outgoing oboundp reporting error ", err)
+					if msg.t != nil {
+						msg.t.setError(err)
+					}
+					errChan <- err
+					continue
+				}
+
+				if _, ok := msg.p.(*packets.DisconnectPacket); ok {
+					msg.t.(*DisconnectToken).flowComplete()
+					DEBUG.Println(NET, "outbound wrote disconnect, closing connection")
+					// As per the MQTT spec "After sending a DISCONNECT Packet the Client MUST close the Network Connection"
+					// Closing the connection will cause the goroutines to end in sequence (starting with incoming comms)
+					conn.Close()
+				}
+			case msg, ok := <-oboundFromIncoming: // message triggered by an inbound message (PubrecPacket or PubrelPacket)
+				if !ok {
+					oboundFromIncoming = nil
+					continue
+				}
+				DEBUG.Println(NET, "obound from incoming msg to write, type", reflect.TypeOf(msg.p), " ID ", msg.p.Details().MessageID)
+				if err := msg.p.Write(conn); err != nil {
+					ERROR.Println(NET, "outgoing oboundFromIncoming reporting error", err)
+					if msg.t != nil {
+						msg.t.setError(err)
+					}
+					errChan <- err
+					continue
+				}
+			}
+			c.UpdateLastSent() // Record that a packet has been received (for keepalive routine)
+		}
+	}()
+	return errChan
+}
+
+// commsFns provide access to the client state (messageids, requesting disconnection and updating timing).
+// It decouples the comms goroutines from the concrete client implementation.
+type commsFns interface {
+	getToken(id uint16) tokenCompletor       // Retrieve the token for the specified messageid (if none then a dummy token must be returned)
+	freeID(id uint16)                        // Release the specified messageid (clearing out of any persistent store)
+	UpdateLastReceived()                     // Must be called whenever a packet is received
+	UpdateLastSent()                         // Must be called whenever a packet is successfully sent
+	getWriteTimeOut() time.Duration          // Return the writetimeout (or 0 if none)
+	persistOutbound(m packets.ControlPacket) // add the packet to the outbound store
+	persistInbound(m packets.ControlPacket)  // add the packet to the inbound store
+	pingRespReceived()                       // Called when a ping response is received
+}
+
+// startComms initiates goroutines that handles communications over the network connection
+// Messages will be stored (via commsFns) and deleted from the store as necessary
+// It returns two channels:
+//  packets.PublishPacket - Will receive publish packets received over the network.
+//  Closed when incoming comms routines exit (on shutdown or if network link closed)
+//  error - Any errors will be sent on this channel. The channel is closed when all comms routines have shut down
+//
+// Note: The comms routines monitoring oboundp and obound will not shutdown until those channels are both closed. Any messages received between the
+// connection being closed and those channels being closed will generate errors (and nothing will be sent). That way the chance of a deadlock is
+// minimised.
+func startComms(conn net.Conn, // Network connection (must be active)
+	c commsFns, // getters and setters to enable us to cleanly interact with client
+	inboundFromStore <-chan packets.ControlPacket, // Inbound packets from the persistence store (should be closed relatively soon after startup)
+	oboundp <-chan *PacketAndToken,
+	obound <-chan *PacketAndToken) (
+	<-chan *packets.PublishPacket, // Publishpackages received over the network
+	<-chan error, // Any errors (should generally trigger a disconnect)
+) {
+	// Start inbound comms handler; this needs to be able to transmit messages so we start a go routine to add these to the priority outbound channel
+	ibound := startIncomingComms(conn, c, inboundFromStore)
+	outboundFromIncoming := make(chan *PacketAndToken) // Will accept outgoing messages triggered by startIncomingComms (e.g. acknowledgements)
+
+	// Start the outgoing handler. It is important to note that output from startIncomingComms is fed into startOutgoingComms (for ACK's)
+	oboundErr := startOutgoingComms(conn, c, oboundp, obound, outboundFromIncoming)
+	DEBUG.Println(NET, "startComms started")
+
+	// Run up go routines to handle the output from the above comms functions - these are handled in separate
+	// go routines because they can interact (e.g. ibound triggers an ACK to obound which triggers an error)
+	var wg sync.WaitGroup
+	wg.Add(2)
+
+	outPublish := make(chan *packets.PublishPacket)
+	outError := make(chan error)
+
+	// Any messages received get passed to the appropriate channel
+	go func() {
+		for ic := range ibound {
+			// Exactly one of the three fields should be set; route accordingly.
+			if ic.err != nil {
+				outError <- ic.err
+				continue
+			}
+			if ic.outbound != nil {
+				outboundFromIncoming <- ic.outbound
+				continue
+			}
+			if ic.incomingPub != nil {
+				outPublish <- ic.incomingPub
+				continue
+			}
+			ERROR.Println(STR, "startComms received empty incomingComms msg")
+		}
+		// Close channels that will not be written to again (allowing other routines to exit)
+		close(outboundFromIncoming)
+		close(outPublish)
+		wg.Done()
+	}()
+
+	// Any errors will be passed out to our caller
+	go func() {
+		for err := range oboundErr {
+			outError <- err
+		}
+		wg.Done()
+	}()
+
+	// outError is used by both routines so can only be closed when they are both complete
+	go func() {
+		wg.Wait()
+		close(outError)
+		DEBUG.Println(NET, "startComms closing outError")
+	}()
+
+	return outPublish, outError
+}
+
+// ackFunc acknowledges a packet
+// WARNING the function returned must not be called if the comms routine is shutting down or not running
+// (it needs outgoing comms in order to send the acknowledgement). Currently this is only called from
+// matchAndDispatch which will be shutdown before the comms are
+func ackFunc(oboundP chan *PacketAndToken, persist Store, packet *packets.PublishPacket) func() {
+	return func() {
+		switch packet.Qos {
+		case 2:
+			// QoS 2: reply with PUBREC (the PUBCOMP leg is handled in
+			// startIncomingComms when the PUBREL arrives).
+			pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
+			pr.MessageID = packet.MessageID
+			DEBUG.Println(NET, "putting pubrec msg on obound")
+			oboundP <- &PacketAndToken{p: pr, t: nil}
+			DEBUG.Println(NET, "done putting pubrec msg on obound")
+		case 1:
+			// QoS 1: persist then send PUBACK.
+			pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
+			pa.MessageID = packet.MessageID
+			DEBUG.Println(NET, "putting puback msg on obound")
+			persistOutbound(persist, pa)
+			oboundP <- &PacketAndToken{p: pa, t: nil}
+			DEBUG.Println(NET, "done putting puback msg on obound")
+		case 0:
+			// do nothing, since there is no need to send an ack packet back
+		}
+	}
+}

+ 92 - 0
vendor/github.com/eclipse/paho.mqtt.golang/netconn.go

@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"crypto/tls"
+	"errors"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"golang.org/x/net/proxy"
+)
+
+//
+// This just establishes the network connection; once established the type of connection should be irrelevant
+//
+
+// openConnection opens a network connection using the protocol indicated in the URL.
+// Does not carry out any MQTT specific handshakes.
+func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header, websocketOptions *WebsocketOptions) (net.Conn, error) {
+	switch uri.Scheme {
+	case "ws":
+		conn, err := NewWebsocket(uri.String(), nil, timeout, headers, websocketOptions)
+		return conn, err
+	case "wss":
+		conn, err := NewWebsocket(uri.String(), tlsc, timeout, headers, websocketOptions)
+		return conn, err
+	case "mqtt", "tcp":
+		allProxy := os.Getenv("all_proxy")
+		if len(allProxy) == 0 {
+			conn, err := net.DialTimeout("tcp", uri.Host, timeout)
+			if err != nil {
+				return nil, err
+			}
+			return conn, nil
+		}
+		proxyDialer := proxy.FromEnvironment()
+
+		conn, err := proxyDialer.Dial("tcp", uri.Host)
+		if err != nil {
+			return nil, err
+		}
+		return conn, nil
+	case "unix":
+		conn, err := net.DialTimeout("unix", uri.Host, timeout)
+		if err != nil {
+			return nil, err
+		}
+		return conn, nil
+	case "ssl", "tls", "mqtts", "mqtt+ssl", "tcps":
+		allProxy := os.Getenv("all_proxy")
+		if len(allProxy) == 0 {
+			conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc)
+			if err != nil {
+				return nil, err
+			}
+			return conn, nil
+		}
+		proxyDialer := proxy.FromEnvironment()
+
+		conn, err := proxyDialer.Dial("tcp", uri.Host)
+		if err != nil {
+			return nil, err
+		}
+
+		tlsConn := tls.Client(conn, tlsc)
+
+		err = tlsConn.Handshake()
+		if err != nil {
+			_ = conn.Close()
+			return nil, err
+		}
+
+		return tlsConn, nil
+	}
+	return nil, errors.New("unknown protocol")
+}

+ 108 - 0
vendor/github.com/eclipse/paho.mqtt.golang/notice.html

@@ -0,0 +1,108 @@
+<?xml version="1.0" encoding="ISO-8859-1" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" />
+<title>Eclipse Foundation Software User Agreement</title>
+</head>
+
+<body lang="EN-US">
+<h2>Eclipse Foundation Software User Agreement</h2>
+<p>February 1, 2011</p>
+
+<h3>Usage Of Content</h3>
+
+<p>THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
+   (COLLECTIVELY &quot;CONTENT&quot;).  USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
+   CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW.  BY USING THE CONTENT, YOU AGREE THAT YOUR USE
+   OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
+   NOTICES INDICATED OR REFERENCED BELOW.  IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
+   CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.</p>
+
+<h3>Applicable Licenses</h3>
+
+<p>Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and conditions of the Eclipse Public License Version 1.0
+   (&quot;EPL&quot;).  A copy of the EPL is provided with this Content and is also available at <a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>.
+   For purposes of the EPL, &quot;Program&quot; will mean the Content.</p>
+
+<p>Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse Foundation source code
+   repository (&quot;Repository&quot;) in software modules (&quot;Modules&quot;) and made available as downloadable archives (&quot;Downloads&quot;).</p>
+
+<ul>
+       <li>Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content.  Typical modules may include plug-ins (&quot;Plug-ins&quot;), plug-in fragments (&quot;Fragments&quot;), and features (&quot;Features&quot;).</li>
+       <li>Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java&trade; ARchive) in a directory named &quot;plugins&quot;.</li>
+       <li>A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material.  Each Feature may be packaged as a sub-directory in a directory named &quot;features&quot;.  Within a Feature, files named &quot;feature.xml&quot; may contain a list of the names and version numbers of the Plug-ins
+      and/or Fragments associated with that Feature.</li>
+       <li>Features may also include other Features (&quot;Included Features&quot;). Within a Feature, files named &quot;feature.xml&quot; may contain a list of the names and version numbers of Included Features.</li>
+</ul>
+
+<p>The terms and conditions governing Plug-ins and Fragments should be contained in files named &quot;about.html&quot; (&quot;Abouts&quot;). The terms and conditions governing Features and
+Included Features should be contained in files named &quot;license.html&quot; (&quot;Feature Licenses&quot;).  Abouts and Feature Licenses may be located in any directory of a Download or Module
+including, but not limited to the following locations:</p>
+
+<ul>
+       <li>The top-level (root) directory</li>
+       <li>Plug-in and Fragment directories</li>
+       <li>Inside Plug-ins and Fragments packaged as JARs</li>
+       <li>Sub-directories of the directory named &quot;src&quot; of certain Plug-ins</li>
+       <li>Feature directories</li>
+</ul>
+
+<p>Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined below), you must agree to a license (&quot;Feature Update License&quot;) during the
+installation process.  If the Feature contains Included Features, the Feature Update License should either provide you with the terms and conditions governing the Included Features or
+inform you where you can locate them.  Feature Update Licenses may be found in the &quot;license&quot; property of files named &quot;feature.properties&quot; found within a Feature.
+Such Abouts, Feature Licenses, and Feature Update Licenses contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated Content in
+that directory.</p>
+
+<p>THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND CONDITIONS.  SOME OF THESE
+OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):</p>
+
+<ul>
+       <li>Eclipse Distribution License Version 1.0 (available at <a href="http://www.eclipse.org/licenses/edl-v10.html">http://www.eclipse.org/licenses/edl-v10.html</a>)</li>
+       <li>Common Public License Version 1.0 (available at <a href="http://www.eclipse.org/legal/cpl-v10.html">http://www.eclipse.org/legal/cpl-v10.html</a>)</li>
+       <li>Apache Software License 1.1 (available at <a href="http://www.apache.org/licenses/LICENSE">http://www.apache.org/licenses/LICENSE</a>)</li>
+       <li>Apache Software License 2.0 (available at <a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a>)</li>
+       <li>Metro Link Public License 1.00 (available at <a href="http://www.opengroup.org/openmotif/supporters/metrolink/license.html">http://www.opengroup.org/openmotif/supporters/metrolink/license.html</a>)</li>
+       <li>Mozilla Public License Version 1.1 (available at <a href="http://www.mozilla.org/MPL/MPL-1.1.html">http://www.mozilla.org/MPL/MPL-1.1.html</a>)</li>
+</ul>
+
+<p>IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT.  If no About, Feature License, or Feature Update License is provided, please
+contact the Eclipse Foundation to determine what terms and conditions govern that particular Content.</p>
+
+
+<h3>Use of Provisioning Technology</h3>
+
+<p>The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and the Eclipse
+   Update Manager (&quot;Provisioning Technology&quot;) for the purpose of allowing users to install software, documentation, information and/or
+   other materials (collectively &quot;Installable Software&quot;). This capability is provided with the intent of allowing such users to
+   install, extend and update Eclipse-based products. Information about packaging Installable Software is available at <a
+       href="http://eclipse.org/equinox/p2/repository_packaging.html">http://eclipse.org/equinox/p2/repository_packaging.html</a>
+   (&quot;Specification&quot;).</p>
+
+<p>You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for enabling the
+   applicable license agreements relating to the Installable Software to be presented to, and accepted by, the users of the Provisioning Technology
+   in accordance with the Specification. By using Provisioning Technology in such a manner and making it available in accordance with the
+   Specification, you further acknowledge your agreement to, and the acquisition of all necessary rights to permit the following:</p>
+
+<ol>
+       <li>A series of actions may occur (&quot;Provisioning Process&quot;) in which a user may execute the Provisioning Technology
+       on a machine (&quot;Target Machine&quot;) with the intent of installing, extending or updating the functionality of an Eclipse-based
+       product.</li>
+       <li>During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion thereof to be
+       accessed and copied to the Target Machine.</li>
+       <li>Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the Installable
+       Software (&quot;Installable Software Agreement&quot;) and such Installable Software Agreement shall be accessed from the Target
+       Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of the terms and conditions that govern
+       the Installable Software and must solicit acceptance by the end user in the manner prescribed in such Installable Software Agreement. Upon such
+       indication of agreement by the user, the provisioning Technology will complete installation of the Installable Software.</li>
+</ol>
+
+<h3>Cryptography</h3>
+
+<p>Content may contain encryption software. The country in which you are currently may have restrictions on the import, possession, and use, and/or re-export to
+   another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import,
+   possession, or use, and re-export of encryption software, to see if this is permitted.</p>
+
+<p><small>Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.</small></p>
+</body>
+</html>

+ 21 - 0
vendor/github.com/eclipse/paho.mqtt.golang/oops.go

@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
// chkerr panics when e is non-nil; shorthand for code paths where an
// error is treated as unrecoverable.
func chkerr(e error) {
	if e == nil {
		return
	}
	panic(e)
}

+ 403 - 0
vendor/github.com/eclipse/paho.mqtt.golang/options.go

@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ *    Måns Ansgariusson
+ */
+
+// Portions copyright © 2018 TIBCO Software Inc.
+
+package mqtt
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
// CredentialsProvider allows the username and password to be updated
// before reconnecting. It should return the current username and password.
type CredentialsProvider func() (username string, password string)

// MessageHandler is a callback type which can be set to be
// executed upon the arrival of messages published to topics
// to which the client is subscribed.
type MessageHandler func(Client, Message)

// ConnectionLostHandler is a callback type which can be set to be
// executed upon an unintended disconnection from the MQTT broker.
// Disconnects caused by calling Disconnect or ForceDisconnect will
// not cause an OnConnectionLost callback to execute.
type ConnectionLostHandler func(Client, error)

// OnConnectHandler is a callback that is called when the client
// state changes from unconnected/disconnected to connected, both
// at initial connection and on reconnection.
type OnConnectHandler func(Client)

// ReconnectHandler is invoked prior to reconnecting after
// the initial connection is lost.
type ReconnectHandler func(Client, *ClientOptions)

// ConnectionAttemptHandler is invoked prior to making the initial connection.
// It receives the broker URL and the configured TLS config and returns the
// TLS config to use for the attempt (allowing per-connection adjustments).
type ConnectionAttemptHandler func(broker *url.URL, tlsCfg *tls.Config) *tls.Config
+
// ClientOptions contains configurable options for an Client. Note that these should be set using the
// relevant methods (e.g. AddBroker) rather than directly. See those functions for information on usage.
type ClientOptions struct {
	Servers                 []*url.URL
	ClientID                string
	Username                string
	Password                string
	CredentialsProvider     CredentialsProvider
	CleanSession            bool
	Order                   bool
	WillEnabled             bool
	WillTopic               string
	WillPayload             []byte
	WillQos                 byte
	WillRetained            bool
	ProtocolVersion         uint
	protocolVersionExplicit bool // set once SetProtocolVersion accepts a value
	TLSConfig               *tls.Config
	KeepAlive               int64 // whole seconds; SetKeepAlive converts from time.Duration
	PingTimeout             time.Duration
	ConnectTimeout          time.Duration
	MaxReconnectInterval    time.Duration
	AutoReconnect           bool
	ConnectRetryInterval    time.Duration
	ConnectRetry            bool
	Store                   Store
	DefaultPublishHandler   MessageHandler
	OnConnect               OnConnectHandler
	OnConnectionLost        ConnectionLostHandler
	OnReconnecting          ReconnectHandler
	OnConnectAttempt        ConnectionAttemptHandler
	WriteTimeout            time.Duration
	MessageChannelDepth     uint // deprecated: retained only for API compatibility (see SetMessageChannelDepth)
	ResumeSubs              bool
	HTTPHeaders             http.Header
	WebsocketOptions        *WebsocketOptions
}
+
+// NewClientOptions will create a new ClientClientOptions type with some
+// default values.
+//   Port: 1883
+//   CleanSession: True
+//   Order: True (note: it is recommended that this be set to FALSE unless order is important)
+//   KeepAlive: 30 (seconds)
+//   ConnectTimeout: 30 (seconds)
+//   MaxReconnectInterval 10 (minutes)
+//   AutoReconnect: True
+func NewClientOptions() *ClientOptions {
+	o := &ClientOptions{
+		Servers:                 nil,
+		ClientID:                "",
+		Username:                "",
+		Password:                "",
+		CleanSession:            true,
+		Order:                   true,
+		WillEnabled:             false,
+		WillTopic:               "",
+		WillPayload:             nil,
+		WillQos:                 0,
+		WillRetained:            false,
+		ProtocolVersion:         0,
+		protocolVersionExplicit: false,
+		KeepAlive:               30,
+		PingTimeout:             10 * time.Second,
+		ConnectTimeout:          30 * time.Second,
+		MaxReconnectInterval:    10 * time.Minute,
+		AutoReconnect:           true,
+		ConnectRetryInterval:    30 * time.Second,
+		ConnectRetry:            false,
+		Store:                   nil,
+		OnConnect:               nil,
+		OnConnectionLost:        DefaultConnectionLostHandler,
+		OnConnectAttempt:        nil,
+		WriteTimeout:            0, // 0 represents timeout disabled
+		ResumeSubs:              false,
+		HTTPHeaders:             make(map[string][]string),
+		WebsocketOptions:        &WebsocketOptions{},
+	}
+	return o
+}
+
+// AddBroker adds a broker URI to the list of brokers to be used. The format should be
+// scheme://host:port
+// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname)
+// and "port" is the port on which the broker is accepting connections.
+//
+// Default values for hostname is "127.0.0.1", for schema is "tcp://".
+//
+// An example broker URI would look like: tcp://foobar.com:1883
+func (o *ClientOptions) AddBroker(server string) *ClientOptions {
+	if len(server) > 0 && server[0] == ':' {
+		server = "127.0.0.1" + server
+	}
+	if !strings.Contains(server, "://") {
+		server = "tcp://" + server
+	}
+	brokerURI, err := url.Parse(server)
+	if err != nil {
+		ERROR.Println(CLI, "Failed to parse %q broker address: %s", server, err)
+		return o
+	}
+	o.Servers = append(o.Servers, brokerURI)
+	return o
+}
+
// SetResumeSubs will enable resuming of stored (un)subscribe messages when connecting
// but not reconnecting if CleanSession is false. Otherwise these messages are discarded.
func (o *ClientOptions) SetResumeSubs(resume bool) *ClientOptions {
	o.ResumeSubs = resume
	return o
}

// SetClientID will set the client id to be used by this client when
// connecting to the MQTT broker. According to the MQTT v3.1 specification,
// a client id must be no longer than 23 characters.
func (o *ClientOptions) SetClientID(id string) *ClientOptions {
	o.ClientID = id
	return o
}

// SetUsername will set the username to be used by this client when connecting
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
// be sent in plaintext across the wire.
func (o *ClientOptions) SetUsername(u string) *ClientOptions {
	o.Username = u
	return o
}

// SetPassword will set the password to be used by this client when connecting
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
// be sent in plaintext across the wire.
func (o *ClientOptions) SetPassword(p string) *ClientOptions {
	o.Password = p
	return o
}

// SetCredentialsProvider will set a method to be called by this client when
// connecting to the MQTT broker that provides the current username and password.
// Note: without the use of SSL/TLS, this information will be sent
// in plaintext across the wire.
func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOptions {
	o.CredentialsProvider = p
	return o
}

// SetCleanSession will set the "clean session" flag in the connect message
// when this client connects to an MQTT broker. By setting this flag, you are
// indicating that no messages saved by the broker for this client should be
// delivered. Any messages that were going to be sent by this client before
// disconnecting previously but didn't will not be sent upon connecting to the
// broker.
func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
	o.CleanSession = clean
	return o
}

// SetOrderMatters will set the message routing to guarantee order within
// each QoS level. By default, this value is true. If set to false (recommended),
// this flag indicates that messages can be delivered asynchronously
// from the client to the application and possibly arrive out of order.
// Specifically, the message handler is called in its own go routine.
// Note that setting this to true does not guarantee in-order delivery
// (this is subject to broker settings like "max_inflight_messages=1" in mosquitto)
// and if true then handlers must not block.
func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
	o.Order = order
	return o
}

// SetTLSConfig will set an SSL/TLS configuration to be used when connecting
// to an MQTT broker. Please read the official Go documentation for more
// information.
func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions {
	o.TLSConfig = t
	return o
}
+
// SetStore will set the implementation of the Store interface
// used to provide message persistence in cases where QoS levels
// QoS_ONE or QoS_TWO are used. If no store is provided, then the
// client will use MemoryStore by default.
func (o *ClientOptions) SetStore(s Store) *ClientOptions {
	o.Store = s
	return o
}

// SetKeepAlive will set the amount of time that the client
// should wait before sending a PING request to the broker. This will
// allow the client to know that a connection has not been lost with the
// server. Note: the value is stored internally as whole seconds, so any
// sub-second component of k is truncated.
func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions {
	o.KeepAlive = int64(k / time.Second)
	return o
}

// SetPingTimeout will set the amount of time that the client
// will wait after sending a PING request to the broker, before deciding
// that the connection has been lost. Default is 10 seconds.
func (o *ClientOptions) SetPingTimeout(k time.Duration) *ClientOptions {
	o.PingTimeout = k
	return o
}

// SetProtocolVersion sets the MQTT version to be used to connect to the
// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1.
// Values outside 3..4 (other than those above 0x80) are silently ignored.
func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions {
	if (pv >= 3 && pv <= 4) || (pv > 0x80) {
		o.ProtocolVersion = pv
		o.protocolVersionExplicit = true
	}
	return o
}

// UnsetWill will cause any set will message to be disregarded.
func (o *ClientOptions) UnsetWill() *ClientOptions {
	o.WillEnabled = false
	return o
}

// SetWill accepts a string will message to be set. When the client connects,
// it will give this will message to the broker, which will then publish the
// provided payload (the will) to any clients that are subscribed to the provided
// topic. The payload string is converted to bytes; see SetBinaryWill.
func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions {
	o.SetBinaryWill(topic, []byte(payload), qos, retained)
	return o
}

// SetBinaryWill accepts a []byte will message to be set. When the client connects,
// it will give this will message to the broker, which will then publish the
// provided payload (the will) to any clients that are subscribed to the provided
// topic.
func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions {
	o.WillEnabled = true
	o.WillTopic = topic
	o.WillPayload = payload
	o.WillQos = qos
	o.WillRetained = retained
	return o
}

// SetDefaultPublishHandler sets the MessageHandler that will be called when a message
// is received that does not match any known subscriptions.
//
// If OrderMatters is true (the defaultHandler) then callback must not block or
// call functions within this package that may block (e.g. Publish) other than in
// a new go routine.
// defaultHandler must be safe for concurrent use by multiple goroutines.
func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions {
	o.DefaultPublishHandler = defaultHandler
	return o
}
+
// SetOnConnectHandler sets the function to be called when the client is connected, both
// at initial connection time and upon automatic reconnect.
func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions {
	o.OnConnect = onConn
	return o
}

// SetConnectionLostHandler will set the OnConnectionLost callback to be executed
// in the case where the client unexpectedly loses connection with the MQTT broker.
func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions {
	o.OnConnectionLost = onLost
	return o
}

// SetReconnectingHandler sets the OnReconnecting callback to be executed prior
// to the client attempting a reconnect to the MQTT broker.
func (o *ClientOptions) SetReconnectingHandler(cb ReconnectHandler) *ClientOptions {
	o.OnReconnecting = cb
	return o
}

// SetConnectionAttemptHandler sets the ConnectionAttemptHandler callback to be executed prior
// to each attempt to connect to an MQTT broker. Returns the *tls.Config that will be used when establishing
// the connection (a copy of the tls.Config from ClientOptions will be passed in along with the broker URL).
// This allows connection specific changes to be made to the *tls.Config.
func (o *ClientOptions) SetConnectionAttemptHandler(onConnectAttempt ConnectionAttemptHandler) *ClientOptions {
	o.OnConnectAttempt = onConnectAttempt
	return o
}

// SetWriteTimeout puts a limit on how long a mqtt publish should block until it unblocks with a
// timeout error. A duration of 0 never times out. Default never times out.
func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
	o.WriteTimeout = t
	return o
}

// SetConnectTimeout limits how long the client will wait when trying to open a connection
// to an MQTT server before timing out. A duration of 0 never times out.
// Default 30 seconds. Currently only operational on TCP/TLS connections.
func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
	o.ConnectTimeout = t
	return o
}

// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts
// when connection is lost.
func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions {
	o.MaxReconnectInterval = t
	return o
}

// SetAutoReconnect sets whether the automatic reconnection logic should be used
// when the connection is lost, even if disabled the ConnectionLostHandler is still
// called.
func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
	o.AutoReconnect = a
	return o
}

// SetConnectRetryInterval sets the time that will be waited between connection attempts
// when initially connecting if ConnectRetry is TRUE.
func (o *ClientOptions) SetConnectRetryInterval(t time.Duration) *ClientOptions {
	o.ConnectRetryInterval = t
	return o
}

// SetConnectRetry sets whether the connect function will automatically retry the connection
// in the event of a failure (when true the token returned by the Connect function will
// not complete until the connection is up or it is cancelled).
// If ConnectRetry is true then subscriptions should be requested in OnConnect handler.
// Setting this to TRUE permits messages to be published before the connection is established.
func (o *ClientOptions) SetConnectRetry(a bool) *ClientOptions {
	o.ConnectRetry = a
	return o
}

// SetMessageChannelDepth DEPRECATED The value set here no longer has any effect, this function
// remains so the API is not altered.
func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions {
	o.MessageChannelDepth = s
	return o
}

// SetHTTPHeaders sets the additional HTTP headers that will be sent in the WebSocket
// opening handshake.
func (o *ClientOptions) SetHTTPHeaders(h http.Header) *ClientOptions {
	o.HTTPHeaders = h
	return o
}

// SetWebsocketOptions sets the additional websocket options used in a WebSocket connection.
func (o *ClientOptions) SetWebsocketOptions(w *WebsocketOptions) *ClientOptions {
	o.WebsocketOptions = w
	return o
}
+ 167 - 0
vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go

@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/url"
+	"time"
+)
+
// ClientOptionsReader provides an interface for reading ClientOptions after the client has been initialized.
type ClientOptionsReader struct {
	options *ClientOptions
}

// Servers returns a slice of the servers defined in the clientoptions.
// Each URL is copied, so callers cannot mutate the client's configuration.
func (r *ClientOptionsReader) Servers() []*url.URL {
	s := make([]*url.URL, len(r.options.Servers))

	for i, u := range r.options.Servers {
		nu := *u
		s[i] = &nu
	}

	return s
}

// ResumeSubs returns true if resuming stored (un)sub is enabled
func (r *ClientOptionsReader) ResumeSubs() bool {
	s := r.options.ResumeSubs
	return s
}

// ClientID returns the set client id
func (r *ClientOptionsReader) ClientID() string {
	s := r.options.ClientID
	return s
}

// Username returns the set username
func (r *ClientOptionsReader) Username() string {
	s := r.options.Username
	return s
}

// Password returns the set password
func (r *ClientOptionsReader) Password() string {
	s := r.options.Password
	return s
}

// CleanSession returns whether Cleansession is set
func (r *ClientOptionsReader) CleanSession() bool {
	s := r.options.CleanSession
	return s
}

// Order returns whether ordered message routing is enabled
func (r *ClientOptionsReader) Order() bool {
	s := r.options.Order
	return s
}

// WillEnabled returns whether a will message is configured
func (r *ClientOptionsReader) WillEnabled() bool {
	s := r.options.WillEnabled
	return s
}

// WillTopic returns the configured will topic
func (r *ClientOptionsReader) WillTopic() string {
	s := r.options.WillTopic
	return s
}

// WillPayload returns the configured will payload
func (r *ClientOptionsReader) WillPayload() []byte {
	s := r.options.WillPayload
	return s
}

// WillQos returns the QoS level of the configured will message
func (r *ClientOptionsReader) WillQos() byte {
	s := r.options.WillQos
	return s
}

// WillRetained returns whether the will message is to be retained
func (r *ClientOptionsReader) WillRetained() bool {
	s := r.options.WillRetained
	return s
}

// ProtocolVersion returns the configured MQTT protocol version
func (r *ClientOptionsReader) ProtocolVersion() uint {
	s := r.options.ProtocolVersion
	return s
}

// TLSConfig returns the configured TLS configuration (may be nil)
func (r *ClientOptionsReader) TLSConfig() *tls.Config {
	s := r.options.TLSConfig
	return s
}

// KeepAlive returns the keepalive interval, converted back from the
// stored whole-second count to a time.Duration
func (r *ClientOptionsReader) KeepAlive() time.Duration {
	s := time.Duration(r.options.KeepAlive * int64(time.Second))
	return s
}

// PingTimeout returns how long to wait for a ping response
func (r *ClientOptionsReader) PingTimeout() time.Duration {
	s := r.options.PingTimeout
	return s
}

// ConnectTimeout returns the connection attempt timeout
func (r *ClientOptionsReader) ConnectTimeout() time.Duration {
	s := r.options.ConnectTimeout
	return s
}

// MaxReconnectInterval returns the maximum delay between reconnection attempts
func (r *ClientOptionsReader) MaxReconnectInterval() time.Duration {
	s := r.options.MaxReconnectInterval
	return s
}

// AutoReconnect returns whether automatic reconnection is enabled
func (r *ClientOptionsReader) AutoReconnect() bool {
	s := r.options.AutoReconnect
	return s
}

// ConnectRetryInterval returns the delay between retries on the initial connection (if ConnectRetry true)
func (r *ClientOptionsReader) ConnectRetryInterval() time.Duration {
	s := r.options.ConnectRetryInterval
	return s
}

// ConnectRetry returns whether the initial connection request will be retried until connection established
func (r *ClientOptionsReader) ConnectRetry() bool {
	s := r.options.ConnectRetry
	return s
}

// WriteTimeout returns the configured write timeout (0 means disabled)
func (r *ClientOptionsReader) WriteTimeout() time.Duration {
	s := r.options.WriteTimeout
	return s
}

// MessageChannelDepth returns the deprecated message channel depth setting
func (r *ClientOptionsReader) MessageChannelDepth() uint {
	s := r.options.MessageChannelDepth
	return s
}

// HTTPHeaders returns the additional headers sent in the WebSocket opening handshake
func (r *ClientOptionsReader) HTTPHeaders() http.Header {
	h := r.options.HTTPHeaders
	return h
}

// WebsocketOptions returns the currently configured WebSocket options
func (r *ClientOptionsReader) WebsocketOptions() *WebsocketOptions {
	s := r.options.WebsocketOptions
	return s
}

+ 52 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go

@@ -0,0 +1,52 @@
+package packets
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+)
+
// ConnackPacket is an internal representation of the fields of the
// Connack MQTT packet
type ConnackPacket struct {
	FixedHeader
	SessionPresent bool // broker reports an existing session for this client id
	ReturnCode     byte // connection acceptance/refusal code from the broker
}

// String renders the fixed header plus the session-present flag and
// return code, for logging/debugging.
func (ca *ConnackPacket) String() string {
	return fmt.Sprintf("%s sessionpresent: %t returncode: %d", ca.FixedHeader, ca.SessionPresent, ca.ReturnCode)
}
+
+func (ca *ConnackPacket) Write(w io.Writer) error {
+	var body bytes.Buffer
+	var err error
+
+	body.WriteByte(boolToByte(ca.SessionPresent))
+	body.WriteByte(ca.ReturnCode)
+	ca.FixedHeader.RemainingLength = 2
+	packet := ca.FixedHeader.pack()
+	packet.Write(body.Bytes())
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (ca *ConnackPacket) Unpack(b io.Reader) error {
+	flags, err := decodeByte(b)
+	if err != nil {
+		return err
+	}
+	ca.SessionPresent = 1&flags > 0
+	ca.ReturnCode, err = decodeByte(b)
+
+	return err
+}
+
// Details returns a Details struct containing the Qos and
// MessageID of this ControlPacket. A CONNACK carries neither, so both
// fields are always zero.
func (ca *ConnackPacket) Details() Details {
	return Details{Qos: 0, MessageID: 0}
}

+ 155 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go

@@ -0,0 +1,155 @@
+package packets
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+)
+
+// ConnectPacket is an internal representation of the fields of the
+// Connect MQTT packet
+type ConnectPacket struct {
+	FixedHeader
+	ProtocolName    string
+	ProtocolVersion byte
+	CleanSession    bool
+	WillFlag        bool
+	WillQos         byte
+	WillRetain      bool
+	UsernameFlag    bool
+	PasswordFlag    bool
+	ReservedBit     byte
+	Keepalive       uint16
+
+	ClientIdentifier string
+	WillTopic        string
+	WillMessage      []byte
+	Username         string
+	Password         []byte
+}
+
+func (c *ConnectPacket) String() string {
+	var password string
+	if len(c.Password) > 0 {
+		password = "<redacted>"
+	}
+	return fmt.Sprintf("%s protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.FixedHeader, c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, password)
+}
+
+func (c *ConnectPacket) Write(w io.Writer) error {
+	var body bytes.Buffer
+	var err error
+
+	body.Write(encodeString(c.ProtocolName))
+	body.WriteByte(c.ProtocolVersion)
+	body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7)
+	body.Write(encodeUint16(c.Keepalive))
+	body.Write(encodeString(c.ClientIdentifier))
+	if c.WillFlag {
+		body.Write(encodeString(c.WillTopic))
+		body.Write(encodeBytes(c.WillMessage))
+	}
+	if c.UsernameFlag {
+		body.Write(encodeString(c.Username))
+	}
+	if c.PasswordFlag {
+		body.Write(encodeBytes(c.Password))
+	}
+	c.FixedHeader.RemainingLength = body.Len()
+	packet := c.FixedHeader.pack()
+	packet.Write(body.Bytes())
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
// Unpack decodes the details of a ControlPacket after the fixed
// header has been read
func (c *ConnectPacket) Unpack(b io.Reader) error {
	var err error
	// Variable header: protocol name, protocol level, connect flags and
	// keepalive. The decode order below is mandated by the wire format and
	// must not be rearranged.
	c.ProtocolName, err = decodeString(b)
	if err != nil {
		return err
	}
	c.ProtocolVersion, err = decodeByte(b)
	if err != nil {
		return err
	}
	options, err := decodeByte(b)
	if err != nil {
		return err
	}
	// Split the connect-flags byte into its individual bit fields.
	c.ReservedBit = 1 & options
	c.CleanSession = 1&(options>>1) > 0
	c.WillFlag = 1&(options>>2) > 0
	c.WillQos = 3 & (options >> 3)
	c.WillRetain = 1&(options>>5) > 0
	c.PasswordFlag = 1&(options>>6) > 0
	c.UsernameFlag = 1&(options>>7) > 0
	c.Keepalive, err = decodeUint16(b)
	if err != nil {
		return err
	}
	// Payload: the client identifier is always present; the remaining
	// fields appear only when the corresponding flag bit was set above.
	c.ClientIdentifier, err = decodeString(b)
	if err != nil {
		return err
	}
	if c.WillFlag {
		c.WillTopic, err = decodeString(b)
		if err != nil {
			return err
		}
		c.WillMessage, err = decodeBytes(b)
		if err != nil {
			return err
		}
	}
	if c.UsernameFlag {
		c.Username, err = decodeString(b)
		if err != nil {
			return err
		}
	}
	if c.PasswordFlag {
		c.Password, err = decodeBytes(b)
		if err != nil {
			return err
		}
	}

	return nil
}
+
+// Validate performs validation of the fields of a Connect packet
+func (c *ConnectPacket) Validate() byte {
+	if c.PasswordFlag && !c.UsernameFlag {
+		return ErrRefusedBadUsernameOrPassword
+	}
+	if c.ReservedBit != 0 {
+		// Bad reserved bit
+		return ErrProtocolViolation
+	}
+	if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) {
+		// Mismatched or unsupported protocol version
+		return ErrRefusedBadProtocolVersion
+	}
+	if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" {
+		// Bad protocol name
+		return ErrProtocolViolation
+	}
+	if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 {
+		// Bad size field
+		return ErrProtocolViolation
+	}
+	if len(c.ClientIdentifier) == 0 && !c.CleanSession {
+		// Bad client identifier
+		return ErrRefusedIDRejected
+	}
+	return Accepted
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (c *ConnectPacket) Details() Details {
+	return Details{Qos: 0, MessageID: 0}
+}

+ 34 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go

@@ -0,0 +1,34 @@
+package packets
+
+import (
+	"io"
+)
+
+// DisconnectPacket is an internal representation of the fields of the
+// Disconnect MQTT packet
+type DisconnectPacket struct {
+	FixedHeader
+}
+
+func (d *DisconnectPacket) String() string {
+	return d.FixedHeader.String()
+}
+
+func (d *DisconnectPacket) Write(w io.Writer) error {
+	packet := d.FixedHeader.pack()
+	_, err := packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (d *DisconnectPacket) Unpack(b io.Reader) error {
+	return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (d *DisconnectPacket) Details() Details {
+	return Details{Qos: 0, MessageID: 0}
+}

+ 356 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go

@@ -0,0 +1,356 @@
package packets

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// ControlPacket defines the interface for structs intended to hold
// decoded MQTT packets, either from being read or before being
// written
type ControlPacket interface {
	Write(io.Writer) error
	Unpack(io.Reader) error
	String() string
	Details() Details
}

// PacketNames maps the constants for each of the MQTT packet types
// to a string representation of their name.
var PacketNames = map[uint8]string{
	1:  "CONNECT",
	2:  "CONNACK",
	3:  "PUBLISH",
	4:  "PUBACK",
	5:  "PUBREC",
	6:  "PUBREL",
	7:  "PUBCOMP",
	8:  "SUBSCRIBE",
	9:  "SUBACK",
	10: "UNSUBSCRIBE",
	11: "UNSUBACK",
	12: "PINGREQ",
	13: "PINGRESP",
	14: "DISCONNECT",
}

// Below are the constants assigned to each of the MQTT packet types
const (
	Connect     = 1
	Connack     = 2
	Publish     = 3
	Puback      = 4
	Pubrec      = 5
	Pubrel      = 6
	Pubcomp     = 7
	Subscribe   = 8
	Suback      = 9
	Unsubscribe = 10
	Unsuback    = 11
	Pingreq     = 12
	Pingresp    = 13
	Disconnect  = 14
)

// Below are the const definitions for error codes returned by
// Connect()
const (
	Accepted                        = 0x00
	ErrRefusedBadProtocolVersion    = 0x01
	ErrRefusedIDRejected            = 0x02
	ErrRefusedServerUnavailable     = 0x03
	ErrRefusedBadUsernameOrPassword = 0x04
	ErrRefusedNotAuthorised         = 0x05
	// The two codes below are library-internal extensions, not CONNACK
	// return codes defined by the MQTT specification.
	ErrNetworkError      = 0xFE
	ErrProtocolViolation = 0xFF
)

// ConnackReturnCodes is a map of the error codes constants for Connect()
// to a string representation of the error
var ConnackReturnCodes = map[uint8]string{
	0:   "Connection Accepted",
	1:   "Connection Refused: Bad Protocol Version",
	2:   "Connection Refused: Client Identifier Rejected",
	3:   "Connection Refused: Server Unavailable",
	4:   "Connection Refused: Username or Password in unknown format",
	5:   "Connection Refused: Not Authorised",
	254: "Connection Error",
	255: "Connection Refused: Protocol Violation",
}

// Sentinel errors corresponding to the numeric connect return codes above.
var (
	ErrorRefusedBadProtocolVersion    = errors.New("unacceptable protocol version")
	ErrorRefusedIDRejected            = errors.New("identifier rejected")
	ErrorRefusedServerUnavailable     = errors.New("server Unavailable")
	ErrorRefusedBadUsernameOrPassword = errors.New("bad user name or password")
	ErrorRefusedNotAuthorised         = errors.New("not Authorized")
	ErrorNetworkError                 = errors.New("network Error")
	ErrorProtocolViolation            = errors.New("protocol Violation")
)

// ConnErrors is a map of the errors codes constants for Connect()
// to a Go error. Note Accepted maps to nil (no error).
var ConnErrors = map[byte]error{
	Accepted:                        nil,
	ErrRefusedBadProtocolVersion:    ErrorRefusedBadProtocolVersion,
	ErrRefusedIDRejected:            ErrorRefusedIDRejected,
	ErrRefusedServerUnavailable:     ErrorRefusedServerUnavailable,
	ErrRefusedBadUsernameOrPassword: ErrorRefusedBadUsernameOrPassword,
	ErrRefusedNotAuthorised:         ErrorRefusedNotAuthorised,
	ErrNetworkError:                 ErrorNetworkError,
	ErrProtocolViolation:            ErrorProtocolViolation,
}
+
+// ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts
+// to read an MQTT packet from the stream. It returns a ControlPacket
+// representing the decoded MQTT packet and an error. One of these returns will
+// always be nil, a nil ControlPacket indicating an error occurred.
+func ReadPacket(r io.Reader) (ControlPacket, error) {
+	var fh FixedHeader
+	b := make([]byte, 1)
+
+	_, err := io.ReadFull(r, b)
+	if err != nil {
+		return nil, err
+	}
+
+	err = fh.unpack(b[0], r)
+	if err != nil {
+		return nil, err
+	}
+
+	cp, err := NewControlPacketWithHeader(fh)
+	if err != nil {
+		return nil, err
+	}
+
+	packetBytes := make([]byte, fh.RemainingLength)
+	n, err := io.ReadFull(r, packetBytes)
+	if err != nil {
+		return nil, err
+	}
+	if n != fh.RemainingLength {
+		return nil, errors.New("failed to read expected data")
+	}
+
+	err = cp.Unpack(bytes.NewBuffer(packetBytes))
+	return cp, err
+}
+
+// NewControlPacket is used to create a new ControlPacket of the type specified
+// by packetType, this is usually done by reference to the packet type constants
+// defined in packets.go. The newly created ControlPacket is empty and a pointer
+// is returned.
+func NewControlPacket(packetType byte) ControlPacket {
+	switch packetType {
+	case Connect:
+		return &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}}
+	case Connack:
+		return &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}}
+	case Disconnect:
+		return &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}}
+	case Publish:
+		return &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}}
+	case Puback:
+		return &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}}
+	case Pubrec:
+		return &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}}
+	case Pubrel:
+		return &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}}
+	case Pubcomp:
+		return &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}}
+	case Subscribe:
+		return &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}}
+	case Suback:
+		return &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}}
+	case Unsubscribe:
+		return &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}}
+	case Unsuback:
+		return &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}}
+	case Pingreq:
+		return &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}}
+	case Pingresp:
+		return &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}}
+	}
+	return nil
+}
+
+// NewControlPacketWithHeader is used to create a new ControlPacket of the type
+// specified within the FixedHeader that is passed to the function.
+// The newly created ControlPacket is empty and a pointer is returned.
+func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) {
+	switch fh.MessageType {
+	case Connect:
+		return &ConnectPacket{FixedHeader: fh}, nil
+	case Connack:
+		return &ConnackPacket{FixedHeader: fh}, nil
+	case Disconnect:
+		return &DisconnectPacket{FixedHeader: fh}, nil
+	case Publish:
+		return &PublishPacket{FixedHeader: fh}, nil
+	case Puback:
+		return &PubackPacket{FixedHeader: fh}, nil
+	case Pubrec:
+		return &PubrecPacket{FixedHeader: fh}, nil
+	case Pubrel:
+		return &PubrelPacket{FixedHeader: fh}, nil
+	case Pubcomp:
+		return &PubcompPacket{FixedHeader: fh}, nil
+	case Subscribe:
+		return &SubscribePacket{FixedHeader: fh}, nil
+	case Suback:
+		return &SubackPacket{FixedHeader: fh}, nil
+	case Unsubscribe:
+		return &UnsubscribePacket{FixedHeader: fh}, nil
+	case Unsuback:
+		return &UnsubackPacket{FixedHeader: fh}, nil
+	case Pingreq:
+		return &PingreqPacket{FixedHeader: fh}, nil
+	case Pingresp:
+		return &PingrespPacket{FixedHeader: fh}, nil
+	}
+	return nil, fmt.Errorf("unsupported packet type 0x%x", fh.MessageType)
+}
+
// Details struct returned by the Details() function called on
// ControlPackets to present details of the Qos and
// MessageID of this ControlPacket
type Details struct {
	Qos       byte   // QoS carried by the packet; 0 for types without one
	MessageID uint16 // packet identifier; 0 for types without one
}

// FixedHeader is a struct to hold the decoded information from
// the fixed header of an MQTT ControlPacket
type FixedHeader struct {
	MessageType     byte // control packet type (see the packet type constants)
	Dup             bool // duplicate delivery flag
	Qos             byte // quality of service bits
	Retain          bool // retain flag
	RemainingLength int  // byte count of the rest of the packet after this header
}

// String renders the header in human-readable form for logging.
func (fh FixedHeader) String() string {
	return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength)
}
+
+func boolToByte(b bool) byte {
+	switch b {
+	case true:
+		return 1
+	default:
+		return 0
+	}
+}
+
+func (fh *FixedHeader) pack() bytes.Buffer {
+	var header bytes.Buffer
+	header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain))
+	header.Write(encodeLength(fh.RemainingLength))
+	return header
+}
+
+func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) error {
+	fh.MessageType = typeAndFlags >> 4
+	fh.Dup = (typeAndFlags>>3)&0x01 > 0
+	fh.Qos = (typeAndFlags >> 1) & 0x03
+	fh.Retain = typeAndFlags&0x01 > 0
+
+	var err error
+	fh.RemainingLength, err = decodeLength(r)
+	return err
+}
+
// decodeByte reads a single byte from b.
func decodeByte(b io.Reader) (byte, error) {
	num := make([]byte, 1)
	// io.ReadFull guarantees an error unless the byte was actually read.
	if _, err := io.ReadFull(b, num); err != nil {
		return 0, err
	}

	return num[0], nil
}

// decodeUint16 reads a big-endian 16-bit unsigned integer from b.
func decodeUint16(b io.Reader) (uint16, error) {
	num := make([]byte, 2)
	// Use io.ReadFull rather than a bare Read: Read may return fewer than
	// 2 bytes with a nil error on a generic io.Reader, which would
	// silently decode a bogus value.
	if _, err := io.ReadFull(b, num); err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint16(num), nil
}

// encodeUint16 returns num encoded as 2 bytes, big-endian.
func encodeUint16(num uint16) []byte {
	bytesResult := make([]byte, 2)
	binary.BigEndian.PutUint16(bytesResult, num)
	return bytesResult
}

// encodeString encodes a UTF-8 string with a 2-byte big-endian length prefix.
func encodeString(field string) []byte {
	return encodeBytes([]byte(field))
}

// decodeString reads a length-prefixed string from b.
func decodeString(b io.Reader) (string, error) {
	buf, err := decodeBytes(b)
	return string(buf), err
}

// decodeBytes reads a 2-byte length prefix and then exactly that many bytes.
func decodeBytes(b io.Reader) ([]byte, error) {
	fieldLength, err := decodeUint16(b)
	if err != nil {
		return nil, err
	}

	field := make([]byte, fieldLength)
	// A short read must surface as io.ErrUnexpectedEOF rather than
	// returning a partially-filled field with a nil error.
	if _, err := io.ReadFull(b, field); err != nil {
		return nil, err
	}

	return field, nil
}

// encodeBytes prefixes field with its length as a big-endian uint16.
func encodeBytes(field []byte) []byte {
	fieldLength := make([]byte, 2)
	binary.BigEndian.PutUint16(fieldLength, uint16(len(field)))
	return append(fieldLength, field...)
}
+
// encodeLength encodes length using the MQTT variable-length scheme:
// 7 value bits per byte, high bit set on all but the final byte.
func encodeLength(length int) []byte {
	encoded := make([]byte, 0, 4)
	for {
		digit := byte(length % 128)
		length /= 128
		if length > 0 {
			digit |= 0x80
		}
		encoded = append(encoded, digit)
		if length == 0 {
			return encoded
		}
	}
}

// decodeLength reads a variable-length remaining-length value from r.
// At most four bytes are consumed (shifts 0, 7, 14, 21), which bounds the
// loop and prevents a malformed stream from spinning forever.
func decodeLength(r io.Reader) (int, error) {
	var value uint32
	buf := make([]byte, 1)
	for shift := uint32(0); shift < 27; shift += 7 {
		if _, err := io.ReadFull(r, buf); err != nil {
			return 0, err
		}

		value |= uint32(buf[0]&0x7F) << shift
		if buf[0]&0x80 == 0 {
			break
		}
	}
	return int(value), nil
}

+ 34 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go

@@ -0,0 +1,34 @@
+package packets
+
+import (
+	"io"
+)
+
+// PingreqPacket is an internal representation of the fields of the
+// Pingreq MQTT packet
+type PingreqPacket struct {
+	FixedHeader
+}
+
+func (pr *PingreqPacket) String() string {
+	return pr.FixedHeader.String()
+}
+
+func (pr *PingreqPacket) Write(w io.Writer) error {
+	packet := pr.FixedHeader.pack()
+	_, err := packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pr *PingreqPacket) Unpack(b io.Reader) error {
+	return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pr *PingreqPacket) Details() Details {
+	return Details{Qos: 0, MessageID: 0}
+}

+ 34 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go

@@ -0,0 +1,34 @@
+package packets
+
+import (
+	"io"
+)
+
+// PingrespPacket is an internal representation of the fields of the
+// Pingresp MQTT packet
+type PingrespPacket struct {
+	FixedHeader
+}
+
+func (pr *PingrespPacket) String() string {
+	return pr.FixedHeader.String()
+}
+
+func (pr *PingrespPacket) Write(w io.Writer) error {
+	packet := pr.FixedHeader.pack()
+	_, err := packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pr *PingrespPacket) Unpack(b io.Reader) error {
+	return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pr *PingrespPacket) Details() Details {
+	return Details{Qos: 0, MessageID: 0}
+}

+ 42 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go

@@ -0,0 +1,42 @@
+package packets
+
+import (
+	"fmt"
+	"io"
+)
+
+// PubackPacket is an internal representation of the fields of the
+// Puback MQTT packet
+type PubackPacket struct {
+	FixedHeader
+	MessageID uint16
+}
+
+func (pa *PubackPacket) String() string {
+	return fmt.Sprintf("%s MessageID: %d", pa.FixedHeader, pa.MessageID)
+}
+
+func (pa *PubackPacket) Write(w io.Writer) error {
+	var err error
+	pa.FixedHeader.RemainingLength = 2
+	packet := pa.FixedHeader.pack()
+	packet.Write(encodeUint16(pa.MessageID))
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pa *PubackPacket) Unpack(b io.Reader) error {
+	var err error
+	pa.MessageID, err = decodeUint16(b)
+
+	return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pa *PubackPacket) Details() Details {
+	return Details{Qos: pa.Qos, MessageID: pa.MessageID}
+}

+ 42 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go

@@ -0,0 +1,42 @@
+package packets
+
+import (
+	"fmt"
+	"io"
+)
+
+// PubcompPacket is an internal representation of the fields of the
+// Pubcomp MQTT packet
+type PubcompPacket struct {
+	FixedHeader
+	MessageID uint16
+}
+
+func (pc *PubcompPacket) String() string {
+	return fmt.Sprintf("%s MessageID: %d", pc.FixedHeader, pc.MessageID)
+}
+
+func (pc *PubcompPacket) Write(w io.Writer) error {
+	var err error
+	pc.FixedHeader.RemainingLength = 2
+	packet := pc.FixedHeader.pack()
+	packet.Write(encodeUint16(pc.MessageID))
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pc *PubcompPacket) Unpack(b io.Reader) error {
+	var err error
+	pc.MessageID, err = decodeUint16(b)
+
+	return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pc *PubcompPacket) Details() Details {
+	return Details{Qos: pc.Qos, MessageID: pc.MessageID}
+}

+ 83 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go

@@ -0,0 +1,83 @@
+package packets
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+)
+
+// PublishPacket is an internal representation of the fields of the
+// Publish MQTT packet
+type PublishPacket struct {
+	FixedHeader
+	TopicName string
+	MessageID uint16
+	Payload   []byte
+}
+
+func (p *PublishPacket) String() string {
+	return fmt.Sprintf("%s topicName: %s MessageID: %d payload: %s", p.FixedHeader, p.TopicName, p.MessageID, string(p.Payload))
+}
+
+func (p *PublishPacket) Write(w io.Writer) error {
+	var body bytes.Buffer
+	var err error
+
+	body.Write(encodeString(p.TopicName))
+	if p.Qos > 0 {
+		body.Write(encodeUint16(p.MessageID))
+	}
+	p.FixedHeader.RemainingLength = body.Len() + len(p.Payload)
+	packet := p.FixedHeader.pack()
+	packet.Write(body.Bytes())
+	packet.Write(p.Payload)
+	_, err = w.Write(packet.Bytes())
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (p *PublishPacket) Unpack(b io.Reader) error {
+	var payloadLength = p.FixedHeader.RemainingLength
+	var err error
+	p.TopicName, err = decodeString(b)
+	if err != nil {
+		return err
+	}
+
+	if p.Qos > 0 {
+		p.MessageID, err = decodeUint16(b)
+		if err != nil {
+			return err
+		}
+		payloadLength -= len(p.TopicName) + 4
+	} else {
+		payloadLength -= len(p.TopicName) + 2
+	}
+	if payloadLength < 0 {
+		return fmt.Errorf("error unpacking publish, payload length < 0")
+	}
+	p.Payload = make([]byte, payloadLength)
+	_, err = b.Read(p.Payload)
+
+	return err
+}
+
+// Copy creates a new PublishPacket with the same topic and payload
+// but an empty fixed header, useful for when you want to deliver
+// a message with different properties such as Qos but the same
+// content
+func (p *PublishPacket) Copy() *PublishPacket {
+	newP := NewControlPacket(Publish).(*PublishPacket)
+	newP.TopicName = p.TopicName
+	newP.Payload = p.Payload
+
+	return newP
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (p *PublishPacket) Details() Details {
+	return Details{Qos: p.Qos, MessageID: p.MessageID}
+}

+ 42 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go

@@ -0,0 +1,42 @@
+package packets
+
+import (
+	"fmt"
+	"io"
+)
+
+// PubrecPacket is an internal representation of the fields of the
+// Pubrec MQTT packet
+type PubrecPacket struct {
+	FixedHeader
+	MessageID uint16
+}
+
+func (pr *PubrecPacket) String() string {
+	return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID)
+}
+
+func (pr *PubrecPacket) Write(w io.Writer) error {
+	var err error
+	pr.FixedHeader.RemainingLength = 2
+	packet := pr.FixedHeader.pack()
+	packet.Write(encodeUint16(pr.MessageID))
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pr *PubrecPacket) Unpack(b io.Reader) error {
+	var err error
+	pr.MessageID, err = decodeUint16(b)
+
+	return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pr *PubrecPacket) Details() Details {
+	return Details{Qos: pr.Qos, MessageID: pr.MessageID}
+}

+ 42 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go

@@ -0,0 +1,42 @@
+package packets
+
+import (
+	"fmt"
+	"io"
+)
+
+// PubrelPacket is an internal representation of the fields of the
+// Pubrel MQTT packet
+type PubrelPacket struct {
+	FixedHeader
+	MessageID uint16
+}
+
+func (pr *PubrelPacket) String() string {
+	return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID)
+}
+
+func (pr *PubrelPacket) Write(w io.Writer) error {
+	var err error
+	pr.FixedHeader.RemainingLength = 2
+	packet := pr.FixedHeader.pack()
+	packet.Write(encodeUint16(pr.MessageID))
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pr *PubrelPacket) Unpack(b io.Reader) error {
+	var err error
+	pr.MessageID, err = decodeUint16(b)
+
+	return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pr *PubrelPacket) Details() Details {
+	return Details{Qos: pr.Qos, MessageID: pr.MessageID}
+}

+ 57 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go

@@ -0,0 +1,57 @@
+package packets
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+)
+
+// SubackPacket is an internal representation of the fields of the
+// Suback MQTT packet
+type SubackPacket struct {
+	FixedHeader
+	MessageID   uint16
+	ReturnCodes []byte
+}
+
+func (sa *SubackPacket) String() string {
+	return fmt.Sprintf("%s MessageID: %d", sa.FixedHeader, sa.MessageID)
+}
+
+func (sa *SubackPacket) Write(w io.Writer) error {
+	var body bytes.Buffer
+	var err error
+	body.Write(encodeUint16(sa.MessageID))
+	body.Write(sa.ReturnCodes)
+	sa.FixedHeader.RemainingLength = body.Len()
+	packet := sa.FixedHeader.pack()
+	packet.Write(body.Bytes())
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (sa *SubackPacket) Unpack(b io.Reader) error {
+	var qosBuffer bytes.Buffer
+	var err error
+	sa.MessageID, err = decodeUint16(b)
+	if err != nil {
+		return err
+	}
+
+	_, err = qosBuffer.ReadFrom(b)
+	if err != nil {
+		return err
+	}
+	sa.ReturnCodes = qosBuffer.Bytes()
+
+	return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (sa *SubackPacket) Details() Details {
+	return Details{Qos: 0, MessageID: sa.MessageID}
+}

+ 69 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go

@@ -0,0 +1,69 @@
+package packets
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+)
+
+// SubscribePacket is an internal representation of the fields of the
+// Subscribe MQTT packet
+type SubscribePacket struct {
+	FixedHeader
+	MessageID uint16
+	Topics    []string
+	Qoss      []byte
+}
+
+func (s *SubscribePacket) String() string {
+	return fmt.Sprintf("%s MessageID: %d topics: %s", s.FixedHeader, s.MessageID, s.Topics)
+}
+
+func (s *SubscribePacket) Write(w io.Writer) error {
+	var body bytes.Buffer
+	var err error
+
+	body.Write(encodeUint16(s.MessageID))
+	for i, topic := range s.Topics {
+		body.Write(encodeString(topic))
+		body.WriteByte(s.Qoss[i])
+	}
+	s.FixedHeader.RemainingLength = body.Len()
+	packet := s.FixedHeader.pack()
+	packet.Write(body.Bytes())
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (s *SubscribePacket) Unpack(b io.Reader) error {
+	var err error
+	s.MessageID, err = decodeUint16(b)
+	if err != nil {
+		return err
+	}
+	payloadLength := s.FixedHeader.RemainingLength - 2
+	for payloadLength > 0 {
+		topic, err := decodeString(b)
+		if err != nil {
+			return err
+		}
+		s.Topics = append(s.Topics, topic)
+		qos, err := decodeByte(b)
+		if err != nil {
+			return err
+		}
+		s.Qoss = append(s.Qoss, qos)
+		payloadLength -= 2 + len(topic) + 1 // 2 bytes of string length, plus string, plus 1 byte for Qos
+	}
+
+	return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (s *SubscribePacket) Details() Details {
+	return Details{Qos: 1, MessageID: s.MessageID}
+}

+ 42 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go

@@ -0,0 +1,42 @@
+package packets
+
+import (
+	"fmt"
+	"io"
+)
+
+// UnsubackPacket is an internal representation of the fields of the
+// Unsuback MQTT packet
+type UnsubackPacket struct {
+	FixedHeader
+	MessageID uint16
+}
+
+func (ua *UnsubackPacket) String() string {
+	return fmt.Sprintf("%s MessageID: %d", ua.FixedHeader, ua.MessageID)
+}
+
+func (ua *UnsubackPacket) Write(w io.Writer) error {
+	var err error
+	ua.FixedHeader.RemainingLength = 2
+	packet := ua.FixedHeader.pack()
+	packet.Write(encodeUint16(ua.MessageID))
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (ua *UnsubackPacket) Unpack(b io.Reader) error {
+	var err error
+	ua.MessageID, err = decodeUint16(b)
+
+	return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (ua *UnsubackPacket) Details() Details {
+	return Details{Qos: 0, MessageID: ua.MessageID}
+}

+ 56 - 0
vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go

@@ -0,0 +1,56 @@
+package packets
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+)
+
+// UnsubscribePacket is an internal representation of the fields of the
+// Unsubscribe MQTT packet
+type UnsubscribePacket struct {
+	FixedHeader
+	MessageID uint16
+	Topics    []string
+}
+
+func (u *UnsubscribePacket) String() string {
+	return fmt.Sprintf("%s MessageID: %d", u.FixedHeader, u.MessageID)
+}
+
+func (u *UnsubscribePacket) Write(w io.Writer) error {
+	var body bytes.Buffer
+	var err error
+	body.Write(encodeUint16(u.MessageID))
+	for _, topic := range u.Topics {
+		body.Write(encodeString(topic))
+	}
+	u.FixedHeader.RemainingLength = body.Len()
+	packet := u.FixedHeader.pack()
+	packet.Write(body.Bytes())
+	_, err = packet.WriteTo(w)
+
+	return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (u *UnsubscribePacket) Unpack(b io.Reader) error {
+	var err error
+	u.MessageID, err = decodeUint16(b)
+	if err != nil {
+		return err
+	}
+
+	for topic, err := decodeString(b); err == nil && topic != ""; topic, err = decodeString(b) {
+		u.Topics = append(u.Topics, topic)
+	}
+
+	return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (u *UnsubscribePacket) Details() Details {
+	return Details{Qos: 1, MessageID: u.MessageID}
+}

部分文件因文件數量過多而無法顯示