Ver código fonte

first commit

mvtov 2 anos atrás
commit
43fba13799
100 arquivos alterados com 13783 adições e 0 exclusões
  1. 8 0
      .idea/.gitignore
  2. 9 0
      .idea/dlt645-server.iml
  3. 6 0
      .idea/inspectionProfiles/Project_Default.xml
  4. 8 0
      .idea/modules.xml
  5. 6 0
      .idea/vcs.xml
  6. 1 0
      README.md
  7. 12 0
      config/config.toml
  8. 32 0
      errors/errors.go
  9. 38 0
      go.mod
  10. 111 0
      go.sum
  11. 57 0
      main.go
  12. 190 0
      protocol/bufio.go
  13. 29 0
      protocol/const.go
  14. 17 0
      protocol/dlt645_0x0001.go
  15. 68 0
      protocol/dlt645_0x33333433.go
  16. 68 0
      protocol/dlt645_0x33333533.go
  17. 68 0
      protocol/dlt645_0x33333635.go
  18. 67 0
      protocol/dlt645_0x33333735.go
  19. 67 0
      protocol/dlt645_0x33333835.go
  20. 66 0
      protocol/dlt645_0x33333935.go
  21. 67 0
      protocol/dlt645_0x33343435.go
  22. 67 0
      protocol/dlt645_0x33343535.go
  23. 58 0
      protocol/dlt645_0x33353435.go
  24. 59 0
      protocol/dlt645_0x33353535.go
  25. 58 0
      protocol/dlt645_0x33363435.go
  26. 58 0
      protocol/dlt645_0x33363535.go
  27. 9 0
      protocol/entity.go
  28. 26 0
      protocol/errors.go
  29. 113 0
      protocol/header.go
  30. 146 0
      protocol/message.go
  31. 46 0
      protocol/property.go
  32. 82 0
      protocol/register.go
  33. 88 0
      server/client.go
  34. 90 0
      server/server.go
  35. 19 0
      test/dlt645_0x33333433_test.go
  36. 17 0
      test/main.go
  37. 105 0
      utils/util.go
  38. 2 0
      vendor/github.com/BurntSushi/toml/.gitignore
  39. 1 0
      vendor/github.com/BurntSushi/toml/COMPATIBLE
  40. 21 0
      vendor/github.com/BurntSushi/toml/COPYING
  41. 211 0
      vendor/github.com/BurntSushi/toml/README.md
  42. 561 0
      vendor/github.com/BurntSushi/toml/decode.go
  43. 19 0
      vendor/github.com/BurntSushi/toml/decode_go116.go
  44. 21 0
      vendor/github.com/BurntSushi/toml/deprecated.go
  45. 13 0
      vendor/github.com/BurntSushi/toml/doc.go
  46. 698 0
      vendor/github.com/BurntSushi/toml/encode.go
  47. 229 0
      vendor/github.com/BurntSushi/toml/error.go
  48. 36 0
      vendor/github.com/BurntSushi/toml/internal/tz.go
  49. 1224 0
      vendor/github.com/BurntSushi/toml/lex.go
  50. 120 0
      vendor/github.com/BurntSushi/toml/meta.go
  51. 767 0
      vendor/github.com/BurntSushi/toml/parse.go
  52. 242 0
      vendor/github.com/BurntSushi/toml/type_fields.go
  53. 70 0
      vendor/github.com/BurntSushi/toml/type_toml.go
  54. 4 0
      vendor/github.com/clbanning/mxj/.travis.yml
  55. 55 0
      vendor/github.com/clbanning/mxj/LICENSE
  56. 199 0
      vendor/github.com/clbanning/mxj/anyxml.go
  57. 54 0
      vendor/github.com/clbanning/mxj/atomFeedString.xml
  58. 138 0
      vendor/github.com/clbanning/mxj/doc.go
  59. 54 0
      vendor/github.com/clbanning/mxj/escapechars.go
  60. 9 0
      vendor/github.com/clbanning/mxj/exists.go
  61. 287 0
      vendor/github.com/clbanning/mxj/files.go
  62. 2 0
      vendor/github.com/clbanning/mxj/files_test.badjson
  63. 9 0
      vendor/github.com/clbanning/mxj/files_test.badxml
  64. 2 0
      vendor/github.com/clbanning/mxj/files_test.json
  65. 9 0
      vendor/github.com/clbanning/mxj/files_test.xml
  66. 1 0
      vendor/github.com/clbanning/mxj/files_test_dup.json
  67. 1 0
      vendor/github.com/clbanning/mxj/files_test_dup.xml
  68. 12 0
      vendor/github.com/clbanning/mxj/files_test_indent.json
  69. 8 0
      vendor/github.com/clbanning/mxj/files_test_indent.xml
  70. 35 0
      vendor/github.com/clbanning/mxj/gob.go
  71. 323 0
      vendor/github.com/clbanning/mxj/json.go
  72. 668 0
      vendor/github.com/clbanning/mxj/keyvalues.go
  73. 112 0
      vendor/github.com/clbanning/mxj/leafnode.go
  74. 86 0
      vendor/github.com/clbanning/mxj/misc.go
  75. 128 0
      vendor/github.com/clbanning/mxj/mxj.go
  76. 184 0
      vendor/github.com/clbanning/mxj/newmap.go
  77. 192 0
      vendor/github.com/clbanning/mxj/readme.md
  78. 37 0
      vendor/github.com/clbanning/mxj/remove.go
  79. 61 0
      vendor/github.com/clbanning/mxj/rename.go
  80. 26 0
      vendor/github.com/clbanning/mxj/set.go
  81. 20 0
      vendor/github.com/clbanning/mxj/setfieldsep.go
  82. 29 0
      vendor/github.com/clbanning/mxj/songtext.xml
  83. 30 0
      vendor/github.com/clbanning/mxj/strict.go
  84. 54 0
      vendor/github.com/clbanning/mxj/struct.go
  85. 258 0
      vendor/github.com/clbanning/mxj/updatevalues.go
  86. 1324 0
      vendor/github.com/clbanning/mxj/xml.go
  87. 844 0
      vendor/github.com/clbanning/mxj/xmlseq.go
  88. 18 0
      vendor/github.com/clbanning/mxj/xmlseq2.go
  89. 15 0
      vendor/github.com/davecgh/go-spew/LICENSE
  90. 145 0
      vendor/github.com/davecgh/go-spew/spew/bypass.go
  91. 38 0
      vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
  92. 341 0
      vendor/github.com/davecgh/go-spew/spew/common.go
  93. 306 0
      vendor/github.com/davecgh/go-spew/spew/config.go
  94. 211 0
      vendor/github.com/davecgh/go-spew/spew/doc.go
  95. 509 0
      vendor/github.com/davecgh/go-spew/spew/dump.go
  96. 419 0
      vendor/github.com/davecgh/go-spew/spew/format.go
  97. 148 0
      vendor/github.com/davecgh/go-spew/spew/spew.go
  98. 36 0
      vendor/github.com/eclipse/paho.mqtt.golang/.gitignore
  99. 56 0
      vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md
  100. 15 0
      vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION

+ 8 - 0
.idea/.gitignore

@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml

+ 9 - 0
.idea/dlt645-server.iml

@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+  <component name="Go" enabled="true" />
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>

+ 6 - 0
.idea/inspectionProfiles/Project_Default.xml

@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <profile version="1.0">
+    <option name="myName" value="Project Default" />
+    <inspection_tool class="GoCommentStart" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
+  </profile>
+</component>

+ 8 - 0
.idea/modules.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/dlt645-server.iml" filepath="$PROJECT_DIR$/.idea/dlt645-server.iml" />
+    </modules>
+  </component>
+</project>

+ 6 - 0
.idea/vcs.xml

@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>

+ 1 - 0
README.md

@@ -0,0 +1 @@
+# 部标 dlt645-2007 协议解析

+ 12 - 0
config/config.toml

@@ -0,0 +1,12 @@
+[Server]
+    Addr = "0.0.0.0"
+    Port = 8999
+    RunMode = "debug"
+[Sparrow]
+    Server = "http://192.168.0.224:18100"
+    ProductKey = "57c8ba507fedce9b729844e4e4bf45bd09076ac272057b128b40fc7f9d9b662d9a0dd8b8288cdf61e9af0238499e27b0"
+    DeviceCode = "4G-LET-DTU"
+    Debug = true
+
+
+

+ 32 - 0
errors/errors.go

@@ -0,0 +1,32 @@
+package errors
+
+import (
+	"errors"
+)
+
+var (
+	ErrCRC16Failed = errors.New("CRC16 verification failed")
+
+	// 无效消息体
+	ErrInvalidBody = errors.New("invalid body")
+	// 消息体过长
+	ErrBodyTooLong = errors.New("body too long")
+	// 无效消息头
+	ErrInvalidHeader = errors.New("invalid header")
+	// 未找到标识符
+	ErrNotFoundPrefixID = errors.New("not found prefix")
+	// 无效BCD时间
+	ErrInvalidBCDTime = errors.New("invalid BCD time")
+	// 无效消息格式
+	ErrInvalidMessage = errors.New("invalid message")
+	// 无效消息校验和
+	ErrInvalidCheckSum = errors.New("invalid check sum")
+	// 消息类型未注册
+	ErrTypeNotRegistered = errors.New("entity not registered")
+	// 附加信息长度错误
+	ErrInvalidExtraLength = errors.New("invalid extra length")
+	// 消息解密失败
+	ErrDecryptMessageFailed = errors.New("decrypt message failed")
+	//
+	WTF = errors.New("wtf")
+)

+ 38 - 0
go.mod

@@ -0,0 +1,38 @@
+module dlt645-server
+
+go 1.17
+
+require (
+	github.com/gogf/gf v1.16.9
+	github.com/sirupsen/logrus v1.9.0
+	github.com/stretchr/testify v1.7.1
+	golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2
+	sparrow-sdk v1.0.0
+)
+
+require (
+	github.com/BurntSushi/toml v1.1.0 // indirect
+	github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/eclipse/paho.mqtt.golang v1.3.5 // indirect
+	github.com/fatih/color v1.13.0 // indirect
+	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-sql-driver/mysql v1.6.0 // indirect
+	github.com/gomodule/redigo v1.8.5 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/grokify/html-strip-tags-go v0.0.1 // indirect
+	github.com/mattn/go-colorable v0.1.9 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/mattn/go-runewidth v0.0.9 // indirect
+	github.com/olekukonko/tablewriter v0.0.5 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	go.opentelemetry.io/otel v1.7.0 // indirect
+	go.opentelemetry.io/otel/trace v1.7.0 // indirect
+	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
+	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+)
+
+replace sparrow-sdk v1.0.0 => gogs.yehaoji.cn/yongxu/sparrow-sdk.git v1.1.2

+ 111 - 0
go.sum

@@ -0,0 +1,111 @@
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
+github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28 h1:LdXxtjzvZYhhUaonAaAKArG3pyC67kGL3YY+6hGG8G4=
+github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y=
+github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
+github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/gogf/gf v1.16.6/go.mod h1:4LoHfEBl2jbVmZpVx+qk2La3zWr1V315FtF2PVZuyQ8=
+github.com/gogf/gf v1.16.9 h1:Q803UmmRo59+Ws08sMVFOcd8oNpkSWL9vS33hlo/Cyk=
+github.com/gogf/gf v1.16.9/go.mod h1:8Q/kw05nlVRp+4vv7XASBsMe9L1tsVKiGoeP2AHnlkk=
+github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc=
+github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grokify/html-strip-tags-go v0.0.0-20190921062105-daaa06bf1aaf/go.mod h1:2Su6romC5/1VXOQMaWL2yb618ARB8iVo6/DR99A6d78=
+github.com/grokify/html-strip-tags-go v0.0.1 h1:0fThFwLbW7P/kOiTBs03FsJSV9RM2M/Q/MOnCQxKMo0=
+github.com/grokify/html-strip-tags-go v0.0.1/go.mod h1:2Su6romC5/1VXOQMaWL2yb618ARB8iVo6/DR99A6d78=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
+go.opentelemetry.io/otel v1.0.0/go.mod h1:AjRVh9A5/5DE7S+mZtTR6t8vpKKryam+0lREnfmS4cg=
+go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
+go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
+go.opentelemetry.io/otel/oteltest v1.0.0-RC2/go.mod h1:kiQ4tw5tAL4JLTbcOYwK1CWI1HkT5aiLzHovgOVnz/A=
+go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
+go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs=
+go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
+go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
+gogs.yehaoji.cn/yongxu/sparrow-sdk.git v1.1.2 h1:Iej//+HmxZSKIwP+1sbSAegE7KErswmsBwCEQLZ17jM=
+gogs.yehaoji.cn/yongxu/sparrow-sdk.git v1.1.2/go.mod h1:hWw7D5hrW8f8cOKKdhtlt8HQbdfD2o6PllWMhs0BdQs=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2 h1:GLw7MR8AfAG2GmGcmVgObFOHXYypgGjnGno25RDwn3Y=
+golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

+ 57 - 0
main.go

@@ -0,0 +1,57 @@
+package main
+
+import (
+	"context"
+	"dlt645-server/server"
+	"github.com/gogf/gf/frame/g"
+	"github.com/gogf/gf/os/glog"
+	"github.com/gogf/gf/os/gproc"
+	"os"
+	"sparrow-sdk/config"
+	gatewayV2 "sparrow-sdk/v2"
+)
+
+func main() {
+	ctx := context.Background()
+	err := glog.SetLevelStr(g.Cfg().GetString("Server.RunMode"))
+	if err != nil {
+		panic(err)
+	}
+	gw := gatewayV2.NewGateway(&config.Config{
+		SparrowServer: g.Cfg().GetString("Sparrow.Server"),
+		ProductKey:    g.Cfg().GetString("Sparrow.ProductKey"),
+		Protocol:      "mqtt",
+		DeviceCode:    g.Cfg().GetString("Sparrow.DeviceCode"),
+		Version:       "1.0.0",
+		Debug:         g.Cfg().GetBool("Sparrow.Debug"),
+	})
+	if _, err = gw.Register(); err != nil {
+		panic(err)
+	}
+	if _, err = gw.Authentication(); err != nil {
+		panic(err)
+	}
+	// 通用指令回调
+	gw.SetReportCommandCallback(func(deviceCode, subId string) error {
+		return nil
+	})
+	go gw.Connect()
+
+	srv := server.NewServer(
+		ctx,
+		g.Cfg().GetString("Server.Addr"),
+		g.Cfg().GetInt("Server.Port"),
+		gw,
+	)
+	go func() {
+		if err := srv.Start(); err != nil {
+			panic(err)
+		}
+	}()
+
+	gproc.AddSigHandlerShutdown(func(sig os.Signal) {
+		gw.Close()
+		srv.Stop()
+	})
+	gproc.Listen()
+}

+ 190 - 0
protocol/bufio.go

@@ -0,0 +1,190 @@
+package protocol
+
+import (
+	"bytes"
+	"dlt645-server/utils"
+	"encoding/binary"
+	"golang.org/x/text/encoding/simplifiedchinese"
+	"golang.org/x/text/transform"
+	"io"
+	"io/ioutil"
+	"time"
+)
+
+type Writer struct {
+	b *bytes.Buffer
+}
+
+func NewWriter() Writer {
+	return Writer{b: bytes.NewBuffer(nil)}
+}
+
+func (writer *Writer) Bytes() []byte {
+	return writer.b.Bytes()
+}
+
+func (writer *Writer) Write(p []byte, size ...int) *Writer {
+	if len(size) == 0 {
+		writer.b.Write(p)
+		return writer
+	}
+
+	if len(p) >= size[0] {
+		writer.b.Write(p[:size[0]])
+	} else {
+		writer.b.Write(p)
+		end := size[0] - len(p)
+		for i := 0; i < end; i++ {
+			writer.b.WriteByte(0)
+		}
+	}
+	return writer
+}
+
+func (writer *Writer) WriteByte(b byte) *Writer {
+	writer.b.WriteByte(b)
+	return writer
+}
+
+func (writer *Writer) WriteUint16(n uint16) *Writer {
+	var buf [2]byte
+	binary.BigEndian.PutUint16(buf[:], n)
+	writer.b.Write(buf[:])
+	return writer
+}
+
+func (writer *Writer) WriteUint32(n uint32) *Writer {
+	var buf [4]byte
+	binary.BigEndian.PutUint32(buf[:], n)
+	writer.b.Write(buf[:])
+	return writer
+}
+
+func (writer *Writer) WriteBcdTime(t time.Time) *Writer {
+	writer.b.Write(utils.ToBCDTime(t))
+	return writer
+}
+
+func (writer *Writer) WritString(str string, size ...int) error {
+	reader := bytes.NewReader([]byte(str))
+	data, err := ioutil.ReadAll(
+		transform.NewReader(reader, simplifiedchinese.GB18030.NewEncoder()))
+	if err != nil {
+		return err
+	}
+	writer.Write(data, size...)
+	return nil
+}
+
+type Reader struct {
+	d []byte
+	r *bytes.Reader
+}
+
+func NewReader(data []byte) Reader {
+	return Reader{d: data, r: bytes.NewReader(data)}
+}
+
+func (reader *Reader) Len() int {
+	return reader.r.Len()
+}
+
+func (reader *Reader) Read(size ...int) ([]byte, error) {
+	num := reader.r.Len()
+	if len(size) > 0 {
+		num = size[0]
+	}
+
+	if num > reader.r.Len() {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	curr := len(reader.d) - reader.r.Len()
+	buf := reader.d[curr : curr+num]
+	reader.r.Seek(int64(num), io.SeekCurrent)
+	return buf, nil
+}
+
+func (reader *Reader) ReadByte() (byte, error) {
+	return reader.r.ReadByte()
+}
+
+func (reader *Reader) ReadUint16() (uint16, error) {
+	if reader.r.Len() < 2 {
+		return 0, io.ErrUnexpectedEOF
+	}
+
+	var buf [2]byte
+	n, err := reader.r.Read(buf[:])
+	if err != nil {
+		return 0, err
+	}
+	if n != len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return binary.BigEndian.Uint16(buf[:]), nil
+}
+
+func (reader *Reader) ReadUint32() (uint32, error) {
+	if reader.r.Len() < 4 {
+		return 0, io.ErrUnexpectedEOF
+	}
+
+	var buf [4]byte
+	n, err := reader.r.Read(buf[:])
+	if err != nil {
+		return 0, err
+	}
+	if n != len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return binary.BigEndian.Uint32(buf[:]), nil
+}
+
+//LittleEndian高位到低位读取
+func (reader *Reader) ReadUint64() (uint64, error) {
+	if reader.r.Len() < 8 {
+		return 0, io.ErrUnexpectedEOF
+	}
+
+	var buf [8]byte
+	n, err := reader.r.Read(buf[:])
+	if err != nil {
+		return 0, err
+	}
+	if n != len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return binary.BigEndian.Uint64(buf[:]), nil
+}
+
+func (reader *Reader) ReadBcdTime() (time.Time, error) {
+	if reader.r.Len() < 6 {
+		return time.Time{}, io.ErrUnexpectedEOF
+	}
+
+	var buf [6]byte
+	n, err := reader.r.Read(buf[:])
+	if err != nil {
+		return time.Time{}, err
+	}
+	if n != len(buf) {
+		return time.Time{}, io.ErrUnexpectedEOF
+	}
+	return utils.FromBCDTime(buf[:])
+}
+
+func (reader *Reader) ReadString(size ...int) (string, error) {
+	data, err := reader.Read(size...)
+	if err != nil {
+		return "", err
+	}
+
+	text, err := ioutil.ReadAll(transform.NewReader(
+		bytes.NewReader(data), simplifiedchinese.GB18030.NewDecoder()))
+	if err != nil {
+		return "", err
+	}
+	return utils.BytesToString(text), nil
+
+}

+ 29 - 0
protocol/const.go

@@ -0,0 +1,29 @@
+package protocol
+
+const (
+	// RegisterByte 注册
+	RegisterByte = byte(0x7B)
+
+	// SendByte 发送
+	SendByte = byte(0xFE)
+
+	// ReceiveByte 接收
+	ReceiveByte = byte(0x68)
+
+	// CRID IP位
+	CRID = byte(0x16)
+	// PrefixID 标志位
+	PrefixID = byte(0x7e)
+
+	//  EscapeByte 转义符
+	EscapeByte = byte(0x7d)
+
+	//  EscapeByteSufix1 0x7d < ———— > 0x7d 后紧跟一个0x01
+	EscapeByteSufix1 = byte(0x01)
+
+	//  EscapeByteSufix2 0x7e < ———— > 0x7d 后紧跟一个0x02
+	EscapeByteSufix2 = byte(0x02)
+
+	// MessageHeaderSize 消息头大小
+	MessageHeaderSize = 12
+)

+ 17 - 0
protocol/dlt645_0x0001.go

@@ -0,0 +1,17 @@
+package protocol
+
+//  注册应答
+type Dlt_0x0040 struct {
+}
+
+func (entity *Dlt_0x0040) MsgID() MsgID {
+	return Msgdlt_0x0040
+}
+
+func (entity *Dlt_0x0040) Encode() ([]byte, error) {
+	panic("emmm")
+}
+
+func (entity *Dlt_0x0040) Decode(data []byte) (int, error) {
+	return 0, nil
+}

+ 68 - 0
protocol/dlt645_0x33333433.go

@@ -0,0 +1,68 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+	"strconv"
+)
+
+type Dlt_0x33333433 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//正向有功总电能
+	WP float64
+}
+
+func (entity *Dlt_0x33333433) MsgID() MsgID {
+	return Msgdlt_0x33333433
+}
+
+func (entity *Dlt_0x33333433) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x33, 0x34, 0x33})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33333433) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 4)
+	for i := 0; i < 4; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.WP, err = stringToWP(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,正向有功总电能:%2f", entity.DeviceID, entity.WP)
+	return len(data), nil
+}
+
+func stringToWP(s string) (float64, error) {
+	a0, _ := strconv.ParseFloat(s[0:2], 64)
+	a1, _ := strconv.ParseFloat(s[2:4], 64)
+	a2, _ := strconv.ParseFloat(s[4:6], 64)
+	a3, _ := strconv.ParseFloat(s[6:8], 64)
+	res := a0*0.01 + a1 + a2*100 + a3*10000
+	return res, nil
+}

+ 68 - 0
protocol/dlt645_0x33333533.go

@@ -0,0 +1,68 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+	"strconv"
+)
+
+type Dlt_0x33333533 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//反向有功总电能
+	ReactiveEnergy float64
+}
+
+func (entity *Dlt_0x33333533) MsgID() MsgID {
+	return Msgdlt_0x33333533
+}
+
+func (entity *Dlt_0x33333533) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x33, 0x35, 0x33})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33333533) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 4)
+	for i := 0; i < 4; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.ReactiveEnergy, err = stringToReactiveEnergy(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,反向有功总电能:%2f", entity.DeviceID, entity.ReactiveEnergy)
+	return len(data), nil
+}
+
+func stringToReactiveEnergy(s string) (float64, error) {
+	a0, _ := strconv.ParseFloat(s[0:2], 64)
+	a1, _ := strconv.ParseFloat(s[2:4], 64)
+	a2, _ := strconv.ParseFloat(s[4:6], 64)
+	a3, _ := strconv.ParseFloat(s[6:8], 64)
+	res := a0*0.01 + a1 + a2*100 + a3*10000
+	return res, nil
+}

+ 68 - 0
protocol/dlt645_0x33333635.go

@@ -0,0 +1,68 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+	"strconv"
+)
+
+type Dlt_0x33333635 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前总有功功率
+	ActivePower float64
+}
+
+func (entity *Dlt_0x33333635) MsgID() MsgID {
+	return Msgdlt_0x33333635
+}
+
+func (entity *Dlt_0x33333635) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x33, 0x36, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33333635) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 3)
+	for i := 0; i < 3; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.ActivePower, err = stringToActivePower(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+
+	glog.Debugf("数据读取成功:表号:%s,当前总有功功率:%2f", entity.DeviceID, entity.ActivePower)
+	return len(data), nil
+}
+
+func stringToActivePower(s string) (float64, error) {
+	a0, _ := strconv.ParseFloat(s[0:2], 64)
+	a1, _ := strconv.ParseFloat(s[2:4], 64)
+	a2, _ := strconv.ParseFloat(s[4:6], 64)
+	res := a0*0.0001 + a1*0.01 + a2
+	return res, nil
+}

+ 67 - 0
protocol/dlt645_0x33333735.go

@@ -0,0 +1,67 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+	"strconv"
+)
+
+type Dlt_0x33333735 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前总无功功率
+	ReactivePower float64
+}
+
+func (entity *Dlt_0x33333735) MsgID() MsgID {
+	return Msgdlt_0x33333735
+}
+
+func (entity *Dlt_0x33333735) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x33, 0x37, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33333735) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 3)
+	for i := 0; i < 3; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.ReactivePower, err = stringToReactivePower(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,当前总无功功率:%2f", entity.DeviceID, entity.ReactivePower)
+	return len(data), nil
+}
+
// stringToReactivePower converts the 6-hex-digit power string produced by
// Decode (least-significant BCD pair first) into a power value.
// Pair weights: 0.0001, 0.01 and 1; corrupt BCD now returns an error.
func stringToReactivePower(s string) (float64, error) {
	a0, err := strconv.ParseFloat(s[0:2], 64)
	if err != nil {
		return 0, err
	}
	a1, err := strconv.ParseFloat(s[2:4], 64)
	if err != nil {
		return 0, err
	}
	a2, err := strconv.ParseFloat(s[4:6], 64)
	if err != nil {
		return 0, err
	}
	return a0*0.0001 + a1*0.01 + a2, nil
}

+ 67 - 0
protocol/dlt645_0x33333835.go

@@ -0,0 +1,67 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+	"strconv"
+)
+
+type Dlt_0x33333835 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前视在功率
+	ApparentPower float64
+}
+
+func (entity *Dlt_0x33333835) MsgID() MsgID {
+	return Msgdlt_0x33333835
+}
+
+func (entity *Dlt_0x33333835) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x33, 0x38, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33333835) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 3)
+	for i := 0; i < 3; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.ApparentPower, err = stringToApparentPower(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,当前视在功率:%2f", entity.DeviceID, entity.ApparentPower)
+	return len(data), nil
+}
+
// stringToApparentPower converts the 6-hex-digit power string produced by
// Decode (least-significant BCD pair first) into a power value.
// Pair weights: 0.0001, 0.01 and 1; corrupt BCD now returns an error.
func stringToApparentPower(s string) (float64, error) {
	a0, err := strconv.ParseFloat(s[0:2], 64)
	if err != nil {
		return 0, err
	}
	a1, err := strconv.ParseFloat(s[2:4], 64)
	if err != nil {
		return 0, err
	}
	a2, err := strconv.ParseFloat(s[4:6], 64)
	if err != nil {
		return 0, err
	}
	return a0*0.0001 + a1*0.01 + a2, nil
}

+ 66 - 0
protocol/dlt645_0x33333935.go

@@ -0,0 +1,66 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+	"strconv"
+)
+
+type Dlt_0x33333935 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前总功率因数
+	PowerFactor float64
+}
+
+func (entity *Dlt_0x33333935) MsgID() MsgID {
+	return Msgdlt_0x33333935
+}
+
+func (entity *Dlt_0x33333935) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x33, 0x39, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33333935) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 2)
+	for i := 0; i < 2; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.PowerFactor, err = stringToPowerFactor(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,当前总功率因数:%2f", entity.DeviceID, entity.PowerFactor)
+	return len(data), nil
+}
+
// stringToPowerFactor converts the 4-hex-digit string produced by Decode
// (least-significant BCD pair first) into a power factor.
// Pair weights: 0.001 and 0.1; corrupt BCD now returns an error.
func stringToPowerFactor(s string) (float64, error) {
	a0, err := strconv.ParseFloat(s[0:2], 64)
	if err != nil {
		return 0, err
	}
	a1, err := strconv.ParseFloat(s[2:4], 64)
	if err != nil {
		return 0, err
	}
	return a0*0.001 + a1*0.1, nil
}

+ 67 - 0
protocol/dlt645_0x33343435.go

@@ -0,0 +1,67 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+	"strconv"
+)
+
+type Dlt_0x33343435 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前A相电压
+	VoltageA float64
+}
+
+func (entity *Dlt_0x33343435) MsgID() MsgID {
+	return Msgdlt_0x33343435
+}
+
+func (entity *Dlt_0x33343435) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x34, 0x34, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33343435) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 2)
+	for i := 0; i < 2; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.VoltageA, err = stringToVoltage(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+
+	glog.Debugf("数据读取成功:表号:%s,当前A相电压:%2f", entity.DeviceID, entity.VoltageA)
+	return len(data), nil
+}
+
// stringToVoltage converts the 4-hex-digit voltage string produced by
// Decode (least-significant BCD pair first) into volts.
// Pair weights: 0.1 and 10; corrupt BCD now returns an error.
func stringToVoltage(s string) (float64, error) {
	a0, err := strconv.ParseFloat(s[0:2], 64)
	if err != nil {
		return 0, err
	}
	a1, err := strconv.ParseFloat(s[2:4], 64)
	if err != nil {
		return 0, err
	}
	return a0*0.1 + a1*10, nil
}

+ 67 - 0
protocol/dlt645_0x33343535.go

@@ -0,0 +1,67 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+	"strconv"
+)
+
+type Dlt_0x33343535 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前A相电流
+	CurrentA float64
+}
+
+func (entity *Dlt_0x33343535) MsgID() MsgID {
+	return Msgdlt_0x33343535
+}
+
+func (entity *Dlt_0x33343535) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x34, 0x35, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33343535) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 3)
+	for i := 0; i < 3; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.CurrentA, err = stringToCurrent(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,当前A相电流:%2f", entity.DeviceID, entity.CurrentA)
+	return len(data), nil
+}
+
// stringToCurrent converts the 6-hex-digit current string produced by
// Decode (least-significant BCD pair first) into amperes.
// Pair weights: 0.001, 0.1 and 10; corrupt BCD now returns an error.
func stringToCurrent(s string) (float64, error) {
	a0, err := strconv.ParseFloat(s[0:2], 64)
	if err != nil {
		return 0, err
	}
	a1, err := strconv.ParseFloat(s[2:4], 64)
	if err != nil {
		return 0, err
	}
	a2, err := strconv.ParseFloat(s[4:6], 64)
	if err != nil {
		return 0, err
	}
	return a0*0.001 + a1*0.1 + a2*10, nil
}

+ 58 - 0
protocol/dlt645_0x33353435.go

@@ -0,0 +1,58 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+)
+
+type Dlt_0x33353435 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前B相电压
+	VoltageB float64
+}
+
+func (entity *Dlt_0x33353435) MsgID() MsgID {
+	return Msgdlt_0x33353435
+}
+
+func (entity *Dlt_0x33353435) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x35, 0x34, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33353435) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 2)
+	for i := 0; i < 2; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.VoltageB, err = stringToVoltage(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,当前B相电压:%2f", entity.DeviceID, entity.VoltageB)
+	return len(data), nil
+}

+ 59 - 0
protocol/dlt645_0x33353535.go

@@ -0,0 +1,59 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+)
+
+type Dlt_0x33353535 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前B相电流
+	CurrentB float64
+}
+
+func (entity *Dlt_0x33353535) MsgID() MsgID {
+	return Msgdlt_0x33353535
+}
+
+func (entity *Dlt_0x33353535) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x35, 0x35, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33353535) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 3)
+	for i := 0; i < 3; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.CurrentB, err = stringToCurrent(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,当前B相电流:%2f", entity.DeviceID, entity.CurrentB)
+
+	return len(data), nil
+}

+ 58 - 0
protocol/dlt645_0x33363435.go

@@ -0,0 +1,58 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+)
+
+type Dlt_0x33363435 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前C相电压
+	VoltageC float64
+}
+
+func (entity *Dlt_0x33363435) MsgID() MsgID {
+	return Msgdlt_0x33363435
+}
+
+func (entity *Dlt_0x33363435) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x36, 0x34, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33363435) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 2)
+	for i := 0; i < 2; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.VoltageC, err = stringToVoltage(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,当前C相电压:%2f", entity.DeviceID, entity.VoltageC)
+	return len(data), nil
+}

+ 58 - 0
protocol/dlt645_0x33363535.go

@@ -0,0 +1,58 @@
+package protocol
+
+import (
+	"encoding/hex"
+	"github.com/gogf/gf/os/glog"
+)
+
+type Dlt_0x33363535 struct {
+	//接收表号
+	DeviceName []byte
+	//表号
+	DeviceID string
+	//当前C相电流
+	CurrentC float64
+}
+
+func (entity *Dlt_0x33363535) MsgID() MsgID {
+	return Msgdlt_0x33363535
+}
+
+func (entity *Dlt_0x33363535) Encode() ([]byte, error) {
+	writer := NewWriter()
+
+	// 接收符号
+	writer.Write([]byte{0xFE, 0xFE, 0x68})
+	writer.Write(entity.DeviceName)
+	writer.Write([]byte{0x68, 0x11, 0x04, 0x33, 0x36, 0x35, 0x35})
+
+	//cs效验位
+	var one byte
+	for _, v := range writer.Bytes()[2:] {
+		one += v
+	}
+	writer.WriteByte(one)
+	// 功能码
+	writer.WriteByte(0x16)
+	return writer.Bytes(), nil
+}
+
+func (entity *Dlt_0x33363535) Decode(data []byte) (int, error) {
+	bytea := data[1:7]
+	for i, j := 0, len(bytea)-1; i < j; i, j = i+1, j-1 {
+		bytea[i], bytea[j] = bytea[j], bytea[i]
+	}
+	entity.DeviceID = hex.EncodeToString(bytea)
+	//正向总电能每个字节-33,1-4,分别为,小数位,个位,百位,万位
+	byteb := make([]byte, 3)
+	for i := 0; i < 3; i++ {
+		byteb[i] = data[14+i] - 0x33
+	}
+	var err error
+	entity.CurrentC, err = stringToCurrent(hex.EncodeToString(byteb))
+	if err != nil {
+		return 0, err
+	}
+	glog.Debugf("数据读取成功:表号:%s,当前C相电流:%2f", entity.DeviceID, entity.CurrentC)
+	return len(data), nil
+}

+ 9 - 0
protocol/entity.go

@@ -0,0 +1,9 @@
+package protocol
+
// Entity is a protocol message body: it knows its message identifier and
// how to serialize itself to and from raw DL/T 645 frame bytes.
type Entity interface {
	// MsgID returns the data identifier this entity handles.
	MsgID() MsgID
	// Encode builds the outgoing request frame for this entity.
	Encode() ([]byte, error)
	// Decode parses a reply frame; it returns the number of bytes consumed.
	Decode([]byte) (int, error)
}
+

+ 26 - 0
protocol/errors.go

@@ -0,0 +1,26 @@
+package protocol
+
+import (
+	"errors"
+)
+
// Sentinel errors returned by the protocol codec.
var (
	// ErrBodyTooLong: message body exceeds the 10-bit length field.
	ErrBodyTooLong = errors.New("too long message body")
	// ErrInvalidBody: message body is malformed or too short.
	ErrInvalidBody = errors.New("invalid message body")
	// ErrInvalidHeader: message header is malformed or too short.
	ErrInvalidHeader = errors.New("invalid message header")
	// ErrInvalidMessage: frame does not match any known format.
	ErrInvalidMessage = errors.New("invalid message format")
	// ErrInvalidCheckSum: frame checksum does not match its contents.
	ErrInvalidCheckSum = errors.New("invalid message check sum")
	// ErrMethodNotImplemented: method has not been implemented yet.
	ErrMethodNotImplemented = errors.New("method not implemented")
	// ErrMessageNotRegistered: no entity registered for this message type.
	ErrMessageNotRegistered = errors.New("message not registered")
	// ErrEntityDecode: entity failed to decode the frame payload.
	ErrEntityDecode = errors.New("entity decode error")
	// ErrInvalidExtraLength: extra-information length field is wrong.
	ErrInvalidExtraLength = errors.New("invalid extra length")
)

+ 113 - 0
protocol/header.go

@@ -0,0 +1,113 @@
+package protocol
+
+import (
+	"dlt645-server/errors"
+	"dlt645-server/utils"
+	"strconv"
+)
+
// Packet describes sub-packet (fragmentation) information for a message.
type Packet struct {
	Sum uint16
	Seq uint16
}

// Header is the message header. The JSON tags are used when decoding the
// device's JSON registration packet; the remaining fields are filled from
// the binary wire format.
type Header struct {
	MsgID       MsgID
	Property    Property
	Ver         string `json:"ver"`
	Csp         uint64 `json:"csp"`
	Imei        string `json:"imei"`
	ICID        string `json:"iccid"`
	IccID       uint64
	MsgSerialNo uint16
	Packet      *Packet
}

// Encode serializes the header in wire order: message ID, body-property
// word, BCD terminal number, serial number and optional packet info.
func (header *Header) Encode() ([]byte, error) {
	writer := NewWriter()

	// Message ID.
	writer.WriteUint16(uint16(header.MsgID))

	// Body property word; the packet flag is forced on when Packet is set.
	if header.Packet != nil {
		header.Property.enablePacket()
	}
	writer.WriteUint16(uint16(header.Property))

	// Terminal number as 6-byte BCD.
	writer.Write(utils.StringToBCD(strconv.FormatUint(header.IccID, 10), 6))

	// Message serial number.
	writer.WriteUint16(header.MsgSerialNo)

	// Sub-packet information, only when the packet flag is set.
	if header.Property.IsEnablePacket() {
		writer.WriteUint16(header.Packet.Sum)
		writer.WriteUint16(header.Packet.Seq)
	}
	return writer.Bytes(), nil
}

// Decode parses the wire-format header from data, mirroring Encode.
func (header *Header) Decode(data []byte) error {
	if len(data) < MessageHeaderSize {
		return errors.ErrInvalidHeader
	}
	reader := NewReader(data)

	// Message ID.
	msgID, err := reader.ReadUint16()
	if err != nil {
		return errors.ErrInvalidHeader
	}

	// Body property word.
	property, err := reader.ReadUint16()
	if err != nil {
		return errors.ErrInvalidHeader
	}

	// Terminal number (6-byte BCD).
	temp, err := reader.Read(6)
	if err != nil {
		return errors.ErrInvalidHeader
	}
	iccID, err := strconv.ParseUint(utils.BcdToString(temp), 10, 64)
	if err != nil {
		return err
	}

	// Message serial number.
	serialNo, err := reader.ReadUint16()
	if err != nil {
		return errors.ErrInvalidHeader
	}

	// Sub-packet information, present only when the packet flag is set.
	if Property(property).IsEnablePacket() {
		var packet Packet

		// Total number of packets.
		packet.Sum, err = reader.ReadUint16()
		if err != nil {
			return err
		}

		// Sequence number of this packet.
		packet.Seq, err = reader.ReadUint16()
		if err != nil {
			return err
		}
		header.Packet = &packet
	}

	header.MsgID = MsgID(msgID)
	header.IccID = iccID
	header.Property = Property(property)
	header.MsgSerialNo = serialNo
	return nil
}

+ 146 - 0
protocol/message.go

@@ -0,0 +1,146 @@
+package protocol
+
+import (
+	"bytes"
+	"dlt645-server/errors"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	log "github.com/sirupsen/logrus"
+	"reflect"
+	"strconv"
+)
+
// Message is a complete protocol frame: header plus decoded body entity.
type Message struct {
	Header Header
	Body   Entity
}

// DHeader carries just the message identifier of a frame.
type DHeader struct {
	MsgID MsgID
}
+
+// 协议编码
+func (message *Message) Encode() ([]byte, error) {
+	// 编码消息体
+	var err error
+	var body []byte
+	if message.Body != nil && !reflect.ValueOf(message.Body).IsNil() {
+		body, err = message.Body.Encode()
+		if err != nil {
+			return nil, err
+		}
+
+	}
+
+	// 二进制转义
+	buffer := bytes.NewBuffer(nil)
+
+	message.write(buffer, body)
+	return buffer.Bytes(), nil
+}
+
+// 协议解码
+func (message *Message) Decode(data []byte) error {
+	// 检验标志位
+	length := len(data)
+	if length == 0 {
+		return errors.ErrInvalidMessage
+	}
+	if data[0] != ReceiveByte && data[0] != RegisterByte {
+		return errors.ErrInvalidMessage
+	}
+	if  !message.checkSum(data){
+		return errors.ErrInvalidCheckSum
+	}
+	var header Header
+	var err error
+	//处理注册包
+	if data[0] == RegisterByte {
+
+		header := Header{}
+		 _ = json.Unmarshal(data, &header)
+
+		//fmt.Println(header.Imei)
+		//fmt.Println()
+
+		header.IccID, err = strconv.ParseUint(header.Imei, 10, 64)
+		header.MsgID = 0x0040 //消息ID
+		if err == nil {
+			message.Body = nil
+		} else {
+			log.WithFields(log.Fields{
+				"id":     fmt.Sprintf("0x%x", header.MsgID),
+				"reason": err,
+			}).Warn("failed to decode message")
+		}
+		message.Header = header
+		return nil
+	} else if len(data) < 8 {
+		log.WithFields(log.Fields{
+			"data":   fmt.Sprintf("V:%v", data),
+			"reason": "error datalen",
+		}).Warn("failed to decode message")
+	} else {
+		header.MsgID = MsgID(binary.BigEndian.Uint32(data[10:15]))      //消息ID
+		entity, _, err := message.decode(uint32(header.MsgID), data[:]) //解析实体对象 entity     buffer : 为消息标识
+		if err == nil {
+			message.Body = entity
+		} else {
+			log.WithFields(log.Fields{
+				"id":     fmt.Sprintf("0x%x", header.MsgID),
+				"reason": err,
+			}).Warn("failed to decode message")
+		}
+	}
+
+	message.Header = header
+	return nil
+}
+
+//--->
+func (message *Message) decode(dataType uint32, data []byte) (Entity, int, error) {
+	creator, ok := entityMapper[dataType]
+	if !ok {
+		return nil, 0, errors.ErrTypeNotRegistered
+	}
+
+	entity := creator()
+
+		count, err := entity.Decode(data) //解析data数据
+		if err != nil {
+			return nil, 0, err
+		}
+		return entity, count, nil
+}
+
+// 写入二进制数据
+func (message *Message) write(buffer *bytes.Buffer, data []byte) *Message {
+	for _, b := range data {
+		if b == PrefixID {
+			buffer.WriteByte(EscapeByte)
+			buffer.WriteByte(EscapeByteSufix2)
+		} else if b == EscapeByte {
+			buffer.WriteByte(EscapeByte)
+			buffer.WriteByte(EscapeByteSufix1)
+		} else {
+			buffer.WriteByte(b)
+		}
+	}
+	return message
+}
+
+// 和校验
+func (message *Message) checkSum(data []byte) bool {
+	var checkSum byte
+	for _, b := range data[:len(data)-2] {
+		checkSum +=b
+	}
+	checkSum = checkSum & 0xFF
+
+	if checkSum == data[len(data)-2] {
+		return true
+	}
+	return false
+}

+ 46 - 0
protocol/property.go

@@ -0,0 +1,46 @@
+package protocol
+
+// 消息体属性
+type Property uint16
+
+// 启用分包
+func (property *Property) enablePacket() {
+	val := uint16(*property)
+	*property = Property(val | (1 << 13))
+}
+
+// 启用加密
+func (property *Property) enableEncrypt() {
+	val := uint16(*property)
+	*property = Property(val | (1 << 10))
+}
+
+// 是否分包
+func (property Property) IsEnablePacket() bool {
+	val := uint16(property)
+	return val&(1<<13) > 0
+}
+
+// 是否加密
+func (property Property) IsEnableEncrypt() bool {
+	val := uint16(property)
+	return val&(1<<10) > 0
+}
+
+// 获取消息体长度
+func (property *Property) GetBodySize() uint16 {
+	// 前十位表示消息体长度
+	// 0x3ff == ‭001111111111‬
+	val := uint16(*property)
+	return ((val << 6) >> 6) & 0x3ff
+}
+
+// 设置消息体长度
+func (property *Property) SetBodySize(size uint16) error {
+	if size > 0x3ff {
+		return ErrBodyTooLong
+	}
+	val := uint16(*property)
+	*property = Property(((val >> 10) << 10) | size)
+	return nil
+}

+ 82 - 0
protocol/register.go

@@ -0,0 +1,82 @@
+package protocol
+
// MsgID enumerates the supported message/data identifiers.
type MsgID uint32

const (
	// Registration / heartbeat.
	Msgdlt_0x0040 MsgID = 0x0040
	// Total forward active energy.
	Msgdlt_0x33333433 MsgID = 0x33333433
	// Total reverse active energy.
	Msgdlt_0x33333533 MsgID = 0x33333533
	// Phase-A voltage.
	Msgdlt_0x33343435 MsgID = 0x33343435
	// Phase-A current.
	Msgdlt_0x33343535 MsgID = 0x33343535
	// Phase-B voltage.
	Msgdlt_0x33353435 MsgID = 0x33353435
	// Phase-B current.
	Msgdlt_0x33353535 MsgID = 0x33353535
	// Phase-C voltage.
	Msgdlt_0x33363435 MsgID = 0x33363435
	// Phase-C current.
	Msgdlt_0x33363535 MsgID = 0x33363535
	// Current total active power.
	Msgdlt_0x33333635 MsgID = 0x33333635
	// Current total reactive power.
	Msgdlt_0x33333735 MsgID = 0x33333735
	// Total power factor.
	Msgdlt_0x33333935 MsgID = 0x33333935
	// Current apparent power.
	Msgdlt_0x33333835 MsgID = 0x33333835
)

// entityMapper maps each message identifier to a constructor for the
// entity that decodes it.
var entityMapper = map[uint32]func() Entity{
	uint32(Msgdlt_0x0040): func() Entity {
		return new(Dlt_0x0040)
	},
	uint32(Msgdlt_0x33333433): func() Entity {
		return new(Dlt_0x33333433)
	},
	uint32(Msgdlt_0x33333533): func() Entity {
		return new(Dlt_0x33333533)
	},
	uint32(Msgdlt_0x33343435): func() Entity {
		return new(Dlt_0x33343435)
	},
	uint32(Msgdlt_0x33343535): func() Entity {
		return new(Dlt_0x33343535)
	},

	uint32(Msgdlt_0x33353435): func() Entity {
		return new(Dlt_0x33353435)
	},
	uint32(Msgdlt_0x33353535): func() Entity {
		return new(Dlt_0x33353535)
	},
	uint32(Msgdlt_0x33363435): func() Entity {
		return new(Dlt_0x33363435)
	},
	uint32(Msgdlt_0x33363535): func() Entity {
		return new(Dlt_0x33363535)
	},
	uint32(Msgdlt_0x33333635): func() Entity {
		return new(Dlt_0x33333635)
	},
	uint32(Msgdlt_0x33333735): func() Entity {
		return new(Dlt_0x33333735)
	},
	uint32(Msgdlt_0x33333935): func() Entity {
		return new(Dlt_0x33333935)
	},
	uint32(Msgdlt_0x33333835): func() Entity {
		return new(Dlt_0x33333835)
	},
}

// Register adds (or replaces) an entity constructor for a message type.
// NOTE(review): entityMapper is not synchronized — confirm Register is
// only called during startup.
func Register(typ uint32, creator func() Entity) {
	entityMapper[typ] = creator
}

+ 88 - 0
server/client.go

@@ -0,0 +1,88 @@
+package server
+
+import (
+	"github.com/gogf/gf/net/gtcp"
+	"github.com/gogf/gf/os/glog"
+	"io"
+	"net"
+	"strings"
+	"syscall"
+	"time"
+)
+
+type Client struct {
+	Id           string
+	srv          *Server
+	conn         *gtcp.Conn
+	sendChan     chan []byte
+	closeChan    chan struct{}
+	closeHandler func(id string, c *Client)
+	nc            *gtcp.Conn
+	lastHeartBeat time.Time
+	done          chan struct{}
+	gatewayId     uint16
+
+}
+
+
+func (c *Client) ReadLoop() {
+	defer c.srv.grWG.Done()
+	for {
+		buf, err := c.nc.RecvTil([]byte{0x16})
+		if err != nil {
+			c.readError(err)
+			return
+		}
+		if len(buf) > 0 {
+			err = c.srv.message.Decode(buf)
+			if err != nil {
+				glog.Errorf("解析报文失败:%s", err.Error())
+			}
+		}
+	}
+}
+
+
+func (c *Client) SetId(id string) {
+	c.Id = id
+}
+
+
+func (c *Client) readError(err error) {
+	defer c.closeConnection()
+	if err == io.EOF || isErrConnReset(err) {
+		return
+	}
+	glog.Errorf("读取数据发生错误:%s", err.Error())
+}
+
+func (c *Client) closeConnection() {
+	c.nc.Close()
+	c.nc = nil
+	close(c.done)
+	if c.closeHandler != nil {
+		c.closeHandler(c.Id, c)
+	}
+}
+
+// isErrConnReset read: connection reset by peer
+func isErrConnReset(err error) bool {
+	if ne, ok := err.(*net.OpError); ok {
+		return strings.Contains(ne.Err.Error(), syscall.ECONNRESET.Error())
+	}
+	return false
+}
+
+func (c *Client) send(buf []byte) error {
+	if c.nc == nil {
+		return nil
+	}
+	err := c.nc.Send(buf)
+	if err != nil {
+		glog.Error(err)
+		c.closeConnection()
+		return err
+	}
+	return nil
+}
+

+ 90 - 0
server/server.go

@@ -0,0 +1,90 @@
+package server
+
+import (
+	"context"
+	"dlt645-server/protocol"
+	"fmt"
+	"github.com/gogf/gf/container/gmap"
+	"github.com/gogf/gf/net/gtcp"
+	"github.com/gogf/gf/os/glog"
+	gatewayV2 "sparrow-sdk/v2"
+	"sync"
+)
+
+type Server struct {
+	closeChan chan struct{}
+	server    *gtcp.Server
+	ctx       context.Context
+	addr      string
+	port      int
+	clients   *gmap.HashMap
+	grMu      sync.Mutex
+	grWG      sync.WaitGroup
+	grRunning bool
+	message   protocol.Message
+	gateWay   *gatewayV2.Gateway
+}
+
+func NewServer(ctx context.Context, addr string, port int, gw *gatewayV2.Gateway) *Server {
+	return &Server{
+		closeChan: make(chan struct{}),
+		ctx:       ctx,
+		addr:      addr,
+		port:      port,
+		gateWay:   gw,
+	}
+}
+
+func (s *Server) Start() error {
+	glog.Printf("服务端启动[%s:%d]", s.addr, s.port)
+	server := gtcp.NewServer(fmt.Sprintf("%s:%d", s.addr, s.port), s.handleConnect)
+	s.server = server
+	s.grMu.Lock()
+	s.grRunning = true
+	s.grMu.Unlock()
+	return s.server.Run()
+}
+
+func (s *Server) Stop() {
+	s.server.Close()
+}
+
+func (s *Server) handleConnect(conn *gtcp.Conn) {
+	s.startGoRoutine(func() {
+		s.createClient(conn)
+		s.grWG.Done()
+	})
+}
+
+func (s *Server) startGoRoutine(f func()) {
+	s.grMu.Lock()
+	if s.grRunning {
+		s.grWG.Add(1)
+		go f()
+	}
+	s.grMu.Unlock()
+}
+
+func (s *Server) createClient(conn *gtcp.Conn) *Client {
+	c := &Client{
+		srv:  s,
+		done: make(chan struct{}),
+		nc:   conn,
+		closeHandler: func(id string, c *Client) {
+			glog.Debugf("客户端断开:%s", id)
+		},
+	}
+	glog.Info("client connect created")
+	s.startGoRoutine(func() {
+		c.ReadLoop()
+	})
+	return c
+}
+
+func (s *Server) removeClient(gatewayId uint16) {
+	s.clients.Remove(gatewayId)
+}
+
+func (s *Server) ReportStatus(subId string, data interface{}) error {
+	return s.gateWay.ReportStatus(subId, "status", data)
+}

+ 19 - 0
test/dlt645_0x33333433_test.go

@@ -0,0 +1,19 @@
+package main
+//
+//import (
+//	"dlt645-server/protocol"
+//	"github.com/stretchr/testify/assert"
+//	"reflect"
+//	"testing"
+//)
+//
+//func TestDlt645_0x33333433(t *testing.T) {
+//	data := []byte{0x68, 0x76, 0x02, 0x02, 0x47, 0x02, 0x02, 0x68, 0x91, 0x08, 0x33, 0x33, 0x34, 0x33, 0x35, 0x33, 0x33, 0x33, 0xC9, 0x16}
+//	var message protocol.Dlt_0x33333433
+//	_, err := message.Decode(data)
+//	if err != nil {
+//		assert.Error(t, err, "decode error")
+//	}
+//	assert.True(t, reflect.DeepEqual("020247020276", message.DeviceID))
+//	assert.True(t, reflect.DeepEqual(0.02, message.WP))
+//}

+ 17 - 0
test/main.go

@@ -0,0 +1,17 @@
+package main
+
+import (
+	"github.com/gogf/gf/net/gtcp"
+	"time"
+)
+
+func main() {
+	conn, err := gtcp.NewConn("127.0.0.1:8999")
+	if err != nil {
+		panic(err)
+	}
+	for {
+		conn.Write([]byte{0x68, 0x76, 0x02, 0x02, 0x47, 0x02, 0x02, 0x68, 0x91, 0x08, 0x33, 0x33, 0x34, 0x33, 0x35, 0x33, 0x33, 0x33, 0xC9, 0x16})
+		time.Sleep(5 * time.Second)
+	}
+}

+ 105 - 0
utils/util.go

@@ -0,0 +1,105 @@
+package utils
+
+import (
+	"bytes"
+	"dlt645-server/errors"
+	"time"
+)
+
// BytesSplit slices data into consecutive chunks of at most limit bytes;
// the final chunk holds any remainder. Chunks alias data (no copy).
// A non-positive limit returns the whole slice as a single chunk instead
// of panicking with a division by zero.
func BytesSplit(data []byte, limit int) [][]byte {
	if limit <= 0 {
		return [][]byte{data}
	}
	chunks := make([][]byte, 0, len(data)/limit+1)
	for len(data) >= limit {
		chunks = append(chunks, data[:limit])
		data = data[limit:]
	}
	if len(data) > 0 {
		chunks = append(chunks, data)
	}
	return chunks
}
+
// BytesToString converts a NUL-terminated byte buffer to a string,
// dropping everything from the first zero byte onward.
func BytesToString(data []byte) string {
	if n := bytes.IndexByte(data, 0); n >= 0 {
		return string(data[:n])
	}
	return string(data)
}
+
// StringToBCD packs a decimal string into BCD, two digits per byte.
// An odd-length string is first left-padded with a "0" digit. When size
// is given, the result is right-aligned in a buffer of that many bytes
// (or truncated from the right if it does not fit).
func StringToBCD(s string, size ...int) []byte {
	if len(s)%2 == 1 {
		s = "0" + s
	}

	bcd := make([]byte, 0, len(s)/2)
	for i := 0; i < len(s); i += 2 {
		bcd = append(bcd, (s[i]-'0')<<4|(s[i+1]-'0'))
	}

	if len(size) == 0 {
		return bcd
	}

	out := make([]byte, size[0])
	if size[0] < len(bcd) {
		copy(out, bcd)
	} else {
		copy(out[len(out)-len(bcd):], bcd)
	}
	return out
}
+
// BcdToString renders BCD bytes as a decimal string. Leading zero bytes
// are always skipped; leading zero digits are additionally skipped unless
// ignorePadding[0] is true. All-zero input yields "".
func BcdToString(data []byte, ignorePadding ...bool) string {
	start := 0
	for start < len(data) && data[start] == 0 {
		start++
	}
	if start == len(data) {
		return ""
	}

	buf := make([]byte, 0, (len(data)-start)*2)
	for _, b := range data[start:] {
		buf = append(buf, b>>4+'0', b&0x0f+'0')
	}

	if len(ignorePadding) == 0 || !ignorePadding[0] {
		for idx := range buf {
			if buf[idx] != '0' {
				return string(buf[idx:])
			}
		}
	}
	return string(buf)
}
+
+// 转为BCD时间
+func ToBCDTime(t time.Time) []byte {
+	t = time.Unix(t.Unix(), 0)
+	s := t.Format("20060102150405")[2:]
+	return StringToBCD(s, 6)
+}
+
+// 转为time.Time
+func FromBCDTime(bcd []byte) (time.Time, error) {
+	if len(bcd) != 6 {
+		return time.Time{}, errors.ErrInvalidBCDTime
+	}
+	t, err := time.ParseInLocation(
+		"20060102150405", "20"+BcdToString(bcd), time.Local)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return t, nil
+}

+ 2 - 0
vendor/github.com/BurntSushi/toml/.gitignore

@@ -0,0 +1,2 @@
+toml.test
+/toml-test

+ 1 - 0
vendor/github.com/BurntSushi/toml/COMPATIBLE

@@ -0,0 +1 @@
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

+ 21 - 0
vendor/github.com/BurntSushi/toml/COPYING

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 TOML authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 211 - 0
vendor/github.com/BurntSushi/toml/README.md

@@ -0,0 +1,211 @@
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages.
+
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
+
+Documentation: https://godocs.io/github.com/BurntSushi/toml
+
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
+
+This library requires Go 1.13 or newer; install it with:
+
+    % go get github.com/BurntSushi/toml@latest
+
+It also comes with a TOML validator CLI tool:
+
+    % go install github.com/BurntSushi/toml/cmd/tomlv@latest
+    % tomlv some-toml-file.toml
+
+### Testing
+This package passes all tests in [toml-test] for both the decoder and the
+encoder.
+
+[toml-test]: https://github.com/BurntSushi/toml-test
+
+### Examples
+This package works similar to how the Go standard library handles XML and JSON.
+Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys and
+values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+_, err := toml.Decode(tomlData, &conf)
+// handle error
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+    ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that, like most other decoders, **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
+
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+	Name     string
+	Duration duration
+}
+type songs struct {
+	Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+	log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+	time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+	var err error
+	d.Duration, err = time.ParseDuration(string(text))
+	return err
+}
+```
+
+To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
+a similar way.
+
+### More complex usage
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+  "alpha",
+  "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+	Title   string
+	Owner   ownerInfo
+	DB      database `toml:"database"`
+	Servers map[string]server
+	Clients clients
+}
+
+type ownerInfo struct {
+	Name string
+	Org  string `toml:"organization"`
+	Bio  string
+	DOB  time.Time
+}
+
+type database struct {
+	Server  string
+	Ports   []int
+	ConnMax int `toml:"connection_max"`
+	Enabled bool
+}
+
+type server struct {
+	IP string
+	DC string
+}
+
+type clients struct {
+	Data  [][]interface{}
+	Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_example/example.{go,toml}`.

+ 561 - 0
vendor/github.com/BurntSushi/toml/decode.go

@@ -0,0 +1,561 @@
+package toml
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"reflect"
+	"strings"
+)
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+	UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
+func Unmarshal(data []byte, v interface{}) error {
+	_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
+	return err
+}
+
+// Decode the TOML data in to the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+	return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+	fp, err := os.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+//
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use the PrimitiveDecode() function to "manually" decode these values.
+//
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
+//
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection. They can be useful when you don't know the exact type
+// of TOML data until runtime.
+type Primitive struct {
+	undecoded interface{}
+	context   Key
+}
+
+// The significand precision for float32 and float64 is 24 and 53 bits; this is
+// the range a natural number can be stored in a float without loss of data.
+const (
+	maxSafeFloat32Int = 16777215                // 2^24-1
+	maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
+)
+
+// Decoder decodes TOML data.
+//
+// TOML tables correspond to Go structs or maps (dealer's choice – they can be
+// used interchangeably).
+//
+// TOML table arrays correspond to either a slice of structs or a slice of maps.
+//
+// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
+// in the local timezone.
+//
+// All other TOML types (float, string, int, bool and array) correspond to the
+// obvious Go types.
+//
+// An exception to the above rules is if a type implements the TextUnmarshaler
+// interface, in which case any primitive TOML value (floats, strings, integers,
+// booleans, datetimes) will be converted to a []byte and given to the value's
+// UnmarshalText method. See the Unmarshaler example for a demonstration with
+// time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go struct.
+// The special `toml` struct tag can be used to map TOML keys to struct fields
+// that don't match the key name exactly (see the example). A case insensitive
+// match to struct names will be tried if an exact match can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
+//
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+	r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+var (
+	unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+	unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+// Decode TOML data in to the pointer `v`.
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		s := "%q"
+		if reflect.TypeOf(v) == nil {
+			s = "%v"
+		}
+
+		return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v))
+	}
+	if rv.IsNil() {
+		return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v))
+	}
+
+	// Check if this is a supported type: struct, map, interface{}, or something
+	// that implements UnmarshalTOML or UnmarshalText.
+	rv = indirect(rv)
+	rt := rv.Type()
+	if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
+		!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
+		!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
+		return MetaData{}, e("cannot decode to type %s", rt)
+	}
+
+	// TODO: parser should read from io.Reader? Or at the very least, make it
+	// read from []byte rather than string
+	data, err := ioutil.ReadAll(dec.r)
+	if err != nil {
+		return MetaData{}, err
+	}
+
+	p, err := parse(string(data))
+	if err != nil {
+		return MetaData{}, err
+	}
+
+	md := MetaData{
+		mapping: p.mapping,
+		types:   p.types,
+		keys:    p.ordered,
+		decoded: make(map[string]struct{}, len(p.ordered)),
+		context: nil,
+	}
+	return md, md.unify(p.mapping, rv)
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md.context = primValue.context
+	defer func() { md.context = nil }()
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+	// Special case. Look for a `Primitive` value.
+	// TODO: #76 would make this superfluous after implemented.
+	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+		// Save the undecoded data and the key context into the primitive
+		// value.
+		context := make(Key, len(md.context))
+		copy(context, md.context)
+		rv.Set(reflect.ValueOf(Primitive{
+			undecoded: data,
+			context:   context,
+		}))
+		return nil
+	}
+
+	// Special case. Unmarshaler Interface support.
+	if rv.CanAddr() {
+		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+			return v.UnmarshalTOML(data)
+		}
+	}
+
+	// Special case. Look for a value satisfying the TextUnmarshaler interface.
+	if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+		return md.unifyText(data, v)
+	}
+	// TODO:
+	// The behavior here is incorrect whenever a Go type satisfies the
+	// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+	// array. In particular, the unmarshaler should only be applied to primitive
+	// TOML values. But at this point, it will be applied to all kinds of values
+	// and produce an incorrect error whenever those values are hashes or arrays
+	// (including arrays of tables).
+
+	k := rv.Kind()
+
+	// laziness
+	if k >= reflect.Int && k <= reflect.Uint64 {
+		return md.unifyInt(data, rv)
+	}
+	switch k {
+	case reflect.Ptr:
+		elem := reflect.New(rv.Type().Elem())
+		err := md.unify(data, reflect.Indirect(elem))
+		if err != nil {
+			return err
+		}
+		rv.Set(elem)
+		return nil
+	case reflect.Struct:
+		return md.unifyStruct(data, rv)
+	case reflect.Map:
+		return md.unifyMap(data, rv)
+	case reflect.Array:
+		return md.unifyArray(data, rv)
+	case reflect.Slice:
+		return md.unifySlice(data, rv)
+	case reflect.String:
+		return md.unifyString(data, rv)
+	case reflect.Bool:
+		return md.unifyBool(data, rv)
+	case reflect.Interface:
+		// we only support empty interfaces.
+		if rv.NumMethod() > 0 {
+			return e("unsupported type %s", rv.Type())
+		}
+		return md.unifyAnything(data, rv)
+	case reflect.Float32, reflect.Float64:
+		return md.unifyFloat64(data, rv)
+	}
+	return e("unsupported type %s", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		if mapping == nil {
+			return nil
+		}
+		return e("type mismatch for %s: expected table but found %T",
+			rv.Type().String(), mapping)
+	}
+
+	for key, datum := range tmap {
+		var f *field
+		fields := cachedTypeFields(rv.Type())
+		for i := range fields {
+			ff := &fields[i]
+			if ff.name == key {
+				f = ff
+				break
+			}
+			if f == nil && strings.EqualFold(ff.name, key) {
+				f = ff
+			}
+		}
+		if f != nil {
+			subv := rv
+			for _, i := range f.index {
+				subv = indirect(subv.Field(i))
+			}
+
+			if isUnifiable(subv) {
+				md.decoded[md.context.add(key).String()] = struct{}{}
+				md.context = append(md.context, key)
+				err := md.unify(datum, subv)
+				if err != nil {
+					return err
+				}
+				md.context = md.context[0 : len(md.context)-1]
+			} else if f.name != "" {
+				return e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
+			}
+		}
+	}
+	return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+	if k := rv.Type().Key().Kind(); k != reflect.String {
+		return fmt.Errorf(
+			"toml: cannot decode to a map with non-string key type (%s in %q)",
+			k, rv.Type())
+	}
+
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		if tmap == nil {
+			return nil
+		}
+		return md.badtype("map", mapping)
+	}
+	if rv.IsNil() {
+		rv.Set(reflect.MakeMap(rv.Type()))
+	}
+	for k, v := range tmap {
+		md.decoded[md.context.add(k).String()] = struct{}{}
+		md.context = append(md.context, k)
+
+		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+		if err := md.unify(v, rvval); err != nil {
+			return err
+		}
+		md.context = md.context[0 : len(md.context)-1]
+
+		rvkey := indirect(reflect.New(rv.Type().Key()))
+		rvkey.SetString(k)
+		rv.SetMapIndex(rvkey, rvval)
+	}
+	return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		if !datav.IsValid() {
+			return nil
+		}
+		return md.badtype("slice", data)
+	}
+	if l := datav.Len(); l != rv.Len() {
+		return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
+	}
+	return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		if !datav.IsValid() {
+			return nil
+		}
+		return md.badtype("slice", data)
+	}
+	n := datav.Len()
+	if rv.IsNil() || rv.Cap() < n {
+		rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+	}
+	rv.SetLen(n)
+	return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+	l := data.Len()
+	for i := 0; i < l; i++ {
+		err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+	if s, ok := data.(string); ok {
+		rv.SetString(s)
+		return nil
+	}
+	return md.badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(float64); ok {
+		switch rv.Kind() {
+		case reflect.Float32:
+			if num < -math.MaxFloat32 || num > math.MaxFloat32 {
+				return e("value %f is out of range for float32", num)
+			}
+			fallthrough
+		case reflect.Float64:
+			rv.SetFloat(num)
+		default:
+			panic("bug")
+		}
+		return nil
+	}
+
+	if num, ok := data.(int64); ok {
+		switch rv.Kind() {
+		case reflect.Float32:
+			if num < -maxSafeFloat32Int || num > maxSafeFloat32Int {
+				return e("value %d is out of range for float32", num)
+			}
+			fallthrough
+		case reflect.Float64:
+			if num < -maxSafeFloat64Int || num > maxSafeFloat64Int {
+				return e("value %d is out of range for float64", num)
+			}
+			rv.SetFloat(float64(num))
+		default:
+			panic("bug")
+		}
+		return nil
+	}
+
+	return md.badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(int64); ok {
+		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+			switch rv.Kind() {
+			case reflect.Int, reflect.Int64:
+				// No bounds checking necessary.
+			case reflect.Int8:
+				if num < math.MinInt8 || num > math.MaxInt8 {
+					return e("value %d is out of range for int8", num)
+				}
+			case reflect.Int16:
+				if num < math.MinInt16 || num > math.MaxInt16 {
+					return e("value %d is out of range for int16", num)
+				}
+			case reflect.Int32:
+				if num < math.MinInt32 || num > math.MaxInt32 {
+					return e("value %d is out of range for int32", num)
+				}
+			}
+			rv.SetInt(num)
+		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+			unum := uint64(num)
+			switch rv.Kind() {
+			case reflect.Uint, reflect.Uint64:
+				// No bounds checking necessary.
+			case reflect.Uint8:
+				if num < 0 || unum > math.MaxUint8 {
+					return e("value %d is out of range for uint8", num)
+				}
+			case reflect.Uint16:
+				if num < 0 || unum > math.MaxUint16 {
+					return e("value %d is out of range for uint16", num)
+				}
+			case reflect.Uint32:
+				if num < 0 || unum > math.MaxUint32 {
+					return e("value %d is out of range for uint32", num)
+				}
+			}
+			rv.SetUint(unum)
+		} else {
+			panic("unreachable")
+		}
+		return nil
+	}
+	return md.badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+	if b, ok := data.(bool); ok {
+		rv.SetBool(b)
+		return nil
+	}
+	return md.badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+	rv.Set(reflect.ValueOf(data))
+	return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
+	var s string
+	switch sdata := data.(type) {
+	case Marshaler:
+		text, err := sdata.MarshalTOML()
+		if err != nil {
+			return err
+		}
+		s = string(text)
+	case TextMarshaler:
+		text, err := sdata.MarshalText()
+		if err != nil {
+			return err
+		}
+		s = string(text)
+	case fmt.Stringer:
+		s = sdata.String()
+	case string:
+		s = sdata
+	case bool:
+		s = fmt.Sprintf("%v", sdata)
+	case int64:
+		s = fmt.Sprintf("%d", sdata)
+	case float64:
+		s = fmt.Sprintf("%f", sdata)
+	default:
+		return md.badtype("primitive (string-like)", data)
+	}
+	if err := v.UnmarshalText([]byte(s)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (md *MetaData) badtype(dst string, data interface{}) error {
+	return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst)
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+	return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+//
+// Pointers are followed until the value is not a pointer. New values are
+// allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of interest
+// to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr {
+		if v.CanSet() {
+			pv := v.Addr()
+			if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
+				return pv
+			}
+		}
+		return v
+	}
+	if v.IsNil() {
+		v.Set(reflect.New(v.Type().Elem()))
+	}
+	return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+	if rv.CanSet() {
+		return true
+	}
+	if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+		return true
+	}
+	return false
+}
+
+func e(format string, args ...interface{}) error {
+	return fmt.Errorf("toml: "+format, args...)
+}

+ 19 - 0
vendor/github.com/BurntSushi/toml/decode_go116.go

@@ -0,0 +1,19 @@
+//go:build go1.16
+// +build go1.16
+
+package toml
+
+import (
+	"io/fs"
+)
+
+// DecodeFS is just like Decode, except it will automatically read the contents
+// of the file at `path` from a fs.FS instance.
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}

+ 21 - 0
vendor/github.com/BurntSushi/toml/deprecated.go

@@ -0,0 +1,21 @@
+package toml
+
+import (
+	"encoding"
+	"io"
+)
+
+// Deprecated: use encoding.TextMarshaler
+type TextMarshaler encoding.TextMarshaler
+
+// Deprecated: use encoding.TextUnmarshaler
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// Deprecated: use MetaData.PrimitiveDecode.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]struct{})}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }

+ 13 - 0
vendor/github.com/BurntSushi/toml/doc.go

@@ -0,0 +1,13 @@
+/*
+Package toml implements decoding and encoding of TOML files.
+
+This package supports TOML v1.0.0, as listed on https://toml.io
+
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
+
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify if a TOML document is valid. It can also be used to
+print the type of each key.
+*/
+package toml

+ 698 - 0
vendor/github.com/BurntSushi/toml/encode.go

@@ -0,0 +1,698 @@
+package toml
+
+import (
+	"bufio"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
+)
+
+var dblQuotedReplacer = strings.NewReplacer(
+	"\"", "\\\"",
+	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Encoder encodes a Go to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions.
+//
+// The toml.Marshaler and encoding.TextMarshaler interfaces are supported to
+// encode the value as custom TOML.
+//
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this includes maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+	// String to use for a single indentation level; default is two spaces.
+	Indent string
+
+	w          *bufio.Writer
+	hasWritten bool // written any output to w yet?
+}
+
+// NewEncoder creates a new Encoder.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
+//
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case: time needs to be in ISO8601 format.
+	//
+	// Special case: if we can marshal the type to text, then we use that. This
+	// prevents the encoder from handling these types as generic structs (or
+	// whatever the underlying type of a TextMarshaler is).
+	switch t := rv.Interface().(type) {
+	case time.Time, encoding.TextMarshaler, Marshaler:
+		enc.writeKeyValue(key, rv, false)
+		return
+	// TODO: #76 would make this superfluous after implemented.
+	case Primitive:
+		enc.encode(key, reflect.ValueOf(t.undecoded))
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.writeKeyValue(key, rv, false)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.writeKeyValue(key, rv, false)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element.
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+		format := time.RFC3339Nano
+		switch v.Location() {
+		case internal.LocalDatetime:
+			format = "2006-01-02T15:04:05.999999999"
+		case internal.LocalDate:
+			format = "2006-01-02"
+		case internal.LocalTime:
+			format = "15:04:05.999999999"
+		}
+		switch v.Location() {
+		default:
+			enc.wf(v.Format(format))
+		case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+			enc.wf(v.In(time.UTC).Format(format))
+		}
+		return
+	case Marshaler:
+		s, err := v.MarshalTOML()
+		if err != nil {
+			encPanic(err)
+		}
+		enc.w.Write(s)
+		return
+	case encoding.TextMarshaler:
+		s, err := v.MarshalText()
+		if err != nil {
+			encPanic(err)
+		}
+		enc.writeQuoted(string(s))
+		return
+	}
+
+	switch rv.Kind() {
+	case reflect.String:
+		enc.writeQuoted(rv.String())
+	case reflect.Bool:
+		enc.wf(strconv.FormatBool(rv.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		enc.wf(strconv.FormatInt(rv.Int(), 10))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		enc.wf(strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Float32:
+		f := rv.Float()
+		if math.IsNaN(f) {
+			enc.wf("nan")
+		} else if math.IsInf(f, 0) {
+			enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+		} else {
+			enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
+		}
+	case reflect.Float64:
+		f := rv.Float()
+		if math.IsNaN(f) {
+			enc.wf("nan")
+		} else if math.IsInf(f, 0) {
+			enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+		} else {
+			enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
+		}
+	case reflect.Array, reflect.Slice:
+		enc.eArrayOrSliceElement(rv)
+	case reflect.Struct:
+		enc.eStruct(nil, rv, true)
+	case reflect.Map:
+		enc.eMap(nil, rv, true)
+	case reflect.Interface:
+		enc.eElement(rv.Elem())
+	default:
+		encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
+	}
+}
+
+// By the TOML spec, all floats must have a decimal with at least one number on
+// either side.
+func floatAddDecimal(fstr string) string {
+	if !strings.Contains(fstr, ".") {
+		return fstr + ".0"
+	}
+	return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+	enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+	length := rv.Len()
+	enc.wf("[")
+	for i := 0; i < length; i++ {
+		elem := rv.Index(i)
+		enc.eElement(elem)
+		if i != length-1 {
+			enc.wf(", ")
+		}
+	}
+	enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	for i := 0; i < rv.Len(); i++ {
+		trv := rv.Index(i)
+		if isNil(trv) {
+			continue
+		}
+		enc.newline()
+		enc.wf("%s[[%s]]", enc.indentStr(key), key)
+		enc.newline()
+		enc.eMapOrStruct(key, trv, false)
+	}
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+	if len(key) == 1 {
+		// Output an extra newline between top-level tables.
+		// (The newline isn't written if nothing else has been written though.)
+		enc.newline()
+	}
+	if len(key) > 0 {
+		enc.wf("%s[%s]", enc.indentStr(key), key)
+		enc.newline()
+	}
+	enc.eMapOrStruct(key, rv, false)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
+	switch rv := eindirect(rv); rv.Kind() {
+	case reflect.Map:
+		enc.eMap(key, rv, inline)
+	case reflect.Struct:
+		enc.eStruct(key, rv, inline)
+	default:
+		// Should never happen?
+		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+	}
+}
+
+// eMap writes a map as a TOML table (or inline table when inline is true).
+// Map keys must be strings; keys are sorted for deterministic output, with
+// scalar ("direct") keys written before sub-table keys.
+func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
+	rt := rv.Type()
+	if rt.Key().Kind() != reflect.String {
+		encPanic(errNonString)
+	}
+
+	// Sort keys so that we have deterministic output. And write keys directly
+	// underneath this key first, before writing sub-structs or sub-maps.
+	var mapKeysDirect, mapKeysSub []string
+	for _, mapKey := range rv.MapKeys() {
+		k := mapKey.String()
+		if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+			mapKeysSub = append(mapKeysSub, k)
+		} else {
+			mapKeysDirect = append(mapKeysDirect, k)
+		}
+	}
+
+	var writeMapKeys = func(mapKeys []string, trailC bool) {
+		sort.Strings(mapKeys)
+		for i, mapKey := range mapKeys {
+			val := rv.MapIndex(reflect.ValueOf(mapKey))
+			if isNil(val) { // Nil values are skipped entirely.
+				continue
+			}
+
+			if inline {
+				enc.writeKeyValue(Key{mapKey}, val, true)
+				// trailC forces a trailing comma after the last direct key
+				// when sub-table keys will follow in the same inline table.
+				if trailC || i != len(mapKeys)-1 {
+					enc.wf(", ")
+				}
+			} else {
+				enc.encode(key.add(mapKey), val)
+			}
+		}
+	}
+
+	if inline {
+		enc.wf("{")
+	}
+	writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+	writeMapKeys(mapKeysSub, false)
+	if inline {
+		enc.wf("}")
+	}
+}
+
+// is32Bit reports whether the native int size is 32 bits; see the copy
+// workaround in eStruct below.
+const is32Bit = (32 << (^uint(0) >> 63)) == 32
+
+// eStruct writes a struct as a TOML table (or inline table when inline is
+// true), honoring `toml:"..."` tags (name, omitempty, omitzero, "-") and
+// flattening anonymous embedded structs like encoding/json does.
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
+	// Write keys for fields directly under this key first, because if we write
+	// a field that creates a new table then all keys under it will be in that
+	// table (not the one we're writing here).
+	//
+	// Fields is a [][]int: for fieldsDirect this always has one entry (the
+	// struct index). For fieldsSub it contains two entries: the parent field
+	// index from tv, and the field indexes for the fields of the sub.
+	var (
+		rt                      = rv.Type()
+		fieldsDirect, fieldsSub [][]int
+		addFields               func(rt reflect.Type, rv reflect.Value, start []int)
+	)
+	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			if f.PkgPath != "" && !f.Anonymous { // Skip unexported fields.
+				continue
+			}
+			opts := getOptions(f.Tag)
+			if opts.skip {
+				continue
+			}
+
+			frv := rv.Field(i)
+
+			// Treat anonymous struct fields with tag names as though they are
+			// not anonymous, like encoding/json does.
+			//
+			// Non-struct anonymous fields use the normal encoding logic.
+			if f.Anonymous {
+				t := f.Type
+				switch t.Kind() {
+				case reflect.Struct:
+					if getOptions(f.Tag).name == "" {
+						addFields(t, frv, append(start, f.Index...))
+						continue
+					}
+				case reflect.Ptr:
+					if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
+						if !frv.IsNil() {
+							addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
+						}
+						continue
+					}
+				}
+			}
+
+			if typeIsTable(tomlTypeOfGo(frv)) {
+				fieldsSub = append(fieldsSub, append(start, f.Index...))
+			} else {
+				// Copy so it works correct on 32bit archs; not clear why this
+				// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
+				// This also works fine on 64bit, but 32bit archs are somewhat
+				// rare and this is a wee bit faster.
+				if is32Bit {
+					copyStart := make([]int, len(start))
+					copy(copyStart, start)
+					fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
+				} else {
+					fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+				}
+			}
+		}
+	}
+	addFields(rt, rv, nil)
+
+	writeFields := func(fields [][]int) {
+		for _, fieldIndex := range fields {
+			fieldType := rt.FieldByIndex(fieldIndex)
+			fieldVal := rv.FieldByIndex(fieldIndex)
+
+			if isNil(fieldVal) { // Don't write anything for nil fields.
+				continue
+			}
+
+			opts := getOptions(fieldType.Tag)
+			if opts.skip {
+				continue
+			}
+			keyName := fieldType.Name
+			if opts.name != "" {
+				keyName = opts.name
+			}
+			if opts.omitempty && isEmpty(fieldVal) {
+				continue
+			}
+			if opts.omitzero && isZero(fieldVal) {
+				continue
+			}
+
+			if inline {
+				enc.writeKeyValue(Key{keyName}, fieldVal, true)
+				// NOTE(review): this compares the FIRST struct-field index,
+				// not the loop position, against len(fields)-1; for embedded
+				// or skipped fields this can emit a stray or missing comma in
+				// inline tables — confirm against upstream BurntSushi/toml.
+				if fieldIndex[0] != len(fields)-1 {
+					enc.wf(", ")
+				}
+			} else {
+				enc.encode(key.add(keyName), fieldVal)
+			}
+		}
+	}
+
+	if inline {
+		enc.wf("{")
+	}
+	writeFields(fieldsDirect)
+	writeFields(fieldsSub)
+	if inline {
+		enc.wf("}")
+	}
+}
+
+// tomlTypeOfGo returns the TOML type name of the Go value's type.
+//
+// It is used to determine whether the types of array elements are mixed (which
+// is forbidden). If the Go value is nil, then it is illegal for it to be an
+// array element, and valueIsNil is returned as true.
+//
+// The type may be `nil`, which means no concrete TOML type could be found.
+// Pointers and interfaces are resolved recursively; structs map to datetime
+// (time.Time), string (marshalers), or hash, in that order of precedence.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		}
+		return tomlArray
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		if _, ok := rv.Interface().(time.Time); ok {
+			return tomlDatetime
+		}
+		if isMarshaler(rv) {
+			return tomlString
+		}
+		return tomlHash
+	default:
+		if isMarshaler(rv) {
+			return tomlString
+		}
+
+		encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+		panic("unreachable")
+	}
+}
+
+// isMarshaler reports whether rv implements encoding.TextMarshaler or
+// Marshaler, either directly or via a pointer receiver when rv is addressable.
+func isMarshaler(rv reflect.Value) bool {
+	switch rv.Interface().(type) {
+	case encoding.TextMarshaler:
+		return true
+	case Marshaler:
+		return true
+	}
+
+	// Someone used a pointer receiver: we can make it work for pointer values.
+	if rv.CanAddr() {
+		if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok {
+			return true
+		}
+		if _, ok := rv.Addr().Interface().(Marshaler); ok {
+			return true
+		}
+	}
+	return false
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+
+	// Don't allow nil elements anywhere in the array.
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		if tomlTypeOfGo(rv.Index(i)) == nil {
+			encPanic(errArrayNilElement)
+		}
+	}
+
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+	return firstType
+}
+
+// tagOptions holds the parsed contents of a `toml:"..."` struct tag.
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+// getOptions parses the "toml" key of a struct tag. A bare "-" means skip;
+// otherwise the first comma-separated part is the key name and the remaining
+// parts toggle omitempty/omitzero.
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
+
+// isZero reports whether a numeric value is zero; used for "omitzero".
+// Non-numeric kinds always report false.
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+// isEmpty reports whether a container/string has length zero or a bool is
+// false; used for "omitempty". Other kinds always report false.
+func isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Bool:
+		return !rv.Bool()
+	}
+	return false
+}
+
+// newline writes "\n", but only once something else has been written — this
+// avoids a leading blank line at the top of the output.
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// Write a key/value pair:
+//
+//   key = <any value>
+//
+// This is also used for "k = v" in inline tables; so something like this will
+// be written in three calls:
+//
+//     ┌────────────────────┐
+//     │      ┌───┐  ┌─────┐│
+//     v      v   v  v     vv
+//     key = {k = v, k2 = v2}
+//
+// An empty key panics with errNoKey; no trailing newline is written when
+// inline is true.
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	if !inline {
+		enc.newline()
+	}
+}
+
+// wf writes a formatted string to the underlying writer, panicking (via
+// encPanic, recovered by the Encoder entry point) on write errors.
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	_, err := fmt.Fprintf(enc.w, format, v...)
+	if err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+// indentStr returns the indentation prefix for a key at its nesting depth.
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+// encPanic aborts encoding with a tomlEncodeError wrapping err.
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+// eindirect dereferences pointers and interfaces until a concrete value.
+func eindirect(v reflect.Value) reflect.Value {
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return eindirect(v.Elem())
+	default:
+		return v
+	}
+}
+
+// isNil reports whether rv is a nil-able kind holding nil.
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}

+ 229 - 0
vendor/github.com/BurntSushi/toml/error.go

@@ -0,0 +1,229 @@
+package toml
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ParseError is returned when there is an error parsing the TOML syntax.
+//
+// For example invalid syntax, duplicate keys, etc.
+//
+// In addition to the error message itself, you can also print detailed location
+// information with context by using ErrorWithPosition():
+//
+//     toml: error: Key 'fruit' was already created and cannot be used as an array.
+//
+//     At line 4, column 2-7:
+//
+//           2 | fruit = []
+//           3 |
+//           4 | [[fruit]] # Not allowed
+//                 ^^^^^
+//
+// Furthermore, the ErrorWithUsage() can be used to print the above with some
+// more detailed usage guidance:
+//
+//    toml: error: newlines not allowed within inline tables
+//
+//    At line 1, column 18:
+//
+//          1 | x = [{ key = 42 #
+//                               ^
+//
+//    Error help:
+//
+//      Inline tables must always be on a single line:
+//
+//          table = {key = 42, second = 43}
+//
+//      It is invalid to split them over multiple lines like so:
+//
+//          # INVALID
+//          table = {
+//              key    = 42,
+//              second = 43
+//          }
+//
+//      Use regular for this:
+//
+//          [table]
+//          key    = 42
+//          second = 43
+type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error
+	LastKey  string   // Last parsed key, may be blank.
+	Line     int      // Line the error occurred. Deprecated: use Position.
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+// Error implements the error interface, producing a single-line message with
+// the line number and (when known) the last parsed key.
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition() returns the error with detailed location context: the
+// offending line plus up to two preceding lines, with a caret marker.
+//
+// See the documentation on ParseError.
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case.
+		return pe.Error()
+	}
+
+	var (
+		lines = strings.Split(pe.input, "\n")
+		col   = pe.column(lines)
+		b     = new(strings.Builder)
+	)
+
+	msg := pe.Message
+	if msg == "" {
+		msg = pe.err.Error()
+	}
+
+	// TODO: don't show control characters as literals? This may not show up
+	// well everywhere.
+
+	if pe.Position.Len == 1 {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
+			msg, pe.Position.Line, col+1)
+	} else {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
+			msg, pe.Position.Line, col, col+pe.Position.Len)
+	}
+	if pe.Position.Line > 2 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+	}
+	if pe.Position.Line > 1 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+	}
+	fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
+	fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+	return b.String()
+}
+
+// ErrorWithUsage() returns the error with detailed location context and usage
+// guidance, when the underlying error provides a non-empty Usage() string.
+//
+// See the documentation on ParseError.
+func (pe ParseError) ErrorWithUsage() string {
+	m := pe.ErrorWithPosition()
+	if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
+		return m + "Error help:\n\n    " +
+			strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n    ") +
+			"\n"
+	}
+	return m
+}
+
+// column converts Position.Start (a byte offset into the whole input) into a
+// 0-based column on the line it falls in, by walking the split lines and
+// accounting for the newline removed by strings.Split.
+func (pe ParseError) column(lines []string) int {
+	var pos, col int
+	for i := range lines {
+		ll := len(lines[i]) + 1 // +1 for the removed newline
+		if pos+ll >= pe.Position.Start {
+			col = pe.Position.Start - pos
+			if col < 0 { // Should never happen, but just in case.
+				col = 0
+			}
+			break
+		}
+		pos += ll
+	}
+
+	return col
+}
+
+// Typed lexer errors; each implements error plus a Usage() method that
+// ParseError.ErrorWithUsage() surfaces as extra guidance (may be empty).
+type (
+	errLexControl       struct{ r rune }
+	errLexEscape        struct{ r rune }
+	errLexUTF8          struct{ b byte }
+	errLexInvalidNum    struct{ v string }
+	errLexInvalidDate   struct{ v string }
+	errLexInlineTableNL struct{}
+	errLexStringNL      struct{}
+)
+
+func (e errLexControl) Error() string {
+	return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
+}
+func (e errLexControl) Usage() string { return "" }
+
+func (e errLexEscape) Error() string        { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
+func (e errLexEscape) Usage() string        { return usageEscape }
+func (e errLexUTF8) Error() string          { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
+func (e errLexUTF8) Usage() string          { return "" }
+func (e errLexInvalidNum) Error() string    { return fmt.Sprintf("invalid number: %q", e.v) }
+func (e errLexInvalidNum) Usage() string    { return "" }
+func (e errLexInvalidDate) Error() string   { return fmt.Sprintf("invalid date: %q", e.v) }
+func (e errLexInvalidDate) Usage() string   { return "" }
+func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
+func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
+func (e errLexStringNL) Error() string      { return "strings cannot contain newlines" }
+func (e errLexStringNL) Usage() string      { return usageStringNewline }
+
+// usageEscape is the help text attached to errLexEscape.
+const usageEscape = `
+A '\' inside a "-delimited string is interpreted as an escape character.
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- write two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+// usageInlineNewline is the help text attached to errLexInlineTableNL.
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+    table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+    # INVALID
+    table = {
+        key    = 42,
+        second = 43
+    }
+
+Use regular for this:
+
+    [table]
+    key    = 42
+    second = 43
+`
+
+// usageStringNewline is the help text attached to errLexStringNL.
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+    # INVALID
+    string = "Hello,
+    world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+    string = """Hello,
+    world!"""
+`

+ 36 - 0
vendor/github.com/BurntSushi/toml/internal/tz.go

@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is because they're referred from in e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var (
+	localOffset   = func() int { _, o := time.Now().Zone(); return o }()
+	LocalDatetime = time.FixedZone("datetime-local", localOffset)
+	LocalDate     = time.FixedZone("date-local", localOffset)
+	LocalTime     = time.FixedZone("time-local", localOffset)
+)

+ 1224 - 0
vendor/github.com/BurntSushi/toml/lex.go

@@ -0,0 +1,1224 @@
+package toml
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// itemType identifies the kind of token the lexer emits to the parser.
+type itemType int
+
+const (
+	itemError itemType = iota
+	itemNIL            // used in the parser to indicate no type
+	itemEOF
+	itemText
+	itemString
+	itemRawString
+	itemMultilineString
+	itemRawMultilineString
+	itemBool
+	itemInteger
+	itemFloat
+	itemDatetime
+	itemArray // the start of an array
+	itemArrayEnd
+	itemTableStart
+	itemTableEnd
+	itemArrayTableStart
+	itemArrayTableEnd
+	itemKeyStart
+	itemKeyEnd
+	itemCommentStart
+	itemInlineTableStart
+	itemInlineTableEnd
+)
+
+// eof is the sentinel rune returned by next() at end of input.
+const eof = 0
+
+// stateFn is one step of the lexer state machine; it returns the next state.
+type stateFn func(lx *lexer) stateFn
+
+// String renders a Position (declared in error.go) for debugging.
+func (p Position) String() string {
+	return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len)
+}
+
+// lexer holds the state of the TOML tokenizer: the input, the current token
+// window [start, pos), and a channel of emitted items consumed by nextItem.
+type lexer struct {
+	input string
+	start int
+	pos   int
+	line  int
+	state stateFn
+	items chan item
+
+	// Allow for backing up up to 4 runes. This is necessary because TOML
+	// contains 3-rune tokens (""" and ''').
+	prevWidths [4]int
+	nprev      int  // how many of prevWidths are in use
+	atEOF      bool // If we emit an eof, we can still back up, but it is not OK to call next again.
+
+	// A stack of state functions used to maintain context.
+	//
+	// The idea is to reuse parts of the state machine in various places. For
+	// example, values can appear at the top level or within arbitrarily nested
+	// arrays. The last state on the stack is used after a value has been lexed.
+	// Similarly for comments.
+	stack []stateFn
+}
+
+// item is one lexed token: its type, text, position, and (for itemError) the
+// underlying error.
+type item struct {
+	typ itemType
+	val string
+	err error
+	pos Position
+}
+
+// nextItem returns the next token, driving the state machine forward until
+// one is available on the items channel (the lexer runs lazily, not in a
+// separate goroutine).
+func (lx *lexer) nextItem() item {
+	for {
+		select {
+		case item := <-lx.items:
+			return item
+		default:
+			lx.state = lx.state(lx)
+			//fmt.Printf("     STATE %-24s  current: %-10q	stack: %s\n", lx.state, lx.current(), lx.stack)
+		}
+	}
+}
+
+// lex creates a lexer over input, starting in the lexTop state.
+func lex(input string) *lexer {
+	lx := &lexer{
+		input: input,
+		state: lexTop,
+		items: make(chan item, 10),
+		stack: make([]stateFn, 0, 10),
+		line:  1,
+	}
+	return lx
+}
+
+// push saves a state on the return stack; pop resumes it later.
+func (lx *lexer) push(state stateFn) {
+	lx.stack = append(lx.stack, state)
+}
+
+// pop returns the most recently pushed state; popping an empty stack is a
+// lexer bug and produces an error item.
+func (lx *lexer) pop() stateFn {
+	if len(lx.stack) == 0 {
+		return lx.errorf("BUG in lexer: no states to pop")
+	}
+	last := lx.stack[len(lx.stack)-1]
+	lx.stack = lx.stack[0 : len(lx.stack)-1]
+	return last
+}
+
+// current returns the pending (not yet emitted) token text.
+func (lx *lexer) current() string {
+	return lx.input[lx.start:lx.pos]
+}
+
+// getPos reports the position of the pending token; Len is clamped to at
+// least 1 so error carets always have something to point at.
+func (lx lexer) getPos() Position {
+	p := Position{
+		Line:  lx.line,
+		Start: lx.start,
+		Len:   lx.pos - lx.start,
+	}
+	if p.Len <= 0 {
+		p.Len = 1
+	}
+	return p
+}
+
+// emit sends the pending token with the given type and advances start.
+func (lx *lexer) emit(typ itemType) {
+	// Needed for multiline strings ending with an incomplete UTF-8 sequence.
+	if lx.start > lx.pos {
+		lx.error(errLexUTF8{lx.input[lx.pos]})
+		return
+	}
+	lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
+	lx.start = lx.pos
+}
+
+// emitTrim is emit with surrounding whitespace trimmed from the value.
+func (lx *lexer) emitTrim(typ itemType) {
+	lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())}
+	lx.start = lx.pos
+}
+
+// next consumes and returns the next rune, tracking line numbers and the
+// widths needed by backup(). It returns eof at end of input and reports
+// invalid UTF-8, control characters, and bare '\r' (not followed by '\n') as
+// lexer errors.
+func (lx *lexer) next() (r rune) {
+	if lx.atEOF {
+		panic("BUG in lexer: next called after EOF")
+	}
+	if lx.pos >= len(lx.input) {
+		lx.atEOF = true
+		return eof
+	}
+
+	if lx.input[lx.pos] == '\n' {
+		lx.line++
+	}
+	lx.prevWidths[3] = lx.prevWidths[2]
+	lx.prevWidths[2] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[0]
+	if lx.nprev < 4 {
+		lx.nprev++
+	}
+
+	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+	if r == utf8.RuneError {
+		lx.error(errLexUTF8{lx.input[lx.pos]})
+		return utf8.RuneError
+	}
+
+	// Note: don't use peek() here, as this calls next().
+	if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) {
+		lx.errorControlChar(r)
+		return utf8.RuneError
+	}
+
+	lx.prevWidths[0] = w
+	lx.pos += w
+	return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+	lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called 4 times between calls to next.
+// Backing up over an EOF just clears the atEOF flag.
+func (lx *lexer) backup() {
+	if lx.atEOF {
+		lx.atEOF = false
+		return
+	}
+	if lx.nprev < 1 {
+		panic("BUG in lexer: backed up too far")
+	}
+	w := lx.prevWidths[0]
+	lx.prevWidths[0] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[2]
+	lx.prevWidths[2] = lx.prevWidths[3]
+	lx.nprev--
+
+	lx.pos -= w
+	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+		lx.line--
+	}
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+	if lx.next() == valid {
+		return true
+	}
+	lx.backup()
+	return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+	r := lx.next()
+	lx.backup()
+	return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+	for {
+		r := lx.next()
+		if pred(r) {
+			continue
+		}
+		lx.backup()
+		lx.ignore()
+		return
+	}
+}
+
+// error stops all lexing by emitting an error and returning `nil`.
+//
+// Note that any value that is a character is escaped if it's a special
+// character (newlines, tabs, etc.).
+func (lx *lexer) error(err error) stateFn {
+	if lx.atEOF {
+		return lx.errorPrevLine(err)
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
+	return nil
+}
+
+// errorPrevLine is like error(), but sets the position to the last column of
+// the previous line.
+//
+// This is so that unexpected EOF or NL errors don't show on a new blank line.
+func (lx *lexer) errorPrevLine(err error) stateFn {
+	pos := lx.getPos()
+	pos.Line--
+	pos.Len = 1
+	pos.Start = lx.pos - 1
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorPos is like error(), but allows explicitly setting the position.
+func (lx *lexer) errorPos(start, length int, err error) stateFn {
+	pos := lx.getPos()
+	pos.Start = start
+	pos.Len = length
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorf is like error, and creates a new error.
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+	if lx.atEOF {
+		pos := lx.getPos()
+		pos.Line--
+		pos.Len = 1
+		pos.Start = lx.pos - 1
+		lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
+		return nil
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)}
+	return nil
+}
+
+// errorControlChar reports an illegal control character at the rune just read.
+func (lx *lexer) errorControlChar(cc rune) stateFn {
+	return lx.errorPos(lx.pos-1, 1, errLexControl{cc})
+}
+
+// lexTop consumes elements at the top level of TOML data: whitespace,
+// comments, table headers ('['), or — by default — a key/value pair.
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+	switch r {
+	case '#':
+		lx.push(lexTop)
+		return lexCommentStart
+	case '[':
+		return lexTableStart
+	case eof:
+		if lx.pos > lx.start {
+			return lx.errorf("unexpected EOF")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '#':
+		// a comment will read to a newline for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		lx.emit(itemEOF)
+		return nil
+	}
+	return lx.errorf(
+		"expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
+		r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == '[' {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+// lexTableEnd emits the table-end marker after ']' and returns to lexTopEnd.
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+// lexArrayTableEnd requires the second ']' of a '[[name]]' header.
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != ']' {
+		return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+// lexTableNameStart lexes one dotted part of a table name, dispatching to
+// quoted or bare name lexing; empty parts are an error.
+func lexTableNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == ']' || r == eof:
+		return lx.errorf("unexpected end of table name (table names cannot be empty)")
+	case r == '.':
+		return lx.errorf("unexpected table separator (table names cannot be empty)")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexTableNameEnd)
+		return lexBareName
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace; '.' continues with the next part, ']' finishes.
+func lexTableNameEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == '.':
+		lx.ignore()
+		return lexTableNameStart
+	case r == ']':
+		return lx.pop()
+	default:
+		return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
+	}
+}
+
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == '"':
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == '\'':
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
+	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		fallthrough
+	default: // Bare key
+		lx.emit(itemKeyStart)
+		return lexKeyNameStart
+	}
+}
+
+// lexKeyNameStart lexes one dotted part of a key name (quoted or bare).
+func lexKeyNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '='")
+	case r == '.':
+		return lx.errorf("unexpected '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexKeyEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexKeyEnd)
+		return lexBareName
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator); '.' continues with another key part, '=' starts the value.
+func lexKeyEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected key separator '='")
+	case r == '.':
+		lx.ignore()
+		return lexKeyNameStart
+	case r == '=':
+		lx.emit(itemKeyEnd)
+		return lexSkip(lx, lexValue)
+	default:
+		return lx.errorf("expected '.' or '=', but got %q instead", r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the next is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	}
+	switch r {
+	case '[':
+		lx.ignore()
+		lx.emit(itemArray)
+		return lexArrayValue
+	case '{':
+		lx.ignore()
+		lx.emit(itemInlineTableStart)
+		return lexInlineTableValue
+	case '"':
+		if lx.accept('"') {
+			if lx.accept('"') {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the '"'
+		return lexString
+	case '\'':
+		if lx.accept('\'') {
+			if lx.accept('\'') {
+				lx.ignore() // Ignore '''
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case '.': // special error case, be kind to users
+		return lx.errorf("floats must start with a digit, not '.'")
+	case 'i', 'n':
+		// Accept the special float values "inf" and "nan".
+		if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
+			lx.emit(itemFloat)
+			return lx.pop()
+		}
+	case '-', '+':
+		return lexDecimalNumberStart
+	}
+	if unicode.IsLetter(r) {
+		// Be permissive here; lexBool will give a nice error if the
+		// user wrote something like
+		//   x = foo
+		// (i.e. not 'true' or 'false' but is something else word-like.)
+		lx.backup()
+		return lexBool
+	}
+	if r == eof {
+		return lx.errorf("unexpected EOF; expected value")
+	}
+	return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValue)
+	case r == '#':
+		lx.push(lexArrayValue)
+		return lexCommentStart
+	case r == ',':
+		return lx.errorf("unexpected comma")
+	case r == ']':
+		return lexArrayEnd
+	}
+
+	lx.backup()
+	lx.push(lexArrayValueEnd)
+	return lexValue
+}
+
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValueEnd)
	case r == '#':
		// Comment after a value: lex it, then come back here.
		lx.push(lexArrayValueEnd)
		return lexCommentStart
	case r == ',':
		lx.ignore()
		return lexArrayValue // move on to the next value
	case r == ']':
		return lexArrayEnd
	default:
		return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r))
	}
}
+
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemArrayEnd)
	// Resume whatever state lexed the array's container.
	return lx.pop()
}
+
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValue)
	case isNL(r):
		// Newlines are not permitted inside inline tables.
		return lx.errorPrevLine(errLexInlineTableNL{})
	case r == '#':
		lx.push(lexInlineTableValue)
		return lexCommentStart
	case r == ',':
		// A comma before any key/value pair is malformed.
		return lx.errorf("unexpected comma")
	case r == '}':
		return lexInlineTableEnd
	}
	// Anything else starts a key; lex it, then return to
	// lexInlineTableValueEnd once the pair's value is done.
	lx.backup()
	lx.push(lexInlineTableValueEnd)
	return lexKeyStart
}
+
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValueEnd)
	case isNL(r):
		return lx.errorPrevLine(errLexInlineTableNL{})
	case r == '#':
		lx.push(lexInlineTableValueEnd)
		return lexCommentStart
	case r == ',':
		lx.ignore()
		lx.skip(isWhitespace)
		// Look ahead so "{a = 1,}" is reported as a trailing comma
		// rather than as a bad key start.
		if lx.peek() == '}' {
			return lx.errorf("trailing comma not allowed in inline tables")
		}
		return lexInlineTableValue
	case r == '}':
		return lexInlineTableEnd
	default:
		return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r))
	}
}
+
+func runeOrEOF(r rune) string {
+	if r == eof {
+		return "end of file"
+	}
+	return "'" + string(r) + "'"
+}
+
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemInlineTableEnd)
	// Resume whatever state lexed the table's container.
	return lx.pop()
}
+
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf(`unexpected EOF; expected '"'`)
	case isNL(r):
		// Literal newlines are only allowed in multiline strings.
		return lx.errorPrevLine(errLexStringNL{})
	case r == '\\':
		lx.push(lexString)
		return lexStringEscape
	case r == '"':
		// Emit everything before the closing quote, then discard the
		// quote itself.
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexString
}
+
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
	r := lx.next()
	switch r {
	default:
		return lexMultilineString
	case eof:
		return lx.errorf(`unexpected EOF; expected '"""'`)
	case '\\':
		return lexMultilineStringEscape
	case '"':
		/// Found " → try to read two more "".
		if lx.accept('"') {
			if lx.accept('"') {
				/// Peek ahead: the string can contain " and "", including at the
				/// end: """str"""""
				/// 6 or more at the end, however, is an error.
				if lx.peek() == '"' {
					/// Check if we already lexed five quotes; if so we have six
					/// now, and that's just too many man!
					if strings.HasSuffix(lx.current(), `"""""`) {
						return lx.errorf(`unexpected '""""""'`)
					}
					/// Keep the first two quotes as string content and re-enter
					/// this state to decide about the remaining ones.
					lx.backup()
					lx.backup()
					return lexMultilineString
				}

				lx.backup() /// backup: don't include the """ in the item.
				lx.backup()
				lx.backup()
				lx.emit(itemMultilineString)
				lx.next() /// Read over """ again and discard it.
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
		return lexMultilineString
	}
}
+
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	default:
		return lexRawString
	case r == eof:
		return lx.errorf(`unexpected EOF; expected "'"`)
	case isNL(r):
		// Literal newlines are only allowed in multiline raw strings.
		return lx.errorPrevLine(errLexStringNL{})
	case r == '\'':
		// Emit everything before the closing quote, then discard the
		// quote itself.
		lx.backup()
		lx.emit(itemRawString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
}
+
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
	r := lx.next()
	switch r {
	default:
		return lexMultilineRawString
	case eof:
		return lx.errorf(`unexpected EOF; expected "'''"`)
	case '\'':
		/// Found ' → try to read two more ''.
		if lx.accept('\'') {
			if lx.accept('\'') {
				/// Peek ahead: the string can contain ' and '', including at the
				/// end: '''str'''''
				/// 6 or more at the end, however, is an error.
				if lx.peek() == '\'' {
					/// Check if we already lexed five quotes; if so we have six
					/// now, and that's just too many man!
					if strings.HasSuffix(lx.current(), "'''''") {
						return lx.errorf(`unexpected "''''''"`)
					}
					/// Keep the first two quotes as string content and re-enter
					/// this state to decide about the remaining ones.
					lx.backup()
					lx.backup()
					return lexMultilineRawString
				}

				lx.backup() /// backup: don't include the ''' in the item.
				lx.backup()
				lx.backup()
				lx.emit(itemRawMultilineString)
				lx.next() /// Read over ''' again and discard it.
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
		return lexMultilineRawString
	}
}
+
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
	// Handle the special case first: a backslash directly followed by a
	// newline is a line-ending backslash, which continues the string.
	if isNL(lx.next()) {
		return lexMultilineString
	}
	lx.backup()
	lx.push(lexMultilineString)
	return lexStringEscape(lx)
}
+
+func lexStringEscape(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case 'b':
+		fallthrough
+	case 't':
+		fallthrough
+	case 'n':
+		fallthrough
+	case 'f':
+		fallthrough
+	case 'r':
+		fallthrough
+	case '"':
+		fallthrough
+	case ' ', '\t':
+		// Inside """ .. """ strings you can use \ to escape newlines, and any
+		// amount of whitespace can be between the \ and \n.
+		fallthrough
+	case '\\':
+		return lx.pop()
+	case 'u':
+		return lexShortUnicodeEscape
+	case 'U':
+		return lexLongUnicodeEscape
+	}
+	return lx.error(errLexEscape{r})
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 4; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf(
+				`expected four hexadecimal digits after '\u', but got %q instead`,
+				lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 8; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf(
+				`expected eight hexadecimal digits after '\U', but got %q instead`,
+				lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexNumberOrDateStart processes the first character of a value which begins
+// with a digit. It exists to catch values starting with '0', so that
+// lexBaseNumberOrDate can differentiate base prefixed integers from other
+// types.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case '0':
+		return lexBaseNumberOrDate
+	}
+
+	if !isDigit(r) {
+		// The only way to reach this state is if the value starts
+		// with a digit, so specifically treat anything else as an
+		// error.
+		return lx.errorf("expected a digit but got %q", r)
+	}
+
+	return lexNumberOrDate
+}
+
// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '-', ':':
		// Date or time separator: it must be a datetime from here on.
		return lexDatetime
	case '_':
		// Underscore digit separator: a plain decimal number.
		return lexDecimalNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	// Any other rune terminates the integer.
	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}
+
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexDatetime
	}
	switch r {
	// Any rune that can appear in an RFC 3339-style datetime keeps us here.
	case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
		return lexDatetime
	}

	lx.backup()
	// emitTrim drops surrounding whitespace (a space may separate date and time).
	lx.emitTrim(itemDatetime)
	return lx.pop()
}
+
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isHexadecimal(r) {
+		return lexHexInteger
+	}
+	switch r {
+	case '_':
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isOctal(r) {
+		return lexOctalInteger
+	}
+	switch r {
+	case '_':
+		return lexOctalInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isBinary(r) {
+		return lexBinaryInteger
+	}
+	switch r {
+	case '_':
+		return lexBinaryInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+	switch r {
+	case '.', 'e', 'E':
+		return lexFloat
+	case '_':
+		return lexDecimalNumber
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
// lexDecimalNumberStart consumes the first digit of a number beginning with a
// sign. It assumes the sign has already been consumed. Values which start with
// a sign are only allowed to be decimal integers or floats.
//
// The special "nan" and "inf" values are also recognized.
func lexDecimalNumberStart(lx *lexer) stateFn {
	r := lx.next()

	// Special error cases to give users better error messages
	switch r {
	case 'i':
		// Must be "+inf" or "-inf".
		if !lx.accept('n') || !lx.accept('f') {
			return lx.errorf("invalid float: '%s'", lx.current())
		}
		lx.emit(itemFloat)
		return lx.pop()
	case 'n':
		// Must be "+nan" or "-nan".
		if !lx.accept('a') || !lx.accept('n') {
			return lx.errorf("invalid float: '%s'", lx.current())
		}
		lx.emit(itemFloat)
		return lx.pop()
	case '0':
		// A base prefix after a sign (e.g. "-0x1") is forbidden by TOML.
		p := lx.peek()
		switch p {
		case 'b', 'o', 'x':
			return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
		}
	case '.':
		return lx.errorf("floats must start with a digit, not '.'")
	}

	if isDigit(r) {
		return lexDecimalNumber
	}

	return lx.errorf("expected a digit but got %q", r)
}
+
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexFloat
	}
	switch r {
	// Separator, fraction, sign, and exponent runes all keep us here.
	case '_', '.', '-', '+', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemFloat)
	return lx.pop()
}
+
+// lexBool consumes a bool string: 'true' or 'false.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemCommentStart)
	return lexComment
}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	default:
+		return lexComment
+	}
+}
+
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
	lx.ignore()
	return nextState
}
+
+func (s stateFn) String() string {
+	name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+	if i := strings.LastIndexByte(name, '.'); i > -1 {
+		name = name[i+1:]
+	}
+	if s == nil {
+		name = "<nil>"
+	}
+	return name + "()"
+}
+
// String returns a human-readable name for the item type; used in debug
// output and in parser "BUG" messages.
func (itype itemType) String() string {
	switch itype {
	case itemError:
		return "Error"
	case itemNIL:
		return "NIL"
	case itemEOF:
		return "EOF"
	case itemText:
		return "Text"
	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
		return "String"
	case itemBool:
		return "Bool"
	case itemInteger:
		return "Integer"
	case itemFloat:
		return "Float"
	case itemDatetime:
		return "DateTime"
	case itemTableStart:
		return "TableStart"
	case itemTableEnd:
		return "TableEnd"
	case itemKeyStart:
		return "KeyStart"
	case itemKeyEnd:
		return "KeyEnd"
	case itemArray:
		return "Array"
	case itemArrayEnd:
		return "ArrayEnd"
	case itemCommentStart:
		return "CommentStart"
	case itemInlineTableStart:
		return "InlineTableStart"
	case itemInlineTableEnd:
		return "InlineTableEnd"
	}
	// An unnamed type here is a programmer error, not a user error.
	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
+
// String renders the item as "(Type, value)" for debugging.
func (item item) String() string {
	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
+
// isWhitespace reports whether r is TOML whitespace: tab or space.
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }

// isNL reports whether r is a newline or carriage return.
func isNL(r rune) bool         { return r == '\n' || r == '\r' }

// isControl reports whether r is a control character that TOML forbids in
// strings and comments.
func isControl(r rune) bool { // Control characters except \t, \r, \n
	switch r {
	case '\t', '\r', '\n':
		return false
	default:
		return (r >= 0x00 && r <= 0x1f) || r == 0x7f
	}
}

// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool  { return r >= '0' && r <= '9' }

// isBinary reports whether r is a binary digit.
func isBinary(r rune) bool { return r == '0' || r == '1' }

// isOctal reports whether r is an octal digit.
func isOctal(r rune) bool  { return r >= '0' && r <= '7' }

// isHexadecimal reports whether r is a hexadecimal digit in either case.
func isHexadecimal(r rune) bool {
	return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
}

// isBareKeyChar reports whether r may appear in an unquoted TOML key.
func isBareKeyChar(r rune) bool {
	return (r >= 'A' && r <= 'Z') ||
		(r >= 'a' && r <= 'z') ||
		(r >= '0' && r <= '9') ||
		r == '_' || r == '-'
}

+ 120 - 0
vendor/github.com/BurntSushi/toml/meta.go

@@ -0,0 +1,120 @@
+package toml
+
+import (
+	"strings"
+)
+
// MetaData allows access to meta information about TOML data that's not
// accessible otherwise.
//
// It allows checking if a key is defined in the TOML data, whether any keys
// were undecoded, and the TOML type of a key.
type MetaData struct {
	context Key // Used only during decoding.

	mapping map[string]interface{} // Decoded data as nested maps.
	types   map[string]tomlType    // TOML type per key, indexed by Key.String().
	keys    []Key                  // Every key, in document order.
	decoded map[string]struct{}    // Set of keys decoded into the target value.
}
+
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
+//
+// Returns false for an empty key.
+func (md *MetaData) IsDefined(key ...string) bool {
+	if len(key) == 0 {
+		return false
+	}
+
+	var (
+		hash      map[string]interface{}
+		ok        bool
+		hashOrVal interface{} = md.mapping
+	)
+	for _, k := range key {
+		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+			return false
+		}
+		if hashOrVal, ok = hash[k]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that does
// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
	// md.types is indexed by the dotted string form of the key.
	if typ, ok := md.types[Key(key).String()]; ok {
		return typ.typeString()
	}
	return ""
}
+
// Keys returns a slice of every key in the TOML data, including key groups.
//
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific. The list will have the same
// order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
	return md.keys
}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+	undecoded := make([]Key, 0, len(md.keys))
+	for _, key := range md.keys {
+		if _, ok := md.decoded[key.String()]; !ok {
+			undecoded = append(undecoded, key)
+		}
+	}
+	return undecoded
+}
+
+// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// values of this type.
+type Key []string
+
+func (k Key) String() string {
+	ss := make([]string, len(k))
+	for i := range k {
+		ss[i] = k.maybeQuoted(i)
+	}
+	return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+	if k[i] == "" {
+		return `""`
+	}
+	for _, c := range k[i] {
+		if !isBareKeyChar(c) {
+			return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
+		}
+	}
+	return k[i]
+}
+
+func (k Key) add(piece string) Key {
+	newKey := make(Key, len(k)+1)
+	copy(newKey, k)
+	newKey[len(k)] = piece
+	return newKey
+}

+ 767 - 0
vendor/github.com/BurntSushi/toml/parse.go

@@ -0,0 +1,767 @@
+package toml
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
// parser turns the lexer's item stream into the nested map representation
// consumed by the decoder.
type parser struct {
	lx         *lexer
	context    Key      // Full key for the current hash in scope.
	currentKey string   // Base key name for everything except hashes.
	pos        Position // Current position in the TOML file.

	ordered   []Key                  // List of keys in the order that they appear in the TOML data.
	mapping   map[string]interface{} // Map keyname → key value.
	types     map[string]tomlType    // Map keyname → TOML type.
	implicits map[string]struct{}    // Record implicit keys (e.g. "key.group.names").
}
+
// parse lexes and parses data, returning the populated parser. ParseError
// panics raised anywhere during parsing are recovered here and converted
// into the returned error; any other panic is re-raised.
func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			if pErr, ok := r.(ParseError); ok {
				pErr.input = data
				err = pErr
				return
			}
			panic(r)
		}
	}()

	// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
	// which mangles stuff. NOTE(review): these two prefixes are the UTF-16
	// BOMs; presumably a UTF-8 BOM is dealt with elsewhere — confirm.
	if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
		data = data[2:]
	}

	// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
	// file (second byte in surrogate pair being NULL). Again, do this here to
	// avoid having to deal with UTF-8/16 stuff in the lexer.
	ex := 6
	if len(data) < 6 {
		ex = len(data)
	}
	if i := strings.IndexRune(data[:ex], 0); i > -1 {
		return nil, ParseError{
			Message:  "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
			Position: Position{Line: 1, Start: i, Len: 1},
			Line:     1,
			input:    data,
		}
	}

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]struct{}),
	}
	// Drain the lexer item by item until EOF; each top-level item is either
	// a comment, a table header, or a key/value pair.
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}
+
+func (p *parser) panicItemf(it item, format string, v ...interface{}) {
+	panic(ParseError{
+		Message:  fmt.Sprintf(format, v...),
+		Position: it.pos,
+		Line:     it.pos.Len,
+		LastKey:  p.current(),
+	})
+}
+
// panicf panics with a ParseError anchored at the parser's current position;
// parse() recovers this and returns it as an error.
func (p *parser) panicf(format string, v ...interface{}) {
	panic(ParseError{
		Message:  fmt.Sprintf(format, v...),
		Position: p.pos,
		Line:     p.pos.Line,
		LastKey:  p.current(),
	})
}
+
// next returns the next item from the lexer, converting lexer errors into
// ParseError panics so parsing stops at the first problem.
func (p *parser) next() item {
	it := p.lx.nextItem()
	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
	if it.typ == itemError {
		// Structured lexer errors carry their own error value; plain ones
		// only have a message string.
		if it.err != nil {
			panic(ParseError{
				Position: it.pos,
				Line:     it.pos.Line,
				LastKey:  p.current(),
				err:      it.err,
			})
		}

		p.panicItemf(it, "%s", it.val)
	}
	return it
}
+
// nextPos is like next, but also records the item's position as the parser's
// current position (used by panicf for error reporting).
func (p *parser) nextPos() item {
	it := p.next()
	p.pos = it.pos
	return it
}
+
// bug panics with an internal-inconsistency message; reaching it means the
// lexer handed the parser something the parser should never see.
func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
+
// expect consumes the next item and panics (as a bug) if it is not of the
// given type.
func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}
+
// assertEqual panics (as a bug) when the got item type differs from the
// expected one.
func (p *parser) assertEqual(expected, got itemType) {
	if expected != got {
		p.bug("Expected '%s' but got '%s'.", expected, got)
	}
}
+
// topLevel dispatches one top-level item: a comment, a [table] header, an
// [[array-of-tables]] header, or a key = value pair.
func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart: // # ..
		p.expect(itemText)
	case itemTableStart: // [ .. ]
		name := p.nextPos()

		// Collect all dotted parts of the table name.
		var key Key
		for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
			key = append(key, p.keyString(name))
		}
		p.assertEqual(itemTableEnd, name.typ)

		p.addContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart: // [[ .. ]]
		name := p.nextPos()

		var key Key
		for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
			key = append(key, p.keyString(name))
		}
		p.assertEqual(itemArrayTableEnd, name.typ)

		p.addContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart: // key = ..
		outerContext := p.context
		/// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
		k := p.nextPos()
		var key Key
		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
			key = append(key, p.keyString(k))
		}
		p.assertEqual(itemKeyEnd, k.typ)

		/// The current key is the last part.
		p.currentKey = key[len(key)-1]

		/// All the other parts (if any) are the context; need to set each part
		/// as implicit.
		context := key[:len(key)-1]
		for i := range context {
			p.addImplicitContext(append(p.context, context[i:i+1]...))
		}

		/// Set value.
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))

		/// Remove the context we added (preserving any context from [tbl] lines).
		p.context = outerContext
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}
+
// keyString gets a string for a key (or part of a key in a table name);
// quoted key parts go through the full value pipeline so escapes are applied.
func (p *parser) keyString(it item) string {
	switch it.typ {
	case itemText:
		return it.val
	case itemString, itemMultilineString,
		itemRawString, itemRawMultilineString:
		s, _ := p.value(it, false)
		return s.(string)
	default:
		p.bug("Unexpected key type: %s", it.typ)
	}
	panic("unreachable")
}
+
// datetimeRepl normalizes datetime separators before parsing: lowercase
// 'z'/'t' are uppercased and a space separator becomes 'T', so the layouts
// in dtTypes cover every spelling the TOML spec permits.
var datetimeRepl = strings.NewReplacer(
	"z", "Z",
	"t", "T",
	" ", "T")
+
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface, along with its TOML type.
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
	switch it.typ {
	case itemString:
		return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
	case itemMultilineString:
		// Multiline strings drop a leading newline and honor line-ending
		// backslashes before escapes are replaced.
		return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
	case itemRawString:
		// Raw strings are taken verbatim.
		return it.val, p.typeOfPrimitive(it)
	case itemRawMultilineString:
		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
	case itemInteger:
		return p.valueInteger(it)
	case itemFloat:
		return p.valueFloat(it)
	case itemBool:
		switch it.val {
		case "true":
			return true, p.typeOfPrimitive(it)
		case "false":
			return false, p.typeOfPrimitive(it)
		default:
			p.bug("Expected boolean value, but got '%s'.", it.val)
		}
	case itemDatetime:
		return p.valueDatetime(it)
	case itemArray:
		return p.valueArray(it)
	case itemInlineTableStart:
		return p.valueInlineTable(it, parentIsArray)
	default:
		p.bug("Unexpected value type: %s", it.typ)
	}
	panic("unreachable")
}
+
// valueInteger validates and converts an integer item into an int64.
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
	if !numUnderscoresOK(it.val) {
		p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
	}
	if numHasLeadingZero(it.val) {
		p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
	}

	// Base 0 lets ParseInt handle the 0b/0o/0x prefixes.
	num, err := strconv.ParseInt(it.val, 0, 64)
	if err != nil {
		// Distinguish integer values. Normally, it'd be a bug if the lexer
		// provides an invalid integer, but it's possible that the number is
		// out of range of valid values (which the lexer cannot determine).
		// So mark the former as a bug but the latter as a legitimate user
		// error.
		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
			p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val)
		} else {
			p.bug("Expected integer value, but got '%s'.", it.val)
		}
	}
	return num, p.typeOfPrimitive(it)
}
+
// valueFloat validates and converts a float item into a float64.
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
	// Validate underscores separately in the integer, fraction, and exponent
	// parts, since the separators themselves break up digit runs.
	parts := strings.FieldsFunc(it.val, func(r rune) bool {
		switch r {
		case '.', 'e', 'E':
			return true
		}
		return false
	})
	for _, part := range parts {
		if !numUnderscoresOK(part) {
			p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
		}
	}
	if len(parts) > 0 && numHasLeadingZero(parts[0]) {
		p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
	}
	if !numPeriodsOK(it.val) {
		// As a special case, numbers like '123.' or '1.e2',
		// which are valid as far as Go/strconv are concerned,
		// must be rejected because TOML says that a fractional
		// part consists of '.' followed by 1+ digits.
		p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
	}
	val := strings.Replace(it.val, "_", "", -1)
	if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
		val = "nan"
	}
	num, err := strconv.ParseFloat(val, 64)
	if err != nil {
		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
			p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
		} else {
			p.panicItemf(it, "Invalid float value: %q", it.val)
		}
	}
	return num, p.typeOfPrimitive(it)
}
+
// dtTypes lists the accepted datetime layouts with the time zone each implies.
// They are tried in order, so the fully-qualified RFC 3339 form wins over the
// local-datetime, local-date, and local-time forms.
var dtTypes = []struct {
	fmt  string
	zone *time.Location
}{
	{time.RFC3339Nano, time.Local},
	{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
	{"2006-01-02", internal.LocalDate},
	{"15:04:05.999999999", internal.LocalTime},
}
+
// valueDatetime converts a datetime item into a time.Time by trying each
// layout in dtTypes after normalizing separators with datetimeRepl.
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
	it.val = datetimeRepl.Replace(it.val)
	var (
		t   time.Time
		ok  bool
		err error
	)
	for _, dt := range dtTypes {
		t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
		if err == nil {
			ok = true
			break
		}
	}
	// No layout matched: the lexer's first approximation was not a real
	// datetime, which is a user error.
	if !ok {
		p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
	}
	return t, p.typeOfPrimitive(it)
}
+
// valueArray consumes items up to itemArrayEnd and converts them into a
// []interface{}, skipping comments embedded in the array.
func (p *parser) valueArray(it item) (interface{}, tomlType) {
	p.setType(p.currentKey, tomlArray)

	// p.setType(p.currentKey, typ)
	var (
		types []tomlType

		// Initialize to a non-nil empty slice. This makes it consistent with
		// how S = [] decodes into a non-nil slice inside something like struct
		// { S []string }. See #338
		array = []interface{}{}
	)
	for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
		if it.typ == itemCommentStart {
			p.expect(itemText)
			continue
		}

		val, typ := p.value(it, true)
		array = append(array, val)
		types = append(types, typ)

		// XXX: types isn't used here, we need it to record the accurate type
		// information.
		//
		// Not entirely sure how to best store this; could use "key[0]",
		// "key[1]" notation, or maybe store it on the Array type?
	}
	return array, tomlArray
}
+
// valueInlineTable consumes key/value items up to itemInlineTableEnd and
// converts them into a map[string]interface{}, temporarily extending the
// parser context so nested keys are recorded under the table's key.
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
	var (
		hash         = make(map[string]interface{})
		outerContext = p.context
		outerKey     = p.currentKey
	)

	p.context = append(p.context, p.currentKey)
	prevContext := p.context
	p.currentKey = ""

	p.addImplicit(p.context)
	p.addContext(p.context, parentIsArray)

	/// Loop over all table key/value pairs.
	for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
		if it.typ == itemCommentStart {
			p.expect(itemText)
			continue
		}

		/// Read all key parts.
		k := p.nextPos()
		var key Key
		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
			key = append(key, p.keyString(k))
		}
		p.assertEqual(itemKeyEnd, k.typ)

		/// The current key is the last part.
		p.currentKey = key[len(key)-1]

		/// All the other parts (if any) are the context; need to set each part
		/// as implicit.
		context := key[:len(key)-1]
		for i := range context {
			p.addImplicitContext(append(p.context, context[i:i+1]...))
		}

		/// Set the value.
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		hash[p.currentKey] = val

		/// Restore context.
		p.context = prevContext
	}
	p.context = outerContext
	p.currentKey = outerKey
	return hash, tomlHash
}
+
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
// +/- signs, and base prefixes.
func numHasLeadingZero(s string) bool {
	switch {
	case len(s) > 1 && s[0] == '0':
		// A leading '0' is only fine when it introduces a base prefix.
		return s[1] != 'b' && s[1] != 'o' && s[1] != 'x'
	case len(s) > 2 && (s[0] == '-' || s[0] == '+'):
		// Signed numbers may not pad with zeroes either.
		return s[1] == '0'
	}
	return false
}
+
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
	switch s {
	// The special float values never contain underscores.
	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
		return true
	}
	// accept tracks whether the previous rune may be followed by an
	// underscore; it starts false so a leading underscore is rejected.
	accept := false
	for _, r := range s {
		if r == '_' {
			if !accept {
				return false
			}
		}

		// isHexadecimal is a superset of all the permissible characters
		// surrounding an underscore. Note that '_' itself is not hexadecimal,
		// so consecutive underscores are rejected on the next iteration.
		accept = isHexadecimal(r)
	}
	// The final rune must be digit-like, which also rejects a trailing
	// underscore.
	return accept
}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+	period := false
+	for _, r := range s {
+		if period && !isDigit(r) {
+			return false
+		}
+		period = r == '.'
+	}
+	return !period
+}
+
// Set the current context of the parser, where the context is either a hash or
// an array of hashes, depending on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) addContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			// A scalar (or anything else) sits where a table is needed.
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	// Point the context at the parent so setValue below resolves into it.
	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 4)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as an array.", key)
		}
	} else {
		// Plain table: setValue also enforces the duplicate-key rules.
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	// The freshly created table becomes the current context.
	p.context = append(p.context, key[len(key)-1])
}
+
// set calls setValue and setType, recording both the value and its TOML type
// for key in the current context. setValue runs first because it performs the
// duplicate-key checks; setType merely records the type afterwards.
func (p *parser) set(key string, val interface{}, typ tomlType) {
	p.setValue(key, val)
	p.setType(key, typ)
}
+
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var (
		tmpHash    interface{}
		ok         bool
		hash       = p.mapping
		keyContext Key
	)
	// Walk from the top-level mapping down to the table addressed by
	// p.context, descending into the most recent element of any array
	// of tables along the way.
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			// A missing intermediate table is a parser bug: addContext
			// should have created every step of the context already.
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			// A non-table value occupies a spot where a table is required.
			p.panicf("Key '%s' has already been defined.", keyContext)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Normally redefining keys isn't allowed, but the key could have been
		// defined implicitly and it's allowed to be redefined concretely. (See
		// the `valid/implicit-and-explicit-after.toml` in toml-test)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isArray(keyContext) {
			// Arrays of tables may be appended to; replace with new value.
			p.removeImplicit(keyContext)
			hash[key] = value
			return
		}
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}

	hash[key] = value
}
+
// setType sets the type of a particular value at a given key. It should be
// called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
	// Build the fully qualified key: current context plus the leaf key.
	keyContext := make(Key, 0, len(p.context)+1)
	keyContext = append(keyContext, p.context...)
	if len(key) > 0 { // allow type setting for hashes
		keyContext = append(keyContext, key)
	}
	// Special case to make empty keys ("" = 1) work.
	// Without it it will set "" rather than `""`.
	// TODO: why is this needed? And why is this only needed here?
	if len(keyContext) == 0 {
		keyContext = Key{""}
	}
	// Types are stored by the key's string form, not the Key slice itself.
	p.types[keyContext.String()] = typ
}
+
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
// They are tracked by the key's string form in p.implicits; isArray consults
// the recorded TOML types instead.
func (p *parser) addImplicit(key Key)     { p.implicits[key.String()] = struct{}{} }
func (p *parser) removeImplicit(key Key)  { delete(p.implicits, key.String()) }
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
func (p *parser) isArray(key Key) bool    { return p.types[key.String()] == tomlArray }

// addImplicitContext marks key as implicit and makes it the current
// table context.
func (p *parser) addImplicitContext(key Key) {
	p.addImplicit(key)
	p.addContext(key, false)
}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+	if len(p.currentKey) == 0 {
+		return p.context.String()
+	}
+	if len(p.context) == 0 {
+		return p.currentKey
+	}
+	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
// stripFirstNewline removes a single leading line break ("\n" or "\r\n")
// from s, leaving everything else intact.
func stripFirstNewline(s string) string {
	switch {
	case strings.HasPrefix(s, "\n"):
		return s[1:]
	case strings.HasPrefix(s, "\r\n"):
		return s[2:]
	default:
		return s
	}
}
+
// Remove newlines inside triple-quoted strings if a line ends with "\".
func (p *parser) stripEscapedNewlines(s string) string {
	split := strings.Split(s, "\n")
	if len(split) < 1 {
		return s
	}

	escNL := false // Keep track of the last non-blank line was escaped.
	for i, line := range split {
		line = strings.TrimRight(line, " \t\r")

		if len(line) == 0 || line[len(line)-1] != '\\' {
			// Line does not end in a backslash: keep it, re-adding the "\n"
			// that Split consumed unless an earlier line escaped it (or this
			// is the final line).
			split[i] = strings.TrimRight(split[i], "\r")
			if !escNL && i != len(split)-1 {
				split[i] += "\n"
			}
			continue
		}

		// Count the trailing run of backslashes: an even-length run means
		// they escape each other and the newline is NOT escaped (escBS stays
		// true); an odd-length run escapes the newline (escBS false).
		escBS := true
		for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
			escBS = !escBS
		}
		if escNL {
			// The previous line escaped its newline, so leading whitespace
			// on this line is swallowed too.
			line = strings.TrimLeft(line, " \t\r")
		}
		escNL = !escBS

		if escBS {
			// Trailing backslash is itself escaped: keep the newline.
			split[i] += "\n"
			continue
		}

		if i == len(split)-1 {
			// A line-continuation backslash on the last line has no newline
			// to escape.
			p.panicf("invalid escape: '\\ '")
		}

		split[i] = line[:len(line)-1] // Remove \
		if len(split)-1 > i {
			// Swallow the next line's leading whitespace as well.
			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
		}
	}
	return strings.Join(split, "")
}
+
// replaceEscapes decodes the backslash escape sequences of a basic TOML
// string (\b \t \n \f \r \" \\ \uXXXX \UXXXXXXXX) into their runes.
// Invalid sequences panic via p.bug / p.panicItemf; the lexer is expected
// to have pre-validated the input.
func (p *parser) replaceEscapes(it item, str string) string {
	replaced := make([]rune, 0, len(str))
	s := []byte(str)
	r := 0
	for r < len(s) {
		if s[r] != '\\' {
			// Ordinary character: decode one rune and copy it through.
			c, size := utf8.DecodeRune(s[r:])
			r += size
			replaced = append(replaced, c)
			continue
		}
		r += 1
		if r >= len(s) {
			p.bug("Escape sequence at end of string.")
			return ""
		}
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case ' ', '\t':
			p.panicItemf(it, "invalid escape: '\\%c'", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
		case 't':
			replaced = append(replaced, rune(0x0009))
			r += 1
		case 'n':
			replaced = append(replaced, rune(0x000A))
			r += 1
		case 'f':
			replaced = append(replaced, rune(0x000C))
			r += 1
		case 'r':
			replaced = append(replaced, rune(0x000D))
			r += 1
		case '"':
			replaced = append(replaced, rune(0x0022))
			r += 1
		case '\\':
			replaced = append(replaced, rune(0x005C))
			r += 1
		case 'u':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
			replaced = append(replaced, escaped)
			r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
			replaced = append(replaced, escaped)
			r += 9
		}
	}
	return string(replaced)
}
+
+func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
+	s := string(bs)
+	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+	if err != nil {
+		p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
+	}
+	if !utf8.ValidRune(rune(hex)) {
+		p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
+	}
+	return rune(hex)
+}

+ 242 - 0
vendor/github.com/BurntSushi/toml/type_fields.go

@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
// A field represents a single field found in a struct.
type field struct {
	name  string       // the effective TOML key: the `toml` tag if present, else the Go field name
	tag   bool         // whether field has a `toml` tag
	index []int        // represents the depth of an anonymous field
	typ   reflect.Type // the type of the field
}
+
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	if len(x[i].index) != len(x[j].index) {
		// Shallower embedding (shorter index path) sorts first.
		return len(x[i].index) < len(x[j].index)
	}
	if x[i].tag != x[j].tag {
		// An explicitly tagged field outranks an untagged one.
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	// count is nil on the first pass; reads from a nil map yield zero,
	// which is the desired behavior.
	var count map[reflect.Type]int
	var nextCount map[reflect.Type]int

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		// Advance one BFS level, reusing the previous level's backing array.
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				opts := getOptions(sf.Tag)
				if opts.skip {
					continue
				}
				// Extend the parent's index path with this field's position.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := opts.name != ""
					name := opts.name
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		// Several fields share this name: keep the dominant one, if any.
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
+
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	// A single tagged field at the shallowest level always wins.
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
+
// fieldCache memoizes typeFields results per struct type; the embedded
// RWMutex lets concurrent decoders share it safely.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
+
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		// Store a non-nil sentinel so the fast path above can tell
		// "computed, no fields" apart from "never computed".
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}

+ 70 - 0
vendor/github.com/BurntSushi/toml/type_toml.go

@@ -0,0 +1,70 @@
+package toml
+
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
	// typeString returns the canonical name of the TOML type; two types
	// are considered equal iff these strings match (see typeEqual).
	typeString() string
}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+	if t1 == nil || t2 == nil {
+		return false
+	}
+	return t1.typeString() == t2.typeString()
+}
+
// typeIsTable reports whether t is a table (Hash) or an array of tables
// (ArrayHash).
func typeIsTable(t tomlType) bool {
	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
+
// tomlBaseType is a primitive (non-composite) TOML type whose identity is
// simply its name.
type tomlBaseType string

func (btype tomlBaseType) typeString() string {
	return string(btype)
}

// String implements fmt.Stringer by reusing typeString.
func (btype tomlBaseType) String() string {
	return btype.typeString()
}
+
// The set of base TOML types recognized by the decoder.
var (
	tomlInteger   tomlBaseType = "Integer"
	tomlFloat     tomlBaseType = "Float"
	tomlDatetime  tomlBaseType = "Datetime"
	tomlString    tomlBaseType = "String"
	tomlBool      tomlBaseType = "Bool"
	tomlArray     tomlBaseType = "Array"
	tomlHash      tomlBaseType = "Hash"
	tomlArrayHash tomlBaseType = "ArrayHash"
)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+	switch lexItem.typ {
+	case itemInteger:
+		return tomlInteger
+	case itemFloat:
+		return tomlFloat
+	case itemDatetime:
+		return tomlDatetime
+	case itemString:
+		return tomlString
+	case itemMultilineString:
+		return tomlString
+	case itemRawString:
+		return tomlString
+	case itemRawMultilineString:
+		return tomlString
+	case itemBool:
+		return tomlBool
+	}
+	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+	panic("unreachable")
+}

+ 4 - 0
vendor/github.com/clbanning/mxj/.travis.yml

@@ -0,0 +1,4 @@
+language: go
+
+go:
+- 1.x

+ 55 - 0
vendor/github.com/clbanning/mxj/LICENSE

@@ -0,0 +1,55 @@
+Copyright (c) 2012-2019 Charles Banning <clbanning@gmail.com>.  All rights reserved.
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+===================== for some Go code used in test case ======================
+
+Go Language Copyright & License - 
+
+Copyright 2009 The Go Authors. All rights reserved.
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 199 - 0
vendor/github.com/clbanning/mxj/anyxml.go

@@ -0,0 +1,199 @@
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"reflect"
+)
+
const (
	// DefaultElementTag is the tag used to wrap anonymous (non-map) list
	// members when encoding an arbitrary value as XML.
	DefaultElementTag = "element"
)
+
+// Encode arbitrary value as XML.
+//
+// Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/mxj"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := mxj.AnyXmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+		  <somekey>somevalue</somekey>
+		  <element>string</element>
+		  <element>3.14159265</element>
+		  <element>true</element>
+		</mydoc>
+*/
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXml( v, myRootTag, myElementTag).
+func AnyXml(v interface{}, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte("<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte("<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.Marshal(v)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">"); err != nil {
+			return nil, err
+		}
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(false, s, tag, val, p)
+					}
+				} else {
+					err = marshalMapToXmlIndent(false, s, et, vv, p)
+				}
+			default:
+				err = marshalMapToXmlIndent(false, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString("</" + rt + ">"); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.Xml(rt)
+	default:
+		err = marshalMapToXmlIndent(false, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}
+
+// Encode an arbitrary value as a pretty XML string.
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXmlIndent( v, "", "  ", myRootTag, myElementTag).
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte(prefix + "<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte(prefix + "<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.MarshalIndent(v, prefix, indent)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">\n"); err != nil {
+			return nil, err
+		}
+		p.Indent()
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(true, s, tag, val, p)
+					}
+				} else {
+					p.start = 1 // we 1 tag in
+					err = marshalMapToXmlIndent(true, s, et, vv, p)
+					// *s += "\n"
+					if _, err = s.WriteString("\n"); err != nil {
+						return nil, err
+					}
+				}
+			default:
+				p.start = 0 // in case trailing p.start = 1
+				err = marshalMapToXmlIndent(true, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString(`</` + rt + `>`); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.XmlIndent(prefix, indent, rt)
+	default:
+		err = marshalMapToXmlIndent(true, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}

+ 54 - 0
vendor/github.com/clbanning/mxj/atomFeedString.xml

@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
+</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
+  An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+  1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
+     feeds that will be pubsubhubbubbed.
+  2. every time one of those feeds changes, tell the hub
+     with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&amp;#39;t quite get the server to work, but I think the bug
+is not in my code.  I think that the server expects to be
+able to grab the feed and see the feed&amp;#39;s actual URL in
+the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+</summary></entry><entry><title>rietveld: correct tab handling
+</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
+  This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&amp;#39;t know where to put the tab stops.  Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites.  I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+</summary></entry></feed> 	   `
+

+ 138 - 0
vendor/github.com/clbanning/mxj/doc.go

@@ -0,0 +1,138 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2019, Charles Banning. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file
+
+/*
+Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package.  The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them.
+
+Note: this library was designed for processing ad hoc anonymous messages.  Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly.
+
+Related Packages:
+	checkxml: github.com/clbanning/checkxml provides functions for validating XML data.
+
+Notes:
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>].
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+
+SUMMARY
+
+   type Map map[string]interface{}
+
+   Create a Map value, 'mv', from any map[string]interface{} value, 'v':
+      mv := Map(v)
+
+   Unmarshal / marshal XML as a Map value, 'mv':
+      mv, err := NewMapXml(xmlValue) // unmarshal
+      xmlValue, err := mv.Xml()      // marshal
+
+   Unmarshal XML from an io.Reader as a Map value, 'mv':
+      mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+      mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
+
+   Marshal Map value, 'mv', to an XML Writer (io.Writer):
+      err := mv.XmlWriter(xmlWriter)
+      raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
+
+   Also, for prettified output:
+      xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+      err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+      raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
+
+   Bulk process XML with error handling (note: handlers must return a boolean value):
+      err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+      err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
+
+   Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader.
+
+   There are comparable functions and methods for JSON processing.
+
+   Arbitrary structure values can be decoded to / encoded from Map values:
+      mv, err := NewMapStruct(structVal)
+      err := mv.Struct(structPointer)
+
+   To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+   or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then:
+      paths := mv.PathsForKey(key)
+      path := mv.PathForKeyShortest(key)
+      values, err := mv.ValuesForKey(key, subkeys)
+      values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays.
+      count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
+
+   Get everything at once, irrespective of path depth:
+      leafnodes := mv.LeafNodes()
+      leafvalues := mv.LeafValues()
+
+   A new Map with whatever keys are desired can be created from the current Map and then encoded in XML
+   or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.)
+      newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+      newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+      newXml, err := newMap.Xml()   // for example
+      newJson, err := newMap.Json() // ditto
+
+XML PARSING CONVENTIONS
+
+   Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+   Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved: 
+      - <ns:key>something</ns.key> parses to map["ns:key"]interface{}{"something"}
+      - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"}
+
+   Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+XML ENCODING CONVENTIONS
+   
+   - 'nil' Map values, which may represent 'null' JSON values, are encoded as "<tag/>".
+     NOTE: the operation is not symmetric as "<tag/>" elements are decoded as 'tag:""' Map values,
+           which, then, encode in JSON as '"tag":""' values..
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+*/
+package mxj

+ 54 - 0
vendor/github.com/clbanning/mxj/escapechars.go

@@ -0,0 +1,54 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+)
+
+var xmlEscapeChars bool
+
+// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values.
+// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is
+// then '&amp;' will be re-escaped as '&amp;amp;'.
+//  
+/*
+	The values are:
+	"   &quot;
+	'   &apos;
+	<   &lt;
+	>   &gt;
+	&   &amp;
+*/
+func XMLEscapeChars(b bool) {
+	xmlEscapeChars = b
+}
+
+// Scan for '&' first, since 's' may contain "&amp;" that is parsed to "&amp;amp;" 
+// - or "&lt;" that is parsed to "&amp;lt;".
+var escapechars = [][2][]byte{
+	{[]byte(`&`), []byte(`&amp;`)},
+	{[]byte(`<`), []byte(`&lt;`)},
+	{[]byte(`>`), []byte(`&gt;`)},
+	{[]byte(`"`), []byte(`&quot;`)},
+	{[]byte(`'`), []byte(`&apos;`)},
+}
+
+func escapeChars(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+
+	b := []byte(s)
+	for _, v := range escapechars {
+		n := bytes.Count(b, v[0])
+		if n == 0 {
+			continue
+		}
+		b = bytes.Replace(b, v[0], v[1], n)
+	}
+	return string(b)
+}
+

+ 9 - 0
vendor/github.com/clbanning/mxj/exists.go

@@ -0,0 +1,9 @@
+package mxj
+
+// Checks whether the path exists. If err != nil then 'false' is returned
+// along with the error encountered parsing either the "path" or "subkeys"
+// argument.
+func (mv Map) Exists(path string, subkeys ...string) (bool, error) {
+	v, err := mv.ValuesForPath(path, subkeys...)
+	return (err == nil && len(v) > 0), err
+}

+ 287 - 0
vendor/github.com/clbanning/mxj/files.go

@@ -0,0 +1,287 @@
+package mxj
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+type Maps []Map
+
+func NewMaps() Maps {
+	return make(Maps, 0)
+}
+
+type MapRaw struct {
+	M Map
+	R []byte
+}
+
+// NewMapsFromXmlFile - creates an array from a file of JSON values.
+func NewMapsFromJsonFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// ReadMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values.
+func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFile - creates an array from a file of XML values.
+func NewMapsFromXmlFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values.
+// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw().
+// It is slow at parsing a file from disk and is intended for relatively small utility files.
+func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// ------------------------ Maps writing -------------------------
+// These are handy-dandy methods for dumping configuration data, etc.
+
+// JsonString - analogous to mv.Json()
+func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) {
+	var s string
+	for _, v := range mvs {
+		j, err := v.Json()
+		if err != nil {
+			return s, err
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// JsonStringIndent - analogous to mv.JsonIndent()
+func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) {
+	var s string
+	var haveFirst bool
+	for _, v := range mvs {
+		j, err := v.JsonIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		if haveFirst {
+			s += "\n"
+		} else {
+			haveFirst = true
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// XmlString - analogous to mv.Xml()
+func (mvs Maps) XmlString() (string, error) {
+	var s string
+	for _, v := range mvs {
+		x, err := v.Xml()
+		if err != nil {
+			return s, err
+		}
+		s += string(x)
+	}
+	return s, nil
+}
+
+// XmlStringIndent - analogous to mv.XmlIndent()
+func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) {
+	var s string
+	for _, v := range mvs {
+		x, err := v.XmlIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		s += string(x)
+	}
+	return s, nil
+}
+
+// JsonFile - write Maps to named file as JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonWriter method.
+func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonString(encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// JsonFileIndent - write Maps to named file as pretty JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonIndentWriter method.
+func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonStringIndent(prefix, indent, encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// XmlFile - write Maps to named file as XML
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlWriter method.
+func (mvs Maps) XmlFile(file string) error {
+	s, err := mvs.XmlString()
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// XmlFileIndent - write Maps to named file as pretty XML
+// Note: the file will be created,if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlIndentWriter method.
+func (mvs Maps) XmlFileIndent(file, prefix, indent string) error {
+	s, err := mvs.XmlStringIndent(prefix, indent)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}

+ 2 - 0
vendor/github.com/clbanning/mxj/files_test.badjson

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"some", "bad":JSON, "in":"it" }

+ 9 - 0
vendor/github.com/clbanning/mxj/files_test.badxml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</other>
+	<for>test case</for>
+</msg>

+ 2 - 0
vendor/github.com/clbanning/mxj/files_test.json

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"just", "two":2, "JSON":"values", "true":true }

+ 9 - 0
vendor/github.com/clbanning/mxj/files_test.xml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</another>
+	<for>test case</for>
+</msg>

+ 1 - 0
vendor/github.com/clbanning/mxj/files_test_dup.json

@@ -0,0 +1 @@
+{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"}

+ 1 - 0
vendor/github.com/clbanning/mxj/files_test_dup.xml

@@ -0,0 +1 @@
+<doc><data>for files.go</data><some>test</some></doc><msg><another>doc</another><for>test case</for><just>some</just></msg>

+ 12 - 0
vendor/github.com/clbanning/mxj/files_test_indent.json

@@ -0,0 +1,12 @@
+{
+  "a": "test",
+  "file": "for",
+  "files_test.go": "case",
+  "this": "is"
+}
+{
+  "JSON": "values",
+  "true": true,
+  "two": 2,
+  "with": "just"
+}

+ 8 - 0
vendor/github.com/clbanning/mxj/files_test_indent.xml

@@ -0,0 +1,8 @@
+<doc>
+  <data>for files.go</data>
+  <some>test</some>
+</doc><msg>
+  <another>doc</another>
+  <for>test case</for>
+  <just>some</just>
+</msg>

+ 35 - 0
vendor/github.com/clbanning/mxj/gob.go

@@ -0,0 +1,35 @@
+// gob.go - Encode/Decode a Map into a gob object.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/gob"
+)
+
+// NewMapGob returns a Map value for a gob object that has been
+// encoded from a map[string]interface{} (or compatible type) value.
+// It is intended to provide symmetric handling of Maps that have
+// been encoded using mv.Gob.
+func NewMapGob(gobj []byte) (Map, error) {
+	m := make(map[string]interface{}, 0)
+	if len(gobj) == 0 {
+		return m, nil
+	}
+	r := bytes.NewReader(gobj)
+	dec := gob.NewDecoder(r)
+	if err := dec.Decode(&m); err != nil {
+		return m, err
+	}
+	return m, nil
+}
+
+// Gob returns a gob-encoded value for the Map 'mv'.
+func (mv Map) Gob() ([]byte, error) {
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	if err := enc.Encode(map[string]interface{}(mv)); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}

+ 323 - 0
vendor/github.com/clbanning/mxj/json.go

@@ -0,0 +1,323 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+)
+
+// ------------------------------ write JSON -----------------------
+
+// Just a wrapper on json.Marshal.
+// If option safeEncoding is'true' then safe encoding of '<', '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) Json(safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.Marshal(mv)
+
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// Just a wrapper on json.MarshalIndent.
+// If option safeEncoding is'true' then safe encoding of '<' , '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.MarshalIndent(mv, prefix, indent)
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// The following implementation is provided for symmetry with NewMapJsonReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// Writes the Map as pretty JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// --------------------------- read JSON -----------------------------
+
+// Decode numericvalues as json.Number type Map values - see encoding/json#Number.
+// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(), 
+// etc.; it does not affect NewMapXml(), etc.  The XML encoders mv.Xml() and mv.XmlIndent()
+// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number
+// value types and the resulting Map can be correctly encoded into a XML object.
+var JsonUseNumber bool
+
+// Just a wrapper on json.Unmarshal
+//	Converting JSON to XML is a simple as:
+//		...
+//		mapVal, merr := mxj.NewMapJson(jsonVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		xmlVal, xerr := mapVal.Xml()
+//		if xerr != nil {
+//			// handle error
+//		}
+// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}],
+// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map.
+// See mxj/j2x/j2x_test.go.
+func NewMapJson(jsonVal []byte) (Map, error) {
+	// empty or nil begets empty
+	if len(jsonVal) == 0 {
+		m := make(map[string]interface{}, 0)
+		return m, nil
+	}
+	// handle a goofy case ...
+	if jsonVal[0] == '[' {
+		jsonVal = []byte(`{"object":` + string(jsonVal) + `}`)
+	}
+	m := make(map[string]interface{})
+	// err := json.Unmarshal(jsonVal, &m)
+	buf := bytes.NewReader(jsonVal)
+	dec := json.NewDecoder(buf)
+	if JsonUseNumber {
+		dec.UseNumber()
+	}
+	err := dec.Decode(&m)
+	return m, err
+}
+
+// Retrieve a Map value from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object.
+func NewMapJsonReader(jsonReader io.Reader) (Map, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	return NewMapJson(*jb)
+}
+
+// Retrieve a Map value and raw JSON - []byte - from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object and retrieve the raw JSON in a single call.
+func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, *jb, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	m, merr := NewMapJson(*jb)
+	return m, *jb, merr
+}
+
+// Pull the next JSON string off the stream: just read from first '{' to its closing '}'.
+// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package.
+func getJson(rdr io.Reader) (*[]byte, error) {
+	bval := make([]byte, 1)
+	jb := make([]byte, 0)
+	var inQuote, inJson bool
+	var parenCnt int
+	var previous byte
+
+	// scan the input for a matched set of {...}
+	// json.Unmarshal will handle syntax checking.
+	for {
+		_, err := rdr.Read(bval)
+		if err != nil {
+			if err == io.EOF && inJson && parenCnt > 0 {
+				return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb))
+			}
+			return &jb, err
+		}
+		switch bval[0] {
+		case '{':
+			if !inQuote {
+				parenCnt++
+				inJson = true
+			}
+		case '}':
+			if !inQuote {
+				parenCnt--
+			}
+			if parenCnt < 0 {
+				return nil, fmt.Errorf("closing } without opening {: %s", string(jb))
+			}
+		case '"':
+			if inQuote {
+				if previous == '\\' {
+					break
+				}
+				inQuote = false
+			} else {
+				inQuote = true
+			}
+		case '\n', '\r', '\t', ' ':
+			if !inQuote {
+				continue
+			}
+		}
+		if inJson {
+			jb = append(jb, bval[0])
+			if parenCnt == 0 {
+				break
+			}
+		}
+		previous = bval[0]
+	}
+
+	return &jb, nil
+}
+
+// ------------------------------- JSON Reader handler via Map values  -----------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for imput.
+var jhandlerPollInterval = time.Duration(1e6)
+
+// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader().
+// This avoids treating one or other as a special case and discussing the underlying stdlib logic.
+
+// Bulk process JSON using handlers that process a Map value.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader  processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapJsonReader(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// Bulk process JSON using handlers that process a Map value and the raw JSON.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapJsonReaderRaw(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}

+ 668 - 0
vendor/github.com/clbanning/mxj/keyvalues.go

@@ -0,0 +1,668 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+//	keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters.
+
+package mxj
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ----------------------------- get everything FOR a single key -------------------------
+
const (
	minArraySize = 32
)

// defaultArraySize is the initial capacity of the result buffers used by
// ValuesForKey() and ValuesForPath(); adjustable via SetArraySize().
var defaultArraySize int = minArraySize

// SetArraySize adjusts the buffers for the expected number of values to return
// from ValuesForKey() and ValuesForPath().  Sizing these buffers up front can
// significantly reduce allocation-and-copy overhead for large data sets.
// The effective buffer size is returned; values at or below the minimum (32)
// reset the size to the minimum.
func SetArraySize(size int) int {
	defaultArraySize = minArraySize
	if size > minArraySize {
		defaultArraySize = size
	}
	return defaultArraySize
}
+
+// ValuesForKey return all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match.
+// On error, the returned slice is 'nil'. NOTE: 'key' can be wildcard, "*".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - If the 'key' refers to a list, then "key:value" could select a list member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion critera - e.g., "!author:William T. Gaddis".
+//             - If val contains ":" symbol, use SetFieldSeparator to a unused symbol, perhaps "|".
+func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ret := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	hasKey(m, key, &ret, &cnt, subKeyMap)
+	return ret[:cnt], nil
+}
+
+var KeyNotExistError = errors.New("Key does not exist")
+
+// ValueForKey is a wrapper on ValuesForKey.  It returns the first member of []interface{}, if any.
+// If there is no value, "nil, nil" is returned.
+func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) {
+	vals, err := mv.ValuesForKey(key, subkeys...)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, KeyNotExistError
+	}
+	return vals[0], nil
+}
+
// hasKey - if the map 'key' exists append it to array
//          if it doesn't do nothing except scan array and map values
// 'ret' and 'cnt' accumulate matches across the recursive walk; 'subkeys'
// (possibly nil) filters candidate values via hasSubKeys().
func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) {
	// func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) {
	switch iv.(type) {
	case map[string]interface{}:
		vv := iv.(map[string]interface{})
		// see if the current value is of interest
		if v, ok := vv[key]; ok {
			switch v.(type) {
			case map[string]interface{}:
				if hasSubKeys(v, subkeys) {
					*ret = append(*ret, v)
					*cnt++
				}
			case []interface{}:
				// a list value is flattened: each member is appended individually
				for _, av := range v.([]interface{}) {
					if hasSubKeys(av, subkeys) {
						*ret = append(*ret, av)
						*cnt++
					}
				}
			default:
				// scalars can't have subkeys, so they only match when none were requested
				if len(subkeys) == 0 {
					*ret = append(*ret, v)
					*cnt++
				}
			}
		}

		// wildcard case - "*" matches every key at this level
		if key == "*" {
			for _, v := range vv {
				switch v.(type) {
				case map[string]interface{}:
					if hasSubKeys(v, subkeys) {
						*ret = append(*ret, v)
						*cnt++
					}
				case []interface{}:
					for _, av := range v.([]interface{}) {
						if hasSubKeys(av, subkeys) {
							*ret = append(*ret, av)
							*cnt++
						}
					}
				default:
					if len(subkeys) == 0 {
						*ret = append(*ret, v)
						*cnt++
					}
				}
			}
		}

		// scan the rest - recurse into every value so deeper occurrences of 'key' are found
		for _, v := range vv {
			hasKey(v, key, ret, cnt, subkeys)
		}
	case []interface{}:
		for _, v := range iv.([]interface{}) {
			hasKey(v, key, ret, cnt, subkeys)
		}
	}
}
+
+// -----------------------  get everything for a node in the Map ---------------------------
+
+// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.)
+// 2014.04.28 - implementation note.
+// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion
+// of wildcards and unindexed arrays.  Embedding such logic into valuesForKeyPath() would have made the
+// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead.
+
// ValuesForPath retrieves all values for a path from the Map.  If len(returned_values) == 0, then no match.
// On error, the returned array is 'nil'.
//   'path' is a dot-separated path of key values.
//          - If a node in the path is '*', then everything beyond is walked.
//          - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" -
//            even "*[2].*[0].field".
//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
//             - For attributes prefix the label with the attribute prefix character, by default a
//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
//             - If the 'path' refers to a list, then "tag:value" would return member of the list.
//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
//             - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an
//               exclusion critera - e.g., "!author:William T. Gaddis".
//             - If val contains ":" symbol, use SetFieldSeparator to a unused symbol, perhaps "|".
func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
	// If there are no array indexes in path, use legacy ValuesForPath() logic.
	if strings.Index(path, "[") < 0 {
		return mv.oldValuesForPath(path, subkeys...)
	}

	var subKeyMap map[string]interface{}
	if len(subkeys) > 0 {
		var err error
		subKeyMap, err = getSubKeyMap(subkeys...)
		if err != nil {
			return nil, err
		}
	}

	// tokenize the path into key components with optional array indexes
	keys, kerr := parsePath(path)
	if kerr != nil {
		return nil, kerr
	}

	// walk the Map, expanding wildcards and un-indexed arrays as needed
	vals, verr := valuesForArray(keys, mv)
	if verr != nil {
		return nil, verr // Vals may be nil, but return empty array.
	}

	// Need to handle subkeys ... only return members of vals that satisfy conditions.
	retvals := make([]interface{}, 0)
	for _, v := range vals {
		if hasSubKeys(v, subKeyMap) {
			retvals = append(retvals, v)
		}
	}
	return retvals, nil
}
+
// valuesForArray walks 'keys' (a parsed dot-notation path) through Map 'm',
// expanding wildcards and un-indexed arrays along the way, and returns the
// matching values.  Subkey filtering is deliberately deferred to the caller.
func valuesForArray(keys []*key, m Map) ([]interface{}, error) {
	var tmppath string
	var haveFirst bool
	var vals []interface{}
	var verr error

	lastkey := len(keys) - 1
	for i := 0; i <= lastkey; i++ {
		// accumulate the dot-notation path walked so far
		if !haveFirst {
			tmppath = keys[i].name
			haveFirst = true
		} else {
			tmppath += "." + keys[i].name
		}

		// Look-ahead: explode wildcards and unindexed arrays.
		// Need to handle un-indexed list recursively:
		// e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]".
		// Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ...
		if !keys[i].isArray && i < lastkey && keys[i+1].isArray {
			// Can't pass subkeys because we may not be at literal end of path.
			vv, vverr := m.oldValuesForPath(tmppath)
			if vverr != nil {
				return nil, vverr
			}
			for _, v := range vv {
				// See if we can walk the value.
				am, ok := v.(map[string]interface{})
				if !ok {
					continue
				}
				// Work the backend.
				nvals, nvalserr := valuesForArray(keys[i+1:], Map(am))
				if nvalserr != nil {
					return nil, nvalserr
				}
				vals = append(vals, nvals...)
			}
			break // have recursed the whole path - return
		}

		if keys[i].isArray || i == lastkey {
			// Don't pass subkeys because may not be at literal end of path.
			vals, verr = m.oldValuesForPath(tmppath)
		} else {
			continue
		}
		if verr != nil {
			return nil, verr
		}

		if i == lastkey && !keys[i].isArray {
			break
		}

		// Now we're looking at an array - supposedly.
		// Is index in range of vals?
		if len(vals) <= keys[i].position {
			vals = nil
			break
		}

		// Return the array member of interest, if at end of path.
		if i == lastkey {
			vals = vals[keys[i].position:(keys[i].position + 1)]
			break
		}

		// Extract the array member of interest.
		am := vals[keys[i].position:(keys[i].position + 1)]

		// must be a map[string]interface{} value so we can keep walking the path
		amm, ok := am[0].(map[string]interface{})
		if !ok {
			vals = nil
			break
		}

		// continue the walk relative to the selected array member;
		// reset the path accumulator since 'm' is now a new root
		m = Map(amm)
		haveFirst = false
	}

	return vals, nil
}
+
// key is one parsed component of a dot-notation path; an optional "[N]"
// suffix marks an indexed array reference.
type key struct {
	name     string // the map key for this path component
	isArray  bool   // true when the component carried an "[N]" index
	position int    // the parsed index N (only meaningful when isArray)
}

// parsePath splits a dot-notation path - e.g., "a.b[2].c" - into its components.
// Empty components (from leading/trailing/doubled dots) are dropped.  An error
// is returned for a malformed index: an empty index, a non-integer index, or a
// negative index (which would otherwise slip past the len() range check in
// valuesForArray and panic when slicing the value list).
func parsePath(s string) ([]*key, error) {
	keys := strings.Split(s, ".")

	ret := make([]*key, 0)

	for i := 0; i < len(keys); i++ {
		if keys[i] == "" {
			continue
		}

		newkey := new(key)
		if !strings.Contains(keys[i], "[") {
			newkey.name = keys[i]
			ret = append(ret, newkey)
			continue
		}

		p := strings.Split(keys[i], "[")
		newkey.name = p[0]
		p = strings.Split(p[1], "]")
		if p[0] == "" { // no right bracket
			return nil, fmt.Errorf("no right bracket on key index: %s", keys[i])
		}
		// convert p[0] to an int value
		pos, nerr := strconv.ParseInt(p[0], 10, 32)
		if nerr != nil {
			return nil, fmt.Errorf("cannot convert index to int value: %s", p[0])
		}
		if pos < 0 {
			// guard: a negative position would panic later when used to slice vals
			return nil, fmt.Errorf("array index cannot be negative: %s", p[0])
		}
		newkey.position = int(pos)
		newkey.isArray = true
		ret = append(ret, newkey)
	}

	return ret, nil
}
+
+// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'.
+func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys := strings.Split(path, ".")
+	if keys[len(keys)-1] == "" {
+		keys = keys[:len(keys)-1]
+	}
+	ivals := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap)
+	return ivals[:cnt], nil
+}
+
// valuesForKeyPath recursively walks 'm' following 'keys' (the remaining path
// components), appending matches to 'ret' and bumping 'cnt'.  'subkeys'
// (possibly nil) filters terminal values via hasSubKeys().
func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) {
	lenKeys := len(keys)

	// load 'm' values into 'ret'
	// expand any lists
	if lenKeys == 0 {
		switch m.(type) {
		case map[string]interface{}:
			if subkeys != nil {
				if ok := hasSubKeys(m, subkeys); !ok {
					return
				}
			}
			*ret = append(*ret, m)
			*cnt++
		case []interface{}:
			for i, v := range m.([]interface{}) {
				if subkeys != nil {
					if ok := hasSubKeys(v, subkeys); !ok {
						continue // only load list members with subkeys
					}
				}
				*ret = append(*ret, (m.([]interface{}))[i])
				*cnt++
			}
		default:
			if subkeys != nil {
				return // must be map[string]interface{} if there are subkeys
			}
			*ret = append(*ret, m)
			*cnt++
		}
		return
	}

	// key of interest
	key := keys[0]
	switch key {
	case "*": // wildcard - scan all values
		switch m.(type) {
		case map[string]interface{}:
			for _, v := range m.(map[string]interface{}) {
				// valuesForKeyPath(ret, v, keys[1:], subkeys)
				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
			}
		case []interface{}:
			for _, v := range m.([]interface{}) {
				switch v.(type) {
				// flatten out a list of maps - keys are processed
				case map[string]interface{}:
					for _, vv := range v.(map[string]interface{}) {
						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
					}
				default:
					// valuesForKeyPath(ret, v, keys[1:], subkeys)
					valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
				}
			}
		}
	default: // key - must be map[string]interface{}
		switch m.(type) {
		case map[string]interface{}:
			if v, ok := m.(map[string]interface{})[key]; ok {
				// valuesForKeyPath(ret, v, keys[1:], subkeys)
				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
			}
		case []interface{}: // may be buried in list
			for _, v := range m.([]interface{}) {
				switch v.(type) {
				case map[string]interface{}:
					if vv, ok := v.(map[string]interface{})[key]; ok {
						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
					}
				}
			}
		}
	}
}
+
// hasSubKeys() - interface{} equality works for string, float64, bool
// 'v' must be a map[string]interface{} value to have subkeys
// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard.
// Returns true when every entry in 'subkeys' is satisfied by 'v' (and every
// '!'-prefixed NOT-entry is NOT satisfied).
func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool {
	// no constraints: everything matches
	if len(subkeys) == 0 {
		return true
	}

	switch v.(type) {
	case map[string]interface{}:
		// do all subKey name:value pairs match?
		mv := v.(map[string]interface{})
		for skey, sval := range subkeys {
			isNotKey := false
			if skey[:1] == "!" { // a NOT-key
				skey = skey[1:]
				isNotKey = true
			}
			vv, ok := mv[skey]
			if !ok { // key doesn't exist
				if isNotKey { // key not there, but that's what we want
					if kv, ok := sval.(string); ok && kv == "*" {
						continue
					}
				}
				return false
			}
			// wildcard check
			if kv, ok := sval.(string); ok && kv == "*" {
				if isNotKey { // key is there, and we don't want it
					return false
				}
				continue
			}
			// typed value check - string, bool, and float64 are the only
			// value types getSubKeyMap() produces
			switch sval.(type) {
			case string:
				if s, ok := vv.(string); ok && s == sval.(string) {
					if isNotKey {
						return false
					}
					continue
				}
			case bool:
				if b, ok := vv.(bool); ok && b == sval.(bool) {
					if isNotKey {
						return false
					}
					continue
				}
			case float64:
				if f, ok := vv.(float64); ok && f == sval.(float64) {
					if isNotKey {
						return false
					}
					continue
				}
			}
			// key there but didn't match subkey value
			if isNotKey { // that's what we want
				continue
			}
			return false
		}
		// all subkeys matched
		return true
	}

	// not a map[string]interface{} value, can't have subkeys
	return false
}
+
+// Generate map of key:value entries as map[string]string.
+//	'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'.
+//	If len(kv) == 0, the return is (nil, nil).
+func getSubKeyMap(kv ...string) (map[string]interface{}, error) {
+	if len(kv) == 0 {
+		return nil, nil
+	}
+	m := make(map[string]interface{}, 0)
+	for _, v := range kv {
+		vv := strings.Split(v, fieldSep)
+		switch len(vv) {
+		case 2:
+			m[vv[0]] = interface{}(vv[1])
+		case 3:
+			switch vv[2] {
+			case "string", "char", "text":
+				m[vv[0]] = interface{}(vv[1])
+			case "bool", "boolean":
+				// ParseBool treats "1"==true & "0"==false
+				b, err := strconv.ParseBool(vv[1])
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(b)
+			case "float", "float64", "num", "number", "numeric":
+				f, err := strconv.ParseFloat(vv[1], 64)
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(f)
+			default:
+				return nil, fmt.Errorf("unknown subkey conversion spec: %s", v)
+			}
+		default:
+			return nil, fmt.Errorf("unknown subkey spec: %s", v)
+		}
+	}
+	return m, nil
+}
+
+// -------------------------------  END of valuesFor ... ----------------------------
+
+// ----------------------- locate where a key value is in the tree -------------------
+
+//----------------------------- find all paths to a key --------------------------------
+
+// PathsForKey returns all paths through Map, 'mv', (in dot-notation) that terminate with the specified key.
+// Results can be used with ValuesForPath.
+func (mv Map) PathsForKey(key string) []string {
+	m := map[string]interface{}(mv)
+	breadbasket := make(map[string]bool, 0)
+	breadcrumbs := ""
+
+	hasKeyPath(breadcrumbs, m, key, breadbasket)
+	if len(breadbasket) == 0 {
+		return nil
+	}
+
+	// unpack map keys to return
+	res := make([]string, len(breadbasket))
+	var i int
+	for k := range breadbasket {
+		res[i] = k
+		i++
+	}
+
+	return res
+}
+
+// PathForKeyShortest extracts the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'..
+// Paths are strings using dot-notation.
+func (mv Map) PathForKeyShortest(key string) string {
+	paths := mv.PathsForKey(key)
+
+	lp := len(paths)
+	if lp == 0 {
+		return ""
+	}
+	if lp == 1 {
+		return paths[0]
+	}
+
+	shortest := paths[0]
+	shortestLen := len(strings.Split(shortest, "."))
+
+	for i := 1; i < len(paths); i++ {
+		vlen := len(strings.Split(paths[i], "."))
+		if vlen < shortestLen {
+			shortest = paths[i]
+			shortestLen = vlen
+		}
+	}
+
+	return shortest
+}
+
// hasKeyPath records, in 'basket', every dot-notation trail through 'iv' that
// terminates at 'key'.  'crumbs' is the path walked so far.  This is really
// just a breadcrumber that saves all trails that hit the prescribed 'key'.
func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) {
	join := func(trail, k string) string {
		if trail == "" {
			return k
		}
		return trail + "." + k
	}
	switch vv := iv.(type) {
	case map[string]interface{}:
		if _, ok := vv[key]; ok {
			// this node holds the key - save the completed trail
			basket[join(crumbs, key)] = true
		}
		// walk on down the path, key could occur again at deeper node
		for k, v := range vv {
			hasKeyPath(join(crumbs, k), v, key, basket)
		}
	case []interface{}:
		// crumb-trail doesn't change for list members, pass it on
		for _, v := range vv {
			hasKeyPath(crumbs, v, key, basket)
		}
	}
}
+
+var PathNotExistError = errors.New("Path does not exist")
+
+// ValueForPath wraps ValuesFor Path and returns the first value returned.
+// If no value is found it returns 'nil' and PathNotExistError.
+func (mv Map) ValueForPath(path string) (interface{}, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, PathNotExistError
+	}
+	return vals[0], nil
+}
+
+// ValuesForPathString returns the first found value for the path as a string.
+func (mv Map) ValueForPathString(path string) (string, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return "", err
+	}
+	if len(vals) == 0 {
+		return "", errors.New("ValueForPath: path not found")
+	}
+	val := vals[0]
+	return fmt.Sprintf("%v", val), nil
+}
+
+// ValueOrEmptyForPathString returns the first found value for the path as a string.
+// If the path is not found then it returns an empty string.
+func (mv Map) ValueOrEmptyForPathString(path string) string {
+	str, _ := mv.ValueForPathString(path)
+	return str
+}

+ 112 - 0
vendor/github.com/clbanning/mxj/leafnode.go

@@ -0,0 +1,112 @@
+package mxj
+
+// leafnode.go - return leaf nodes with paths and values for the Map
+// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw
+
+import (
+	"strconv"
+	"strings"
+)
+
const (
	// NoAttributes is a readability constant for the LeafNodes(no_attr) argument.
	NoAttributes = true // suppress LeafNode values that are attributes
)

// LeafNode - a terminal path value in a Map.
// For XML Map values it represents an attribute or simple element value  - of type
// string unless Map was created using Cast flag. For JSON Map values it represents
// a string, numeric, boolean, or null value.
type LeafNode struct {
	Path  string      // a dot-notation representation of the path with array subscripting
	Value interface{} // the value at the path termination
}
+
+// LeafNodes - returns an array of all LeafNode values for the Map.
+// The option no_attr argument suppresses attribute values (keys with prepended hyphen, '-')
+// as well as the "#text" key for the associated simple element value.
+//
+// PrependAttrWithHypen(false) will result in attributes having .attr-name as 
+// terminal node in 'path' while the path for the element value, itself, will be 
+// the base path w/o "#text". 
+//
+// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax
+// rather than "[N]" syntax.
+func (mv Map) LeafNodes(no_attr ...bool) []LeafNode {
+	var a bool
+	if len(no_attr) == 1 {
+		a = no_attr[0]
+	}
+
+	l := make([]LeafNode, 0)
+	getLeafNodes("", "", map[string]interface{}(mv), &l, a)
+	return l
+}
+
// getLeafNodes recursively walks 'mv', appending a LeafNode to 'l' for every
// terminal (non-map, non-list) value.  'path' is the dot-notation trail so
// far, 'node' the current key (or "[N]" list index), and 'noattr' suppresses
// attribute-prefixed keys and the "#text" key.
func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) {
	// if stripping attributes, then also strip "#text" key
	if !noattr || node != "#text" {
		// list indexes ("[N]") attach without a '.' separator
		if path != "" && node[:1] != "[" {
			path += "."
		}
		path += node
	}
	switch mv.(type) {
	case map[string]interface{}:
		for k, v := range mv.(map[string]interface{}) {
			// if noattr && k[:1] == "-" {
			if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
				continue
			}
			getLeafNodes(path, k, v, l, noattr)
		}
	case []interface{}:
		// list members are labeled ".N" or "[N]" per LeafUseDotNotation()
		for i, v := range mv.([]interface{}) {
			if useDotNotation {
				getLeafNodes(path, strconv.Itoa(i), v, l, noattr)
			} else {
				getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr)
			}
		}
	default:
		// can't walk any further, so create leaf
		n := LeafNode{path, mv}
		*l = append(*l, n)
	}
}
+
+// LeafPaths - all paths that terminate in LeafNode values.
+func (mv Map) LeafPaths(no_attr ...bool) []string {
+	ln := mv.LeafNodes()
+	ss := make([]string, len(ln))
+	for i := 0; i < len(ln); i++ {
+		ss[i] = ln[i].Path
+	}
+	return ss
+}
+
+// LeafValues - all terminal values in the Map.
+func (mv Map) LeafValues(no_attr ...bool) []interface{} {
+	ln := mv.LeafNodes()
+	vv := make([]interface{}, len(ln))
+	for i := 0; i < len(ln); i++ {
+		vv[i] = ln[i].Value
+	}
+	return vv
+}
+
// ====================== utilities ======================

// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I
var useDotNotation bool

// LeafUseDotNotation sets a flag that list members in LeafNode paths
// should be identified using ".N" syntax rather than the default "[N]"
// syntax.  Calling LeafUseDotNotation with no arguments toggles the
// flag on/off; otherwise, the argument sets the flag value 'true'/'false'.
func LeafUseDotNotation(b ...bool) {
	switch len(b) {
	case 0:
		useDotNotation = !useDotNotation
	default:
		useDotNotation = b[0]
	}
}

+ 86 - 0
vendor/github.com/clbanning/mxj/misc.go

@@ -0,0 +1,86 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// misc.go - mimic functions (+others) called out in:
+//          https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ
+// Primarily these methods let you retrive XML structure information.
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Return the root element of the Map. If there is not a single key in Map,
+// then an error is returned.
+func (mv Map) Root() (string, error) {
+	mm := map[string]interface{}(mv)
+	if len(mm) != 1 {
+		return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm))
+	}
+	for k, _ := range mm {
+		return k, nil
+	}
+	return "", nil
+}
+
+// If the path is an element with sub-elements, return a list of the sub-element
+// keys.  (The list is alphabeticly sorted.)  NOTE: Map keys that are prefixed with
+// '-', a hyphen, are considered attributes; see m.Attributes(path).
+func (mv Map) Elements(path string) ([]string, error) {
+	e, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch e.(type) {
+	case map[string]interface{}:
+		ee := e.(map[string]interface{})
+		elems := make([]string, len(ee))
+		var i int
+		for k, _ := range ee {
+			if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
+				continue // skip attributes
+			}
+			elems[i] = k
+			i++
+		}
+		elems = elems[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(elems)
+		return elems, nil
+	}
+	return nil, fmt.Errorf("no elements for path: %s", path)
+}
+
+// If the path is an element with attributes, return a list of the attribute
+// keys.  (The list is alphabeticly sorted.)  NOTE: Map keys that are not prefixed with
+// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the
+// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then
+// there are no identifiable attributes.
+func (mv Map) Attributes(path string) ([]string, error) {
+	a, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch a.(type) {
+	case map[string]interface{}:
+		aa := a.(map[string]interface{})
+		attrs := make([]string, len(aa))
+		var i int
+		for k, _ := range aa {
+			if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 {
+				continue // skip non-attributes
+			}
+			attrs[i] = k[len(attrPrefix):]
+			i++
+		}
+		attrs = attrs[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(attrs)
+		return attrs, nil
+	}
+	return nil, fmt.Errorf("no attributes for path: %s", path)
+}

+ 128 - 0
vendor/github.com/clbanning/mxj/mxj.go

@@ -0,0 +1,128 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+)
+
const (
	Cast         = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast)
	SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding)
)

// Map is a map[string]interface{} with associated XML and JSON utilities.
type Map map[string]interface{}

// New allocates an empty Map.
func New() Map {
	return Map(make(map[string]interface{}))
}

// Old casts a Map back to a plain map[string]interface{}.
func (mv Map) Old() map[string]interface{} {
	return mv
}
+
+// Return a copy of mv as a newly allocated Map.  If the Map only contains string,
+// numeric, map[string]interface{}, and []interface{} values, then it can be thought
+// of as a "deep copy."  Copying a structure (or structure reference) value is subject
+// to the noted restrictions.
+//	NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags
+//	      then only public fields of the structure are in the new Map - and with
+//	      keys that conform to any encoding tag instructions. The structure itself will
+//	      be represented as a map[string]interface{} value.
+func (mv Map) Copy() (Map, error) {
+	// this is the poor-man's deep copy
+	// not efficient, but it works
+	j, jerr := mv.Json()
+	// must handle, we don't know how mv got built
+	if jerr != nil {
+		return nil, jerr
+	}
+	return NewMapJson(j)
+}
+
// --------------- StringIndent ... from x2j.WriteMap -------------

// StringIndent pretty-prints a Map, including the Go type of each value.
// 'offset' is an optional initial indentation level.
func (mv Map) StringIndent(offset ...int) string {
	return writeMap(map[string]interface{}(mv), true, true, offset...)
}

// StringIndentNoTypeInfo pretty-prints a Map without the value type information - just key:value entries.
// 'offset' is an optional initial indentation level.
func (mv Map) StringIndentNoTypeInfo(offset ...int) string {
	return writeMap(map[string]interface{}(mv), false, true, offset...)
}
+
// writeMap - dumps the map[string]interface{} for examination.
// 'typeInfo' causes value type to be printed.
//	'offset' is initial indentation count; typically: Write(m).
// 'root' suppresses the leading newline for the first entry of the outermost map.
func writeMap(m interface{}, typeInfo, root bool, offset ...int) string {
	var indent int
	if len(offset) == 1 {
		indent = offset[0]
	}

	var s string
	switch m.(type) {
	case []interface{}:
		if typeInfo {
			s += "[[]interface{}]"
		}
		// each list member is rendered on its own indented line
		for _, v := range m.([]interface{}) {
			s += "\n"
			for i := 0; i < indent; i++ {
				s += "  "
			}
			s += writeMap(v, typeInfo, false, indent+1)
		}
	case map[string]interface{}:
		// render entries sorted by key for deterministic output
		list := make([][2]string, len(m.(map[string]interface{})))
		var n int
		for k, v := range m.(map[string]interface{}) {
			list[n][0] = k
			list[n][1] = writeMap(v, typeInfo, false, indent+1)
			n++
		}
		sort.Sort(mapList(list))
		for _, v := range list {
			if root {
				root = false
			} else {
				s += "\n"
			}
			for i := 0; i < indent; i++ {
				s += "  "
			}
			s += v[0] + " : " + v[1]
		}
	default:
		// scalar leaf value
		if typeInfo {
			s += fmt.Sprintf("[%T] %+v", m, m)
		} else {
			s += fmt.Sprintf("%+v", m)
		}
	}
	return s
}
+
// ======================== utility ===============

// mapList is a sortable list of [key, rendered-value] pairs, ordered by key;
// it implements sort.Interface for writeMap().
type mapList [][2]string

// Len implements sort.Interface.
func (ml mapList) Len() int { return len(ml) }

// Swap implements sort.Interface.
func (ml mapList) Swap(i, j int) { ml[i], ml[j] = ml[j], ml[i] }

// Less implements sort.Interface; ordering is by the key field.
func (ml mapList) Less(i, j int) bool { return ml[i][0] <= ml[j][0] }

+ 184 - 0
vendor/github.com/clbanning/mxj/newmap.go

@@ -0,0 +1,184 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014, 2018 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// remap.go - build a new Map from the current Map based on keyOld:keyNew mappings
+//            keys can use dot-notation, keyOld can use wildcard, '*'
+//
+// Computational strategy -
+// Using the key path - []string - traverse a new map[string]interface{} and
+// insert the oldVal as the newVal when we arrive at the end of the path.
+// If the type at the end is nil, then that is newVal
+// If the type at the end is a singleton (string, float64, bool) an array is created.
+// If the type at the end is an array, newVal is just appended.
+// If the type at the end is a map, it is inserted if possible or the map value
+//    is converted into an array if necessary.
+
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// (Map)NewMap - create a new Map from data in the current Map.
+//	'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey'
+//	should be the value for 'newKey' in the returned Map.
+//		- 'oldKey' supports dot-notation as described for (Map)ValuesForPath()
+//		- 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays
+//		- "oldKey" is shorthand for the keypair value "oldKey:oldKey"
+//		- "oldKey:" and ":newKey" are invalid keypair values
+//		- if 'oldKey' does not exist in the current Map, it is not written to the new Map.
+//		  "null" is not supported unless it is the current Map.
+//		- see newmap_test.go for several syntax examples
+// 	- mv.NewMap() == mxj.New()
+//
+//	NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc.
+func (mv Map) NewMap(keypairs ...string) (Map, error) {
+	n := make(map[string]interface{}, 0)
+	if len(keypairs) == 0 {
+		return n, nil
+	}
+
+	// loop through the pairs
+	var oldKey, newKey string
+	var path []string
+	for _, v := range keypairs {
+		if len(v) == 0 {
+			continue // just skip over empty keypair arguments
+		}
+
+		// initialize oldKey, newKey and check
+		vv := strings.Split(v, ":")
+		if len(vv) > 2 {
+			return n, errors.New("oldKey:newKey keypair value not valid - " + v)
+		}
+		if len(vv) == 1 {
+			oldKey, newKey = vv[0], vv[0]
+		} else {
+			oldKey, newKey = vv[0], vv[1]
+		}
+		strings.TrimSpace(oldKey)
+		strings.TrimSpace(newKey)
+		if i := strings.Index(newKey, "*"); i > -1 {
+			return n, errors.New("newKey value cannot contain wildcard character - " + v)
+		}
+		if i := strings.Index(newKey, "["); i > -1 {
+			return n, errors.New("newKey value cannot contain indexed arrays - " + v)
+		}
+		if oldKey == "" || newKey == "" {
+			return n, errors.New("oldKey or newKey is not specified - " + v)
+		}
+
+		// get oldKey value
+		oldVal, err := mv.ValuesForPath(oldKey)
+		if err != nil {
+			return n, err
+		}
+		if len(oldVal) == 0 {
+			continue // oldKey has no value, may not exist in mv
+		}
+
+		// break down path
+		path = strings.Split(newKey, ".")
+		if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec
+			path = path[:len(path)-1]
+		}
+
+		addNewVal(&n, path, oldVal)
+	}
+
+	return n, nil
+}
+
// addNewVal navigates 'n' to the end of 'path' and adds 'val' there.
// Missing intermediate nodes are created as maps; existing non-map
// nodes are converted to lists so no existing data is overwritten.
func addNewVal(n *map[string]interface{}, path []string, val []interface{}) {
	// newVal - either singleton or array
	var newVal interface{}
	if len(val) == 1 {
		newVal = val[0] // is type interface{}
	} else {
		newVal = interface{}(val)
	}

	// walk to the position of interest, create it if necessary
	m := (*n)           // initialize map walker
	var k string        // key for m
	lp := len(path) - 1 // when to stop looking
	for i := 0; i < len(path); i++ {
		k = path[i]
		if i == lp {
			break
		}
		var nm map[string]interface{} // holds position of next-map
		switch m[k].(type) {
		case nil: // need a map for next node in path, so go there
			nm = make(map[string]interface{}, 0)
			m[k] = interface{}(nm)
			m = m[k].(map[string]interface{})
		case map[string]interface{}:
			// OK - got somewhere to walk to, go there
			m = m[k].(map[string]interface{})
		case []interface{}:
			// add a map and nm points to new map unless there's already
			// a map in the array, then nm points there
			// The placement of the next value in the array is dependent
			// on the sequence of members - could land on a map or a nil
			// value first.  TODO: how to test this.
			a := make([]interface{}, 0)
			var foundmap bool
			for _, vv := range m[k].([]interface{}) {
				switch vv.(type) {
				case nil: // doesn't appear that this occurs, need a test case
					if foundmap { // use the first one in array
						a = append(a, vv)
						continue
					}
					nm = make(map[string]interface{}, 0)
					a = append(a, interface{}(nm))
					foundmap = true
				case map[string]interface{}:
					if foundmap { // use the first one in array
						a = append(a, vv)
						continue
					}
					nm = vv.(map[string]interface{})
					a = append(a, vv)
					foundmap = true
				default:
					a = append(a, vv)
				}
			}
			// no map found in array
			if !foundmap {
				nm = make(map[string]interface{}, 0)
				a = append(a, interface{}(nm))
			}
			m[k] = interface{}(a) // must insert in map
			m = nm
		default: // it's a string, float, bool, etc.
			// existing scalar: promote it to a list [old-value, new-map]
			// and descend into the new map.
			aa := make([]interface{}, 0)
			nm = make(map[string]interface{}, 0)
			aa = append(aa, m[k], nm)
			m[k] = interface{}(aa)
			m = nm
		}
	}

	// value is nil, array or a singleton of some kind
	// initially m.(type) == map[string]interface{}
	v := m[k]
	switch v.(type) {
	case nil: // initialized
		m[k] = newVal
	case []interface{}:
		// already a list: just append the new value
		a := m[k].([]interface{})
		a = append(a, newVal)
		m[k] = interface{}(a)
	default: // v exists:string, float64, bool, map[string]interface, etc.
		// singleton: convert to a two-member list [old, new]
		a := make([]interface{}, 0)
		a = append(a, v, newVal)
		m[k] = interface{}(a)
	}
}

+ 192 - 0
vendor/github.com/clbanning/mxj/readme.md

@@ -0,0 +1,192 @@
+<h2>mxj - to/from maps, XML and JSON</h2>
+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use mxj/x2j and mxj/j2x packages.
+
+<h4>Related Packages</h4>
+
+https://github.com/clbanning/checkxml provides functions for validating XML data.
+
+<h4>Refactor Encoder - 2020.05.01</h4>
+Issue #70 highlighted that encoding large maps does not scale well, since the original logic used string appends operations. Using bytes.Buffer results in linear scaling for very large XML docs. (Metrics based on MacBook Pro i7 w/ 16 GB.)
+
+	Nodes      m.XML() time
+	54809       12.53708ms
+	109780      32.403183ms
+	164678      59.826412ms
+	482598     109.358007ms
+
+<h4>Refactor Decoder - 2015.11.15</h4>
+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant.  I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that could be deployed on a Raspberry Pi.  Now the decoder is comparable to the stdlib JSON-to-map[string]interface{} decoder in terms of its additional processing overhead relative to decoding to a structure value.  As shown by:
+
+	BenchmarkNewMapXml-4         	  100000	     18043 ns/op
+	BenchmarkNewStructXml-4      	  100000	     14892 ns/op
+	BenchmarkNewMapJson-4        	  300000	      4633 ns/op
+	BenchmarkNewStructJson-4     	  300000	      3427 ns/op
+	BenchmarkNewMapXmlBooks-4    	   20000	     82850 ns/op
+	BenchmarkNewStructXmlBooks-4 	   20000	     67822 ns/op
+	BenchmarkNewMapJsonBooks-4   	  100000	     17222 ns/op
+	BenchmarkNewStructJsonBooks-4	  100000	     15309 ns/op
+
+<h4>Notices</h4>
+
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>]
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq()
+	            and mv.XmlSeq() / mv.XmlSeqIndent().
+	2015-05-20: New: mv.StringIndentNoTypeInfo().
+	            Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+	            mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+	2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+	            (NOTE: PreserveXmlList() is similar and will be here soon.)
+	2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+	2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.
+
+<h4>Basic Unmarshal XML to map[string]interface{}</h4>
+<pre>type Map map[string]interface{}</pre>
+
+Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v':
+<pre>mv := Map(v)</pre>
+
+Unmarshal / marshal XML as a `Map` value, 'mv':
+<pre>mv, err := NewMapXml(xmlValue) // unmarshal
+xmlValue, err := mv.Xml()      // marshal</pre>
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'mv':
+<pre>mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded</pre>
+
+Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`):
+<pre>err := mv.XmlWriter(xmlWriter)
+raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter</pre>
+   
+Also, for prettified output:
+<pre>xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)</pre>
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+<pre>err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))</pre>
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+<pre>mv, err := NewMapStruct(structVal)
+err := mv.Struct(structPointer)</pre>
+
+<h4>Extract / modify Map values</h4>
+To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv', then:
+<pre>paths := mv.PathsForKey(key)
+path := mv.PathForKeyShortest(key)
+values, err := mv.ValuesForKey(key, subkeys)
+values, err := mv.ValuesForPath(path, subkeys)
+count, err := mv.UpdateValuesForPath(newVal, path, subkeys)</pre>
+
+Get everything at once, irrespective of path depth:
+<pre>leafnodes := mv.LeafNodes()
+leafvalues := mv.LeafValues()</pre>
+
+A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML
+or JSON. (Note: keys can use dot-notation.)
+<pre>newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+newXml, err := newMap.Xml()   // for example
+newJson, err := newMap.Json() // ditto</pre>
+
+<h4>Usage</h4>
+
+The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj).
+
+Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions.
+
+<h4>XML parsing conventions</h4>
+
+Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved: 
+      - `<ns:key>something</ns.key>` parses to `map["ns:key"]interface{}{"something"}`
+      - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}`
+
+Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+<h4>XML encoding conventions</h4>
+
+   - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+     NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+           which, then, encode in JSON as `"tag":""` values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+<h4>Running "go test"</h4>
+
+Because there are no guarantees on the sequence map elements are retrieved, the tests have been 
+written for visual verification in most cases.  One advantage is that you can easily use the 
+output from running "go test" as examples of calling the various functions and methods.
+
+<h4>Motivation</h4>
+
+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values.  This is easily done using `json.Unmarshal` from the
+standard Go libraries.  Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages and parses it into a `map[string]interface{}` value and then reuse
+all the JSON-based code.  The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values.  As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary value.  Thus, everything was refactored into the mxj package.
+

+ 37 - 0
vendor/github.com/clbanning/mxj/remove.go

@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Removes the path.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+func remove(m interface{}, path string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	lastKey := lastKey(path)
+	delete(val, lastKey)
+
+	return nil
+}
+
// lastKey returns the final key of a dot-notation path,
// e.g. lastKey("a.b.c") returns "c".
func lastKey(path string) string {
	if i := strings.LastIndex(path, "."); i >= 0 {
		return path[i+1:]
	}
	return path
}
+
// parentPath returns the path with its final key removed,
// e.g. parentPath("a.b.c") returns "a.b".  A single-key path
// yields the empty string.
func parentPath(path string) string {
	if i := strings.LastIndex(path, "."); i >= 0 {
		return path[:i]
	}
	return ""
}

+ 61 - 0
vendor/github.com/clbanning/mxj/rename.go

@@ -0,0 +1,61 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps. 
+// It doesn't work for cases when the key is in a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	var v bool
+	var err error
+	if v, err = mv.Exists(path); err == nil && !v {
+		return errors.New("RenameKey: path not found: " + path)
+	} else if err != nil {
+		return err
+	}
+	if v, err = mv.Exists(parentPath(path) + "." + newName); err == nil && v {
+		return errors.New("RenameKey: key already exists: " + newName)
+	} else if err != nil {
+		return err
+	}
+
+	m := map[string]interface{}(mv)
+	return renameKey(m, path, newName)
+}
+
+func renameKey(m interface{}, path string, newName string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	oldName := lastKey(path)
+	val[newName] = val[oldName]
+	delete(val, oldName)
+
+	return nil
+}
+
// prevValueByPath returns the map that directly contains the last key
// of 'path'.  For example, given m = {a:{b:{c: 3}}} and path "a.b.c"
// it returns the map {c: 3}.
func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) {
	if mm, ok := m.(map[string]interface{}); ok {
		// split off the first path element; the remainder (if any) is
		// resolved recursively against the matching member value
		keys := strings.SplitN(path, ".", 2)
		for k, v := range mm {
			if k != keys[0] {
				continue
			}
			if len(keys) == 1 {
				// final key reached - its container is the answer
				return mm, nil
			}
			return prevValueByPath(v, keys[1])
		}
	}
	return nil, errors.New("prevValueByPath: didn't find path – " + path)
}

+ 26 - 0
vendor/github.com/clbanning/mxj/set.go

@@ -0,0 +1,26 @@
+package mxj
+
+import (
+	"strings"
+)
+
// Sets the value for the path.  The parent of the final path key must
// already exist; if the parent path resolves to nil the request is
// silently ignored.
func (mv Map) SetValueForPath(value interface{}, path string) error {
	// split the final key off; everything before it addresses the parent
	pathAry := strings.Split(path, ".")
	parentPathAry := pathAry[0 : len(pathAry)-1]
	parentPath := strings.Join(parentPathAry, ".")

	val, err := mv.ValueForPath(parentPath)
	if err != nil {
		return err
	}
	if val == nil {
		return nil // we just ignore the request if there's no val
	}

	key := pathAry[len(pathAry)-1]
	// NOTE(review): assumes the parent value is map[string]interface{};
	// if ValueForPath can return a list or scalar here this assertion
	// panics — confirm against ValueForPath's contract.
	cVal := val.(map[string]interface{})
	cVal[key] = value

	return nil
}

+ 20 - 0
vendor/github.com/clbanning/mxj/setfieldsep.go

@@ -0,0 +1,20 @@
+package mxj
+
+// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862
+var fieldSep string = ":"
+
+// SetFieldSeparator changes the default field separator, ":", for the
+// newVal argument in mv.UpdateValuesForPath and the optional 'subkey' arguments
+// in mv.ValuesForKey and mv.ValuesForPath. 
+// 
+// E.g., if the newVal value is "http://blah/blah", setting the field separator
+// to "|" will allow the newVal specification, "<key>|http://blah/blah" to parse
+// properly.  If called with no argument or an empty string value, the field
+// separator is set to the default, ":".
+func SetFieldSeparator(s ...string) {
+	if len(s) == 0 || s[0] == "" {
+		fieldSep = ":" // the default
+		return
+	}
+	fieldSep = s[0]
+}

+ 29 - 0
vendor/github.com/clbanning/mxj/songtext.xml

@@ -0,0 +1,29 @@
+<msg mtype="alert" mpriority="1">
+	<text>help me!</text>
+	<song title="A Long Time" author="Mayer Hawthorne">
+		<verses>
+			<verse name="verse 1" no="1">
+				<line no="1">Henry was a renegade</line>
+				<line no="2">Didn't like to play it safe</line>
+				<line no="3">One component at a time</line>
+				<line no="4">There's got to be a better way</line>
+				<line no="5">Oh, people came from miles around</line>
+				<line no="6">Searching for a steady job</line>
+				<line no="7">Welcome to the Motor Town</line>
+				<line no="8">Booming like an atom bomb</line>
+			</verse>
+			<verse name="verse 2" no="2">
+				<line no="1">Oh, Henry was the end of the story</line>
+				<line no="2">Then everything went wrong</line>
+				<line no="3">And we'll return it to its former glory</line>
+				<line no="4">But it just takes so long</line>
+			</verse>
+		</verses>
+		<chorus>
+			<line no="1">It's going to take a long time</line>
+			<line no="2">It's going to take it, but we'll make it one day</line>
+			<line no="3">It's going to take a long time</line>
+			<line no="4">It's going to take it, but we'll make it one day</line>
+		</chorus>
+	</song>
+</msg>

+ 30 - 0
vendor/github.com/clbanning/mxj/strict.go

@@ -0,0 +1,30 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// strict.go actually addresses setting xml.Decoder attribute
+// values.  This'll let you parse non-standard XML.
+
+package mxj
+
+import (
+	"encoding/xml"
+)
+
+// CustomDecoder can be used to specify xml.Decoder attribute
+// values, e.g., Strict:false, to be used.  By default CustomDecoder
+// is nil.  If CustomDecoder != nil, then mxj.XmlCharsetReader variable is
+// ignored and must be set as part of the CustomDecoder value, if needed.
+//	Usage:
+//		mxj.CustomDecoder = &xml.Decoder{Strict:false}
+var CustomDecoder *xml.Decoder
+
+// useCustomDecoder copy over public attributes from customDecoder
+func useCustomDecoder(d *xml.Decoder) {
+	d.Strict = CustomDecoder.Strict
+	d.AutoClose = CustomDecoder.AutoClose
+	d.Entity = CustomDecoder.Entity
+	d.CharsetReader = CustomDecoder.CharsetReader
+	d.DefaultSpace = CustomDecoder.DefaultSpace
+}
+

+ 54 - 0
vendor/github.com/clbanning/mxj/struct.go

@@ -0,0 +1,54 @@
+// Copyright 2012-2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+
+	// "github.com/fatih/structs"
+)
+
+// Create a new Map value from a structure.  Error returned if argument is not a structure.
+// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map
+// for handling of "structs" tags.
+
+// DEPRECATED - import github.com/fatih/structs and cast result of structs.Map to mxj.Map.
+//	import "github.com/fatih/structs"
+//	...
+//	   sm, err := structs.Map(<some struct>)
+//	   if err != nil {
+//	      // handle error
+//	   }
+//	   m := mxj.Map(sm)
+// Alernatively uncomment the old source and import in struct.go.
+func NewMapStruct(structVal interface{}) (Map, error) {
+	return nil, errors.New("deprecated - see package documentation")
+	/*
+		if !structs.IsStruct(structVal) {
+			return nil, errors.New("NewMapStruct() error: argument is not type Struct")
+		}
+		return structs.Map(structVal), nil
+	*/
+}
+
+// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned
+// if argument is not a pointer or if json.Unmarshal returns an error.
+//	json.Unmarshal structure encoding rules are followed to encode public structure fields.
+func (mv Map) Struct(structPtr interface{}) error {
+	// should check that we're getting a pointer.
+	if reflect.ValueOf(structPtr).Kind() != reflect.Ptr {
+		return errors.New("mv.Struct() error: argument is not type Ptr")
+	}
+
+	m := map[string]interface{}(mv)
+	j, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(j, structPtr)
+}

+ 258 - 0
vendor/github.com/clbanning/mxj/updatevalues.go

@@ -0,0 +1,258 @@
+// Copyright 2012-2014, 2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// updatevalues.go - modify a value based on path and possibly sub-keys
+// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values.
+
+package mxj
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
// Update value based on path and possible sub-key values.
// A count of the number of values changed and any error are returned.
// If the count == 0, then no path (and subkeys) matched.
//	'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified
//	             or a string value "key:value[:type]" where type is "bool" or "num" to cast the value.
//	'path' is dot-notation list of keys to traverse; last key in path can be newVal key
//	       NOTE: 'path' spec does not currently support indexed array references.
//	'subkeys' are "key:value[:type]" entries that must match for path node
//             - For attributes prefix the label with the attribute prefix character, by default a 
//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
//             - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
//	              exclusion criteria - e.g., "!author:William T. Gaddis".
//
//	NOTES:
//		1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value.
//		2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key.
//		3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to an unused symbol,
//	      perhaps "|".
func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) {
	m := map[string]interface{}(mv)

	// extract the subkeys
	var subKeyMap map[string]interface{}
	if len(subkeys) > 0 {
		var err error
		subKeyMap, err = getSubKeyMap(subkeys...)
		if err != nil {
			return 0, err
		}
	}

	// extract key and value from newVal
	var key string
	var val interface{}
	switch newVal.(type) {
	case map[string]interface{}, Map:
		switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec)
		case Map:
			newVal = newVal.(Map).Old()
		}
		if len(newVal.(map[string]interface{})) != 1 {
			return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal)
		}
		// single-entry map: the range loop just captures its key and value
		for key, val = range newVal.(map[string]interface{}) {
		}
	case string: // split it as a key:value pair
		ss := strings.Split(newVal.(string), fieldSep)
		n := len(ss)
		if n < 2 || n > 3 {
			return 0, fmt.Errorf("unknown newVal spec - %+v", newVal)
		}
		key = ss[0]
		if n == 2 {
			val = interface{}(ss[1])
		} else if n == 3 {
			// optional third field casts the value: "key:value:type"
			switch ss[2] {
			case "bool", "boolean":
				nv, err := strconv.ParseBool(ss[1])
				if err != nil {
					return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal)
				}
				val = interface{}(nv)
			case "num", "numeric", "float", "int":
				nv, err := strconv.ParseFloat(ss[1], 64)
				if err != nil {
					return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal)
				}
				val = interface{}(nv)
			default:
				return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal)
			}
		}
	default:
		return 0, fmt.Errorf("invalid newVal type - %+v", newVal)
	}

	// parse path
	keys := strings.Split(path, ".")

	var count int
	updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count)

	return count, nil
}
+
+// navigate the path
+func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) {
+	// ----- at end node: looking at possible node to get 'key' ----
+	if len(keys) == 1 {
+		updateValue(key, value, m, keys[0], subkeys, cnt)
+		return
+	}
+
+	// ----- here we are navigating the path thru the penultimate node --------
+	// key of interest is keys[0] - the next in the path
+	switch keys[0] {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				default:
+					updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[keys[0]]; ok {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[keys[0]]; ok {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				}
+			}
+		}
+	}
+}
+
// change value if key and subkeys are present
// updateValue applies 'value' for 'key' at the path-end node 'm' (whose
// relevant entry is keys0), provided the subkey constraints match.
// Each replacement increments *cnt.
func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) {
	// there are two possible options for the value of 'keys0': map[string]interface, []interface{}
	// and 'key' is a key in the map or is a key in a map in a list.
	switch m.(type) {
	case map[string]interface{}: // gotta have the last key
		if keys0 == "*" {
			// wildcard terminal key: try every key of the map
			for k := range m.(map[string]interface{}) {
				updateValue(key, value, m, k, subkeys, cnt)
			}
			return
		}
		endVal, _ := m.(map[string]interface{})[keys0]

		// if newV key is the end of path, replace the value for path-end
		// may be []interface{} - means replace just an entry w/ subkeys
		// otherwise replace the keys0 value if subkeys are there
		// NOTE: this will replace the subkeys, also
		if key == keys0 {
			switch endVal.(type) {
			case map[string]interface{}:
				if hasSubKeys(m, subkeys) {
					(m.(map[string]interface{}))[keys0] = value
					(*cnt)++
				}
			case []interface{}:
				// without subkeys can't select list member to modify
				// so key:value spec is it ...
				if hasSubKeys(m, subkeys) {
					(m.(map[string]interface{}))[keys0] = value
					(*cnt)++
					break
				}
				// with subkeys: rebuild the list, replacing each member
				// whose subkeys match
				nv := make([]interface{}, 0)
				var valmodified bool
				for _, v := range endVal.([]interface{}) {
					// check entry subkeys
					if hasSubKeys(v, subkeys) {
						// replace v with value
						nv = append(nv, value)
						valmodified = true
						(*cnt)++
						continue
					}
					nv = append(nv, v)
				}
				if valmodified {
					(m.(map[string]interface{}))[keys0] = interface{}(nv)
				}
			default: // anything else is a strict replacement
				if hasSubKeys(m, subkeys) {
					(m.(map[string]interface{}))[keys0] = value
					(*cnt)++
				}
			}
			return
		}

		// so value is for an element of endVal
		// if endVal is a map then 'key' must be there w/ subkeys
		// if endVal is a list then 'key' must be in a list member w/ subkeys
		switch endVal.(type) {
		case map[string]interface{}:
			if !hasSubKeys(endVal, subkeys) {
				return
			}
			if _, ok := (endVal.(map[string]interface{}))[key]; ok {
				(endVal.(map[string]interface{}))[key] = value
				(*cnt)++
			}
		case []interface{}: // keys0 points to a list, check subkeys
			for _, v := range endVal.([]interface{}) {
				// got to be a map so we can replace value for 'key'
				vv, vok := v.(map[string]interface{})
				if !vok {
					continue
				}
				if _, ok := vv[key]; !ok {
					continue
				}
				if !hasSubKeys(vv, subkeys) {
					continue
				}
				vv[key] = value
				(*cnt)++
			}
		}
	case []interface{}: // key may be in a list member
		// don't need to handle keys0 == "*"; we're looking at everything, anyway.
		for _, v := range m.([]interface{}) {
			// only map values - we're looking for 'key'
			mm, ok := v.(map[string]interface{})
			if !ok {
				continue
			}
			if _, ok := mm[key]; !ok {
				continue
			}
			if !hasSubKeys(mm, subkeys) {
				continue
			}
			mm[key] = value
			(*cnt)++
		}
	}

	// return
}
+ 1324 - 0
vendor/github.com/clbanning/mxj/xml.go

@@ -0,0 +1,1324 @@
+// Copyright 2012-2016, 2018-2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xml.go - basically the core of X2j for map[string]interface{} values.
+//          NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter
+// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ------------------- NewMapXml & NewMapXmlReader ... -------------------------
+
// XmlCharsetReader, if non-nil, is used to decode the XML when a non-UTF-8
// charset is declared. Note: if CustomDecoder != nil, then XmlCharsetReader
// is ignored; set the CustomDecoder attribute instead.
//   import (
//	     charset "code.google.com/p/go-charset/charset"
//	     "github.com/clbanning/mxj"
//	 )
//   ...
//   mxj.XmlCharsetReader = charset.NewReader
//   m, merr := mxj.NewMapXml(xmlValue)
var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+// NewMapXml - convert a XML doc into a Map
+// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().)
+//	If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+//
+//	Converting XML to JSON is a simple as:
+//		...
+//		mapVal, merr := mxj.NewMapXml(xmlVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		jsonVal, jerr := mapVal.Json()
+//		if jerr != nil {
+//			// handle error
+//		}
+//
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlToMap(xmlVal, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlReaderToMap(xmlReader, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value and slice with the raw XML.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	   3. The 'raw' return value may be larger than the XML text value.
+//	   4. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   5. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   6. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb) // see code at EOF
+
+	m, err := xmlReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	if err != nil {
+		return nil, b, err
+	}
+
+	return m, b, nil
+}
+
+// xmlReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// xmlToMap - convert a XML doc into map[string]interface{} value
+func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// PrependAttrWithHyphen. Prepend attribute tags with a hyphen.
+// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.)
+//	Note:
+//		If 'false', unmarshaling and marshaling is not symmetric. Attributes will be
+//		marshal'd as <attr_tag>attr</attr_tag> and may be part of a list.
+func PrependAttrWithHyphen(v bool) {
+	if v {
+		attrPrefix = "-"
+		lenAttrPrefix = len(attrPrefix)
+		return
+	}
+	attrPrefix = ""
+	lenAttrPrefix = len(attrPrefix)
+}
+
// includeTagSeqNum - see IncludeTagSeqNum. (per Sean Murphy, murphysean84@gmail.com)
var includeTagSeqNum bool

// IncludeTagSeqNum - when 'b' is true, decoding augments each inner tag with a
// "_seq":N key:value pair recording its position when parsed. This is of
// limited usefulness, since list values cannot be tagged with "_seq" without
// changing their depth in the Map; so THIS SHOULD BE USED WITH CAUTION - see
// the test cases. A simple element value becomes {"#text":<value>,"_seq":"N"}
// (with "_seq":N as an int if mxj.Cast is passed), and an element with
// attributes gains a "_seq" entry alongside them.
func IncludeTagSeqNum(b bool) {
	includeTagSeqNum = b
}
+
// lowerCase - see CoerceKeysToLower; all keys will be "lower case".
var lowerCase bool

// CoerceKeysToLower coerces all tag values to keys in lower case. This is
// useful if you've got sources with variable tag capitalization and you want
// to use m.ValuesForKeys(), etc., with the key or path spec in lower case.
//	CoerceKeysToLower() will toggle the coercion flag true|false - on|off
//	CoerceKeysToLower(true|false) will set the coercion flag on|off
//
//	NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as
//	      the associated HandleXmlReader and HandleXmlReaderRaw.
func CoerceKeysToLower(b ...bool) {
	switch len(b) {
	case 0:
		lowerCase = !lowerCase
	case 1:
		lowerCase = b[0]
	}
}
+
// 25jun16: the "prefix" character for XML attribute key labels is configurable:
// the '`-`' constant was replaced by the attrPrefix var, useHyphen by
// attrPrefix = "", plus the SetAttrPrefix(s string) function below.

// attrPrefix is prepended to map keys decoded from XML attributes.
var attrPrefix string = `-` // the default
// lenAttrPrefix caches len(attrPrefix) for the decode hot path.
var lenAttrPrefix int = 1 // the default

// SetAttrPrefix changes the default attribute-key prefix, "-", to 's'.
// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false).
// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.)
func SetAttrPrefix(s string) {
	attrPrefix = s
	lenAttrPrefix = len(s)
}
+
// 18jan17: snakeCaseKeys - see CoerceKeysToSnakeCase; map keys use snake case
// instead of the default hyphenated notation.
var snakeCaseKeys bool

// CoerceKeysToSnakeCase changes the default, false, to the specified value, b;
// with no argument the flag is toggled.
// Note: the attribute prefix will be a hyphen, '-', or what ever string value
// has been specified using SetAttrPrefix.
func CoerceKeysToSnakeCase(b ...bool) {
	switch len(b) {
	case 0:
		snakeCaseKeys = !snakeCaseKeys
	case 1:
		snakeCaseKeys = b[0]
	}
}
+
// 10jan19: castToInt - see CastValuesToInt. Use of pull request #57 is
// conditional - legacy code assumes numeric values are float64.
var castToInt bool

// CastValuesToInt tries to coerce numeric values to int64 or uint64 instead
// of the default float64. Repeated calls with no argument toggle this on/off;
// otherwise the handling is set with the value of 'b'.
func CastValuesToInt(b ...bool) {
	switch len(b) {
	case 0:
		castToInt = !castToInt
	case 1:
		castToInt = b[0]
	}
}
+
// 05feb17: handleXMPPStreamTag - see HandleXMPPStreamTag; supports processing
// XMPP streams (issue #36).
var handleXMPPStreamTag bool

// HandleXMPPStreamTag causes the decoder to parse XMPP <stream:stream>
// elements. If called with no argument, XMPP stream element handling is
// toggled on/off. (See xmppStream_test.go for example.)
//	With NewMapXml, NewMapXmlReader, NewMapXmlReaderRaw the "stream" element
//	is returned as:
//		map["stream"]interface{}{map[-<attrs>]interface{}}.
//	With NewMapSeq, NewMapSeqReader, NewMapSeqReaderRaw it is returned as:
//		map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}}
//		where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.)
func HandleXMPPStreamTag(b ...bool) {
	switch len(b) {
	case 0:
		handleXMPPStreamTag = !handleXMPPStreamTag
	case 1:
		handleXMPPStreamTag = b[0]
	}
}
+
// 21jan18: decodeSimpleValuesAsMap - see DecodeSimpleValuesAsMap; decode all
// values as map["#text":value] (issue #56).
var decodeSimpleValuesAsMap bool

// DecodeSimpleValuesAsMap forces all values to be decoded as
// map["#text":<value>]. If called with no argument, the decoding is toggled
// on/off.
//
// By default the NewMapXml functions decode simple values without attributes
// as map[<tag>:<value>]. This function causes simple values without
// attributes to be decoded the same as simple values with attributes -
// map[<tag>:map["#text":<value>]].
func DecodeSimpleValuesAsMap(b ...bool) {
	switch len(b) {
	case 0:
		decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap
	case 1:
		decodeSimpleValuesAsMap = b[0]
	}
}
+
// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly.
// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one.
// We've removed the intermediate *node tree with the allocation and subsequent rescanning.
//	skey - tag of the element being parsed; "" on the initial (root) call.
//	a    - attributes of that element.
//	p    - decoder the tokens are pulled from.
//	r    - cast simple values per cast() when true.
func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
	// Apply the global key-coercion options to the tag itself.
	if lowerCase {
		skey = strings.ToLower(skey)
	}
	if snakeCaseKeys {
		skey = strings.Replace(skey, "-", "_", -1)
	}

	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
	// Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value.
	var n, na map[string]interface{}
	var seq int // for includeTagSeqNum

	// Allocate maps and load attributes, if any.
	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
	//       to get StartElement then recurse with skey==xml.StartElement.Name.Local
	//       where we begin allocating map[string]interface{} values 'n' and 'na'.
	if skey != "" {
		n = make(map[string]interface{})  // old n
		na = make(map[string]interface{}) // old n.nodes
		if len(a) > 0 {
			for _, v := range a {
				if snakeCaseKeys {
					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
				}
				var key string
				key = attrPrefix + v.Name.Local
				if lowerCase {
					key = strings.ToLower(key)
				}
				// attribute values get the same cast treatment as element values
				na[key] = cast(v.Value, r, key)
			}
		}
	}
	// Return XMPP <stream:stream> message: the stream element never closes,
	// so return its attributes immediately instead of consuming more tokens.
	if handleXMPPStreamTag && skey == "stream" {
		n[skey] = na
		return n, nil
	}

	// Consume tokens until this element's EndElement (or EOF at the root).
	for {
		t, err := p.Token()
		if err != nil {
			if err != io.EOF {
				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
			}
			return nil, err
		}
		switch t.(type) {
		case xml.StartElement:
			tt := t.(xml.StartElement)

			// First call to xmlToMapParser() doesn't pass xml.StartElement - the map key.
			// So when the loop is first entered, the first token is the root tag along
			// with any attributes, which we process here.
			//
			// Subsequent calls to xmlToMapParser() will pass in tag+attributes for
			// processing before getting the next token which is the element value,
			// which is done above.
			if skey == "" {
				return xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
			}

			// If not initializing the map, parse the element.
			// len(nn) == 1, necessarily - it is just an 'n'.
			nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
			if err != nil {
				return nil, err
			}

			// The nn map[string]interface{} value is a na[nn_key] value.
			// We need to see if nn_key already exists - means we're parsing a list.
			// This may require converting na[nn_key] value into []interface{} type.
			// First, extract the key:val for the map - it's a singleton.
			// Note:
			// * if CoerceKeysToLower() called, then key will be lower case.
			// * if CoerceKeysToSnakeCase() called, then key will be converted to snake case.
			var key string
			var val interface{}
			for key, val = range nn {
				break
			}

			// IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element.
			// In theory, we don't need this if len(na) == 1. But, we don't know what might
			// come next - we're only parsing forward.  So if you ask for 'includeTagSeqNum' you
			// get it on every element. (Personally, I never liked this, but I added it on request
			// and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!)
			if includeTagSeqNum {
				switch val.(type) {
				case []interface{}:
					// noop - There's no clean way to handle this w/o changing message structure.
				case map[string]interface{}:
					val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag
					seq++
				case interface{}: // a non-nil simple element: string, float64, bool
					v := map[string]interface{}{"#text": val}
					v["_seq"] = seq
					seq++
					val = v
				}
			}

			// 'na' holding sub-elements of n.
			// See if 'key' already exists.
			// If 'key' exists, then this is a list, if not just add key:val to na.
			if v, ok := na[key]; ok {
				var a []interface{} // NOTE: shadows the attribute parameter 'a' deliberately
				switch v.(type) {
				case []interface{}:
					a = v.([]interface{})
				default: // anything else - note: v.(type) != nil
					a = []interface{}{v}
				}
				a = append(a, val)
				na[key] = a
			} else {
				na[key] = val // save it as a singleton
			}
		case xml.EndElement:
			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
			if len(n) == 0 {
				// If len(na)==0 we have an empty element == "";
				// it has no xml.Attr nor xml.CharData.
				// Note: in original node-tree parser, val defaulted to "";
				// so we always had the default if len(node.nodes) == 0.
				if len(na) > 0 {
					n[skey] = na
				} else {
					n[skey] = "" // empty element
				}
			}
			return n, nil
		case xml.CharData:
			// clean up possible noise
			// NOTE(review): the cutset includes '\b' (backspace) but not '\v'/'\f';
			// presumably intentional noise stripping - confirm against upstream mxj.
			tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
			if len(tt) > 0 {
				if len(na) > 0 || decodeSimpleValuesAsMap {
					na["#text"] = cast(tt, r, "#text")
				} else if skey != "" {
					n[skey] = cast(tt, r, skey)
				} else {
					// per Adrian (http://www.adrianlungu.com/) catch stray text
					// in decoder stream -
					// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
					// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
					// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
					continue
				}
			}
		default:
			// noop - directives, comments, process instructions are ignored
		}
	}
}
+
// castNanInf - see CastNanInf.
var castNanInf bool

// CastNanInf controls casting of "NaN", "Inf" and "-Inf" XML values to
// 'float64' when value casting is requested.
// By default, these values will be decoded as 'string'.
func CastNanInf(b bool) {
	castNanInf = b
}
+
+// cast - try to cast string values to bool or float64
+// 't' is the tag key that can be checked for 'not-casting'
+func cast(s string, r bool, t string) interface{} {
+	if checkTagToSkip != nil && t != "" && checkTagToSkip(t) {
+		// call the check-function here with 't[0]'
+		// if 'true' return s
+		return s
+	}
+
+	if r {
+		// handle nan and inf
+		if !castNanInf {
+			switch strings.ToLower(s) {
+			case "nan", "inf", "-inf":
+				return s
+			}
+		}
+
+		// handle numeric strings ahead of boolean
+		if castToInt {
+			if f, err := strconv.ParseInt(s, 10, 64); err == nil {
+				return f
+			}
+			if f, err := strconv.ParseUint(s, 10, 64); err == nil {
+				return f
+			}
+		}
+
+		if castToFloat {
+			if f, err := strconv.ParseFloat(s, 64); err == nil {
+				return f
+			}
+		}
+
+		// ParseBool treats "1"==true & "0"==false, we've already scanned those
+		// values as float64. See if value has 't' or 'f' as initial screen to
+		// minimize calls to ParseBool; also, see if len(s) < 6.
+		if castToBool {
+			if len(s) > 0 && len(s) < 6 {
+				switch s[:1] {
+				case "t", "T", "f", "F":
+					if b, err := strconv.ParseBool(s); err == nil {
+						return b
+					}
+				}
+			}
+		}
+	}
+	return s
+}
+
// castToFloat - see CastValuesToFloat. (pull request, #59)
var castToFloat = true

// CastValuesToFloat can be used to skip casting to float64 when the "cast"
// argument is 'true' in NewMapXml, etc.  Default is true.
func CastValuesToFloat(b bool) {
	castToFloat = b
}
+
// castToBool - see CastValuesToBool.
var castToBool = true

// CastValuesToBool can be used to skip casting to bool when the "cast"
// argument is 'true' in NewMapXml, etc.  Default is true.
func CastValuesToBool(b bool) {
	castToBool = b
}
+
// checkTagToSkip - switch to address Issue #58; when non-nil it vetoes
// casting for individual tags (see cast()).
var checkTagToSkip func(string) bool

// SetCheckTagToSkipFunc registers 'fn' to test whether the value for a tag
// should be cast to bool or float64 when "cast" argument is 'true'.
// (Dot tag path notation is not supported.)
// NOTE: key may be "#text" if it's a simple element with attributes
//       or "decodeSimpleValuesAsMap == true".
// NOTE: does not apply to NewMapXmlSeq... functions.
func SetCheckTagToSkipFunc(fn func(string) bool) {
	checkTagToSkip = fn
}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------
+
const (
	// DefaultRootTag wraps the output when no root tag can be inferred.
	DefaultRootTag = "doc"
)

// useGoXmlEmptyElemSyntax - see XmlGoEmptyElemSyntax / XmlDefaultEmptyElemSyntax.
var useGoXmlEmptyElemSyntax bool

// XmlGoEmptyElemSyntax - <tag ...></tag> rather than <tag .../>.
//	Go's encoding/xml package marshals empty XML elements as <tag ...></tag>.  By default this package
//	encodes empty elements as <tag .../>.  If you're marshaling Map values that include structures
//	(which are passed to xml.Marshal for encoding), this will let you conform to the standard package.
func XmlGoEmptyElemSyntax() {
	useGoXmlEmptyElemSyntax = true
}

// XmlDefaultEmptyElemSyntax - <tag .../> rather than <tag ...></tag>.
// Returns XML encoding for empty elements to the default package setting,
// reversing the effect of XmlGoEmptyElemSyntax().
func XmlDefaultEmptyElemSyntax() {
	useGoXmlEmptyElemSyntax = false
}
+
+// Encode a Map as XML.  The companion of NewMapXml().
+// The following rules apply.
+//    - The key label "#text" is treated as the value for a simple element with attributes.
+//    - Map keys that begin with a hyphen, '-', are interpreted as attributes.
+//      It is an error if the attribute doesn't have a []byte, string, number, or boolean value.
+//    - Map value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formating
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements with only attribute values or are null are terminated using "/>".
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+//    - To encode empty elements in a syntax consistent with encoding/xml call UseGoXmlEmptyElementSyntax().
+// The attributes tag=value pairs are alphabetized by "tag".  Also, when encoding map[string]interface{} values -
+// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted.
// Xml encodes the Map as XML per the rules documented above.
func (mv Map) Xml(rootTag ...string) ([]byte, error) {
	m := map[string]interface{}(mv)
	var err error
	b := new(bytes.Buffer)
	p := new(pretty) // just a stub - no indentation for compact output

	// Root-tag selection: a single-entry map may use its key as the root tag,
	// an explicit rootTag argument wins, otherwise DefaultRootTag is used.
	if len(m) == 1 && len(rootTag) == 0 {
		for key, value := range m { // single iteration - len(m) == 1
			// if it an array, see if all values are map[string]interface{}
			// we force a new root tag if we'll end up with no key:value in the list
			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
			switch value.(type) {
			case []interface{}:
				for _, v := range value.([]interface{}) {
					switch v.(type) {
					case map[string]interface{}: // noop
					default: // anything else - must wrap the list in DefaultRootTag
						err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
						goto done // skip the single-key encoding below
					}
				}
			}
			err = marshalMapToXmlIndent(false, b, key, value, p)
		}
	} else if len(rootTag) == 1 {
		err = marshalMapToXmlIndent(false, b, rootTag[0], m, p)
	} else {
		err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
	}
done:
	return b.Bytes(), err
}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as  XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as  XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// Writes the Map as pretty XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// --------------  Handle XML stream by processing Map value --------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for imput.
+var xhandlerPollInterval = time.Millisecond
+
+// Bulk process XML using handlers that process a Map value.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapXmlReader(xmlReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			time.Sleep(xhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// Bulk process XML using handlers that process a Map value and the raw XML.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+//	See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader.
+func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapXmlReaderRaw(xmlReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			time.Sleep(xhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// ----------------- END: Handle XML stream by processing Map value --------------
+
+// --------  a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ----------
+
+// This is a clone of io.TeeReader with the additional method t.ReadByte().
+// Thus, this TeeReader is also an io.ByteReader.
+// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written
+// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte()..
+// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with
+// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic.
+
+type teeReader struct {
+	r io.Reader
+	w io.Writer
+	b []byte
+}
+
+func myTeeReader(r io.Reader, w io.Writer) io.Reader {
+	b := make([]byte, 1)
+	return &teeReader{r, w, b}
+}
+
+// need for io.Reader - but we don't use it ...
+func (t *teeReader) Read(p []byte) (int, error) {
+	return 0, nil
+}
+
+func (t *teeReader) ReadByte() (byte, error) {
+	n, err := t.r.Read(t.b)
+	if n > 0 {
+		if _, err := t.w.Write(t.b[:1]); err != nil {
+			return t.b[0], err
+		}
+	}
+	return t.b[0], err
+}
+
+// For use with NewMapXmlReader & NewMapXmlSeqReader.
+type byteReader struct {
+	r io.Reader
+	b []byte
+}
+
+func myByteReader(r io.Reader) io.Reader {
+	b := make([]byte, 1)
+	return &byteReader{r, b}
+}
+
+// Need for io.Reader interface ...
+// Needed if reading a malformed http.Request.Body - issue #38.
+func (b *byteReader) Read(p []byte) (int, error) {
+	return b.r.Read(p)
+}
+
+func (b *byteReader) ReadByte() (byte, error) {
+	_, err := b.r.Read(b.b)
+	if len(b.b) > 0 {
+		return b.b[0], nil
+	}
+	var c byte
+	return c, err
+}
+
+// ----------------------- END: io.TeeReader hack -----------------------------------
+
+// ---------------------- XmlIndent - from j2x package ----------------------------
+
+// Encode a map[string]interface{} as a pretty XML string.
+// See Xml for encoding rules.
+func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	b := new(bytes.Buffer)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p)
+			} else {
+				err = marshalMapToXmlIndent(true, b, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = marshalMapToXmlIndent(true, b, rootTag[0], m, p)
+	} else {
+		err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p)
+	}
+	return b.Bytes(), err
+}
+
// pretty tracks indentation state while emitting indented XML.
type pretty struct {
	indent   string // one indentation step
	cnt      int    // current nesting depth
	padding  string // accumulated leading whitespace (starts at the prefix)
	mapDepth int
	start    int
}

// Indent descends one level: grow the padding by one indent step.
func (p *pretty) Indent() {
	p.cnt++
	p.padding += p.indent
}

// Outdent ascends one level, never going above the starting depth.
func (p *pretty) Outdent() {
	if p.cnt == 0 {
		return
	}
	p.cnt--
	p.padding = p.padding[:len(p.padding)-len(p.indent)]
}
+
// marshalMapToXmlIndent is where the work actually happens: it writes the XML
// element for key/value into b, recursing for nested maps and arrays, with
// optional pretty-printing driven by the state in pp.
// It returns an error if an attribute value is not an atomic type.
// NOTE: 01may20 - replaces mapToXmlIndent(); uses bytes.Buffer instead for string appends.
func marshalMapToXmlIndent(doIndent bool, b *bytes.Buffer, key string, value interface{}, pp *pretty) error {
	var err error
	var endTag bool
	var isSimple bool
	var elen int
	// work on a copy of the pretty-printer state so recursive calls cannot
	// clobber the caller's indentation bookkeeping
	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}

	// per issue #48, 18apr18 - try and coerce maps to map[string]interface{}
	// Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq().
	if reflect.ValueOf(value).Kind() == reflect.Map {
		switch value.(type) {
		case map[string]interface{}:
		default:
			val := make(map[string]interface{})
			vv := reflect.ValueOf(value)
			keys := vv.MapKeys()
			for _, k := range keys {
				val[fmt.Sprint(k)] = vv.MapIndex(k).Interface()
			}
			value = val
		}
	}

	// 14jul20.  The following block of code has become something of a catch all for odd stuff
	// that might be passed in as a result of casting an arbitrary map[<T>]<T> to an mxj.Map
	// value and then call m.Xml or m.XmlIndent. See issue #71 (and #73) for such edge cases.
	switch value.(type) {
	// these types are handled during encoding
	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number:
	case []map[string]interface{}, []string, []float64, []bool, []int, []int32, []int64, []float32, []json.Number:
	case []interface{}:
	default:
		// coerce everything else into a string value
		value = fmt.Sprint(value)
	}

	// start the XML tag with required indentation and padding
	if doIndent {
		if _, err = b.WriteString(p.padding); err != nil {
			return err
		}
	}
	// arrays emit one element per member, so the open tag is written per
	// member in the []interface{} case below, not here
	switch value.(type) {
	case []interface{}:
	default:
		if _, err = b.WriteString(`<` + key); err != nil {
			return err
		}
	}

	switch value.(type) {
	case map[string]interface{}:
		vv := value.(map[string]interface{})
		lenvv := len(vv)
		// scan out attributes - attribute keys have prepended attrPrefix
		attrlist := make([][2]string, len(vv))
		var n int
		var ss string
		for k, v := range vv {
			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
				switch v.(type) {
				case string:
					if xmlEscapeChars {
						ss = escapeChars(v.(string))
					} else {
						ss = v.(string)
					}
					attrlist[n][0] = k[lenAttrPrefix:]
					attrlist[n][1] = ss
				case float64, bool, int, int32, int64, float32, json.Number:
					attrlist[n][0] = k[lenAttrPrefix:]
					attrlist[n][1] = fmt.Sprintf("%v", v)
				case []byte:
					if xmlEscapeChars {
						ss = escapeChars(string(v.([]byte)))
					} else {
						ss = string(v.([]byte))
					}
					attrlist[n][0] = k[lenAttrPrefix:]
					attrlist[n][1] = ss
				default:
					return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v)
				}
				n++
			}
		}
		if n > 0 {
			attrlist = attrlist[:n]
			sort.Sort(attrList(attrlist))
			for _, v := range attrlist {
				if _, err = b.WriteString(` ` + v[0] + `="` + v[1] + `"`); err != nil {
					return err
				}
			}
		}
		// only attributes?
		if n == lenvv {
			if useGoXmlEmptyElemSyntax {
				if _, err = b.WriteString(`</` + key + ">"); err != nil {
					return err
				}
			} else {
				if _, err = b.WriteString(`/>`); err != nil {
					return err
				}
			}
			break
		}

		// simple element? Note: '#text' is an invalid XML tag.
		if v, ok := vv["#text"]; ok && n+1 == lenvv {
			switch v.(type) {
			case string:
				if xmlEscapeChars {
					v = escapeChars(v.(string))
				} else {
					v = v.(string)
				}
			case []byte:
				if xmlEscapeChars {
					v = escapeChars(string(v.([]byte)))
				}
			}
			if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil {
				return err
			}
			endTag = true
			elen = 1
			isSimple = true
			break
		} else if ok {
			// Handle edge case where simple element with attributes
			// is unmarshal'd using NewMapXml() where attribute prefix
			// has been set to "".
			// TODO(clb): should probably scan all keys for invalid chars.
			return fmt.Errorf("invalid attribute key label: #text - due to attributes not being prefixed")
		}

		// close tag with possible attributes
		if _, err = b.WriteString(">"); err != nil {
			return err
		}
		if doIndent {
			// *s += "\n"
			if _, err = b.WriteString("\n"); err != nil {
				return err
			}
		}
		// something more complex
		p.mapDepth++
		// extract the map k:v pairs and sort on key
		elemlist := make([][2]interface{}, len(vv))
		n = 0
		for k, v := range vv {
			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
				continue
			}
			elemlist[n][0] = k
			elemlist[n][1] = v
			n++
		}
		elemlist = elemlist[:n]
		sort.Sort(elemList(elemlist))
		var i int
		for _, v := range elemlist {
			switch v[1].(type) {
			case []interface{}:
			default:
				if i == 0 && doIndent {
					p.Indent()
				}
			}
			i++
			if err := marshalMapToXmlIndent(doIndent, b, v[0].(string), v[1], p); err != nil {
				return err
			}
			switch v[1].(type) {
			case []interface{}: // handled in []interface{} case
			default:
				if doIndent {
					p.Outdent()
				}
			}
			i--
		}
		p.mapDepth--
		endTag = true
		elen = 1 // we do have some content ...
	case []interface{}:
		// special case - found during implementing Issue #23
		if len(value.([]interface{})) == 0 {
			if doIndent {
				if _, err = b.WriteString(p.padding + p.indent); err != nil {
					return err
				}
			}
			if _, err = b.WriteString("<" + key); err != nil {
				return err
			}
			elen = 0
			endTag = true
			break
		}
		for _, v := range value.([]interface{}) {
			if doIndent {
				p.Indent()
			}
			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
				return err
			}
			if doIndent {
				p.Outdent()
			}
		}
		return nil
	case []string:
		// This was added by https://github.com/slotix ... not a type that
		// would be encountered if mv generated from NewMapXml, NewMapJson.
		// Could be encountered in AnyXml(), so we'll let it stay, though
		// it should be merged with case []interface{}, above.
		//quick fix for []string type
		//[]string should be treated exactly as []interface{}
		if len(value.([]string)) == 0 {
			if doIndent {
				if _, err = b.WriteString(p.padding + p.indent); err != nil {
					return err
				}
			}
			if _, err = b.WriteString("<" + key); err != nil {
				return err
			}
			elen = 0
			endTag = true
			break
		}
		for _, v := range value.([]string) {
			if doIndent {
				p.Indent()
			}
			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
				return err
			}
			if doIndent {
				p.Outdent()
			}
		}
		return nil
	case nil:
		// NOTE(review): this branch appears unreachable - the 14jul20 coercion
		// above converts nil to the string "<nil>" via fmt.Sprint, and "<"+key
		// has already been written once before this switch. Confirm against
		// upstream before removing.
		// terminate the tag
		if doIndent {
			// *s += p.padding
			if _, err = b.WriteString(p.padding); err != nil {
				return err
			}
		}
		if _, err = b.WriteString("<" + key); err != nil {
			return err
		}
		endTag, isSimple = true, true
		break
	default: // handle anything - even goofy stuff
		elen = 0
		switch value.(type) {
		case string:
			v := value.(string)
			if xmlEscapeChars {
				v = escapeChars(v)
			}
			elen = len(v)
			if elen > 0 {
				// *s += ">" + v
				if _, err = b.WriteString(">" + v); err != nil {
					return err
				}
			}
		case float64, bool, int, int32, int64, float32, json.Number:
			v := fmt.Sprintf("%v", value)
			elen = len(v) // always > 0
			if _, err = b.WriteString(">" + v); err != nil {
				return err
			}
		case []byte: // NOTE: byte is just an alias for uint8
			// similar to how xml.Marshal handles []byte structure members
			v := string(value.([]byte))
			if xmlEscapeChars {
				v = escapeChars(v)
			}
			elen = len(v)
			if elen > 0 {
				// *s += ">" + v
				if _, err = b.WriteString(">" + v); err != nil {
					return err
				}
			}
		default:
			// reached for the typed-slice values allowed through the coercion
			// above ([]float64, []bool, ...); hand them to encoding/xml
			if _, err = b.WriteString(">"); err != nil {
				return err
			}
			// NOTE(review): the inner 'var err error' shadows the outer err,
			// and on a Marshal failure ">UNKNOWN" is written after the ">"
			// above, yielding a doubled '>' (and elen stays 0, so the end-tag
			// logic emits another '>'). Matches upstream - confirm before changing.
			var v []byte
			var err error
			if doIndent {
				v, err = xml.MarshalIndent(value, p.padding, p.indent)
			} else {
				v, err = xml.Marshal(value)
			}
			if err != nil {
				if _, err = b.WriteString(">UNKNOWN"); err != nil {
					return err
				}
			} else {
				elen = len(v)
				if elen > 0 {
					if _, err = b.Write(v); err != nil {
						return err
					}
				}
			}
		}
		isSimple = true
		endTag = true
	}
	// emit the closing tag; "/>" when the element had no content
	if endTag {
		if doIndent {
			if !isSimple {
				if _, err = b.WriteString(p.padding); err != nil {
					return err
				}
			}
		}
		if elen > 0 || useGoXmlEmptyElemSyntax {
			if elen == 0 {
				if _, err = b.WriteString(">"); err != nil {
					return err
				}
			}
			if _, err = b.WriteString(`</` + key + ">"); err != nil {
				return err
			}
		} else {
			if _, err = b.WriteString(`/>`); err != nil {
				return err
			}
		}
	}
	if doIndent {
		if p.cnt > p.start {
			if _, err = b.WriteString("\n"); err != nil {
				return err
			}
		}
		p.Outdent()
	}

	return nil
}
+
+// ============================ sort interface implementation =================
+
// attrList sorts XML attribute [name, value] pairs by attribute name.
type attrList [][2]string

// Len reports the number of attributes.
func (a attrList) Len() int {
	return len(a)
}

// Swap exchanges the attributes at i and j.
func (a attrList) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

// Less orders attributes lexicographically by name.
// Fixed: uses a strict '<' - sort.Interface requires Less to define a strict
// weak ordering; the previous '<=' made Less(i, i) report true for equal keys,
// violating that contract. Relative order of equal keys is unspecified either way.
func (a attrList) Less(i, j int) bool {
	return a[i][0] < a[j][0]
}
+
// elemList sorts [key, value] element pairs by their string key.
type elemList [][2]interface{}

// Len reports the number of elements.
func (e elemList) Len() int {
	return len(e)
}

// Swap exchanges the elements at i and j.
func (e elemList) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}

// Less orders elements lexicographically by key (which must be a string).
// Fixed: uses a strict '<' - sort.Interface requires Less to define a strict
// weak ordering; the previous '<=' made Less(i, i) report true for equal keys,
// violating that contract. Relative order of equal keys is unspecified either way.
func (e elemList) Less(i, j int) bool {
	return e[i][0].(string) < e[j][0].(string)
}
+
+// ======================== newMapToXmlIndent
+
+func (mv Map) MarshalXml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	// s := new(string)
+	// b := new(strings.Builder)
+	b := new(bytes.Buffer)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = marshalMapToXmlIndent(false, b, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = marshalMapToXmlIndent(false, b, rootTag[0], m, p)
+	} else {
+		err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+	}
+done:
+	return b.Bytes(), err
+}

+ 844 - 0
vendor/github.com/clbanning/mxj/xmlseq.go

@@ -0,0 +1,844 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+)
+
// MapSeq is like Map but contains sequencing indices to allow recovering the original order of
// the XML elements when the map[string]interface{} is marshaled. Element attributes are
// stored as a map["#attr"]map[<attr_key>]map[string]interface{}{"#text":"<value>", "#seq":<attr_index>}
// value instead of denoting the keys with a prefix character.  Also, comments, directives and
// process instructions are preserved.
type MapSeq map[string]interface{}
+
// NoRoot is returned by NewXmlSeq, etc., when a comment, directive or procinstr element is parsed
// in the XML data stream and the element is not contained in an XML object with a root element.
var NoRoot = errors.New("no root key")

// Deprecated: use NoRoot instead.
var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// NewMapXmlSeq converts a XML doc into a MapSeq value with elements id'd with decoding sequence key represented
+// as map["#seq"]<int value>.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with msv.Xml() / msv.XmlIndent().
+//	• attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<aval>, "#seq":<num>}
+//	• all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+//	• lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//	  include a "#seq" k:v pair based on sequence they are decoded.  Thus, XML like:
+//	      <doc>
+//	         <ltag>value 1</ltag>
+//	         <newtag>value 2</newtag>
+//	         <ltag>value 3</ltag>
+//	      </doc>
+//	  is decoded as:
+//	    doc :
+//	      ltag :[[]interface{}]
+//	        [item: 0]
+//	          #seq :[int] 0
+//	          #text :[string] value 1
+//	        [item: 1]
+//	          #seq :[int] 2
+//	          #text :[string] value 3
+//	      newtag :
+//	        #seq :[int] 1
+//	        #text :[string] value 2
+//	  It will encode in proper sequence even though the MapSeq representation merges all "ltag" elements in an array.
+//	• comments - "<!--comment-->" -  are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+//	• directives - "<!text>" - are decoded as map["#directive"]map[#text"]"directive_text" with a "#seq" k:v pair.
+//	• process instructions  - "<?instr?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//	  is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+//	• comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//	  map[string]interface{} and the error value 'NoRoot'.
+//	• note: "<![CDATA[" syntax is lost in xml.Decode parser - and is not handled here, either.
+//	   and: "\r\n" is converted to "\n"
+//
+//	NOTES:
+//	   1. The 'xmlVal' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+//	NAME SPACES:
+//	   1. Keys in the MapSeq value that are parsed from a <name space prefix>:<local name> tag preserve the
+//	      "<prefix>:" notation rather than stripping it as with NewMapXml().
+//	   2. Attribute keys for name space prefix declarations preserve "xmlns:<prefix>" notation.
+// 
+//	ERRORS:
+//	   1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment", 
+//	      "#directive" or #procinst" key.
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
+
+// NewMpaXmlSeqReader returns next XML doc from an io.Reader as a MapSeq value.
+//	NOTES:
+//	   1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+// 
+//	ERRORS:
+//	   1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment", 
+//	      "#directive" or #procinst" key.
+func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlSeqReaderToMap(xmlReader, r)
+}
+
+// NewMapXmlSeqReaderRaw returns the  next XML doc from  an io.Reader as a MapSeq value.
+// Returns MapSeq value, slice with the raw XML, and any error.
+//	NOTES:
+//	   1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	    2. The 'raw' return value may be larger than the XML text value.
+//	    3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	       extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	    4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	       re-encode the message in its original structure.
+//	    5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+// 
+//	ERRORS:
+//	    1. If a NoRoot error, "no root key," is returned, check if the initial map key is "#comment", 
+//	       "#directive" or #procinst" key.
+func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (MapSeq, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb)
+
+	m, err := xmlSeqReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	// err may be NoRoot
+	return m, b, err
+}
+
+// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// xmlSeqToMap - convert a XML doc into map[string]interface{} value
+func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly.
// Add #seq tag value for each element decoded - to be used for Encoding later.
// On the initial call skey=="" and a==nil; the function recurses once per
// element, with skey/a carrying the element's tag and attributes.
// NOTE(review): tokens are read with p.RawToken(), so the decoder performs no
// namespace translation and no well-formedness matching of end tags - the
// EndElement case below checks tag matching itself.
func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
	if snakeCaseKeys {
		skey = strings.Replace(skey, "-", "_", -1)
	}

	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
	var n, na map[string]interface{}
	var seq int // for including seq num when decoding

	// Allocate maps and load attributes, if any.
	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
	//       to get StartElement then recurse with skey==xml.StartElement.Name.Local
	//       where we begin allocating map[string]interface{} values 'n' and 'na'.
	if skey != "" {
		// 'n' only needs one slot - save call to runtime•hashGrow()
		// 'na' we don't know
		n = make(map[string]interface{}, 1)
		na = make(map[string]interface{})
		if len(a) > 0 {
			// xml.Attr is decoded into: map["#attr"]map[<attr_label>]interface{}
			// where interface{} is map[string]interface{}{"#text":<attr_val>, "#seq":<attr_seq>}
			aa := make(map[string]interface{}, len(a))
			for i, v := range a {
				if snakeCaseKeys {
					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
				}
				if len(v.Name.Space) > 0 {
					aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i}
				} else {
					aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i}
				}
			}
			na["#attr"] = aa
		}
	}

	// Return XMPP <stream:stream> message.
	if handleXMPPStreamTag && skey == "stream:stream" {
		n[skey] = na
		return n, nil
	}

	for {
		t, err := p.RawToken()
		if err != nil {
			if err != io.EOF {
				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
			}
			return nil, err
		}
		switch t.(type) {
		case xml.StartElement:
			tt := t.(xml.StartElement)

			// First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key.
			// So when the loop is first entered, the first token is the root tag along
			// with any attributes, which we process here.
			//
			// Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for
			// processing before getting the next token which is the element value,
			// which is done above.
			if skey == "" {
				if len(tt.Name.Space) > 0 {
					return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
				} else {
					return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
				}
			}

			// If not initializing the map, parse the element.
			// len(nn) == 1, necessarily - it is just an 'n'.
			var nn map[string]interface{}
			if len(tt.Name.Space) > 0 {
				nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
			} else {
				nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
			}
			if err != nil {
				return nil, err
			}

			// The nn map[string]interface{} value is a na[nn_key] value.
			// We need to see if nn_key already exists - means we're parsing a list.
			// This may require converting na[nn_key] value into []interface{} type.
			// First, extract the key:val for the map - it's a singleton.
			var key string
			var val interface{}
			for key, val = range nn {
				break
			}

			// add "#seq" k:v pair -
			// Sequence number included even in list elements - this should allow us
			// to properly resequence even something goofy like:
			//     <list>item 1</list>
			//     <subelement>item 2</subelement>
			//     <list>item 3</list>
			// where all the "list" subelements are decoded into an array.
			switch val.(type) {
			case map[string]interface{}:
				val.(map[string]interface{})["#seq"] = seq
				seq++
			case interface{}: // a non-nil simple element: string, float64, bool
				v := map[string]interface{}{"#text": val, "#seq": seq}
				seq++
				val = v
			}

			// 'na' holding sub-elements of n.
			// See if 'key' already exists.
			// If 'key' exists, then this is a list, if not just add key:val to na.
			if v, ok := na[key]; ok {
				var a []interface{}
				switch v.(type) {
				case []interface{}:
					a = v.([]interface{})
				default: // anything else - note: v.(type) != nil
					a = []interface{}{v}
				}
				a = append(a, val)
				na[key] = a
			} else {
				na[key] = val // save it as a singleton
			}
		case xml.EndElement:
			if skey != "" {
				tt := t.(xml.EndElement)
				if snakeCaseKeys {
					tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1)
				}
				var name string
				if len(tt.Name.Space) > 0 {
					name = tt.Name.Space + `:` + tt.Name.Local
				} else {
					name = tt.Name.Local
				}
				// RawToken() does not enforce matching end tags - do it here
				if skey != name {
					return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d",
						skey, name, p.InputOffset())
				}
			}
			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
			if len(n) == 0 {
				// If len(na)==0 we have an empty element == "";
				// it has no xml.Attr nor xml.CharData.
				// Empty element content will be  map["etag"]map["#text"]""
				// after #seq injection - map["etag"]map["#seq"]seq - after return.
				if len(na) > 0 {
					n[skey] = na
				} else {
					n[skey] = "" // empty element
				}
			}
			return n, nil
		case xml.CharData:
			// clean up possible noise
			// NOTE(review): the trim set includes "\b" (backspace) - presumably
			// intentional upstream; confirm before changing.
			tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
			if skey == "" {
				// per Adrian (http://www.adrianlungu.com/) catch stray text
				// in decoder stream -
				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
				continue
			}
			if len(tt) > 0 {
				// every simple element is a #text and has #seq associated with it
				na["#text"] = cast(tt, r, "")
				na["#seq"] = seq
				seq++
			}
		case xml.Comment:
			if n == nil { // no root 'key'
				n = map[string]interface{}{"#comment": string(t.(xml.Comment))}
				return n, NoRoot
			}
			cm := make(map[string]interface{}, 2)
			cm["#text"] = string(t.(xml.Comment))
			cm["#seq"] = seq
			seq++
			na["#comment"] = cm
		case xml.Directive:
			if n == nil { // no root 'key'
				n = map[string]interface{}{"#directive": string(t.(xml.Directive))}
				return n, NoRoot
			}
			dm := make(map[string]interface{}, 2)
			dm["#text"] = string(t.(xml.Directive))
			dm["#seq"] = seq
			seq++
			na["#directive"] = dm
		case xml.ProcInst:
			if n == nil {
				na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)}
				n = map[string]interface{}{"#procinst": na}
				return n, NoRoot
			}
			pm := make(map[string]interface{}, 3)
			pm["#target"] = t.(xml.ProcInst).Target
			pm["#inst"] = string(t.(xml.ProcInst).Inst)
			pm["#seq"] = seq
			seq++
			na["#procinst"] = pm
		default:
			// noop - shouldn't ever get here, now, since we handle all token types
		}
	}
}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// --------------------- mv.XmlSeq & mv.XmlSeqWriter -------------------------
+
+// Xml encodes a MapSeq as XML with elements sorted on #seq.  The companion of NewMapXmlSeq().
+// The following rules apply.
+//    - The "#seq" key value is used to seqence the subelements or attributes only.
+//    - The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+//    - The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+//    - The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+//    - The "#procinst" map key identifies a process instruction in the value "#target" and "#inst"
+//      map entries - <?target inst?>.
+//    - Value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formating
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements with only attribute values or are null are terminated using "/>" unless XmlGoEmptyElemSystax() called.
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+func (mv MapSeq) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlSeqIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	return []byte(*s), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// XmlWriter Writes the MapSeq value as  XML on the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlWriteRaw writes the MapSeq value as XML on the Writer. []byte is the raw XML that was written.
+// See Map.XmlSeq() for encoding rules.
+/*
+func (mv MapSeq) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// XmlIndentWriter writes the MapSeq value as pretty XML on the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlIndentWriterRaw writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Map.XmlSeq() for encoding rules.
+/*
+func (mv MapSeq) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// ---------------------- XmlSeqIndent ----------------------------
+
+// XmlIndent encodes a map[string]interface{} as a pretty XML string.
+// See MapSeq.XmlSeq() for encoding rules.
+func (mv MapSeq) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	return []byte(*s), err
+}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
+	var endTag bool
+	var isSimple bool
+	var noEndTag bool
+	var elen int
+	var ss string
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	switch value.(type) {
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+		if doIndent {
+			*s += p.padding
+		}
+		if key != "#comment" && key != "#directive" && key != "#procinst" {
+			*s += `<` + key
+		}
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		val := value.(map[string]interface{})
+
+		if key == "#comment" {
+			*s += `<!--` + val["#text"].(string) + `-->`
+			noEndTag = true
+			break
+		}
+
+		if key == "#directive" {
+			*s += `<!` + val["#text"].(string) + `>`
+			noEndTag = true
+			break
+		}
+
+		if key == "#procinst" {
+			*s += `<?` + val["#target"].(string) + ` ` + val["#inst"].(string) + `?>`
+			noEndTag = true
+			break
+		}
+
+		haveAttrs := false
+		// process attributes first
+		if v, ok := val["#attr"].(map[string]interface{}); ok {
+			// First, unroll the map[string]interface{} into a []keyval array.
+			// Then sequence it.
+			kv := make([]keyval, len(v))
+			n := 0
+			for ak, av := range v {
+				kv[n] = keyval{ak, av}
+				n++
+			}
+			sort.Sort(elemListSeq(kv))
+			// Now encode the attributes in original decoding sequence, using keyval array.
+			for _, a := range kv {
+				vv := a.v.(map[string]interface{})
+				switch vv["#text"].(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(vv["#text"].(string))
+					} else {
+						ss = vv["#text"].(string)
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				case float64, bool, int, int32, int64, float32:
+					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"`
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(vv["#text"].([]byte)))
+					} else {
+						ss = string(vv["#text"].([]byte))
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				default:
+					return fmt.Errorf("invalid attribute value for: %s", a.k)
+				}
+			}
+			haveAttrs = true
+		}
+
+		// simple element?
+		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
+		_, seqOK := val["#seq"] // have key
+		if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
+			if stmp, ok := v.(string); ok && stmp != "" {
+				if xmlEscapeChars {
+					stmp = escapeChars(stmp)
+				}
+				*s += ">" + stmp
+				endTag = true
+				elen = 1
+			}
+			isSimple = true
+			break
+		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
+			// here no #text but have #seq or #seq+#attr
+			endTag = false
+			break
+		}
+
+		// we now need to sequence everything except attributes
+		// 'kv' will hold everything that needs to be written
+		kv := make([]keyval, 0)
+		for k, v := range val {
+			if k == "#attr" { // already processed
+				continue
+			}
+			if k == "#seq" { // ignore - just for sorting
+				continue
+			}
+			switch v.(type) {
+			case []interface{}:
+				// unwind the array as separate entries
+				for _, vv := range v.([]interface{}) {
+					kv = append(kv, keyval{k, vv})
+				}
+			default:
+				kv = append(kv, keyval{k, v})
+			}
+		}
+
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		sort.Sort(elemListSeq(kv))
+		i := 0
+		for _, v := range kv {
+			switch v.v.(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil {
+				return err
+			}
+			switch v.v.(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content other than attrs
+	case []interface{}:
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			if xmlEscapeChars {
+				ss = escapeChars(value.(string))
+			} else {
+				ss = value.(string)
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		case float64, bool, int, int32, int64, float32:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			if xmlEscapeChars {
+				ss = escapeChars(string(value.([]byte)))
+			} else {
+				ss = string(value.([]byte))
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag && !noEndTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		switch value.(type) {
+		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+			if elen > 0 || useGoXmlEmptyElemSyntax {
+				if elen == 0 {
+					*s += ">"
+				}
+				*s += `</` + key + ">"
+			} else {
+				*s += `/>`
+			}
+		}
+	} else if !noEndTag {
+		if useGoXmlEmptyElemSyntax {
+			*s += `</` + key + ">"
+			// *s += "></" + key + ">"
+		} else {
+			*s += "/>"
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// the element sort implementation
+
// keyval pairs an element key with its decoded value so that elements can
// be ordered by their original decoding sequence.
type keyval struct {
	k string
	v interface{}
}

// elemListSeq sorts []keyval entries by the "#seq" value recorded when the
// document was decoded.
type elemListSeq []keyval

// Len implements sort.Interface.
func (e elemListSeq) Len() int {
	return len(e)
}

// Swap implements sort.Interface.
func (e elemListSeq) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}

// Less implements sort.Interface, ordering entries by ascending "#seq".
// NOTE: the previous implementation returned 'iseq <= jseq'; a Less that
// returns true for equal elements violates sort.Interface's strict weak
// ordering contract (Less(i, i) must be false), so '<' is used instead.
func (e elemListSeq) Less(i, j int) bool {
	return elemSeq(e[i].v) < elemSeq(e[j].v)
}

// elemSeq extracts the decoding-order sequence number from an element
// value, stored under "#seq" as an int or a float64.  Values that are not
// maps or that lack a usable "#seq" sort after all sequenced entries.
// (The previous implementation panicked on non-map values via an
// unchecked type assertion; this treats them as unsequenced instead.)
func elemSeq(v interface{}) int {
	m, _ := v.(map[string]interface{})
	switch n := m["#seq"].(type) {
	case int:
		return n
	case float64:
		return int(n)
	}
	return 9999999
}
+
+// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio
+
// BeautifyXml (re)formats an XML doc similar to Map.XmlIndent().
// It preserves comments, directives and process instructions.
// Any decoding error from parsing b is returned unchanged.
func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) {
	x, err := NewMapXmlSeq(b)
	if err != nil {
		return nil, err
	}
	return x.XmlIndent(prefix, indent)
}

+ 18 - 0
vendor/github.com/clbanning/mxj/xmlseq2.go

@@ -0,0 +1,18 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+// ---------------- expose Map methods to MapSeq type ---------------------------
+
// StringIndent pretty prints a MapSeq, including value type information,
// by delegating to the shared Map writer.
func (msv MapSeq) StringIndent(offset ...int) string {
	return writeMap(map[string]interface{}(msv), true, true, offset...)
}
+
// StringIndentNoTypeInfo pretty prints a MapSeq without the value type
// information - just key:value entries.
func (msv MapSeq) StringIndentNoTypeInfo(offset ...int) string {
	return writeMap(map[string]interface{}(msv), false, true, offset...)
}
+

+ 15 - 0
vendor/github.com/davecgh/go-spew/LICENSE

@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ 145 - 0
vendor/github.com/davecgh/go-spew/spew/bypass.go

@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = false

	// ptrSize is the size of a pointer on the current arch.
	ptrSize = unsafe.Sizeof((*byte)(nil))
)

// flag mirrors the unexported flag field of reflect.Value (a uintptr of
// mode bits); see the init() check below that locates that field.
type flag uintptr

var (
	// flagRO indicates whether the value field of a reflect.Value
	// is read-only.
	flagRO flag

	// flagAddr indicates whether the address of the reflect.Value's
	// value may be taken.
	flagAddr flag
)

// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)

// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations; init() below checks the flags it
// infers at runtime against this table and panics on a mismatch.
var okFlags = []struct {
	ro, addr flag
}{{
	// From Go 1.4 to 1.5
	ro:   1 << 5,
	addr: 1 << 7,
}, {
	// Up to Go tip.
	ro:   1<<5 | 1<<6,
	addr: 1 << 8,
}}
+
// flagValOffset is the byte offset of reflect.Value's unexported "flag"
// field, resolved once at package load; panics if the field is absent.
var flagValOffset = func() uintptr {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	return field.Offset
}()

// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
+
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data.  It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	// Already fully accessible (or invalid) - nothing to bypass.
	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
		return v
	}
	// Clear the read-only bits and set the addressable bit directly in
	// v's flag word; flagRO/flagAddr are inferred in init().
	flagFieldPtr := flagField(&v)
	*flagFieldPtr &^= flagRO
	*flagFieldPtr |= flagAddr
	return v
}
+
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
// Runs at package load so any incompatibility fails fast with a panic.
func init() {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
		panic("reflect.Value flag field has changed kind")
	}
	type t0 int
	var t struct {
		A t0
		// t0 will have flagEmbedRO set.
		t0
		// a will have flagStickyRO set
		a t0
	}
	vA := reflect.ValueOf(t).FieldByName("A")
	va := reflect.ValueOf(t).FieldByName("a")
	vt0 := reflect.ValueOf(t).FieldByName("t0")

	// Infer flagRO from the difference between the flags
	// for the (otherwise identical) fields in t.
	flagPublic := *flagField(&vA)
	flagWithRO := *flagField(&va) | *flagField(&vt0)
	flagRO = flagPublic ^ flagWithRO

	// Infer flagAddr from the difference between a value
	// taken from a pointer and not.
	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
	flagNoPtr := *flagField(&vA)
	flagPtr := *flagField(&vPtrA)
	flagAddr = flagNoPtr ^ flagPtr

	// Check that the inferred flags tally with one of the known versions.
	for _, f := range okFlags {
		if flagRO == f.ro && flagAddr == f.addr {
			return
		}
	}
	panic("reflect.Value read-only flag has changed semantics")
}

+ 38 - 0
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go

@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = true
)

// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data.  However, doing this relies on access to
// the unsafe package.  This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available; the real
// implementation lives in bypass.go.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	return v
}

+ 341 - 0
vendor/github.com/davecgh/go-spew/spew/common.go

@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
// Some constants in the form of bytes to avoid string overhead.  This mirrors
// the technique used in the fmt package.  They are shared by the print
// helpers and formatting code below.
var (
	panicBytes            = []byte("(PANIC=")
	plusBytes             = []byte("+")
	iBytes                = []byte("i")
	trueBytes             = []byte("true")
	falseBytes            = []byte("false")
	interfaceBytes        = []byte("(interface {})")
	commaNewlineBytes     = []byte(",\n")
	newlineBytes          = []byte("\n")
	openBraceBytes        = []byte("{")
	openBraceNewlineBytes = []byte("{\n")
	closeBraceBytes       = []byte("}")
	asteriskBytes         = []byte("*")
	colonBytes            = []byte(":")
	colonSpaceBytes       = []byte(": ")
	openParenBytes        = []byte("(")
	closeParenBytes       = []byte(")")
	spaceBytes            = []byte(" ")
	pointerChainBytes     = []byte("->")
	nilAngleBytes         = []byte("<nil>")
	maxNewlineBytes       = []byte("<max depth reached>\n")
	maxShortBytes         = []byte("<max>")
	circularBytes         = []byte("<already shown>")
	circularShortBytes    = []byte("<shown>")
	invalidAngleBytes     = []byte("<invalid>")
	openBracketBytes      = []byte("[")
	closeBracketBytes     = []byte("]")
	percentBytes          = []byte("%")
	precisionBytes        = []byte(".")
	openAngleBytes        = []byte("<")
	closeAngleBytes       = []byte(">")
	openMapBytes          = []byte("map[")
	closeMapBytes         = []byte("]")
	lenEqualsBytes        = []byte("len=")
	capEqualsBytes        = []byte("cap=")
)

// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.  It reports true when the value was fully handled
// (its error/Stringer output was written and the caller need not recurse).
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
	// We need an interface to check if the type implements the error or
	// Stringer interface.  However, the reflect package won't give us an
	// interface on certain things like unexported struct fields in order
	// to enforce visibility rules.  We use unsafe, when it's available,
	// to bypass these restrictions since this package does not mutate the
	// values.
	if !v.CanInterface() {
		if UnsafeDisabled {
			return false
		}

		v = unsafeReflectValue(v)
	}

	// Choose whether or not to do error and Stringer interface lookups against
	// the base type or a pointer to the base type depending on settings.
	// Technically calling one of these methods with a pointer receiver can
	// mutate the value, however, types which choose to satisfy an error or
	// Stringer interface with a pointer receiver should not be mutating their
	// state inside these interface methods.
	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
		v = unsafeReflectValue(v)
	}
	if v.CanAddr() {
		v = v.Addr()
	}

	// Is it an error or Stringer?
	switch iface := v.Interface().(type) {
	case error:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			// Parenthesize the method output and report unhandled so
			// the value's internals are still dumped afterwards.
			w.Write(openParenBytes)
			w.Write([]byte(iface.Error()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}

		w.Write([]byte(iface.Error()))
		return true

	case fmt.Stringer:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			w.Write(openParenBytes)
			w.Write([]byte(iface.String()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.String()))
		return true
	}
	return false
}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
// printInt outputs a signed integer value, in the given base, to Writer w.
func printInt(w io.Writer, val int64, base int) {
	w.Write([]byte(strconv.FormatInt(val, base)))
}

// printUint outputs an unsigned integer value, in the given base, to Writer w.
func printUint(w io.Writer, val uint64, base int) {
	w.Write([]byte(strconv.FormatUint(val, base)))
}

// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
	values  []reflect.Value
	strings []string // either nil or same len and values
	cs      *ConfigState
}

// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted.  It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
	vs := &valuesSorter{values: values, cs: cs}
	// Primitive kinds compare directly; no surrogate keys needed.
	if canSortSimply(vs.values[0].Kind()) {
		return vs
	}
	if !cs.DisableMethods {
		// Try error/Stringer output as the sort keys; abandon the
		// surrogate keys entirely if any element lacks such a method.
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			b := bytes.Buffer{}
			if !handleMethods(cs, &b, vs.values[i]) {
				vs.strings = nil
				break
			}
			vs.strings[i] = b.String()
		}
	}
	// Last resort: spew each key to a string (only when configured).
	if vs.strings == nil && cs.SpewKeys {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
		}
	}
	return vs
}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
// Len returns the number of values in the slice.  It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
	return len(s.values)
}

// Swap swaps the values at the passed indices.  It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	// Keep the surrogate keys aligned with the values they describe.
	if s.strings != nil {
		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
	}
}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
// Less returns whether the value at index i should sort before the
// value at index j.  It is part of the sort.Interface implementation.
// Surrogate string keys, when present, take precedence over direct
// value comparison.
func (s *valuesSorter) Less(i, j int) bool {
	if s.strings == nil {
		return valueSortLess(s.values[i], s.values[j])
	}
	return s.strings[i] < s.strings[j]
}
+
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer.  Other inputs are sorted according to
// their Value.String() value to ensure display stability.  It sorts the
// slice in place and is a no-op for an empty slice.
func sortValues(values []reflect.Value, cs *ConfigState) {
	if len(values) == 0 {
		return
	}
	sort.Sort(newValuesSorter(values, cs))
}

+ 306 - 0
vendor/github.com/davecgh/go-spew/spew/config.go

@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
type ConfigState struct {
	// Indent specifies the string to use for each indentation level.  The
	// global config instance that all top-level functions use set this to a
	// single space by default.  If you would like more indentation, you might
	// set this to a tab with "\t" or perhaps two spaces with "  ".
	Indent string

	// MaxDepth controls the maximum number of levels to descend into nested
	// data structures.  The default, 0, means there is no limit.
	//
	// NOTE: Circular data structures are properly detected, so it is not
	// necessary to set this value unless you specifically want to limit deeply
	// nested data structures.
	MaxDepth int

	// DisableMethods specifies whether or not error and Stringer interfaces are
	// invoked for types that implement them.
	DisableMethods bool

	// DisablePointerMethods specifies whether or not to check for and invoke
	// error and Stringer interfaces on types which only accept a pointer
	// receiver when the current type is not a pointer.
	//
	// NOTE: This might be an unsafe action since calling one of these methods
	// with a pointer receiver could technically mutate the value, however,
	// in practice, types which choose to satisfy an error or Stringer
	// interface with a pointer receiver should not be mutating their state
	// inside these interface methods.  As a result, this option relies on
	// access to the unsafe package, so it will not have any effect when
	// running in environments without access to the unsafe package such as
	// Google App Engine or with the "safe" build tag specified.
	DisablePointerMethods bool

	// DisablePointerAddresses specifies whether to disable the printing of
	// pointer addresses. This is useful when diffing data structures in tests.
	DisablePointerAddresses bool

	// DisableCapacities specifies whether to disable the printing of capacities
	// for arrays, slices, maps and channels. This is useful when diffing
	// data structures in tests.
	DisableCapacities bool

	// ContinueOnMethod specifies whether or not recursion should continue once
	// a custom error or Stringer interface is invoked.  The default, false,
	// means it will print the results of invoking the custom error or Stringer
	// interface and return immediately instead of continuing to recurse into
	// the internals of the data type.
	//
	// NOTE: This flag does not have any effect if method invocation is disabled
	// via the DisableMethods or DisablePointerMethods options.
	ContinueOnMethod bool

	// SortKeys specifies map keys should be sorted before being printed. Use
	// this to have a more deterministic, diffable output.  Note that only
	// native types (bool, int, uint, floats, uintptr and string) and types
	// that support the error or Stringer interfaces (if methods are
	// enabled) are supported, with other types sorted according to the
	// reflect.Value.String() output which guarantees display stability.
	SortKeys bool

	// SpewKeys specifies that, as a last resort attempt, map keys should
	// be spewed to strings and sorted by those strings.  This is only
	// considered if SortKeys is true.
	SpewKeys bool
}

// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
+
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter.  It returns
// the formatted string as a value that satisfies error.  See NewFormatter
// for formatting details.  Arguments are wrapped via c.convertArgs before
// being handed to fmt.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, c.convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter.  It returns
// the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, c.convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter.  It returns
// the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
+
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, c.convertArgs(a)...)
}
+
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter.  It returns
// the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.  Arguments are wrapped via
// c.convertArgs before being handed to fmt.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
	return fmt.Print(c.convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter.  It returns
// the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, c.convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter.  It returns
// the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
	return fmt.Println(c.convertArgs(a)...)
}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Print, c.Printf, or c.Println.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// 	Indent: " "
+// 	MaxDepth: 0
+// 	DisableMethods: false
+// 	DisablePointerMethods: false
+// 	ContinueOnMethod: false
+// 	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}

+ 211 - 0
vendor/github.com/davecgh/go-spew/spew/doc.go

@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows concurrent configuration
+options.  See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default.  A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output.  Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability.  Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings.  This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Print, spew.Printf, or spew.Println.  The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew

+ 509 - 0
vendor/github.com/davecgh/go-spew/spew/dump.go

@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8.  It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char.  It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+	if d.ignoreNextIndent {
+		d.ignoreNextIndent = false
+		return
+	}
+	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range d.pointers {
+		if depth >= d.depth {
+			delete(d.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		d.pointers[addr] = d.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type information.
+	d.w.Write(openParenBytes)
+	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+	d.w.Write([]byte(ve.Type().String()))
+	d.w.Write(closeParenBytes)
+
+	// Display pointer information.
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+		d.w.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				d.w.Write(pointerChainBytes)
+			}
+			printHexPtr(d.w, addr)
+		}
+		d.w.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	d.w.Write(openParenBytes)
+	switch {
+	case nilFound:
+		d.w.Write(nilAngleBytes)
+
+	case cycleFound:
+		d.w.Write(circularBytes)
+
+	default:
+		d.ignoreNextType = true
+		d.dump(ve)
+	}
+	d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+	// Determine whether this type should be hex dumped or not.  Also,
+	// for types which should be hexdumped, try to use the underlying data
+	// first, then fall back to trying to convert them to a uint8 slice.
+	var buf []uint8
+	doConvert := false
+	doHexDump := false
+	numEntries := v.Len()
+	if numEntries > 0 {
+		vt := v.Index(0).Type()
+		vts := vt.String()
+		switch {
+		// C types that need to be converted.
+		case cCharRE.MatchString(vts):
+			fallthrough
+		case cUnsignedCharRE.MatchString(vts):
+			fallthrough
+		case cUint8tCharRE.MatchString(vts):
+			doConvert = true
+
+		// Try to use existing uint8 slices and fall back to converting
+		// and copying if that fails.
+		case vt.Kind() == reflect.Uint8:
+			// We need an addressable interface to convert the type
+			// to a byte slice.  However, the reflect package won't
+			// give us an interface on certain things like
+			// unexported struct fields in order to enforce
+			// visibility rules.  We use unsafe, when available, to
+			// bypass these restrictions since this package does not
+			// mutate the values.
+			vs := v
+			if !vs.CanInterface() || !vs.CanAddr() {
+				vs = unsafeReflectValue(vs)
+			}
+			if !UnsafeDisabled {
+				vs = vs.Slice(0, numEntries)
+
+				// Use the existing uint8 slice if it can be
+				// type asserted.
+				iface := vs.Interface()
+				if slice, ok := iface.([]uint8); ok {
+					buf = slice
+					doHexDump = true
+					break
+				}
+			}
+
+			// The underlying data needs to be converted if it can't
+			// be type asserted to a uint8 slice.
+			doConvert = true
+		}
+
+		// Copy and convert the underlying type if needed.
+		if doConvert && vt.ConvertibleTo(uint8Type) {
+			// Convert and copy each element into a uint8 byte
+			// slice.
+			buf = make([]uint8, numEntries)
+			for i := 0; i < numEntries; i++ {
+				vv := v.Index(i)
+				buf[i] = uint8(vv.Convert(uint8Type).Uint())
+			}
+			doHexDump = true
+		}
+	}
+
+	// Hexdump the entire slice as needed.
+	if doHexDump {
+		indent := strings.Repeat(d.cs.Indent, d.depth)
+		str := indent + hex.Dump(buf)
+		str = strings.Replace(str, "\n", "\n"+indent, -1)
+		str = strings.TrimRight(str, d.cs.Indent)
+		d.w.Write([]byte(str))
+		return
+	}
+
+	// Recursively call dump for each item.
+	for i := 0; i < numEntries; i++ {
+		d.dump(d.unpackValue(v.Index(i)))
+		if i < (numEntries - 1) {
+			d.w.Write(commaNewlineBytes)
+		} else {
+			d.w.Write(newlineBytes)
+		}
+	}
+}
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		d.w.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		d.indent()
+		d.dumpPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !d.ignoreNextType {
+		d.indent()
+		d.w.Write(openParenBytes)
+		d.w.Write([]byte(v.Type().String()))
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+	d.ignoreNextType = false
+
+	// Display length and capacity if the built-in len and cap functions
+	// work with the value's kind and the len/cap itself is non-zero.
+	valueLen, valueCap := 0, 0
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Chan:
+		valueLen, valueCap = v.Len(), v.Cap()
+	case reflect.Map, reflect.String:
+		valueLen = v.Len()
+	}
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+		d.w.Write(openParenBytes)
+		if valueLen != 0 {
+			d.w.Write(lenEqualsBytes)
+			printInt(d.w, int64(valueLen), 10)
+		}
+		if !d.cs.DisableCapacities && valueCap != 0 {
+			if valueLen != 0 {
+				d.w.Write(spaceBytes)
+			}
+			d.w.Write(capEqualsBytes)
+			printInt(d.w, int64(valueCap), 10)
+		}
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+
+	// Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled
+	if !d.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(d.cs, d.w, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(d.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(d.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(d.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(d.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(d.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(d.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(d.w, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			d.dumpSlice(v)
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.String:
+		d.w.Write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			numEntries := v.Len()
+			keys := v.MapKeys()
+			if d.cs.SortKeys {
+				sortValues(keys, d.cs)
+			}
+			for i, key := range keys {
+				d.dump(d.unpackValue(key))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.MapIndex(key)))
+				if i < (numEntries - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Struct:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			vt := v.Type()
+			numFields := v.NumField()
+			for i := 0; i < numFields; i++ {
+				d.indent()
+				vtf := vt.Field(i)
+				d.w.Write([]byte(vtf.Name))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.Field(i)))
+				if i < (numFields - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(d.w, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(d.w, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it in case any new
+	// types are added.
+	default:
+		if v.CanInterface() {
+			fmt.Fprintf(d.w, "%v", v.Interface())
+		} else {
+			fmt.Fprintf(d.w, "%v", v.String())
+		}
+	}
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+	for _, arg := range a {
+		if arg == nil {
+			w.Write(interfaceBytes)
+			w.Write(spaceBytes)
+			w.Write(nilAngleBytes)
+			w.Write(newlineBytes)
+			continue
+		}
+
+		d := dumpState{w: w, cs: cs}
+		d.pointers = make(map[uintptr]int)
+		d.dump(reflect.ValueOf(arg))
+		d.w.Write(newlineBytes)
+	}
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+	fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(&Config, &buf, a...)
+	return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+	fdump(&Config, os.Stdout, a...)
+}

+ 419 - 0
vendor/github.com/davecgh/go-spew/spew/format.go

@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+	value          interface{}
+	fs             fmt.State
+	depth          int
+	pointers       map[uintptr]int
+	ignoreNextType bool
+	cs             *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+	if f.fs.Flag('+') && (len(pointerChain) > 0) {
+		f.fs.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				f.fs.Write(pointerChainBytes)
+			}
+			printHexPtr(f.fs, addr)
+		}
+		f.fs.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	switch {
+	case nilFound:
+		f.fs.Write(nilAngleBytes)
+
+	case cycleFound:
+		f.fs.Write(circularShortBytes)
+
+	default:
+		f.ignoreNextType = true
+		f.format(ve)
+	}
+}
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		f.fs.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		f.formatPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !f.ignoreNextType && f.fs.Flag('#') {
+		f.fs.Write(openParenBytes)
+		f.fs.Write([]byte(v.Type().String()))
+		f.fs.Write(closeParenBytes)
+	}
+	f.ignoreNextType = false
+
+	// Call Stringer/error interfaces if they exist and the handle methods
+	// flag is enabled.
+	if !f.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(f.cs, f.fs, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(f.fs, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(f.fs, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(f.fs, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(f.fs, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(f.fs, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(f.fs, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(f.fs, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		f.fs.Write(openBracketBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			numEntries := v.Len()
+			for i := 0; i < numEntries; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.Index(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBracketBytes)
+
+	case reflect.String:
+		f.fs.Write([]byte(v.String()))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}

+ 148 - 0
vendor/github.com/davecgh/go-spew/spew/spew.go

@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+	return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = NewFormatter(arg)
+	}
+	return formatters
+}

+ 36 - 0
vendor/github.com/eclipse/paho.mqtt.golang/.gitignore

@@ -0,0 +1,36 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+*.msg
+*.lok
+
+samples/trivial
+samples/trivial2
+samples/sample
+samples/reconnect
+samples/ssl
+samples/custom_store
+samples/simple
+samples/stdinpub
+samples/stdoutsub
+samples/routing

+ 56 - 0
vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md

@@ -0,0 +1,56 @@
+Contributing to Paho
+====================
+
+Thanks for your interest in this project.
+
+Project description:
+--------------------
+
+The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT).
+Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community.
+
+- https://projects.eclipse.org/projects/technology.paho
+
+Developer resources:
+--------------------
+
+Information regarding source code management, builds, coding standards, and more.
+
+- https://projects.eclipse.org/projects/technology.paho/developer
+
+Contributor License Agreement:
+------------------------------
+
+Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA).
+
+- http://www.eclipse.org/legal/CLA.php
+
+Contributing Code:
+------------------
+
+The Go client is developed in Github, see their documentation on the process of forking and pull requests; https://help.github.com/categories/collaborating-on-projects-using-pull-requests/
+
+Git commit messages should follow the style described here;
+
+http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+
+Contact:
+--------
+
+Contact the project developers via the project's "dev" list.
+
+- https://dev.eclipse.org/mailman/listinfo/paho-dev
+
+Search for bugs:
+----------------
+
+This project uses Github issues to track ongoing development and issues.
+
+- https://github.com/eclipse/paho.mqtt.golang/issues
+
+Create a new bug:
+-----------------
+
+Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome!
+
+- https://github.com/eclipse/paho.mqtt.golang/issues

+ 15 - 0
vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION

@@ -0,0 +1,15 @@
+
+
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+    Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Alguns arquivos não foram mostrados porque muitos arquivos mudaram nesse diff