liuxiulin 1 year ago
parent
commit
170c44d822
100 changed files with 22480 additions and 136 deletions
  1. .idea/vcs.xml  (+6 -0)
  2. bin/linux_amd64/yx-4g-gateway  (BIN)
  3. go.mod  (+8 -2)
  4. go.sum  (+100 -1)
  5. main.go  (+41 -42)
  6. schema/schema.go  (+21 -19)
  7. server/client.go  (+99 -63)
  8. server/server.go  (+17 -9)
  9. vendor/github.com/cespare/xxhash/v2/LICENSE.txt  (+22 -0)
  10. vendor/github.com/cespare/xxhash/v2/README.md  (+69 -0)
  11. vendor/github.com/cespare/xxhash/v2/xxhash.go  (+235 -0)
  12. vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go  (+13 -0)
  13. vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s  (+215 -0)
  14. vendor/github.com/cespare/xxhash/v2/xxhash_other.go  (+76 -0)
  15. vendor/github.com/cespare/xxhash/v2/xxhash_safe.go  (+15 -0)
  16. vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go  (+57 -0)
  17. vendor/github.com/clbanning/mxj/v2/.travis.yml  (+4 -0)
  18. vendor/github.com/clbanning/mxj/v2/LICENSE  (+22 -0)
  19. vendor/github.com/clbanning/mxj/v2/anyxml.go  (+201 -0)
  20. vendor/github.com/clbanning/mxj/v2/atomFeedString.xml  (+54 -0)
  21. vendor/github.com/clbanning/mxj/v2/doc.go  (+138 -0)
  22. vendor/github.com/clbanning/mxj/v2/escapechars.go  (+93 -0)
  23. vendor/github.com/clbanning/mxj/v2/exists.go  (+9 -0)
  24. vendor/github.com/clbanning/mxj/v2/files.go  (+287 -0)
  25. vendor/github.com/clbanning/mxj/v2/files_test.badjson  (+2 -0)
  26. vendor/github.com/clbanning/mxj/v2/files_test.badxml  (+9 -0)
  27. vendor/github.com/clbanning/mxj/v2/files_test.json  (+2 -0)
  28. vendor/github.com/clbanning/mxj/v2/files_test.xml  (+9 -0)
  29. vendor/github.com/clbanning/mxj/v2/files_test_dup.json  (+1 -0)
  30. vendor/github.com/clbanning/mxj/v2/files_test_dup.xml  (+1 -0)
  31. vendor/github.com/clbanning/mxj/v2/files_test_indent.json  (+12 -0)
  32. vendor/github.com/clbanning/mxj/v2/files_test_indent.xml  (+8 -0)
  33. vendor/github.com/clbanning/mxj/v2/gob.go  (+35 -0)
  34. vendor/github.com/clbanning/mxj/v2/json.go  (+323 -0)
  35. vendor/github.com/clbanning/mxj/v2/keyvalues.go  (+668 -0)
  36. vendor/github.com/clbanning/mxj/v2/leafnode.go  (+112 -0)
  37. vendor/github.com/clbanning/mxj/v2/misc.go  (+86 -0)
  38. vendor/github.com/clbanning/mxj/v2/mxj.go  (+128 -0)
  39. vendor/github.com/clbanning/mxj/v2/newmap.go  (+184 -0)
  40. vendor/github.com/clbanning/mxj/v2/readme.md  (+207 -0)
  41. vendor/github.com/clbanning/mxj/v2/remove.go  (+37 -0)
  42. vendor/github.com/clbanning/mxj/v2/rename.go  (+61 -0)
  43. vendor/github.com/clbanning/mxj/v2/set.go  (+26 -0)
  44. vendor/github.com/clbanning/mxj/v2/setfieldsep.go  (+20 -0)
  45. vendor/github.com/clbanning/mxj/v2/songtext.xml  (+29 -0)
  46. vendor/github.com/clbanning/mxj/v2/strict.go  (+30 -0)
  47. vendor/github.com/clbanning/mxj/v2/struct.go  (+54 -0)
  48. vendor/github.com/clbanning/mxj/v2/updatevalues.go  (+258 -0)
  49. vendor/github.com/clbanning/mxj/v2/xml.go  (+1410 -0)
  50. vendor/github.com/clbanning/mxj/v2/xmlseq.go  (+877 -0)
  51. vendor/github.com/clbanning/mxj/v2/xmlseq2.go  (+18 -0)
  52. vendor/github.com/dgryski/go-rendezvous/LICENSE  (+21 -0)
  53. vendor/github.com/dgryski/go-rendezvous/rdv.go  (+79 -0)
  54. vendor/github.com/go-redis/redis/v8/.gitignore  (+3 -0)
  55. vendor/github.com/go-redis/redis/v8/.golangci.yml  (+4 -0)
  56. vendor/github.com/go-redis/redis/v8/.prettierrc.yml  (+4 -0)
  57. vendor/github.com/go-redis/redis/v8/CHANGELOG.md  (+177 -0)
  58. vendor/github.com/go-redis/redis/v8/LICENSE  (+25 -0)
  59. vendor/github.com/go-redis/redis/v8/Makefile  (+35 -0)
  60. vendor/github.com/go-redis/redis/v8/README.md  (+175 -0)
  61. vendor/github.com/go-redis/redis/v8/RELEASING.md  (+15 -0)
  62. vendor/github.com/go-redis/redis/v8/cluster.go  (+1750 -0)
  63. vendor/github.com/go-redis/redis/v8/cluster_commands.go  (+109 -0)
  64. vendor/github.com/go-redis/redis/v8/command.go  (+3478 -0)
  65. vendor/github.com/go-redis/redis/v8/commands.go  (+3475 -0)
  66. vendor/github.com/go-redis/redis/v8/doc.go  (+4 -0)
  67. vendor/github.com/go-redis/redis/v8/error.go  (+144 -0)
  68. vendor/github.com/go-redis/redis/v8/internal/arg.go  (+56 -0)
  69. vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go  (+78 -0)
  70. vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go  (+201 -0)
  71. vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go  (+93 -0)
  72. vendor/github.com/go-redis/redis/v8/internal/internal.go  (+29 -0)
  73. vendor/github.com/go-redis/redis/v8/internal/log.go  (+26 -0)
  74. vendor/github.com/go-redis/redis/v8/internal/once.go  (+60 -0)
  75. vendor/github.com/go-redis/redis/v8/internal/pool/conn.go  (+121 -0)
  76. vendor/github.com/go-redis/redis/v8/internal/pool/pool.go  (+557 -0)
  77. vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go  (+58 -0)
  78. vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go  (+201 -0)
  79. vendor/github.com/go-redis/redis/v8/internal/proto/reader.go  (+332 -0)
  80. vendor/github.com/go-redis/redis/v8/internal/proto/scan.go  (+180 -0)
  81. vendor/github.com/go-redis/redis/v8/internal/proto/writer.go  (+155 -0)
  82. vendor/github.com/go-redis/redis/v8/internal/rand/rand.go  (+50 -0)
  83. vendor/github.com/go-redis/redis/v8/internal/safe.go  (+12 -0)
  84. vendor/github.com/go-redis/redis/v8/internal/unsafe.go  (+21 -0)
  85. vendor/github.com/go-redis/redis/v8/internal/util.go  (+46 -0)
  86. vendor/github.com/go-redis/redis/v8/internal/util/safe.go  (+12 -0)
  87. vendor/github.com/go-redis/redis/v8/internal/util/strconv.go  (+19 -0)
  88. vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go  (+23 -0)
  89. vendor/github.com/go-redis/redis/v8/iterator.go  (+77 -0)
  90. vendor/github.com/go-redis/redis/v8/options.go  (+429 -0)
  91. vendor/github.com/go-redis/redis/v8/package.json  (+8 -0)
  92. vendor/github.com/go-redis/redis/v8/pipeline.go  (+147 -0)
  93. vendor/github.com/go-redis/redis/v8/pubsub.go  (+668 -0)
  94. vendor/github.com/go-redis/redis/v8/redis.go  (+773 -0)
  95. vendor/github.com/go-redis/redis/v8/result.go  (+180 -0)
  96. vendor/github.com/go-redis/redis/v8/ring.go  (+736 -0)
  97. vendor/github.com/go-redis/redis/v8/script.go  (+65 -0)
  98. vendor/github.com/go-redis/redis/v8/sentinel.go  (+796 -0)
  99. vendor/github.com/go-redis/redis/v8/tx.go  (+149 -0)
  100. vendor/github.com/go-redis/redis/v8/universal.go  (+215 -0)

+ 6 - 0
.idea/vcs.xml

@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>

BIN
bin/linux_amd64/yx-4g-gateway


+ 8 - 2
go.mod

@@ -1,27 +1,33 @@
 module yx-4g-gateway
 
 require (
-	github.com/gogf/gf v1.16.9
+	github.com/gogf/gf/v2 v2.2.2
 	sparrow-sdk v1.0.0
 )
 
 require (
 	github.com/BurntSushi/toml v1.1.0 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28 // indirect
+	github.com/clbanning/mxj/v2 v2.5.5 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
 	github.com/eclipse/paho.mqtt.golang v1.3.5 // indirect
 	github.com/fatih/color v1.13.0 // indirect
 	github.com/fsnotify/fsnotify v1.5.4 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-sql-driver/mysql v1.6.0 // indirect
+	github.com/go-redis/redis/v8 v8.11.5 // indirect
+	github.com/gogf/gf v1.16.9 // indirect
 	github.com/gomodule/redigo v1.8.5 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/grokify/html-strip-tags-go v0.0.1 // indirect
+	github.com/magiconair/properties v1.8.6 // indirect
 	github.com/mattn/go-colorable v0.1.9 // indirect
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/mattn/go-runewidth v0.0.9 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	go.opentelemetry.io/otel v1.7.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.7.0 // indirect
 	go.opentelemetry.io/otel/trace v1.7.0 // indirect
 	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
 	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect

+ 100 - 1
go.sum

@@ -1,15 +1,26 @@
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
 github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28 h1:LdXxtjzvZYhhUaonAaAKArG3pyC67kGL3YY+6hGG8G4=
 github.com/clbanning/mxj v1.8.5-0.20200714211355-ff02cfb8ea28/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/clbanning/mxj/v2 v2.5.5 h1:oT81vUeEiQQ/DcHbzSytRngP6Ky9O+L+0Bw0zSJag9E=
+github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y=
 github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
 github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
@@ -18,22 +29,45 @@ github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
 github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
 github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/gogf/gf v1.16.6/go.mod h1:4LoHfEBl2jbVmZpVx+qk2La3zWr1V315FtF2PVZuyQ8=
 github.com/gogf/gf v1.16.9 h1:Q803UmmRo59+Ws08sMVFOcd8oNpkSWL9vS33hlo/Cyk=
 github.com/gogf/gf v1.16.9/go.mod h1:8Q/kw05nlVRp+4vv7XASBsMe9L1tsVKiGoeP2AHnlkk=
+github.com/gogf/gf/v2 v2.2.2 h1:ew4k/VSr/gcPdMZcI8/HZYBYPjB0KOk6bQqA61M8bYE=
+github.com/gogf/gf/v2 v2.2.2/go.mod h1:thvkyb43RWUu/m05sRm4CbH9r7t7/FrW2M56L9Ystwk=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc=
 github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grokify/html-strip-tags-go v0.0.0-20190921062105-daaa06bf1aaf/go.mod h1:2Su6romC5/1VXOQMaWL2yb618ARB8iVo6/DR99A6d78=
 github.com/grokify/html-strip-tags-go v0.0.1 h1:0fThFwLbW7P/kOiTBs03FsJSV9RM2M/Q/MOnCQxKMo0=
 github.com/grokify/html-strip-tags-go v0.0.1/go.mod h1:2Su6romC5/1VXOQMaWL2yb618ARB8iVo6/DR99A6d78=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
+github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -42,8 +76,22 @@ github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
 github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -51,11 +99,15 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
 go.opentelemetry.io/otel v1.0.0/go.mod h1:AjRVh9A5/5DE7S+mZtTR6t8vpKKryam+0lREnfmS4cg=
 go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
 go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
 go.opentelemetry.io/otel/oteltest v1.0.0-RC2/go.mod h1:kiQ4tw5tAL4JLTbcOYwK1CWI1HkT5aiLzHovgOVnz/A=
+go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
+go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
 go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
 go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs=
 go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
@@ -63,30 +115,77 @@ go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48
 gogs.yehaoji.cn/yongxu/sparrow-sdk.git v1.1.6 h1:eWYj9ZjaKC2p8mxBgCIZgrpNZXQDMN9R+J1d2cATJdI=
 gogs.yehaoji.cn/yongxu/sparrow-sdk.git v1.1.6/go.mod h1:hWw7D5hrW8f8cOKKdhtlt8HQbdfD2o6PllWMhs0BdQs=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
 golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
 golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2 h1:GLw7MR8AfAG2GmGcmVgObFOHXYypgGjnGno25RDwn3Y=
 golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

+ 41 - 42
main.go

@@ -2,10 +2,9 @@ package main
 
 import (
 	"context"
-	"github.com/gogf/gf/encoding/gjson"
-	"github.com/gogf/gf/frame/g"
-	"github.com/gogf/gf/os/glog"
-	"github.com/gogf/gf/os/gproc"
+	"github.com/gogf/gf/v2/encoding/gjson"
+	"github.com/gogf/gf/v2/os/glog"
+	"github.com/gogf/gf/v2/os/gproc"
 	"os"
 	"sparrow-sdk/config"
 	"sparrow-sdk/protocal"
@@ -16,17 +15,17 @@ import (
 
 func main() {
 	ctx := context.Background()
-	err := glog.SetLevelStr(g.Cfg().GetString("Server.RunMode"))
+	err := glog.SetLevelStr(server.GetConfig("Server.RunMode").String())
 	if err != nil {
 		panic(err)
 	}
 	gw := gatewayV2.NewGateway(&config.Config{
-		SparrowServer: g.Cfg().GetString("Sparrow.Server"),
-		ProductKey:    g.Cfg().GetString("Sparrow.ProductKey"),
+		SparrowServer: server.GetConfig("Sparrow.Server").String(),
+		ProductKey:    server.GetConfig("Sparrow.ProductKey").String(),
 		Protocol:      "mqtt",
-		DeviceCode:    g.Cfg().GetString("Sparrow.DeviceCode"),
+		DeviceCode:    server.GetConfig("Sparrow.DeviceCode").String(),
 		Version:       "1.0.0",
-		Debug:         g.Cfg().GetBool("Sparrow.Debug"),
+		Debug:         server.GetConfig("Sparrow.Debug").Bool(),
 	})
 	if _, err = gw.Register(); err != nil {
 		panic(err)
@@ -41,8 +40,8 @@ func main() {
 
 	srv := server.NewServer(
 		ctx,
-		g.Cfg().GetString("Server.Addr"),
-		g.Cfg().GetInt("Server.Port"),
+		server.GetConfig("Server.Addr").String(),
+		server.GetConfig("Server.Port").Int(),
 		gw,
 	)
 	go func() {
@@ -64,22 +63,22 @@ func main() {
 	if err = gw.RegisterCommand("setPower", func(msg protocal.CloudSend) error {
 		var params schema.SetPowerReq
 		j := gjson.New(msg.Data.Params)
-		err = j.Struct(&params)
+		err = j.Scan(&params)
 		if err != nil {
-			glog.Errorf("错误的指令参数%s", err.Error())
+			glog.Errorf(ctx, "错误的指令参数%s", err.Error())
 			return err
 		}
-		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		glog.Debugf(ctx, "指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
 		client := srv.GetClient(msg.SubDeviceId)
 		if client != nil {
 			if params.Power == 1 {
 				if err = client.PowerOn(); err != nil {
-					glog.Errorf("执行命令出错:%s", err.Error())
+					glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 					return err
 				}
 			} else {
 				if err = client.PowerOff(); err != nil {
-					glog.Errorf("执行命令出错:%s", err.Error())
+					glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 					return err
 				}
 			}
@@ -93,16 +92,16 @@ func main() {
 	if err = gw.RegisterCommand("setMode", func(msg protocal.CloudSend) error {
 		var params schema.SetModeReq
 		j := gjson.New(msg.Data.Params)
-		err = j.Struct(&params)
+		err = j.Scan(&params)
 		if err != nil {
-			glog.Errorf("错误的指令参数%s", err.Error())
+			glog.Errorf(ctx, "错误的指令参数%s", err.Error())
 			return err
 		}
-		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		glog.Debugf(ctx, "指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
 		client := srv.GetClient(msg.SubDeviceId)
 		if client != nil {
 			if err = client.SetMode(params.Mode); err != nil {
-				glog.Errorf("执行命令出错:%s", err.Error())
+				glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 				return err
 			}
 		}
@@ -114,17 +113,17 @@ func main() {
 	if err = gw.RegisterCommand("setTemp", func(msg protocal.CloudSend) error {
 		var params schema.SetTempReq
 		j := gjson.New(msg.Data.Params)
-		err = j.Struct(&params)
+		err = j.Scan(&params)
 		if err != nil {
-			glog.Errorf("错误的指令参数%s", err.Error())
+			glog.Errorf(ctx, "错误的指令参数%s", err.Error())
 			return err
 		}
 
-		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		glog.Debugf(ctx, "指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
 		client := srv.GetClient(msg.SubDeviceId)
 		if client != nil {
 			if err = client.SetTemp(params.Temp); err != nil {
-				glog.Errorf("执行命令出错:%s", err.Error())
+				glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 				return err
 			}
 		}
@@ -136,16 +135,16 @@ func main() {
 	if err = gw.RegisterCommand("setFanLevel", func(msg protocal.CloudSend) error {
 		var params schema.FanSpeedReq
 		j := gjson.New(msg.Data.Params)
-		err = j.Struct(&params)
+		err = j.Scan(&params)
 		if err != nil {
-			glog.Errorf("错误的指令参数%s", err.Error())
+			glog.Errorf(ctx, "错误的指令参数%s", err.Error())
 			return err
 		}
-		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		glog.Debugf(ctx, "指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
 		client := srv.GetClient(msg.SubDeviceId)
 		if client != nil {
 			if err = client.SetFanSpeed(params.Speed); err != nil {
-				glog.Errorf("执行命令出错:%s", err.Error())
+				glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 				return err
 			}
 		}
@@ -157,17 +156,17 @@ func main() {
 	if err = gw.RegisterCommand("setNewFan", func(msg protocal.CloudSend) error {
 		var params schema.SetNewFanReq
 		j := gjson.New(msg.Data.Params)
-		err = j.Struct(&params)
+		err = j.Scan(&params)
 		if err != nil {
-			glog.Errorf("错误的指令参数%s", err.Error())
+			glog.Errorf(ctx, "错误的指令参数%s", err.Error())
 			return err
 		}
 
-		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		glog.Debugf(ctx, "指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
 		client := srv.GetClient(msg.SubDeviceId)
 		if client != nil {
 			if err = client.SetNewFan(params.Mode); err != nil {
-				glog.Errorf("执行命令出错:%s", err.Error())
+				glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 				return err
 			}
 		}
@@ -179,30 +178,30 @@ func main() {
 	if err = gw.RegisterCommand("setFanValue", func(msg protocal.CloudSend) error {
 		var params schema.FanValveReq
 		j := gjson.New(msg.Data.Params)
-		err = j.Struct(&params)
+		err = j.Scan(&params)
 		if err != nil {
-			glog.Errorf("错误的指令参数%s", err.Error())
+			glog.Errorf(ctx, "错误的指令参数%s", err.Error())
 			return err
 		}
 
-		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		glog.Debugf(ctx, "指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
 		client := srv.GetClient(msg.SubDeviceId)
 		if client != nil {
 			if params.FanValve == "03" {
 				if err = client.SetFanGateThreeLevel(params.Value); err != nil {
-					glog.Errorf("执行命令出错:%s", err.Error())
+					glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 					return err
 				}
 			}
 			if params.FanValve == "04" {
 				if err = client.SetFanGateFourLevel(params.Value); err != nil {
-					glog.Errorf("执行命令出错:%s", err.Error())
+					glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 					return err
 				}
 			}
 			if params.FanValve == "05" {
 				if err = client.SetFanGateFiveLevel(params.Value); err != nil {
-					glog.Errorf("执行命令出错:%s", err.Error())
+					glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 					return err
 				}
 			}
@@ -216,16 +215,16 @@ func main() {
 	if err = gw.RegisterCommand("setFan", func(msg protocal.CloudSend) error {
 		var params schema.SetValvePowerReq
 		j := gjson.New(msg.Data.Params)
-		err = j.Struct(&params)
+		err = j.Scan(&params)
 		if err != nil {
-			glog.Errorf("错误的指令参数%s", err.Error())
+			glog.Errorf(ctx, "错误的指令参数%s", err.Error())
 			return err
 		}
-		glog.Debugf("指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
+		glog.Debugf(ctx, "指令:%s, 子设备Id:%s, 参数:%v", msg.Data.Cmd, msg.SubDeviceId, params)
 		client := srv.GetClient(msg.SubDeviceId)
 		if client != nil {
 			if err = client.SetValvePower(&params); err != nil {
-				glog.Errorf("执行命令出错:%s", err.Error())
+				glog.Errorf(ctx, "执行命令出错:%s", err.Error())
 				return err
 			}
 		}
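
The main.go changes follow the GoFrame v1 → v2 migration visible throughout this commit: imports move under github.com/gogf/gf/v2, log calls take a context.Context as their first argument, gjson's Struct method is replaced by Scan, and the direct g.Cfg().GetString/GetInt calls are routed through the new server.GetConfig helper (defined later in server/server.go). A minimal sketch of the v2-style command-handler pattern used here; the SetPowerReq shape and the API calls are taken from the diff, the handler function itself is illustrative:

```go
package sketch

import (
	"context"

	"github.com/gogf/gf/v2/encoding/gjson"
	"github.com/gogf/gf/v2/os/glog"
)

// SetPowerReq mirrors the schema type referenced in the diff.
type SetPowerReq struct {
	Power int `json:"power"`
}

// handleSetPower shows the gf v2 pattern: gjson.Scan for decoding and
// context-aware logging.
func handleSetPower(ctx context.Context, rawParams interface{}) (*SetPowerReq, error) {
	var params SetPowerReq
	// gf v2: (*gjson.Json).Scan replaces the v1 Struct method.
	if err := gjson.New(rawParams).Scan(&params); err != nil {
		// gf v2: loggers take a context as the first argument.
		glog.Errorf(ctx, "bad command parameters: %s", err.Error())
		return nil, err
	}
	glog.Debugf(ctx, "setPower params: %+v", params)
	return &params, nil
}
```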

+ 21 - 19
schema/schema.go

@@ -38,23 +38,25 @@ type SetValvePowerReq struct {
 }
 
 type StatusResponse struct {
-	Power             int    `json:"power"`                // 开机关状态
-	Mode              uint16 `json:"mode"`                 // 设置模式
-	NewFan            uint16 `json:"new_fan"`              // 新风开关
-	FanSpeed          uint16 `json:"fan_speed"`            // 设定风速
-	SetTemp           uint16 `json:"set_temp"`             // 设置温度
-	Temperature       uint16 `json:"temperature"`          // 温度
-	Humidity          uint16 `json:"humidity"`             // 湿度
-	AirQuality        uint16 `json:"air_quality"`          // pm25
-	CO2               uint16 `json:"co2"`                  // co2
-	FanGateOne        int    `json:"fan_gate_one"`         // 风阀1 状态
-	FanGateTwo        int    `json:"fan_gate_two"`         // 风阀2 状态
-	FanGateThree      int    `json:"fan_gate_three"`       // 风阀3 状态
-	FanGateFour       int    `json:"fan_gate_four"`        // 风阀4 状态
-	FanGateFive       int    `json:"fan_gate_five"`        // 风阀5 状态
-	FanGateOneLevel   uint16 `json:"fan_gate_one_level"`   // 风阀1 开度
-	FanGateTwoLevel   uint16 `json:"fan_gate_two_level"`   // 风阀2 开度
-	FanGateThreeLevel uint16 `json:"fan_gate_three_level"` // 风阀3 开度
-	FanGateFourLevel  uint16 `json:"fan_gate_four_level"`  // 风阀4 开度
-	FanGateFiveLevel  uint16 `json:"fan_gate_five_level"`  // 风阀5 开度
+	//Power             int    `json:"power"`                // 开机关状态
+	//Mode              uint16 `json:"mode"`                 // 设置模式
+	//NewFan            uint16 `json:"new_fan"`              // 新风开关
+	//FanSpeed          uint16 `json:"fan_speed"`            // 设定风速
+	//SetTemp           uint16 `json:"set_temp"`             // 设置温度
+	//Temperature       uint16 `json:"temperature"`          // 温度
+	//Humidity          uint16 `json:"humidity"`             // 湿度
+	//AirQuality        uint16 `json:"air_quality"`          // pm25
+	//CO2               uint16 `json:"co2"`                  // co2
+	//FanGateOne        int    `json:"fan_gate_one"`         // 风阀1 状态
+	//FanGateTwo        int    `json:"fan_gate_two"`         // 风阀2 状态
+	//FanGateThree      int    `json:"fan_gate_three"`       // 风阀3 状态
+	//FanGateFour       int    `json:"fan_gate_four"`        // 风阀4 状态
+	//FanGateFive       int    `json:"fan_gate_five"`        // 风阀5 状态
+	//FanGateOneLevel   uint16 `json:"fan_gate_one_level"`   // 风阀1 开度
+	//FanGateTwoLevel   uint16 `json:"fan_gate_two_level"`   // 风阀2 开度
+	//FanGateThreeLevel uint16 `json:"fan_gate_three_level"` // 风阀3 开度
+	//FanGateFourLevel  uint16 `json:"fan_gate_four_level"`  // 风阀4 开度
+	//FanGateFiveLevel  uint16 `json:"fan_gate_five_level"`  // 风阀5 开度
+	InletValve uint16 `json:"inlet_valve"` // 进水阀
+	HighWater  uint16 `json:"high_water"`  // 高水位
 }

+ 99 - 63
server/client.go

@@ -4,9 +4,10 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"github.com/gogf/gf/encoding/gbinary"
-	"github.com/gogf/gf/net/gtcp"
-	"github.com/gogf/gf/os/glog"
+	"github.com/gogf/gf/v2/encoding/gbinary"
+	"github.com/gogf/gf/v2/frame/g"
+	"github.com/gogf/gf/v2/net/gtcp"
+	"github.com/gogf/gf/v2/os/glog"
 	"io"
 	"net"
 	"strings"
@@ -52,12 +53,13 @@ func (c *Client) SetId(id string) {
 }
 
 func (c *Client) SendLoop() {
+	ctx := c.srv.ctx
 	for {
 		select {
 		case buf := <-c.sendChan:
 			err := c.send(buf)
 			if err != nil {
-				glog.Errorf("指令发送失败:%s", err.Error())
+				g.Log().Errorf(ctx, "指令发送失败:%s", err.Error())
 				continue
 			}
 			timer := time.NewTimer(5 * time.Second)
@@ -66,7 +68,7 @@ func (c *Client) SendLoop() {
 				case <-c.closeChan:
 					return
 				case <-timer.C:
-					glog.Error("接收指令超时")
+					g.Log().Errorf(ctx, "接收指令超时")
 					break
 				default:
 					receiveBuf, err := c.conn.Recv(0)
@@ -76,7 +78,7 @@ func (c *Client) SendLoop() {
 					}
 					if !c.isReg {
 						id := gbinary.DecodeToString(receiveBuf)
-						glog.Debugf("收到注册包!id:%s", id)
+						g.Log().Debugf(ctx, "收到注册包!id:%s", id)
 						c.SetId(id)
 						c.isReg = true
 						if c.regHandler != nil {
@@ -84,15 +86,17 @@ func (c *Client) SendLoop() {
 						}
 						continue
 					}
-					glog.Debugf("收到数据:%2X", receiveBuf)
-					c.receiveData = append(c.receiveData, receiveBuf...)
+					if c.Id == "863569064249469" {
+						g.Log().Debugf(ctx, "ID:%s 收到数据:%2X", c.Id, receiveBuf)
+					}
+					c.receiveData = receiveBuf
 					//for v := range receiveBuf {
 					//	c.receiveQueue.Push(v)
 					//	glog.Debugf("队列数据:%2X", c.receiveQueue)
 					//}
 					//glog.Debugf("receiveQueue长度:%d", c.receiveQueue.Len())
 					if err := c.readQueue(); err != nil {
-						glog.Debugf("处理数据失败:%s", err.Error())
+						g.Log().Errorf(ctx, "处理数据失败:%s", err.Error())
 						break
 					}
 				}
@@ -103,19 +107,39 @@ func (c *Client) SendLoop() {
 }
 
 func (c *Client) readQueue() error {
-	if bytes.Equal(c.receiveData[:3], []byte{0x02, 0x03, 0xa8}) {
+	//for i := 0; i < len(c.receiveData)-2; i++ {
+	//	if c.receiveData[i] == 0x02 && c.receiveData[i+1] == 0x03 && c.receiveData[i+2] == 0xa8 {
+	//		c.receiveData = c.receiveData[i:]
+	//		if len(c.receiveData) >= int(c.receiveData[2])+5 {
+	//			c.receiveData = c.receiveData[:int(c.receiveData[i+2])+5]
+	//			return c.decodeAndReport(c.receiveData)
+	//		} else {
+	//			return nil
+	//		}
+	//
+	//	}
+	//}
+	//c.receiveData = []byte{}
+	//return nil
+	if bytes.Equal(c.receiveData[:3], []byte{0x02, 0x03, 0x04}) {
 		if len(c.receiveData) >= int(c.receiveData[2])+5 {
-			c.receiveData = c.receiveData[:int(c.receiveData[2]+5)]
-			if err := c.decodeAndReport(c.receiveData); err != nil {
-				return err
-			}
-			c.receiveData = c.receiveData[:0]
+			return c.decodeAndReport(c.receiveData)
 		}
-		return nil
-	} else {
-		c.receiveData = c.receiveData[:0]
-		return nil
 	}
+	return nil
+	//if bytes.Equal(c.receiveData[:3], []byte{0x02, 0x03, 0x04}) {
+	//	if len(c.receiveData) >= int(c.receiveData[2])+5 {
+	//		c.receiveData = c.receiveData[:int(c.receiveData[2]+5)]
+	//		if err := c.decodeAndReport(c.receiveData); err != nil {
+	//			return err
+	//		}
+	//		c.receiveData = c.receiveData[:0]
+	//	}
+	//	return nil
+	//} else {
+	//	c.receiveData = c.receiveData[:0]
+	//	return nil
+	//}
 	//for {
 	//	if c.receiveQueue.Len() > 3 {
 	//		glog.Debugf("11111111111111111")
@@ -183,48 +207,55 @@ func (c *Client) decodeAndReport(buf []byte) error {
 	}
 
 	ret := new(schema.StatusResponse)
-	ret.Power = gbinary.BeDecodeToInt(data[address.UnitOnOff])
-	ret.Mode = gbinary.BeDecodeToUint16(data[address.SetMode])
-	ret.FanSpeed = gbinary.BeDecodeToUint16(data[address.SetFanSpeed])
-	ret.SetTemp = gbinary.BeDecodeToUint16(data[address.SetTemp])
-	ret.NewFan = gbinary.BeDecodeToUint16(data[address.SetNewFan])
+	//ret.Power = gbinary.BeDecodeToInt(data[address.UnitOnOff])
+	//ret.Mode = gbinary.BeDecodeToUint16(data[address.SetMode])
+	//ret.FanSpeed = gbinary.BeDecodeToUint16(data[address.SetFanSpeed])
+	//ret.SetTemp = gbinary.BeDecodeToUint16(data[address.SetTemp])
+	//ret.NewFan = gbinary.BeDecodeToUint16(data[address.SetNewFan])
 
 	//ret.FanSpeed = gbinary.BeDecodeToUint16(data[8:9])
 	//ret.SetTemp = gbinary.BeDecodeToUint16(data[28:29])
-	ret.AirQuality = gbinary.BeDecodeToUint16(data[35])
-	ret.CO2 = gbinary.BeDecodeToUint16(data[36])
-	ret.Temperature = gbinary.BeDecodeToUint16(data[27])
-	ret.Humidity = gbinary.BeDecodeToUint16(data[28])
+	//ret.AirQuality = gbinary.BeDecodeToUint16(data[35])
+	//ret.CO2 = gbinary.BeDecodeToUint16(data[36])
+	//ret.Temperature = gbinary.BeDecodeToUint16(data[27])
+	//ret.Humidity = gbinary.BeDecodeToUint16(data[28])
 	//
-	fanStatus := gbinary.BeDecodeToUint16(data[5])
-	ret.FanGateOne = int(fanStatus) & 0x80
-	ret.FanGateTwo = int(fanStatus) & 0x40
-	ret.FanGateThree = int(fanStatus) & 0x20
-	ret.FanGateFour = int(fanStatus) & 0x10
-	ret.FanGateFive = int(fanStatus) & 0x08
+	//fanStatus := gbinary.BeDecodeToUint16(data[5])
+	//ret.FanGateOne = int(fanStatus) & 0x80
+	//ret.FanGateTwo = int(fanStatus) & 0x40
+	//ret.FanGateThree = int(fanStatus) & 0x20
+	//ret.FanGateFour = int(fanStatus) & 0x10
+	//ret.FanGateFive = int(fanStatus) & 0x08
+	////
+	//ret.FanGateOneLevel = gbinary.BeDecodeToUint16(data[7])
+	//ret.FanGateThreeLevel = gbinary.BeDecodeToUint16(data[8])
+	//ret.FanGateFourLevel = gbinary.BeDecodeToUint16(data[9])
+	//ret.FanGateFiveLevel = gbinary.BeDecodeToUint16(data[10])
+
+	ret.InletValve = gbinary.BeDecodeToUint16(data[0])
+	ret.HighWater = gbinary.BeDecodeToUint16(data[1])
+	//ret.AirQuality = gbinary.BeDecodeToUint16(data[2])
+	//ret.CO2 = gbinary.BeDecodeToUint16(data[3])
+	//if ret.FanGateOneLevel > 0 {
+	//	ret.FanGateOne = 1
+	//	ret.FanGateTwo = 1
+	//}
+	//if ret.FanGateThreeLevel > 0 {
+	//	ret.FanGateThree = 1
+	//}
 	//
-	ret.FanGateOneLevel = gbinary.BeDecodeToUint16(data[7])
-	ret.FanGateThreeLevel = gbinary.BeDecodeToUint16(data[8])
-	ret.FanGateFourLevel = gbinary.BeDecodeToUint16(data[9])
-	ret.FanGateFiveLevel = gbinary.BeDecodeToUint16(data[10])
-
-	if ret.FanGateOneLevel > 0 {
-		ret.FanGateOne = 1
-		ret.FanGateTwo = 1
-	}
-	if ret.FanGateThreeLevel > 0 {
-		ret.FanGateThree = 1
-	}
-
-	if ret.FanGateFourLevel > 0 {
-		ret.FanGateFour = 1
-	}
-	if ret.FanGateFiveLevel > 0 {
-		ret.FanGateFive = 1
-	}
+	//if ret.FanGateFourLevel > 0 {
+	//	ret.FanGateFour = 1
+	//}
+	//if ret.FanGateFiveLevel > 0 {
+	//	ret.FanGateFive = 1
+	//}
 
-	if err := c.srv.ReportStatus(c.Id, ret, "status"); err != nil {
-		return err
+	c.receiveData = []byte{}
+	if c.Id == "863569064249469" {
+		if err := c.srv.ReportStatus(c.Id, ret, "status"); err != nil {
+			return err
+		}
 	}
 	return nil
 }
@@ -234,7 +265,7 @@ func (c *Client) readError(err error) {
 	if err == io.EOF || isErrConnReset(err) {
 		return
 	}
-	glog.Errorf("读取数据发生错误:%s", err.Error())
+	g.Log().Errorf(c.srv.ctx, "读取数据发生错误:%s", err.Error())
 }
 func (c *Client) closeConnection() {
 	_ = c.conn.Close()
@@ -248,7 +279,8 @@ func (c *Client) closeConnection() {
 
 // isErrConnReset read: connection reset by peer
 func isErrConnReset(err error) bool {
-	if ne, ok := err.(*net.OpError); ok {
+	var ne *net.OpError
+	if errors.As(err, &ne) {
 		return strings.Contains(ne.Err.Error(), syscall.ECONNRESET.Error())
 	}
 	return false
@@ -258,7 +290,10 @@ func (c *Client) send(buf []byte) error {
 	if c.conn == nil {
 		return nil
 	}
-	glog.Debugf("----->%2X", buf)
+	if c.Id == "863569064249469" {
+		g.Log().Debugf(c.srv.ctx, "----->%2X", buf)
+	}
+
 	err := c.conn.Send(buf)
 	if err != nil {
 		glog.Error(c.srv.ctx, err)
@@ -279,8 +314,9 @@ func (c *Client) GetStatus() {
 	//	time.Sleep(10 * time.Second)
 	//}
 	for {
-		c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
-		time.Sleep(10 * time.Second)
+		c.sendChan <- []byte{0x02, 0x03, 0x00, 0x21, 0x00, 0x02, 0x94, 0x32}
+		//c.sendChan <- []byte{0x02, 0x03, 0x00, 0x00, 0x00, 0x54, 0x44, 0x06}
+		time.Sleep(1 * time.Second)
 	}
 }
 
@@ -288,7 +324,7 @@ func (c *Client) GetSensorStatus() {
 	for {
 		err := c.send([]byte{0x01, 0x03, 0x00, 0x02, 0x00, 0x07, 0x00, 0x00})
 		if err != nil {
-			glog.Debugf("处理数据失败:%s", err.Error())
+			g.Log().Errorf(c.srv.ctx, "处理数据失败:%s", err.Error())
 		}
 		time.Sleep(10 * time.Second)
 	}
@@ -298,9 +334,9 @@ func (c *Client) Read() {
 	for {
 		receiveBuf, err := c.conn.Recv(-1)
 		if err != nil {
-			glog.Debugf("接收数据失败:%s", err.Error())
+			g.Log().Errorf(c.srv.ctx, "处理数据失败:%s", err.Error())
 		}
-		glog.Debugf("收到数据:%2X", receiveBuf)
+		g.Log().Debugf(c.srv.ctx, "收到数据:%2X", receiveBuf)
 		time.Sleep(1 * time.Second)
 	}
 }
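
The new polling frame in GetStatus, {0x02, 0x03, 0x00, 0x21, 0x00, 0x02, 0x94, 0x32}, looks like a Modbus RTU read-holding-registers request (slave 0x02, function 0x03, start register 0x0021, quantity 2, two trailing CRC bytes), and the response check for the 0x02 0x03 0x04 header matches the corresponding reply (slave, function, byte count 4 for two registers). That reading is an inference from the byte layout, not something the diff states. A sketch of the standard Modbus CRC-16 routine that should let you verify or regenerate the trailing CRC bytes of frames like this one:

```go
package sketch

// modbusCRC16 computes the Modbus RTU CRC (polynomial 0xA001, initial
// value 0xFFFF) over a frame without its trailing CRC bytes.
func modbusCRC16(frame []byte) uint16 {
	crc := uint16(0xFFFF)
	for _, b := range frame {
		crc ^= uint16(b)
		for i := 0; i < 8; i++ {
			if crc&1 != 0 {
				crc = (crc >> 1) ^ 0xA001
			} else {
				crc >>= 1
			}
		}
	}
	return crc
}

// appendCRC returns the frame with its CRC appended low byte first, as
// Modbus transmits it; appendCRC([]byte{0x02, 0x03, 0x00, 0x21, 0x00, 0x02})
// should reproduce the eight-byte polling request used in GetStatus.
func appendCRC(frame []byte) []byte {
	crc := modbusCRC16(frame)
	return append(frame, byte(crc&0xFF), byte(crc>>8))
}
```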

+ 17 - 9
server/server.go

@@ -3,10 +3,10 @@ package server
 import (
 	"context"
 	"fmt"
-	"github.com/gogf/gf/container/gmap"
-	"github.com/gogf/gf/frame/g"
-	"github.com/gogf/gf/net/gtcp"
-	"github.com/gogf/gf/os/glog"
+	"github.com/gogf/gf/v2/container/gmap"
+	"github.com/gogf/gf/v2/container/gvar"
+	"github.com/gogf/gf/v2/frame/g"
+	"github.com/gogf/gf/v2/net/gtcp"
 	gatewayV2 "sparrow-sdk/v2"
 	"time"
 )
@@ -33,7 +33,7 @@ func NewServer(ctx context.Context, addr string, port int, gw *gatewayV2.Gateway
 }
 
 func (s *Server) Start() error {
-	glog.Printf("服务端启动[%s:%d]", s.addr, s.port)
+	g.Log().Printf(s.ctx, "服务端启动[%s:%d]", s.addr, s.port)
 	srv := gtcp.NewServer(fmt.Sprintf("%s:%d", s.addr, s.port), s.onClientConnect)
 	s.srv = srv
 	return s.srv.Run()
@@ -49,17 +49,17 @@ func (s *Server) Stop() {
 }
 
 func (s *Server) onClientConnect(conn *gtcp.Conn) {
-	glog.Debugf("新的设备接入:%s", conn.RemoteAddr())
+	g.Log().Debugf(s.ctx, "新的设备接入:%s", conn.RemoteAddr())
 	client := NewClient(s, conn)
 	client.closeHandler = func(id string, c *Client) {
-		glog.Debugf("客户端断开:%s", id)
+		g.Log().Debugf(s.ctx, "客户端断开:%s", id)
 		if id != "" {
-			_ = s.gateWay.SubDeviceLogout(g.Cfg().GetString("sparrow.DeviceCode"), id)
+			_ = s.gateWay.SubDeviceLogout(GetConfig("sparrow.DeviceCode").String(), id)
 			s.clients.Remove(id)
 		}
 	}
 	client.regHandler = func(id string, c *Client) {
-		_ = s.gateWay.SubDeviceLogin(g.Cfg().GetString("Sparrow.DeviceCode"), id)
+		_ = s.gateWay.SubDeviceLogin(GetConfig("Sparrow.DeviceCode").String(), id)
 		s.clients.Set(id, c)
 	}
 
@@ -85,3 +85,11 @@ func (s *Server) GetClient(subId string) *Client {
 	}
 	return nil
 }
+
+func GetConfig(key string) *gvar.Var {
+	ret, err := g.Cfg().Get(context.Background(), key)
+	if err != nil {
+
+	}
+	return ret
+}
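
The new GetConfig helper wraps gf v2's g.Cfg().Get, which now takes a context and returns an error; as committed, the error branch is empty, so configuration read failures are silently discarded and callers receive a nil gvar.Var. A hedged variant with the same signature that at least logs the failure (purely illustrative, not part of the commit):

```go
package sketch

import (
	"context"

	"github.com/gogf/gf/v2/container/gvar"
	"github.com/gogf/gf/v2/frame/g"
)

// GetConfig reads a configuration value by key and logs, rather than
// swallows, any lookup error before returning the (possibly nil) value.
func GetConfig(key string) *gvar.Var {
	ctx := context.Background()
	ret, err := g.Cfg().Get(ctx, key)
	if err != nil {
		g.Log().Errorf(ctx, "failed to read config %q: %v", key, err)
	}
	return ret
}
```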

+ 22 - 0
vendor/github.com/cespare/xxhash/v2/LICENSE.txt

@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 69 - 0
vendor/github.com/cespare/xxhash/v2/README.md

@@ -0,0 +1,69 @@
+# xxhash
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+    func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+This implementation provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
+| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
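
The vendored README above documents the package's two entry points (one-shot Sum64/Sum64String and the streaming Digest type); for reference, a minimal usage sketch of both, based only on the API listed there:

```go
package sketch

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func demoXXHash() {
	// One-shot hashing.
	fmt.Println(xxhash.Sum64([]byte("hello")))
	fmt.Println(xxhash.Sum64String("hello"))

	// Streaming via the Digest type, which implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("hel") // Write/WriteString always return len(b), nil
	d.WriteString("lo")
	fmt.Println(d.Sum64())
}
```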

+ 235 - 0
vendor/github.com/cespare/xxhash/v2/xxhash.go

@@ -0,0 +1,235 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+const (
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+	prime1v = prime1
+	prime2v = prime2
+	prime3v = prime3
+	prime4v = prime4
+	prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	total uint64
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+	var d Digest
+	d.Reset()
+	return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+	d.v1 = prime1v + prime2
+	d.v2 = prime2
+	d.v3 = 0
+	d.v4 = -prime1v
+	d.total = 0
+	d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+	n = len(b)
+	d.total += uint64(n)
+
+	if d.n+n < 32 {
+		// This new data doesn't even fill the current block.
+		copy(d.mem[d.n:], b)
+		d.n += n
+		return
+	}
+
+	if d.n > 0 {
+		// Finish off the partial block.
+		copy(d.mem[d.n:], b)
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
+		b = b[32-d.n:]
+		d.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		nw := writeBlocks(d, b)
+		b = b[nw:]
+	}
+
+	// Store any remaining partial block.
+	copy(d.mem[:], b)
+	d.n = len(b)
+
+	return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+	s := d.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+	var h uint64
+
+	if d.total >= 32 {
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = d.v3 + prime5
+	}
+
+	h += d.total
+
+	i, end := 0, d.n
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(d.mem[i:i+8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for i < end {
+		h ^= uint64(d.mem[i]) * prime5
+		h = rol11(h) * prime1
+		i++
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+const (
+	magic         = "xxh\x06"
+	marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, marshaledSize)
+	b = append(b, magic...)
+	b = appendUint64(b, d.v1)
+	b = appendUint64(b, d.v2)
+	b = appendUint64(b, d.v3)
+	b = appendUint64(b, d.v4)
+	b = appendUint64(b, d.total)
+	b = append(b, d.mem[:d.n]...)
+	b = b[:len(b)+len(d.mem)-d.n]
+	return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+		return errors.New("xxhash: invalid hash state identifier")
+	}
+	if len(b) != marshaledSize {
+		return errors.New("xxhash: invalid hash state size")
+	}
+	b = b[len(magic):]
+	b, d.v1 = consumeUint64(b)
+	b, d.v2 = consumeUint64(b)
+	b, d.v3 = consumeUint64(b)
+	b, d.v4 = consumeUint64(b)
+	b, d.total = consumeUint64(b)
+	copy(d.mem[:], b)
+	d.n = int(d.total % uint64(len(d.mem)))
+	return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+	var a [8]byte
+	binary.LittleEndian.PutUint64(a[:], x)
+	return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	x := u64(b)
+	return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}
+
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }

+ 13 - 0
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go

@@ -0,0 +1,13 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int

+ 215 - 0
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s

@@ -0,0 +1,215 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX	h
+// SI	pointer to advance through b
+// DX	n
+// BX	loop end
+// R8	v1, k1
+// R9	v2
+// R10	v3
+// R11	v4
+// R12	tmp
+// R13	prime1v
+// R14	prime2v
+// DI	prime4v
+
+// round reads from and advances the buffer pointer in SI.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+	MOVQ  (SI), R12 \
+	ADDQ  $8, SI    \
+	IMULQ R14, R12  \
+	ADDQ  R12, r    \
+	ROLQ  $31, r    \
+	IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
+#define mergeRound(acc, val) \
+	IMULQ R14, val \
+	ROLQ  $31, val \
+	IMULQ R13, val \
+	XORQ  val, acc \
+	IMULQ R13, acc \
+	ADDQ  DI, acc
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+	// Load fixed primes.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+	MOVQ ·prime4v(SB), DI
+
+	// Load slice.
+	MOVQ b_base+0(FP), SI
+	MOVQ b_len+8(FP), DX
+	LEAQ (SI)(DX*1), BX
+
+	// The first loop limit will be len(b)-32.
+	SUBQ $32, BX
+
+	// Check whether we have at least one block.
+	CMPQ DX, $32
+	JLT  noBlocks
+
+	// Set up initial state (v1, v2, v3, v4).
+	MOVQ R13, R8
+	ADDQ R14, R8
+	MOVQ R14, R9
+	XORQ R10, R10
+	XORQ R11, R11
+	SUBQ R13, R11
+
+	// Loop until SI > BX.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ SI, BX
+	JLE  blockLoop
+
+	MOVQ R8, AX
+	ROLQ $1, AX
+	MOVQ R9, R12
+	ROLQ $7, R12
+	ADDQ R12, AX
+	MOVQ R10, R12
+	ROLQ $12, R12
+	ADDQ R12, AX
+	MOVQ R11, R12
+	ROLQ $18, R12
+	ADDQ R12, AX
+
+	mergeRound(AX, R8)
+	mergeRound(AX, R9)
+	mergeRound(AX, R10)
+	mergeRound(AX, R11)
+
+	JMP afterBlocks
+
+noBlocks:
+	MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+	ADDQ DX, AX
+
+	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
+	ADDQ $24, BX
+
+	CMPQ SI, BX
+	JG   fourByte
+
+wordLoop:
+	// Calculate k1.
+	MOVQ  (SI), R8
+	ADDQ  $8, SI
+	IMULQ R14, R8
+	ROLQ  $31, R8
+	IMULQ R13, R8
+
+	XORQ  R8, AX
+	ROLQ  $27, AX
+	IMULQ R13, AX
+	ADDQ  DI, AX
+
+	CMPQ SI, BX
+	JLE  wordLoop
+
+fourByte:
+	ADDQ $4, BX
+	CMPQ SI, BX
+	JG   singles
+
+	MOVL  (SI), R8
+	ADDQ  $4, SI
+	IMULQ R13, R8
+	XORQ  R8, AX
+
+	ROLQ  $23, AX
+	IMULQ R14, AX
+	ADDQ  ·prime3v(SB), AX
+
+singles:
+	ADDQ $4, BX
+	CMPQ SI, BX
+	JGE  finalize
+
+singlesLoop:
+	MOVBQZX (SI), R12
+	ADDQ    $1, SI
+	IMULQ   ·prime5v(SB), R12
+	XORQ    R12, AX
+
+	ROLQ  $11, AX
+	IMULQ R13, AX
+
+	CMPQ SI, BX
+	JL   singlesLoop
+
+finalize:
+	MOVQ  AX, R12
+	SHRQ  $33, R12
+	XORQ  R12, AX
+	IMULQ R14, AX
+	MOVQ  AX, R12
+	SHRQ  $29, R12
+	XORQ  R12, AX
+	IMULQ ·prime3v(SB), AX
+	MOVQ  AX, R12
+	SHRQ  $32, R12
+	XORQ  R12, AX
+
+	MOVQ AX, ret+24(FP)
+	RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the d pointer.
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+	// Load fixed primes needed for round.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+
+	// Load slice.
+	MOVQ b_base+8(FP), SI
+	MOVQ b_len+16(FP), DX
+	LEAQ (SI)(DX*1), BX
+	SUBQ $32, BX
+
+	// Load vN from d.
+	MOVQ d+0(FP), AX
+	MOVQ 0(AX), R8   // v1
+	MOVQ 8(AX), R9   // v2
+	MOVQ 16(AX), R10 // v3
+	MOVQ 24(AX), R11 // v4
+
+	// We don't need to check the loop condition here; this function is
+	// always called with at least one block of data to process.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ SI, BX
+	JLE  blockLoop
+
+	// Copy vN back to d.
+	MOVQ R8, 0(AX)
+	MOVQ R9, 8(AX)
+	MOVQ R10, 16(AX)
+	MOVQ R11, 24(AX)
+
+	// The number of bytes written is SI minus the old base pointer.
+	SUBQ b_base+8(FP), SI
+	MOVQ SI, ret+32(FP)
+
+	RET

+ 76 - 0
vendor/github.com/cespare/xxhash/v2/xxhash_other.go

@@ -0,0 +1,76 @@
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+	// A simpler version would be
+	//   d := New()
+	//   d.Write(b)
+	//   return d.Sum64()
+	// but this is faster, particularly for small inputs.
+
+	n := len(b)
+	var h uint64
+
+	if n >= 32 {
+		v1 := prime1v + prime2
+		v2 := prime2
+		v3 := uint64(0)
+		v4 := -prime1v
+		for len(b) >= 32 {
+			v1 = round(v1, u64(b[0:8:len(b)]))
+			v2 = round(v2, u64(b[8:16:len(b)]))
+			v3 = round(v3, u64(b[16:24:len(b)]))
+			v4 = round(v4, u64(b[24:32:len(b)]))
+			b = b[32:len(b):len(b)]
+		}
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = prime5
+	}
+
+	h += uint64(n)
+
+	i, end := 0, len(b)
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(b[i:i+8:len(b)]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for ; i < end; i++ {
+		h ^= uint64(b[i]) * prime5
+		h = rol11(h) * prime1
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+	n := len(b)
+	for len(b) >= 32 {
+		v1 = round(v1, u64(b[0:8:len(b)]))
+		v2 = round(v2, u64(b[8:16:len(b)]))
+		v3 = round(v3, u64(b[16:24:len(b)]))
+		v4 = round(v4, u64(b[24:32:len(b)]))
+		b = b[32:len(b):len(b)]
+	}
+	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+	return n - len(b)
+}

+ 15 - 0
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go

@@ -0,0 +1,15 @@
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+	return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	return d.Write([]byte(s))
+}

+ 57 - 0
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go

@@ -0,0 +1,57 @@
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+	"unsafe"
+)
+
+// In the future it's possible that compiler optimizations will make these
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
+//
+//   var b []byte
+//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+//   bh.Len = len(s)
+//   bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
+	return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+	// d.Write always returns len(s), nil.
+	// Ignoring the return output and returning these fixed values buys a
+	// savings of 6 in the inliner's cost model.
+	return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+	s   string
+	cap int
+}
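
For comparison, a small sketch (illustrative input) showing that the string helpers above produce the same results as their []byte counterparts while avoiding a copy on non-appengine builds:

	package main

	import (
		"fmt"

		"github.com/cespare/xxhash/v2"
	)

	func main() {
		s := "some key"
		fmt.Println(xxhash.Sum64String(s) == xxhash.Sum64([]byte(s))) // true

		d := xxhash.New()
		d.WriteString(s) // same digest as d.Write([]byte(s)), without the copy
		fmt.Println(d.Sum64() == xxhash.Sum64String(s)) // true
	}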

+ 4 - 0
vendor/github.com/clbanning/mxj/v2/.travis.yml

@@ -0,0 +1,4 @@
+language: go
+
+go:
+- 1.x

+ 22 - 0
vendor/github.com/clbanning/mxj/v2/LICENSE

@@ -0,0 +1,22 @@
+Copyright (c) 2012-2021 Charles Banning <clbanning@gmail.com>.  All rights reserved.
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 201 - 0
vendor/github.com/clbanning/mxj/v2/anyxml.go

@@ -0,0 +1,201 @@
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"reflect"
+)
+
+const (
+	DefaultElementTag = "element"
+)
+
+// Encode arbitrary value as XML.
+//
+// Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/mxj"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := mxj.AnyXmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+		  <somekey>somevalue</somekey>
+		  <element>string</element>
+		  <element>3.14159265</element>
+		  <element>true</element>
+		</mydoc>
+
+An extreme example is available in examples/goofy_map.go.
+*/
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXml( v, myRootTag, myElementTag).
+func AnyXml(v interface{}, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte("<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte("<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.Marshal(v)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">"); err != nil {
+			return nil, err
+		}
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(false, s, tag, val, p)
+					}
+				} else {
+					err = marshalMapToXmlIndent(false, s, et, vv, p)
+				}
+			default:
+				err = marshalMapToXmlIndent(false, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString("</" + rt + ">"); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.Xml(rt)
+	default:
+		err = marshalMapToXmlIndent(false, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}
+
+// Encode an arbitrary value as a pretty XML string.
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXmlIndent( v, "", "  ", myRootTag, myElementTag).
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte(prefix + "<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte(prefix + "<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.MarshalIndent(v, prefix, indent)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">\n"); err != nil {
+			return nil, err
+		}
+		p.Indent()
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(true, s, tag, val, p)
+					}
+				} else {
+					p.start = 1 // we're 1 tag in
+					err = marshalMapToXmlIndent(true, s, et, vv, p)
+					// *s += "\n"
+					if _, err = s.WriteString("\n"); err != nil {
+						return nil, err
+					}
+				}
+			default:
+				p.start = 0 // in case trailing p.start = 1
+				err = marshalMapToXmlIndent(true, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString(`</` + rt + `>`); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.XmlIndent(prefix, indent, rt)
+	default:
+		err = marshalMapToXmlIndent(true, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}
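
A short sketch of AnyXmlIndent with custom root and element tags, as described in the comments above; the input value and the tag names "mydoc" and "item" are illustrative, and the import path is the one vendored here:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		v := []interface{}{
			map[string]interface{}{"somekey": "somevalue"},
			"string",
			3.14159265,
			true,
		}
		// Root tag "mydoc", anonymous list members wrapped in "item" instead of "element".
		x, err := mxj.AnyXmlIndent(v, "", "  ", "mydoc", "item")
		if err != nil {
			// handle error
		}
		fmt.Println(string(x))
	}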

+ 54 - 0
vendor/github.com/clbanning/mxj/v2/atomFeedString.xml

@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
+</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
+  An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+  1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
+     feeds that will be pubsubhubbubbed.
+  2. every time one of those feeds changes, tell the hub
+     with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&amp;#39;t quite get the server to work, but I think the bug
+is not in my code.  I think that the server expects to be
+able to grab the feed and see the feed&amp;#39;s actual URL in
+the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+</summary></entry><entry><title>rietveld: correct tab handling
+</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
+  This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&amp;#39;t know where to put the tab stops.  Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites.  I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+</summary></entry></feed> 	   `
+

+ 138 - 0
vendor/github.com/clbanning/mxj/v2/doc.go

@@ -0,0 +1,138 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2019, Charles Banning. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file
+
+/*
+Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package.  The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them.
+
+Note: this library was designed for processing ad hoc anonymous messages.  Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly.
+
+Related Packages:
+	checkxml: github.com/clbanning/checkxml provides functions for validating XML data.
+
+Notes:
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>].
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+
+SUMMARY
+
+   type Map map[string]interface{}
+
+   Create a Map value, 'mv', from any map[string]interface{} value, 'v':
+      mv := Map(v)
+
+   Unmarshal / marshal XML as a Map value, 'mv':
+      mv, err := NewMapXml(xmlValue) // unmarshal
+      xmlValue, err := mv.Xml()      // marshal
+
+   Unmarshal XML from an io.Reader as a Map value, 'mv':
+      mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+      mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
+
+   Marshal Map value, 'mv', to an XML Writer (io.Writer):
+      err := mv.XmlWriter(xmlWriter)
+      raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
+
+   Also, for prettified output:
+      xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+      err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+      raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
+
+   Bulk process XML with error handling (note: handlers must return a boolean value):
+      err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+      err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
+
+   Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader.
+
+   There are comparable functions and methods for JSON processing.
+
+   Arbitrary structure values can be decoded to / encoded from Map values:
+      mv, err := NewMapStruct(structVal)
+      err := mv.Struct(structPointer)
+
+   To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+   or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then:
+      paths := mv.PathsForKey(key)
+      path := mv.PathForKeyShortest(key)
+      values, err := mv.ValuesForKey(key, subkeys)
+      values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays.
+      count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
+
+   Get everything at once, irrespective of path depth:
+      leafnodes := mv.LeafNodes()
+      leafvalues := mv.LeafValues()
+
+   A new Map with whatever keys are desired can be created from the current Map and then encoded in XML
+   or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.)
+      newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+      newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+      newXml, err := newMap.Xml()   // for example
+      newJson, err := newMap.Json() // ditto
+
+XML PARSING CONVENTIONS
+
+   Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+   Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved: 
+      - <ns:key>something</ns:key> parses to map["ns:key"]interface{}{"something"}
+      - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"}
+
+   Both
+
+   - By default, "NaN", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them using CastNanInf(true).
+
+XML ENCODING CONVENTIONS
+   
+   - 'nil' Map values, which may represent 'null' JSON values, are encoded as "<tag/>".
+     NOTE: the operation is not symmetric as "<tag/>" elements are decoded as 'tag:""' Map values,
+           which then encode in JSON as '"tag":""' values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+*/
+package mxj
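
A minimal round-trip sketch of the SUMMARY above - unmarshal XML to a Map, pull a value by path, re-encode as JSON; the XML document and path are illustrative:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		xmlVal := []byte(`<doc><book seq="1"><author>William T. Gaddis</author><title>The Recognitions</title></book></doc>`)
		mv, err := mxj.NewMapXml(xmlVal)
		if err != nil {
			// handle error
		}
		titles, _ := mv.ValuesForPath("doc.book.title")
		fmt.Println(titles) // [The Recognitions]
		j, _ := mv.Json()
		fmt.Println(string(j)) // the same document as JSON
	}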

+ 93 - 0
vendor/github.com/clbanning/mxj/v2/escapechars.go

@@ -0,0 +1,93 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+)
+
+var xmlEscapeChars bool
+
+// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values.
+// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is
+// then '&amp;' will be re-escaped as '&amp;amp;'.
+//
+/*
+	The values are:
+	"   &quot;
+	'   &apos;
+	<   &lt;
+	>   &gt;
+	&   &amp;
+*/
+//
+// Note: if XMLEscapeCharsDecoder(true) has been called - or the default, 'false,' value
+// has been toggled to 'true' - then XMLEscapeChars(true) is ignored.  If XMLEscapeChars(true)
+// has already been called before XMLEscapeCharsDecoder(true), XMLEscapeChars(false) is called
+// to turn escape encoding off for mv.Xml, etc., preventing double-escaping of ampersands, '&'.
+func XMLEscapeChars(b ...bool) {
+	var bb bool
+	if len(b) == 0 {
+		bb = !xmlEscapeChars
+	} else {
+		bb = b[0]
+	}
+	if bb == true && xmlEscapeCharsDecoder == false {
+		xmlEscapeChars = true
+	} else {
+		xmlEscapeChars = false
+	}
+}
+
+// Scan for '&' first, since 's' may contain "&amp;" that is parsed to "&amp;amp;"
+// - or "&lt;" that is parsed to "&amp;lt;".
+var escapechars = [][2][]byte{
+	{[]byte(`&`), []byte(`&amp;`)},
+	{[]byte(`<`), []byte(`&lt;`)},
+	{[]byte(`>`), []byte(`&gt;`)},
+	{[]byte(`"`), []byte(`&quot;`)},
+	{[]byte(`'`), []byte(`&apos;`)},
+}
+
+func escapeChars(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+
+	b := []byte(s)
+	for _, v := range escapechars {
+		n := bytes.Count(b, v[0])
+		if n == 0 {
+			continue
+		}
+		b = bytes.Replace(b, v[0], v[1], n)
+	}
+	return string(b)
+}
+
+// per issue #84, escape CharData values from xml.Decoder
+
+var xmlEscapeCharsDecoder bool
+
+// XMLEscapeCharsDecoder(b ...bool) escapes XML characters in xml.CharData values
+// returned by Decoder.Token.  Thus, the internal Map values will contain escaped
+// values, and you do not need to set XMLEscapeChars for proper encoding.
+//
+// By default, the Map values have the non-escaped values returned by Decoder.Token.
+// XMLEscapeCharsDecoder(true) - or, XMLEscapeCharsDecoder() - will toggle escape
+// encoding 'on.'
+//
+// Note: if XMLEscapeCharsDecoder(true) is called, then XMLEscapeChars(false) is
+// called to prevent re-escaping the values on encoding using mv.Xml, etc.
+func XMLEscapeCharsDecoder(b ...bool) {
+	if len(b) == 0 {
+		xmlEscapeCharsDecoder = !xmlEscapeCharsDecoder
+	} else {
+		xmlEscapeCharsDecoder = b[0]
+	}
+	if xmlEscapeCharsDecoder == true && xmlEscapeChars == true {
+		xmlEscapeChars = false
+	}
+}
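
A small sketch of the escape toggle above; the map content is illustrative and the expected output is shown as a comment:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv := mxj.Map{"note": `a < b & "c"`}
		mxj.XMLEscapeChars(true) // escape <, >, &, ", ' when encoding values
		x, err := mv.Xml()
		if err != nil {
			// handle error
		}
		fmt.Println(string(x)) // <note>a &lt; b &amp; &quot;c&quot;</note>
		mxj.XMLEscapeChars(false)
	}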

+ 9 - 0
vendor/github.com/clbanning/mxj/v2/exists.go

@@ -0,0 +1,9 @@
+package mxj
+
+// Checks whether the path exists. If err != nil then 'false' is returned
+// along with the error encountered parsing either the "path" or "subkeys"
+// argument.
+func (mv Map) Exists(path string, subkeys ...string) (bool, error) {
+	v, err := mv.ValuesForPath(path, subkeys...)
+	return (err == nil && len(v) > 0), err
+}
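
A quick sketch of Exists as a guard before walking a Map; the map content and paths are illustrative:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv := mxj.Map{"doc": map[string]interface{}{"book": map[string]interface{}{"title": "The Recognitions"}}}
		ok, err := mv.Exists("doc.book.title")
		fmt.Println(ok, err) // true <nil>
		ok, _ = mv.Exists("doc.magazine")
		fmt.Println(ok) // false
	}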

+ 287 - 0
vendor/github.com/clbanning/mxj/v2/files.go

@@ -0,0 +1,287 @@
+package mxj
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+type Maps []Map
+
+func NewMaps() Maps {
+	return make(Maps, 0)
+}
+
+type MapRaw struct {
+	M Map
+	R []byte
+}
+
+// NewMapsFromJsonFile - creates an array of Maps from a file of JSON values.
+func NewMapsFromJsonFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values.
+func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFile - creates an array from a file of XML values.
+func NewMapsFromXmlFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values.
+// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw().
+// It is slow at parsing a file from disk and is intended for relatively small utility files.
+func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// ------------------------ Maps writing -------------------------
+// These are handy-dandy methods for dumping configuration data, etc.
+
+// JsonString - analogous to mv.Json()
+func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) {
+	var s string
+	for _, v := range mvs {
+		j, err := v.Json()
+		if err != nil {
+			return s, err
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// JsonStringIndent - analogous to mv.JsonIndent()
+func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) {
+	var s string
+	var haveFirst bool
+	for _, v := range mvs {
+		j, err := v.JsonIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		if haveFirst {
+			s += "\n"
+		} else {
+			haveFirst = true
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// XmlString - analogous to mv.Xml()
+func (mvs Maps) XmlString() (string, error) {
+	var s string
+	for _, v := range mvs {
+		x, err := v.Xml()
+		if err != nil {
+			return s, err
+		}
+		s += string(x)
+	}
+	return s, nil
+}
+
+// XmlStringIndent - analogous to mv.XmlIndent()
+func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) {
+	var s string
+	for _, v := range mvs {
+		x, err := v.XmlIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		s += string(x)
+	}
+	return s, nil
+}
+
+// JsonFile - write Maps to named file as JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonWriter method.
+func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonString(encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// JsonFileIndent - write Maps to named file as pretty JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonIndentWriter method.
+func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonStringIndent(prefix, indent, encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// XmlFile - write Maps to named file as XML
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlWriter method.
+func (mvs Maps) XmlFile(file string) error {
+	s, err := mvs.XmlString()
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
+
+// XmlFileIndent - write Maps to named file as pretty XML
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlIndentWriter method.
+func (mvs Maps) XmlFileIndent(file, prefix, indent string) error {
+	s, err := mvs.XmlStringIndent(prefix, indent)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	fh.WriteString(s)
+	return nil
+}
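
A hedged sketch of the file helpers above; "values.json" and "values.xml" are placeholder file names:

	package main

	import (
		"log"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		// One Map per JSON value found in the file.
		mvs, err := mxj.NewMapsFromJsonFile("values.json")
		if err != nil {
			log.Fatal(err)
		}
		// Re-emit the same values as pretty-printed XML.
		if err := mvs.XmlFileIndent("values.xml", "", "  "); err != nil {
			log.Fatal(err)
		}
	}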

+ 2 - 0
vendor/github.com/clbanning/mxj/v2/files_test.badjson

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"some", "bad":JSON, "in":"it" }

+ 9 - 0
vendor/github.com/clbanning/mxj/v2/files_test.badxml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</other>
+	<for>test case</for>
+</msg>

+ 2 - 0
vendor/github.com/clbanning/mxj/v2/files_test.json

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"just", "two":2, "JSON":"values", "true":true }

+ 9 - 0
vendor/github.com/clbanning/mxj/v2/files_test.xml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</another>
+	<for>test case</for>
+</msg>

+ 1 - 0
vendor/github.com/clbanning/mxj/v2/files_test_dup.json

@@ -0,0 +1 @@
+{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"}

+ 1 - 0
vendor/github.com/clbanning/mxj/v2/files_test_dup.xml

@@ -0,0 +1 @@
+<doc><data>for files.go</data><some>test</some></doc><msg><another>doc</another><for>test case</for><just>some</just></msg>

+ 12 - 0
vendor/github.com/clbanning/mxj/v2/files_test_indent.json

@@ -0,0 +1,12 @@
+{
+  "a": "test",
+  "file": "for",
+  "files_test.go": "case",
+  "this": "is"
+}
+{
+  "JSON": "values",
+  "true": true,
+  "two": 2,
+  "with": "just"
+}

+ 8 - 0
vendor/github.com/clbanning/mxj/v2/files_test_indent.xml

@@ -0,0 +1,8 @@
+<doc>
+  <data>for files.go</data>
+  <some>test</some>
+</doc><msg>
+  <another>doc</another>
+  <for>test case</for>
+  <just>some</just>
+</msg>

+ 35 - 0
vendor/github.com/clbanning/mxj/v2/gob.go

@@ -0,0 +1,35 @@
+// gob.go - Encode/Decode a Map into a gob object.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/gob"
+)
+
+// NewMapGob returns a Map value for a gob object that has been
+// encoded from a map[string]interface{} (or compatible type) value.
+// It is intended to provide symmetric handling of Maps that have
+// been encoded using mv.Gob.
+func NewMapGob(gobj []byte) (Map, error) {
+	m := make(map[string]interface{}, 0)
+	if len(gobj) == 0 {
+		return m, nil
+	}
+	r := bytes.NewReader(gobj)
+	dec := gob.NewDecoder(r)
+	if err := dec.Decode(&m); err != nil {
+		return m, err
+	}
+	return m, nil
+}
+
+// Gob returns a gob-encoded value for the Map 'mv'.
+func (mv Map) Gob() ([]byte, error) {
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	if err := enc.Encode(map[string]interface{}(mv)); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
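
A round-trip sketch of the gob helpers above; the map content is illustrative and kept to basic value types, which gob handles without extra Register calls:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv := mxj.Map{"name": "gateway", "retries": 3, "verbose": true}
		g, err := mv.Gob() // gob-encode the underlying map[string]interface{}
		if err != nil {
			// handle error
		}
		back, err := mxj.NewMapGob(g) // symmetric decode back to a Map
		if err != nil {
			// handle error
		}
		fmt.Println(back) // map[name:gateway retries:3 verbose:true]
	}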

+ 323 - 0
vendor/github.com/clbanning/mxj/v2/json.go

@@ -0,0 +1,323 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+)
+
+// ------------------------------ write JSON -----------------------
+
+// Just a wrapper on json.Marshal.
+// If option safeEncoding is 'true' then safe encoding of '<', '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) Json(safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.Marshal(mv)
+
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// Just a wrapper on json.MarshalIndent.
+// If option safeEncoding is 'true' then safe encoding of '<', '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.MarshalIndent(mv, prefix, indent)
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// The following implementation is provided for symmetry with NewMapJsonReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// Writes the Map as pretty JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// --------------------------- read JSON -----------------------------
+
+// Decode numeric values as json.Number type Map values - see encoding/json#Number.
+// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(), 
+// etc.; it does not affect NewMapXml(), etc.  The XML encoders mv.Xml() and mv.XmlIndent()
+// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number
+// value types and the resulting Map can be correctly encoded into an XML object.
+var JsonUseNumber bool
+
+// Just a wrapper on json.Unmarshal
+//	Converting JSON to XML is as simple as:
+//		...
+//		mapVal, merr := mxj.NewMapJson(jsonVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		xmlVal, xerr := mapVal.Xml()
+//		if xerr != nil {
+//			// handle error
+//		}
+// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}],
+// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map.
+// See mxj/j2x/j2x_test.go.
+func NewMapJson(jsonVal []byte) (Map, error) {
+	// empty or nil begets empty
+	if len(jsonVal) == 0 {
+		m := make(map[string]interface{}, 0)
+		return m, nil
+	}
+	// handle a goofy case ...
+	if jsonVal[0] == '[' {
+		jsonVal = []byte(`{"object":` + string(jsonVal) + `}`)
+	}
+	m := make(map[string]interface{})
+	// err := json.Unmarshal(jsonVal, &m)
+	buf := bytes.NewReader(jsonVal)
+	dec := json.NewDecoder(buf)
+	if JsonUseNumber {
+		dec.UseNumber()
+	}
+	err := dec.Decode(&m)
+	return m, err
+}
+
+// Retrieve a Map value from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object.
+func NewMapJsonReader(jsonReader io.Reader) (Map, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	return NewMapJson(*jb)
+}
+
+// Retrieve a Map value and raw JSON - []byte - from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object and retrieve the raw JSON in a single call.
+func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, *jb, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	m, merr := NewMapJson(*jb)
+	return m, *jb, merr
+}
+
+// Pull the next JSON string off the stream: just read from first '{' to its closing '}'.
+// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package.
+func getJson(rdr io.Reader) (*[]byte, error) {
+	bval := make([]byte, 1)
+	jb := make([]byte, 0)
+	var inQuote, inJson bool
+	var parenCnt int
+	var previous byte
+
+	// scan the input for a matched set of {...}
+	// json.Unmarshal will handle syntax checking.
+	for {
+		_, err := rdr.Read(bval)
+		if err != nil {
+			if err == io.EOF && inJson && parenCnt > 0 {
+				return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb))
+			}
+			return &jb, err
+		}
+		switch bval[0] {
+		case '{':
+			if !inQuote {
+				parenCnt++
+				inJson = true
+			}
+		case '}':
+			if !inQuote {
+				parenCnt--
+			}
+			if parenCnt < 0 {
+				return nil, fmt.Errorf("closing } without opening {: %s", string(jb))
+			}
+		case '"':
+			if inQuote {
+				if previous == '\\' {
+					break
+				}
+				inQuote = false
+			} else {
+				inQuote = true
+			}
+		case '\n', '\r', '\t', ' ':
+			if !inQuote {
+				continue
+			}
+		}
+		if inJson {
+			jb = append(jb, bval[0])
+			if parenCnt == 0 {
+				break
+			}
+		}
+		previous = bval[0]
+	}
+
+	return &jb, nil
+}
+
+// ------------------------------- JSON Reader handler via Map values  -----------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for input.
+var jhandlerPollInterval = time.Duration(1e6)
+
+// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader().
+// This avoids treating one or the other as a special case and discussing the underlying stdlib logic.
+
+// Bulk process JSON using handlers that process a Map value.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader  processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapJsonReader(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// Bulk process JSON using handlers that process a Map value and the raw JSON.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapJsonReaderRaw(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
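
A compact sketch of HandleJsonReader with the two handlers described above; the JSON stream is illustrative:

	package main

	import (
		"fmt"
		"strings"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		rdr := strings.NewReader(`{"id":"1","ok":"true"} {"id":"2","ok":"false"}`)
		mapHandler := func(m mxj.Map) bool {
			fmt.Println(m)
			return true // keep reading the stream
		}
		errHandler := func(err error) bool {
			fmt.Println("decode error:", err)
			return true // log and keep reading
		}
		if err := mxj.HandleJsonReader(rdr, mapHandler, errHandler); err != nil {
			// a handler returned false on an error
		}
	}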

+ 668 - 0
vendor/github.com/clbanning/mxj/v2/keyvalues.go

@@ -0,0 +1,668 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+//	keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters.
+
+package mxj
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ----------------------------- get everything FOR a single key -------------------------
+
+const (
+	minArraySize = 32
+)
+
+var defaultArraySize int = minArraySize
+
+// SetArraySize adjust the buffers for expected number of values to return from ValuesForKey() and ValuesForPath().
+// This can have the effect of significantly reducing memory allocation-copy functions for large data sets.
+// Returns the initial buffer size.
+func SetArraySize(size int) int {
+	if size > minArraySize {
+		defaultArraySize = size
+	} else {
+		defaultArraySize = minArraySize
+	}
+	return defaultArraySize
+}
+
+// ValuesForKey returns all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match.
+// On error, the returned slice is 'nil'. NOTE: 'key' can be wildcard, "*".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - If the 'key' refers to a list, then "key:value" could select a member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion criterion - e.g., "!author:William T. Gaddis".
+//             - If val contains the ":" symbol, use SetFieldSeparator to set an unused symbol, perhaps "|".
+func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ret := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	hasKey(m, key, &ret, &cnt, subKeyMap)
+	return ret[:cnt], nil
+}
+
+var KeyNotExistError = errors.New("Key does not exist")
+
+// ValueForKey is a wrapper on ValuesForKey.  It returns the first member of []interface{}, if any.
+// If there is no value, "nil, nil" is returned.
+func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) {
+	vals, err := mv.ValuesForKey(key, subkeys...)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, KeyNotExistError
+	}
+	return vals[0], nil
+}
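
A short sketch of ValuesForKey with a subkey filter, per the doc comment above; the JSON document and subkey are illustrative:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv, err := mxj.NewMapJson([]byte(`{"books":{"book":[{"author":"Gaddis","seq":"1"},{"author":"Pynchon","seq":"2"}]}}`))
		if err != nil {
			// handle error
		}
		// Every "book" value anywhere in the Map whose "seq" member is "2".
		vals, _ := mv.ValuesForKey("book", "seq:2")
		fmt.Println(vals) // [map[author:Pynchon seq:2]]
	}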
+
+// hasKey - if the map 'key' exists append it to array
+//          if it doesn't do nothing except scan array and map values
+func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) {
+	// func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		// see if the current value is of interest
+		if v, ok := vv[key]; ok {
+			switch v.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(v, subkeys) {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			case []interface{}:
+				for _, av := range v.([]interface{}) {
+					if hasSubKeys(av, subkeys) {
+						*ret = append(*ret, av)
+						*cnt++
+					}
+				}
+			default:
+				if len(subkeys) == 0 {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			}
+		}
+
+		// wildcard case
+		if key == "*" {
+			for _, v := range vv {
+				switch v.(type) {
+				case map[string]interface{}:
+					if hasSubKeys(v, subkeys) {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				case []interface{}:
+					for _, av := range v.([]interface{}) {
+						if hasSubKeys(av, subkeys) {
+							*ret = append(*ret, av)
+							*cnt++
+						}
+					}
+				default:
+					if len(subkeys) == 0 {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				}
+			}
+		}
+
+		// scan the rest
+		for _, v := range vv {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	case []interface{}:
+		for _, v := range iv.([]interface{}) {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	}
+}
+
+// -----------------------  get everything for a node in the Map ---------------------------
+
+// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.)
+// 2014.04.28 - implementation note.
+// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion
+// of wildcards and unindexed arrays.  Embedding such logic into valuesForKeyPath() would have made the
+// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead.
+
+// ValuesForPath retrieves all values for a path from the Map.  If len(returned_values) == 0, then no match.
+// On error, the returned array is 'nil'.
+//   'path' is a dot-separated path of key values.
+//          - If a node in the path is '*', then everything beyond is walked.
+//          - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" -
+//            even "*[2].*[0].field".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - If the 'path' refers to a list, then "tag:value" would return a member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion criterion - e.g., "!author:William T. Gaddis".
+//             - If val contains the ":" symbol, use SetFieldSeparator to set an unused symbol, perhaps "|".
+func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	// If there are no array indexes in path, use legacy ValuesForPath() logic.
+	if strings.Index(path, "[") < 0 {
+		return mv.oldValuesForPath(path, subkeys...)
+	}
+
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys, kerr := parsePath(path)
+	if kerr != nil {
+		return nil, kerr
+	}
+
+	vals, verr := valuesForArray(keys, mv)
+	if verr != nil {
+		return nil, verr // Vals may be nil, but return empty array.
+	}
+
+	// Need to handle subkeys ... only return members of vals that satisfy conditions.
+	retvals := make([]interface{}, 0)
+	for _, v := range vals {
+		if hasSubKeys(v, subKeyMap) {
+			retvals = append(retvals, v)
+		}
+	}
+	return retvals, nil
+}
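
A sketch of the indexed-path and subkey behavior described above; the JSON document, path, and subkey are illustrative:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv, err := mxj.NewMapJson([]byte(`{"msgs":[{"data":{"field":"a","seq":"1"}},{"data":{"field":"b","seq":"2"}}]}`))
		if err != nil {
			// handle error
		}
		// Indexed array reference in the path.
		v, _ := mv.ValuesForPath("msgs[1].data.field")
		fmt.Println(v) // [b]
		// Subkey filter: only "data" nodes whose "seq" member is "1".
		v, _ = mv.ValuesForPath("msgs.data", "seq:1")
		fmt.Println(v) // [map[field:a seq:1]]
	}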
+
+func valuesForArray(keys []*key, m Map) ([]interface{}, error) {
+	var tmppath string
+	var haveFirst bool
+	var vals []interface{}
+	var verr error
+
+	lastkey := len(keys) - 1
+	for i := 0; i <= lastkey; i++ {
+		if !haveFirst {
+			tmppath = keys[i].name
+			haveFirst = true
+		} else {
+			tmppath += "." + keys[i].name
+		}
+
+		// Look-ahead: explode wildcards and unindexed arrays.
+		// Need to handle un-indexed list recursively:
+		// e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]".
+		// Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ...
+		if !keys[i].isArray && i < lastkey && keys[i+1].isArray {
+			// Can't pass subkeys because we may not be at literal end of path.
+			vv, vverr := m.oldValuesForPath(tmppath)
+			if vverr != nil {
+				return nil, vverr
+			}
+			for _, v := range vv {
+				// See if we can walk the value.
+				am, ok := v.(map[string]interface{})
+				if !ok {
+					continue
+				}
+				// Work the backend.
+				nvals, nvalserr := valuesForArray(keys[i+1:], Map(am))
+				if nvalserr != nil {
+					return nil, nvalserr
+				}
+				vals = append(vals, nvals...)
+			}
+			break // have recursed the whole path - return
+		}
+
+		if keys[i].isArray || i == lastkey {
+			// Don't pass subkeys because may not be at literal end of path.
+			vals, verr = m.oldValuesForPath(tmppath)
+		} else {
+			continue
+		}
+		if verr != nil {
+			return nil, verr
+		}
+
+		if i == lastkey && !keys[i].isArray {
+			break
+		}
+
+		// Now we're looking at an array - supposedly.
+		// Is index in range of vals?
+		if len(vals) <= keys[i].position {
+			vals = nil
+			break
+		}
+
+		// Return the array member of interest, if at end of path.
+		if i == lastkey {
+			vals = vals[keys[i].position:(keys[i].position + 1)]
+			break
+		}
+
+		// Extract the array member of interest.
+		am := vals[keys[i].position:(keys[i].position + 1)]
+
+		// must be a map[string]interface{} value so we can keep walking the path
+		amm, ok := am[0].(map[string]interface{})
+		if !ok {
+			vals = nil
+			break
+		}
+
+		m = Map(amm)
+		haveFirst = false
+	}
+
+	return vals, nil
+}
+
+type key struct {
+	name     string
+	isArray  bool
+	position int
+}
+
+func parsePath(s string) ([]*key, error) {
+	keys := strings.Split(s, ".")
+
+	ret := make([]*key, 0)
+
+	for i := 0; i < len(keys); i++ {
+		if keys[i] == "" {
+			continue
+		}
+
+		newkey := new(key)
+		if strings.Index(keys[i], "[") < 0 {
+			newkey.name = keys[i]
+			ret = append(ret, newkey)
+			continue
+		}
+
+		p := strings.Split(keys[i], "[")
+		newkey.name = p[0]
+		p = strings.Split(p[1], "]")
+		if p[0] == "" { // no right bracket
+			return nil, fmt.Errorf("no right bracket on key index: %s", keys[i])
+		}
+	// convert p[0] to an int value
+		pos, nerr := strconv.ParseInt(p[0], 10, 32)
+		if nerr != nil {
+			return nil, fmt.Errorf("cannot convert index to int value: %s", p[0])
+		}
+		newkey.position = int(pos)
+		newkey.isArray = true
+		ret = append(ret, newkey)
+	}
+
+	return ret, nil
+}
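+
+// Sketch of how parsePath decomposes an indexed path (the input is hypothetical):
+//	parsePath("msgs[2].data[0].field") yields, in order:
+//		{name: "msgs",  isArray: true,  position: 2}
+//		{name: "data",  isArray: true,  position: 0}
+//		{name: "field", isArray: false, position: 0}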
+
+// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'.
+func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys := strings.Split(path, ".")
+	if keys[len(keys)-1] == "" {
+		keys = keys[:len(keys)-1]
+	}
+	ivals := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap)
+	return ivals[:cnt], nil
+}
+
+func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) {
+	lenKeys := len(keys)
+
+	// load 'm' values into 'ret'
+	// expand any lists
+	if lenKeys == 0 {
+		switch m.(type) {
+		case map[string]interface{}:
+			if subkeys != nil {
+				if ok := hasSubKeys(m, subkeys); !ok {
+					return
+				}
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		case []interface{}:
+			for i, v := range m.([]interface{}) {
+				if subkeys != nil {
+					if ok := hasSubKeys(v, subkeys); !ok {
+						continue // only load list members with subkeys
+					}
+				}
+				*ret = append(*ret, (m.([]interface{}))[i])
+				*cnt++
+			}
+		default:
+			if subkeys != nil {
+				return // must be map[string]interface{} if there are subkeys
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		}
+		return
+	}
+
+	// key of interest
+	key := keys[0]
+	switch key {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				default:
+					// valuesForKeyPath(ret, v, keys[1:], subkeys)
+					valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[key]; ok {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[key]; ok {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				}
+			}
+		}
+	}
+}
+
+// hasSubKeys() - interface{} equality works for string, float64, bool
+// 'v' must be a map[string]interface{} value to have subkeys.
+// 'subkeys' can have k:v pairs with v.(string) == "*", which is treated as a wildcard.
+func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool {
+	if len(subkeys) == 0 {
+		return true
+	}
+
+	switch v.(type) {
+	case map[string]interface{}:
+		// do all subKey name:value pairs match?
+		mv := v.(map[string]interface{})
+		for skey, sval := range subkeys {
+			isNotKey := false
+			if skey[:1] == "!" { // a NOT-key
+				skey = skey[1:]
+				isNotKey = true
+			}
+			vv, ok := mv[skey]
+			if !ok { // key doesn't exist
+				if isNotKey { // key not there, but that's what we want
+					if kv, ok := sval.(string); ok && kv == "*" {
+						continue
+					}
+				}
+				return false
+			}
+			// wildcard check
+			if kv, ok := sval.(string); ok && kv == "*" {
+				if isNotKey { // key is there, and we don't want it
+					return false
+				}
+				continue
+			}
+			switch sval.(type) {
+			case string:
+				if s, ok := vv.(string); ok && s == sval.(string) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case bool:
+				if b, ok := vv.(bool); ok && b == sval.(bool) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case float64:
+				if f, ok := vv.(float64); ok && f == sval.(float64) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			}
+			// key there but didn't match subkey value
+			if isNotKey { // that's what we want
+				continue
+			}
+			return false
+		}
+		// all subkeys matched
+		return true
+	}
+
+	// not a map[string]interface{} value, can't have subkeys
+	return false
+}
+
+// Generate map of key:value entries as map[string]interface{}.
+//	'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'.
+//	If len(kv) == 0, the return is (nil, nil).
+func getSubKeyMap(kv ...string) (map[string]interface{}, error) {
+	if len(kv) == 0 {
+		return nil, nil
+	}
+	m := make(map[string]interface{}, 0)
+	for _, v := range kv {
+		vv := strings.Split(v, fieldSep)
+		switch len(vv) {
+		case 2:
+			m[vv[0]] = interface{}(vv[1])
+		case 3:
+			switch vv[2] {
+			case "string", "char", "text":
+				m[vv[0]] = interface{}(vv[1])
+			case "bool", "boolean":
+				// ParseBool treats "1"==true & "0"==false
+				b, err := strconv.ParseBool(vv[1])
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(b)
+			case "float", "float64", "num", "number", "numeric":
+				f, err := strconv.ParseFloat(vv[1], 64)
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(f)
+			default:
+				return nil, fmt.Errorf("unknown subkey conversion spec: %s", v)
+			}
+		default:
+			return nil, fmt.Errorf("unknown subkey spec: %s", v)
+		}
+	}
+	return m, nil
+}
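+
+// Illustrative subkey specs (a sketch; the key names and values are hypothetical):
+//	m, _ := getSubKeyMap("-seq:3:num", "published:true:bool", "author:*")
+//	// m == map[string]interface{}{"-seq": float64(3), "published": true, "author": "*"}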
+
+// -------------------------------  END of valuesFor ... ----------------------------
+
+// ----------------------- locate where a key value is in the tree -------------------
+
+//----------------------------- find all paths to a key --------------------------------
+
+// PathsForKey returns all paths through Map, 'mv', (in dot-notation) that terminate with the specified key.
+// Results can be used with ValuesForPath.
+func (mv Map) PathsForKey(key string) []string {
+	m := map[string]interface{}(mv)
+	breadbasket := make(map[string]bool, 0)
+	breadcrumbs := ""
+
+	hasKeyPath(breadcrumbs, m, key, breadbasket)
+	if len(breadbasket) == 0 {
+		return nil
+	}
+
+	// unpack map keys to return
+	res := make([]string, len(breadbasket))
+	var i int
+	for k := range breadbasket {
+		res[i] = k
+		i++
+	}
+
+	return res
+}
+
+// PathForKeyShortest extracts the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'.
+// Paths are strings using dot-notation.
+func (mv Map) PathForKeyShortest(key string) string {
+	paths := mv.PathsForKey(key)
+
+	lp := len(paths)
+	if lp == 0 {
+		return ""
+	}
+	if lp == 1 {
+		return paths[0]
+	}
+
+	shortest := paths[0]
+	shortestLen := len(strings.Split(shortest, "."))
+
+	for i := 1; i < len(paths); i++ {
+		vlen := len(strings.Split(paths[i], "."))
+		if vlen < shortestLen {
+			shortest = paths[i]
+			shortestLen = vlen
+		}
+	}
+
+	return shortest
+}
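+
+// Illustrative usage (a sketch), assuming 'mv' was decoded from the package's songtext.xml test doc:
+//	paths := mv.PathsForKey("line")    // "msg.song.verses.verse.line" and "msg.song.chorus.line"
+//	p := mv.PathForKeyShortest("line") // "msg.song.chorus.line" - the fewest dot-separated segments
+//	vals, err := mv.ValuesForPath(p)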
+
+// hasKeyPath - if the map has 'key', add the current breadcrumb path to 'basket'.
+// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'.
+func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		if _, ok := vv[key]; ok {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = key
+			} else {
+				nbc = crumbs + "." + key
+			}
+			basket[nbc] = true
+		}
+		// walk on down the path, key could occur again at deeper node
+		for k, v := range vv {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = k
+			} else {
+				nbc = crumbs + "." + k
+			}
+			hasKeyPath(nbc, v, key, basket)
+		}
+	case []interface{}:
+		// crumb-trail doesn't change, pass it on
+		for _, v := range iv.([]interface{}) {
+			hasKeyPath(crumbs, v, key, basket)
+		}
+	}
+}
+
+var PathNotExistError = errors.New("Path does not exist")
+
+// ValueForPath wraps ValuesForPath and returns the first value returned.
+// If no value is found it returns 'nil' and PathNotExistError.
+func (mv Map) ValueForPath(path string) (interface{}, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, PathNotExistError
+	}
+	return vals[0], nil
+}
+
+// ValueForPathString returns the first found value for the path as a string.
+func (mv Map) ValueForPathString(path string) (string, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return "", err
+	}
+	if len(vals) == 0 {
+		return "", errors.New("ValueForPath: path not found")
+	}
+	val := vals[0]
+	return fmt.Sprintf("%v", val), nil
+}
+
+// ValueOrEmptyForPathString returns the first found value for the path as a string.
+// If the path is not found then it returns an empty string.
+func (mv Map) ValueOrEmptyForPathString(path string) string {
+	str, _ := mv.ValueForPathString(path)
+	return str
+}

+ 112 - 0
vendor/github.com/clbanning/mxj/v2/leafnode.go

@@ -0,0 +1,112 @@
+package mxj
+
+// leafnode.go - return leaf nodes with paths and values for the Map
+// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw
+
+import (
+	"strconv"
+	"strings"
+)
+
+const (
+	NoAttributes = true // suppress LeafNode values that are attributes
+)
+
+// LeafNode - a terminal path value in a Map.
+// For XML Map values it represents an attribute or simple element value  - of type
+// string unless Map was created using Cast flag. For JSON Map values it represents
+// a string, numeric, boolean, or null value.
+type LeafNode struct {
+	Path  string      // a dot-notation representation of the path with array subscripting
+	Value interface{} // the value at the path termination
+}
+
+// LeafNodes - returns an array of all LeafNode values for the Map.
+// The optional 'no_attr' argument suppresses attribute values (keys with the prepended hyphen, '-')
+// as well as the "#text" key for the associated simple element value.
+//
+// PrependAttrWithHyphen(false) will result in attributes having ".attr-name" as the
+// terminal node in 'path', while the path for the element value, itself, will be
+// the base path w/o "#text".
+//
+// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax
+// rather than "[N]" syntax.
+func (mv Map) LeafNodes(no_attr ...bool) []LeafNode {
+	var a bool
+	if len(no_attr) == 1 {
+		a = no_attr[0]
+	}
+
+	l := make([]LeafNode, 0)
+	getLeafNodes("", "", map[string]interface{}(mv), &l, a)
+	return l
+}
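+
+// Illustrative usage (a sketch; the XML doc is hypothetical):
+//	mv, _ := NewMapXml([]byte(`<doc seq="1"><a>hello</a></doc>`))
+//	for _, ln := range mv.LeafNodes(NoAttributes) {
+//		// yields Path: "doc.a", Value: "hello"; the "-seq" attribute is suppressed
+//	}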
+
+func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) {
+	// if stripping attributes, then also strip "#text" key
+	if !noattr || node != "#text" {
+		if path != "" && node[:1] != "[" {
+			path += "."
+		}
+		path += node
+	}
+	switch mv.(type) {
+	case map[string]interface{}:
+		for k, v := range mv.(map[string]interface{}) {
+			// if noattr && k[:1] == "-" {
+			if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
+				continue
+			}
+			getLeafNodes(path, k, v, l, noattr)
+		}
+	case []interface{}:
+		for i, v := range mv.([]interface{}) {
+			if useDotNotation {
+				getLeafNodes(path, strconv.Itoa(i), v, l, noattr)
+			} else {
+				getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr)
+			}
+		}
+	default:
+		// can't walk any further, so create leaf
+		n := LeafNode{path, mv}
+		*l = append(*l, n)
+	}
+}
+
+// LeafPaths - all paths that terminate in LeafNode values.
+func (mv Map) LeafPaths(no_attr ...bool) []string {
+	ln := mv.LeafNodes(no_attr...) // pass through the optional no_attr flag
+	ss := make([]string, len(ln))
+	for i := 0; i < len(ln); i++ {
+		ss[i] = ln[i].Path
+	}
+	return ss
+}
+
+// LeafValues - all terminal values in the Map.
+func (mv Map) LeafValues(no_attr ...bool) []interface{} {
+	ln := mv.LeafNodes(no_attr...) // pass through the optional no_attr flag
+	vv := make([]interface{}, len(ln))
+	for i := 0; i < len(ln); i++ {
+		vv[i] = ln[i].Value
+	}
+	return vv
+}
+
+// ====================== utilities ======================
+
+// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I
+var useDotNotation bool
+
+// LeafUseDotNotation sets a flag that list members in LeafNode paths
+// should be identified using ".N" syntax rather than the default "[N]"
+// syntax.  Calling LeafUseDotNotation with no arguments toggles the 
+// flag on/off; otherwise, the argument sets the flag value 'true'/'false'.
+func LeafUseDotNotation(b ...bool) {
+	if len(b) == 0 {
+		useDotNotation = !useDotNotation
+		return
+	}
+	useDotNotation = b[0]
+}

+ 86 - 0
vendor/github.com/clbanning/mxj/v2/misc.go

@@ -0,0 +1,86 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// misc.go - mimic functions (+others) called out in:
+//          https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ
+// Primarily these methods let you retrieve XML structure information.
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Root returns the root element of the Map. If the Map does not have exactly one key,
+// then an error is returned.
+func (mv Map) Root() (string, error) {
+	mm := map[string]interface{}(mv)
+	if len(mm) != 1 {
+		return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm))
+	}
+	for k := range mm {
+		return k, nil
+	}
+	return "", nil
+}
+
+// If the path is an element with sub-elements, return a list of the sub-element
+// keys.  (The list is alphabetically sorted.)  NOTE: Map keys that are prefixed with
+// '-', a hyphen, are considered attributes; see m.Attributes(path).
+func (mv Map) Elements(path string) ([]string, error) {
+	e, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch e.(type) {
+	case map[string]interface{}:
+		ee := e.(map[string]interface{})
+		elems := make([]string, len(ee))
+		var i int
+		for k := range ee {
+			if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
+				continue // skip attributes
+			}
+			elems[i] = k
+			i++
+		}
+		elems = elems[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(elems)
+		return elems, nil
+	}
+	return nil, fmt.Errorf("no elements for path: %s", path)
+}
+
+// If the path is an element with attributes, return a list of the attribute
+// keys.  (The list is alphabetically sorted.)  NOTE: Map keys that are not prefixed with
+// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the
+// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then
+// there are no identifiable attributes.
+func (mv Map) Attributes(path string) ([]string, error) {
+	a, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch a.(type) {
+	case map[string]interface{}:
+		aa := a.(map[string]interface{})
+		attrs := make([]string, len(aa))
+		var i int
+		for k := range aa {
+			if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 {
+				continue // skip non-attributes
+			}
+			attrs[i] = k[len(attrPrefix):]
+			i++
+		}
+		attrs = attrs[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(attrs)
+		return attrs, nil
+	}
+	return nil, fmt.Errorf("no attributes for path: %s", path)
+}
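+
+// Illustrative usage (a sketch), assuming 'mv' was decoded from the package's songtext.xml test doc:
+//	root, _ := mv.Root()                  // "msg"
+//	elems, _ := mv.Elements("msg")        // []string{"song", "text"}
+//	attrs, _ := mv.Attributes("msg.song") // []string{"author", "title"}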

+ 128 - 0
vendor/github.com/clbanning/mxj/v2/mxj.go

@@ -0,0 +1,128 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+)
+
+const (
+	Cast         = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast)
+	SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding)
+)
+
+type Map map[string]interface{}
+
+// Allocate a Map.
+func New() Map {
+	m := make(map[string]interface{}, 0)
+	return m
+}
+
+// Cast a Map to map[string]interface{}
+func (mv Map) Old() map[string]interface{} {
+	return mv
+}
+
+// Return a copy of mv as a newly allocated Map.  If the Map only contains string,
+// numeric, map[string]interface{}, and []interface{} values, then it can be thought
+// of as a "deep copy."  Copying a structure (or structure reference) value is subject
+// to the noted restrictions.
+//	NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags
+//	      then only public fields of the structure are in the new Map - and with
+//	      keys that conform to any encoding tag instructions. The structure itself will
+//	      be represented as a map[string]interface{} value.
+func (mv Map) Copy() (Map, error) {
+	// this is the poor-man's deep copy
+	// not efficient, but it works
+	j, jerr := mv.Json()
+	// must handle, we don't know how mv got built
+	if jerr != nil {
+		return nil, jerr
+	}
+	return NewMapJson(j)
+}
+
+// --------------- StringIndent ... from x2j.WriteMap -------------
+
+// Pretty print a Map.
+func (mv Map) StringIndent(offset ...int) string {
+	return writeMap(map[string]interface{}(mv), true, true, offset...)
+}
+
+// Pretty print a Map without the value type information - just key:value entries.
+func (mv Map) StringIndentNoTypeInfo(offset ...int) string {
+	return writeMap(map[string]interface{}(mv), false, true, offset...)
+}
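+
+// Illustrative usage (a sketch; the map literal is hypothetical):
+//	mv := Map(map[string]interface{}{"a": map[string]interface{}{"b": 1.5}})
+//	cp, _ := mv.Copy()    // "deep copy" via a JSON round-trip
+//	_ = cp.StringIndent() // pretty-printed with value type info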
+
+// writeMap - dumps the map[string]interface{} for examination.
+// 'typeInfo' causes value type to be printed.
+//	'offset' is initial indentation count; typically: Write(m).
+func writeMap(m interface{}, typeInfo, root bool, offset ...int) string {
+	var indent int
+	if len(offset) == 1 {
+		indent = offset[0]
+	}
+
+	var s string
+	switch m.(type) {
+	case []interface{}:
+		if typeInfo {
+			s += "[[]interface{}]"
+		}
+		for _, v := range m.([]interface{}) {
+			s += "\n"
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += writeMap(v, typeInfo, false, indent+1)
+		}
+	case map[string]interface{}:
+		list := make([][2]string, len(m.(map[string]interface{})))
+		var n int
+		for k, v := range m.(map[string]interface{}) {
+			list[n][0] = k
+			list[n][1] = writeMap(v, typeInfo, false, indent+1)
+			n++
+		}
+		sort.Sort(mapList(list))
+		for _, v := range list {
+			if root {
+				root = false
+			} else {
+				s += "\n"
+			}
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += v[0] + " : " + v[1]
+		}
+	default:
+		if typeInfo {
+			s += fmt.Sprintf("[%T] %+v", m, m)
+		} else {
+			s += fmt.Sprintf("%+v", m)
+		}
+	}
+	return s
+}
+
+// ======================== utility ===============
+
+type mapList [][2]string
+
+func (ml mapList) Len() int {
+	return len(ml)
+}
+
+func (ml mapList) Swap(i, j int) {
+	ml[i], ml[j] = ml[j], ml[i]
+}
+
+func (ml mapList) Less(i, j int) bool {
+	return ml[i][0] <= ml[j][0]
+}

+ 184 - 0
vendor/github.com/clbanning/mxj/v2/newmap.go

@@ -0,0 +1,184 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014, 2018 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// newmap.go - build a new Map from the current Map based on keyOld:keyNew mappings
+//            keys can use dot-notation, keyOld can use wildcard, '*'
+//
+// Computational strategy -
+// Using the key path - []string - traverse a new map[string]interface{} and
+// insert the oldVal as the newVal when we arrive at the end of the path.
+// If the type at the end is nil, then that is newVal
+// If the type at the end is a singleton (string, float64, bool) an array is created.
+// If the type at the end is an array, newVal is just appended.
+// If the type at the end is a map, it is inserted if possible or the map value
+//    is converted into an array if necessary.
+
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// (Map)NewMap - create a new Map from data in the current Map.
+//	'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey'
+//	should be the value for 'newKey' in the returned Map.
+//		- 'oldKey' supports dot-notation as described for (Map)ValuesForPath()
+//		- 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays
+//		- "oldKey" is shorthand for the keypair value "oldKey:oldKey"
+//		- "oldKey:" and ":newKey" are invalid keypair values
+//		- if 'oldKey' does not exist in the current Map, it is not written to the new Map.
+//		  "null" is not supported unless it is the current Map.
+//		- see newmap_test.go for several syntax examples
+// 	- mv.NewMap() == mxj.New()
+//
+//	NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc.
+func (mv Map) NewMap(keypairs ...string) (Map, error) {
+	n := make(map[string]interface{}, 0)
+	if len(keypairs) == 0 {
+		return n, nil
+	}
+
+	// loop through the pairs
+	var oldKey, newKey string
+	var path []string
+	for _, v := range keypairs {
+		if len(v) == 0 {
+			continue // just skip over empty keypair arguments
+		}
+
+		// initialize oldKey, newKey and check
+		vv := strings.Split(v, ":")
+		if len(vv) > 2 {
+			return n, errors.New("oldKey:newKey keypair value not valid - " + v)
+		}
+		if len(vv) == 1 {
+			oldKey, newKey = vv[0], vv[0]
+		} else {
+			oldKey, newKey = vv[0], vv[1]
+		}
+		oldKey = strings.TrimSpace(oldKey)
+		newKey = strings.TrimSpace(newKey)
+		if i := strings.Index(newKey, "*"); i > -1 {
+			return n, errors.New("newKey value cannot contain wildcard character - " + v)
+		}
+		if i := strings.Index(newKey, "["); i > -1 {
+			return n, errors.New("newKey value cannot contain indexed arrays - " + v)
+		}
+		if oldKey == "" || newKey == "" {
+			return n, errors.New("oldKey or newKey is not specified - " + v)
+		}
+
+		// get oldKey value
+		oldVal, err := mv.ValuesForPath(oldKey)
+		if err != nil {
+			return n, err
+		}
+		if len(oldVal) == 0 {
+			continue // oldKey has no value, may not exist in mv
+		}
+
+		// break down path
+		path = strings.Split(newKey, ".")
+		if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec
+			path = path[:len(path)-1]
+		}
+
+		addNewVal(&n, path, oldVal)
+	}
+
+	return n, nil
+}
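+
+// Illustrative keypair specs (a sketch; the key names are hypothetical):
+//	newMap, err := mv.NewMap("doc.title:title", "doc.author.name:author", "doc.id")
+//	// "doc.id" is shorthand for "doc.id:doc.id"; wildcards and indexes are allowed in oldKey only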
+
+// navigate 'n' to end of path and add val
+func addNewVal(n *map[string]interface{}, path []string, val []interface{}) {
+	// newVal - either singleton or array
+	var newVal interface{}
+	if len(val) == 1 {
+		newVal = val[0] // is type interface{}
+	} else {
+		newVal = interface{}(val)
+	}
+
+	// walk to the position of interest, create it if necessary
+	m := (*n)           // initialize map walker
+	var k string        // key for m
+	lp := len(path) - 1 // when to stop looking
+	for i := 0; i < len(path); i++ {
+		k = path[i]
+		if i == lp {
+			break
+		}
+		var nm map[string]interface{} // holds position of next-map
+		switch m[k].(type) {
+		case nil: // need a map for next node in path, so go there
+			nm = make(map[string]interface{}, 0)
+			m[k] = interface{}(nm)
+			m = m[k].(map[string]interface{})
+		case map[string]interface{}:
+			// OK - got somewhere to walk to, go there
+			m = m[k].(map[string]interface{})
+		case []interface{}:
+			// add a map and nm points to new map unless there's already
+			// a map in the array, then nm points there
+			// The placement of the next value in the array is dependent
+			// on the sequence of members - could land on a map or a nil
+			// value first.  TODO: how to test this.
+			a := make([]interface{}, 0)
+			var foundmap bool
+			for _, vv := range m[k].([]interface{}) {
+				switch vv.(type) {
+				case nil: // doesn't appear that this occurs, need a test case
+					if foundmap { // use the first one in array
+						a = append(a, vv)
+						continue
+					}
+					nm = make(map[string]interface{}, 0)
+					a = append(a, interface{}(nm))
+					foundmap = true
+				case map[string]interface{}:
+					if foundmap { // use the first one in array
+						a = append(a, vv)
+						continue
+					}
+					nm = vv.(map[string]interface{})
+					a = append(a, vv)
+					foundmap = true
+				default:
+					a = append(a, vv)
+				}
+			}
+			// no map found in array
+			if !foundmap {
+				nm = make(map[string]interface{}, 0)
+				a = append(a, interface{}(nm))
+			}
+			m[k] = interface{}(a) // must insert in map
+			m = nm
+		default: // it's a string, float, bool, etc.
+			aa := make([]interface{}, 0)
+			nm = make(map[string]interface{}, 0)
+			aa = append(aa, m[k], nm)
+			m[k] = interface{}(aa)
+			m = nm
+		}
+	}
+
+	// value is nil, array or a singleton of some kind
+	// initially m.(type) == map[string]interface{}
+	v := m[k]
+	switch v.(type) {
+	case nil: // initialized
+		m[k] = newVal
+	case []interface{}:
+		a := m[k].([]interface{})
+		a = append(a, newVal)
+		m[k] = interface{}(a)
+	default: // v exists:string, float64, bool, map[string]interface, etc.
+		a := make([]interface{}, 0)
+		a = append(a, v, newVal)
+		m[k] = interface{}(a)
+	}
+}

+ 207 - 0
vendor/github.com/clbanning/mxj/v2/readme.md

@@ -0,0 +1,207 @@
+<h2>mxj - to/from maps, XML and JSON</h2>
+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use mxj/x2j and mxj/j2x packages.
+
+<h4>Installation</h4>
+Using go.mod:
+<pre>
+go get github.com/clbanning/mxj/v2@v2.3.2
+</pre>
+
+<pre>
+import "github.com/clbanning/mxj/v2"
+</pre>
+
+... or just vendor the package.
+
+<h4>Related Packages</h4>
+
+https://github.com/clbanning/checkxml provides functions for validating XML data.
+
+<h4>Refactor Encoder - 2020.05.01</h4>
+Issue #70 highlighted that encoding large maps does not scale well, since the original logic used string append operations. Using bytes.Buffer results in linear scaling for very large XML docs. (Metrics based on MacBook Pro i7 w/ 16 GB.)
+
+	Nodes      m.XML() time
+	54809       12.53708ms
+	109780      32.403183ms
+	164678      59.826412ms
+	482598     109.358007ms
+
+<h4>Refactor Decoder - 2015.11.15</h4>
+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant.  I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that could be deployed on a Raspberry Pi.  Now the decoder is comparable to the stdlib JSON-to-map[string]interface{} decoder in terms of its additional processing overhead relative to decoding to a structure value.  As shown by:
+
+	BenchmarkNewMapXml-4         	  100000	     18043 ns/op
+	BenchmarkNewStructXml-4      	  100000	     14892 ns/op
+	BenchmarkNewMapJson-4        	  300000	      4633 ns/op
+	BenchmarkNewStructJson-4     	  300000	      3427 ns/op
+	BenchmarkNewMapXmlBooks-4    	   20000	     82850 ns/op
+	BenchmarkNewStructXmlBooks-4 	   20000	     67822 ns/op
+	BenchmarkNewMapJsonBooks-4   	  100000	     17222 ns/op
+	BenchmarkNewStructJsonBooks-4	  100000	     15309 ns/op
+
+<h4>Notices</h4>
+
+	2021.02.02: v2.5 - add XmlCheckIsValid toggle to force checking that the encoded XML is valid
+	2020.12.14: v2.4 - add XMLEscapeCharsDecoder to preserve XML escaped characters in Map values
+	2020.10.28: v2.3 - add TrimWhiteSpace option
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>]
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq()
+	            and mv.XmlSeq() / mv.XmlSeqIndent().
+	2015-05-20: New: mv.StringIndentNoTypeInfo().
+	            Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+	            mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+	2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+	            (NOTE: PreserveXmlList() is similar and will be here soon.)
+	2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+	2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.
+
+<h4>Basic Unmarshal XML to map[string]interface{}</h4>
+<pre>type Map map[string]interface{}</pre>
+
+Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v':
+<pre>mv := Map(v)</pre>
+
+Unmarshal / marshal XML as a `Map` value, 'mv':
+<pre>mv, err := NewMapXml(xmlValue) // unmarshal
+xmlValue, err := mv.Xml()      // marshal</pre>
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'mv':
+<pre>mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded</pre>
+
+Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`):
+<pre>err := mv.XmlWriter(xmlWriter)
+raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter</pre>
+   
+Also, for prettified output:
+<pre>xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)</pre>
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+<pre>err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))</pre>
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+<pre>mv, err := NewMapStruct(structVal)
+err := mv.Struct(structPointer)</pre>
+
+<h4>Extract / modify Map values</h4>
+To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv', then:
+<pre>paths := mv.PathsForKey(key)
+path := mv.PathForKeyShortest(key)
+values, err := mv.ValuesForKey(key, subkeys)
+values, err := mv.ValuesForPath(path, subkeys)
+count, err := mv.UpdateValuesForPath(newVal, path, subkeys)</pre>
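+
+For example (a sketch; the tag names are hypothetical):
+<pre>values, err := mv.ValuesForPath("doc.books.book[1].title")     // indexed array reference
+values, err := mv.ValuesForPath("doc.*.book", "-seq:2")            // wildcard path with attribute subkey
+count, err := mv.UpdateValuesForPath("title:Gone", "doc.books.book", "-seq:2")</pre>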
+
+Get everything at once, irrespective of path depth:
+<pre>leafnodes := mv.LeafNodes()
+leafvalues := mv.LeafValues()</pre>
+
+A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML
+or JSON. (Note: keys can use dot-notation.)
+<pre>newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+newXml, err := newMap.Xml()   // for example
+newJson, err := newMap.Json() // ditto</pre>
+
+<h4>Usage</h4>
+
+The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj).
+
+Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions.
+
+<h4>XML parsing conventions</h4>
+
+Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved: 
+      - `<ns:key>something</ns:key>` parses to `map["ns:key"]interface{}{"something"}`
+      - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}`
+
+Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+<h4>XML encoding conventions</h4>
+
+   - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+     NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+           which, then, encode in JSON as `"tag":""` values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+<h4>Running "go test"</h4>
+
+Because there are no guarantees on the sequence map elements are retrieved, the tests have been 
+written for visual verification in most cases.  One advantage is that you can easily use the 
+output from running "go test" as examples of calling the various functions and methods.
+
+<h4>Motivation</h4>
+
+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values.  This is easily done using `json.Unmarshal` from the
+standard Go libraries.  Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages and parses it into a `map[string]interface{}` value and then reuse
+all the JSON-based code.  The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values.  As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary value.  Thus, everything was refactored into the mxj package.
+

+ 37 - 0
vendor/github.com/clbanning/mxj/v2/remove.go

@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Remove removes the key (and its value) identified by the dot-notation path.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+func remove(m interface{}, path string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	lastKey := lastKey(path)
+	delete(val, lastKey)
+
+	return nil
+}
+
+// lastKey returns the last key of the path.
+// lastKey("a.b.c") returns "c".
+func lastKey(path string) string {
+	keys := strings.Split(path, ".")
+	key := keys[len(keys)-1]
+	return key
+}
+
+// parentPath returns the path without the last key.
+// parentPath("a.b.c") returns "a.b".
+func parentPath(path string) string {
+	keys := strings.Split(path, ".")
+	parentPath := strings.Join(keys[0:len(keys)-1], ".")
+	return parentPath
+}

+ 61 - 0
vendor/github.com/clbanning/mxj/v2/rename.go

@@ -0,0 +1,61 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps. 
+// It doesn't work for cases when the key is in a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	var v bool
+	var err error
+	if v, err = mv.Exists(path); err == nil && !v {
+		return errors.New("RenameKey: path not found: " + path)
+	} else if err != nil {
+		return err
+	}
+	if v, err = mv.Exists(parentPath(path) + "." + newName); err == nil && v {
+		return errors.New("RenameKey: key already exists: " + newName)
+	} else if err != nil {
+		return err
+	}
+
+	m := map[string]interface{}(mv)
+	return renameKey(m, path, newName)
+}
+
+func renameKey(m interface{}, path string, newName string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	oldName := lastKey(path)
+	val[newName] = val[oldName]
+	delete(val, oldName)
+
+	return nil
+}
+
+// prevValueByPath returns the map that contains the last key in the path.
+// For example: prevValueByPath({a:{b:{c: 3}}}, "a.b.c") returns {c: 3}.
+func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) {
+	keys := strings.Split(path, ".")
+
+	switch mValue := m.(type) {
+	case map[string]interface{}:
+		for key, value := range mValue {
+			if key == keys[0] {
+				if len(keys) == 1 {
+					return mValue, nil
+				} else {
+					// keep looking for the full path to the key
+					return prevValueByPath(value, strings.Join(keys[1:], "."))
+				}
+			}
+		}
+	}
+	return nil, errors.New("prevValueByPath: didn't find path – " + path)
+}

+ 26 - 0
vendor/github.com/clbanning/mxj/v2/set.go

@@ -0,0 +1,26 @@
+package mxj
+
+import (
+	"strings"
+)
+
+// SetValueForPath sets the value for the given dot-notation path.
+func (mv Map) SetValueForPath(value interface{}, path string) error {
+	pathAry := strings.Split(path, ".")
+	parentPathAry := pathAry[0 : len(pathAry)-1]
+	parentPath := strings.Join(parentPathAry, ".")
+
+	val, err := mv.ValueForPath(parentPath)
+	if err != nil {
+		return err
+	}
+	if val == nil {
+		return nil // we just ignore the request if there's no val
+	}
+
+	key := pathAry[len(pathAry)-1]
+	cVal := val.(map[string]interface{})
+	cVal[key] = value
+
+	return nil
+}
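+
+// Illustrative usage (a sketch; the path is hypothetical):
+//	err := mv.SetValueForPath("new value", "doc.meta.note")
+//	// the parent path "doc.meta" must already exist and resolve to a map value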

+ 20 - 0
vendor/github.com/clbanning/mxj/v2/setfieldsep.go

@@ -0,0 +1,20 @@
+package mxj
+
+// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862
+var fieldSep string = ":"
+
+// SetFieldSeparator changes the default field separator, ":", for the
+// newVal argument in mv.UpdateValuesForPath and the optional 'subkey' arguments
+// in mv.ValuesForKey and mv.ValuesForPath. 
+// 
+// E.g., if the newVal value is "http://blah/blah", setting the field separator
+// to "|" will allow the newVal specification, "<key>|http://blah/blah" to parse
+// properly.  If called with no argument or an empty string value, the field
+// separator is set to the default, ":".
+func SetFieldSeparator(s ...string) {
+	if len(s) == 0 || s[0] == "" {
+		fieldSep = ":" // the default
+		return
+	}
+	fieldSep = s[0]
+}
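+
+// Illustrative usage (a sketch; the tag and URL are hypothetical):
+//	SetFieldSeparator("|")
+//	vals, err := mv.ValuesForPath("doc.link", "href|http://blah/blah")
+//	SetFieldSeparator() // restore the default ":"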

+ 29 - 0
vendor/github.com/clbanning/mxj/v2/songtext.xml

@@ -0,0 +1,29 @@
+<msg mtype="alert" mpriority="1">
+	<text>help me!</text>
+	<song title="A Long Time" author="Mayer Hawthorne">
+		<verses>
+			<verse name="verse 1" no="1">
+				<line no="1">Henry was a renegade</line>
+				<line no="2">Didn't like to play it safe</line>
+				<line no="3">One component at a time</line>
+				<line no="4">There's got to be a better way</line>
+				<line no="5">Oh, people came from miles around</line>
+				<line no="6">Searching for a steady job</line>
+				<line no="7">Welcome to the Motor Town</line>
+				<line no="8">Booming like an atom bomb</line>
+			</verse>
+			<verse name="verse 2" no="2">
+				<line no="1">Oh, Henry was the end of the story</line>
+				<line no="2">Then everything went wrong</line>
+				<line no="3">And we'll return it to its former glory</line>
+				<line no="4">But it just takes so long</line>
+			</verse>
+		</verses>
+		<chorus>
+			<line no="1">It's going to take a long time</line>
+			<line no="2">It's going to take it, but we'll make it one day</line>
+			<line no="3">It's going to take a long time</line>
+			<line no="4">It's going to take it, but we'll make it one day</line>
+		</chorus>
+	</song>
+</msg>

+ 30 - 0
vendor/github.com/clbanning/mxj/v2/strict.go

@@ -0,0 +1,30 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// strict.go actually addresses setting xml.Decoder attribute
+// values.  This'll let you parse non-standard XML.
+
+package mxj
+
+import (
+	"encoding/xml"
+)
+
+// CustomDecoder can be used to specify xml.Decoder attribute
+// values, e.g., Strict:false, to be used.  By default CustomDecoder
+// is nil.  If CustomDecoder != nil, then the mxj.XmlCharsetReader variable is
+// ignored and must be set as part of the CustomDecoder value, if needed.
+//	Usage:
+//		mxj.CustomDecoder = &xml.Decoder{Strict:false}
+var CustomDecoder *xml.Decoder
+
+// useCustomDecoder copies the public attributes over from CustomDecoder
+func useCustomDecoder(d *xml.Decoder) {
+	d.Strict = CustomDecoder.Strict
+	d.AutoClose = CustomDecoder.AutoClose
+	d.Entity = CustomDecoder.Entity
+	d.CharsetReader = CustomDecoder.CharsetReader
+	d.DefaultSpace = CustomDecoder.DefaultSpace
+}
+

+ 54 - 0
vendor/github.com/clbanning/mxj/v2/struct.go

@@ -0,0 +1,54 @@
+// Copyright 2012-2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+
+	// "github.com/fatih/structs"
+)
+
+// Create a new Map value from a structure.  Error returned if argument is not a structure.
+// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map
+// for handling of "structs" tags.
+
+// DEPRECATED - import github.com/fatih/structs and cast result of structs.Map to mxj.Map.
+//	import "github.com/fatih/structs"
+//	...
+//	   sm, err := structs.Map(<some struct>)
+//	   if err != nil {
+//	      // handle error
+//	   }
+//	   m := mxj.Map(sm)
+// Alternatively, uncomment the old source and the import in struct.go.
+func NewMapStruct(structVal interface{}) (Map, error) {
+	return nil, errors.New("deprecated - see package documentation")
+	/*
+		if !structs.IsStruct(structVal) {
+			return nil, errors.New("NewMapStruct() error: argument is not type Struct")
+		}
+		return structs.Map(structVal), nil
+	*/
+}
+
+// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned
+// if argument is not a pointer or if json.Unmarshal returns an error.
+//	json.Unmarshal structure encoding rules are followed to encode public structure fields.
+func (mv Map) Struct(structPtr interface{}) error {
+	// should check that we're getting a pointer.
+	if reflect.ValueOf(structPtr).Kind() != reflect.Ptr {
+		return errors.New("mv.Struct() error: argument is not type Ptr")
+	}
+
+	m := map[string]interface{}(mv)
+	j, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(j, structPtr)
+}
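+
+// Illustrative usage (a sketch; the struct type is hypothetical):
+//	type Book struct {
+//		Title  string `json:"title"`
+//		Author string `json:"author"`
+//	}
+//	var b Book
+//	err := mv.Struct(&b) // fills 'b' via a JSON round-trip of the Map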

+ 258 - 0
vendor/github.com/clbanning/mxj/v2/updatevalues.go

@@ -0,0 +1,258 @@
+// Copyright 2012-2014, 2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// updatevalues.go - modify a value based on path and possibly sub-keys
+// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values.
+
+package mxj
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Update value based on path and possible sub-key values.
+// A count of the number of values changed and any error are returned.
+// If the count == 0, then no path (and subkeys) matched.
+//	'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified
+//	             or a string value "key:value[:type]" where type is "bool" or "num" to cast the value.
+//	'path' is dot-notation list of keys to traverse; last key in path can be newVal key
+//	       NOTE: 'path' spec does not currently support indexed array references.
+//	'subkeys' are "key:value[:type]" entries that must match for path node
+//             - For attributes prefix the label with the attribute prefix character, by default a 
+//               hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceded by the '!' character, the key:value[:type] entry is treated as an
+//	              exclusion criterion - e.g., "!author:William T. Gaddis".
+//
+//	NOTES:
+//		1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value.
+//		2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key.
+//		3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to set an unused symbol,
+//	      perhaps "|".
+func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) {
+	m := map[string]interface{}(mv)
+
+	// extract the subkeys
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// extract key and value from newVal
+	var key string
+	var val interface{}
+	switch newVal.(type) {
+	case map[string]interface{}, Map:
+		switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec)
+		case Map:
+			newVal = newVal.(Map).Old()
+		}
+		if len(newVal.(map[string]interface{})) != 1 {
+			return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal)
+		}
+		for key, val = range newVal.(map[string]interface{}) {
+		}
+	case string: // split it as a key:value pair
+		ss := strings.Split(newVal.(string), fieldSep)
+		n := len(ss)
+		if n < 2 || n > 3 {
+			return 0, fmt.Errorf("unknown newVal spec - %+v", newVal)
+		}
+		key = ss[0]
+		if n == 2 {
+			val = interface{}(ss[1])
+		} else if n == 3 {
+			switch ss[2] {
+			case "bool", "boolean":
+				nv, err := strconv.ParseBool(ss[1])
+				if err != nil {
+					return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal)
+				}
+				val = interface{}(nv)
+			case "num", "numeric", "float", "int":
+				nv, err := strconv.ParseFloat(ss[1], 64)
+				if err != nil {
+					return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal)
+				}
+				val = interface{}(nv)
+			default:
+				return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal)
+			}
+		}
+	default:
+		return 0, fmt.Errorf("invalid newVal type - %+v", newVal)
+	}
+
+	// parse path
+	keys := strings.Split(path, ".")
+
+	var count int
+	updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count)
+
+	return count, nil
+}
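+
+// Illustrative usage (a sketch; the tags and values are hypothetical):
+//	n, err := mv.UpdateValuesForPath("price:9.99:num", "doc.books.book", "-seq:2")
+//	// n is the number of "book" nodes with attribute -seq == "2" whose "price" value was set to float64(9.99)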
+
+// navigate the path
+func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) {
+	// ----- at end node: looking at possible node to get 'key' ----
+	if len(keys) == 1 {
+		updateValue(key, value, m, keys[0], subkeys, cnt)
+		return
+	}
+
+	// ----- here we are navigating the path thru the penultimate node --------
+	// key of interest is keys[0] - the next in the path
+	switch keys[0] {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				default:
+					updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[keys[0]]; ok {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[keys[0]]; ok {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				}
+			}
+		}
+	}
+}
+
+// change value if key and subkeys are present
+func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) {
+	// there are two possible options for the value of 'keys0': map[string]interface, []interface{}
+	// and 'key' is a key in the map or is a key in a map in a list.
+	switch m.(type) {
+	case map[string]interface{}: // gotta have the last key
+		if keys0 == "*" {
+			for k := range m.(map[string]interface{}) {
+				updateValue(key, value, m, k, subkeys, cnt)
+			}
+			return
+		}
+		endVal, _ := m.(map[string]interface{})[keys0]
+
+		// if newV key is the end of path, replace the value for path-end
+		// may be []interface{} - means replace just an entry w/ subkeys
+		// otherwise replace the keys0 value if subkeys are there
+		// NOTE: this will replace the subkeys, also
+		if key == keys0 {
+			switch endVal.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+				}
+			case []interface{}:
+				// without subkeys can't select list member to modify
+				// so key:value spec is it ...
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+					break
+				}
+				nv := make([]interface{}, 0)
+				var valmodified bool
+				for _, v := range endVal.([]interface{}) {
+					// check entry subkeys
+					if hasSubKeys(v, subkeys) {
+						// replace v with value
+						nv = append(nv, value)
+						valmodified = true
+						(*cnt)++
+						continue
+					}
+					nv = append(nv, v)
+				}
+				if valmodified {
+					(m.(map[string]interface{}))[keys0] = interface{}(nv)
+				}
+			default: // anything else is a strict replacement
+				if hasSubKeys(m, subkeys) {
+					(m.(map[string]interface{}))[keys0] = value
+					(*cnt)++
+				}
+			}
+			return
+		}
+
+		// so value is for an element of endVal
+		// if endVal is a map then 'key' must be there w/ subkeys
+		// if endVal is a list then 'key' must be in a list member w/ subkeys
+		switch endVal.(type) {
+		case map[string]interface{}:
+			if !hasSubKeys(endVal, subkeys) {
+				return
+			}
+			if _, ok := (endVal.(map[string]interface{}))[key]; ok {
+				(endVal.(map[string]interface{}))[key] = value
+				(*cnt)++
+			}
+		case []interface{}: // keys0 points to a list, check subkeys
+			for _, v := range endVal.([]interface{}) {
+				// got to be a map so we can replace value for 'key'
+				vv, vok := v.(map[string]interface{})
+				if !vok {
+					continue
+				}
+				if _, ok := vv[key]; !ok {
+					continue
+				}
+				if !hasSubKeys(vv, subkeys) {
+					continue
+				}
+				vv[key] = value
+				(*cnt)++
+			}
+		}
+	case []interface{}: // key may be in a list member
+		// don't need to handle keys0 == "*"; we're looking at everything, anyway.
+		for _, v := range m.([]interface{}) {
+			// only map values - we're looking for 'key'
+			mm, ok := v.(map[string]interface{})
+			if !ok {
+				continue
+			}
+			if _, ok := mm[key]; !ok {
+				continue
+			}
+			if !hasSubKeys(mm, subkeys) {
+				continue
+			}
+			mm[key] = value
+			(*cnt)++
+		}
+	}
+
+	// return
+}

+ 1410 - 0
vendor/github.com/clbanning/mxj/v2/xml.go

@@ -0,0 +1,1410 @@
+// Copyright 2012-2016, 2018-2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xml.go - basically the core of X2j for map[string]interface{} values.
+//          NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter
+// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ------------------- NewMapXml & NewMapXmlReader ... -------------------------
+
+// If XmlCharsetReader != nil, it will be used to decode the XML, if required.
+// Note: if CustomDecoder != nil, then XmlCharsetReader is ignored;
+// set the CustomDecoder attribute instead.
+//   import (
+//	     charset "code.google.com/p/go-charset/charset"
+//	     github.com/clbanning/mxj
+//	 )
+//   ...
+//   mxj.XmlCharsetReader = charset.NewReader
+//   m, merr := mxj.NewMapXml(xmlValue)
+var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+// NewMapXml - convert a XML doc into a Map
+// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().)
+//	If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+//
+//	Converting XML to JSON is as simple as:
+//		...
+//		mapVal, merr := mxj.NewMapXml(xmlVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		jsonVal, jerr := mapVal.Json()
+//		if jerr != nil {
+//			// handle error
+//		}
+//
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//	   5. If DisableTrimWhiteSpace() has been called, then trimming of white space in values is controlled by its setting; trimming is enabled by default.
+func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlToMap(xmlVal, r)
+}
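+
+// Editor's illustrative sketch, not part of the upstream source: the XML -> map -> JSON
+// round trip described above, with minimal error handling.
+//
+//	xmlVal := []byte(`<doc><name>alice</name><age>29</age></doc>`)
+//	m, err := mxj.NewMapXml(xmlVal, true) // cast: "29" becomes float64(29)
+//	if err != nil {
+//		// handle error
+//	}
+//	jsonVal, err := m.Json()
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(string(jsonVal)) // {"doc":{"age":29,"name":"alice"}}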
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlReaderToMap(xmlReader, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value and slice with the raw XML.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	   3. The 'raw' return value may be larger than the XML text value.
+//	   4. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   5. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   6. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb) // see code at EOF
+
+	m, err := xmlReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	if err != nil {
+		return nil, b, err
+	}
+
+	return m, b, nil
+}
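+
+// Editor's illustrative sketch, not part of the upstream source: decoding an in-memory
+// request body while capturing the raw XML that was consumed, e.g. for logging.
+//
+//	func handle(w http.ResponseWriter, req *http.Request) {
+//		m, raw, err := mxj.NewMapXmlReaderRaw(req.Body)
+//		if err != nil {
+//			log.Printf("decode failed, raw payload: %s", raw)
+//			http.Error(w, err.Error(), http.StatusBadRequest)
+//			return
+//		}
+//		_ = m // process the Map value
+//	}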
+
+// xmlReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// xmlToMap - convert a XML doc into map[string]interface{} value
+func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// PrependAttrWithHyphen. Prepend attribute tags with a hyphen.
+// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.)
+//	Note:
+//		If 'false', unmarshaling and marshaling are not symmetric. Attributes will be
+//		marshal'd as <attr_tag>attr</attr_tag> and may be part of a list.
+func PrependAttrWithHyphen(v bool) {
+	if v {
+		attrPrefix = "-"
+		lenAttrPrefix = len(attrPrefix)
+		return
+	}
+	attrPrefix = ""
+	lenAttrPrefix = len(attrPrefix)
+}
+
+// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com.
+var includeTagSeqNum bool
+
+// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting
+// its position when parsed. This is of limited usefulness, since list values cannot
+// be tagged with "_seq" without changing their depth in the Map.
+// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what
+// you get.
+/*
+		<Obj c="la" x="dee" h="da">
+			<IntObj id="3"/>
+			<IntObj1 id="1"/>
+			<IntObj id="2"/>
+			<StrObj>hello</StrObj>
+		</Obj>
+
+	parses as:
+
+		{
+		Obj:{
+			"-c":"la",
+			"-h":"da",
+			"-x":"dee",
+			"intObj":[
+				{
+					"-id"="3",
+					"_seq":"0" // if mxj.Cast is passed, then: "_seq":0
+				},
+				{
+					"-id"="2",
+					"_seq":"2"
+				}],
+			"intObj1":{
+				"-id":"1",
+				"_seq":"1"
+				},
+			"StrObj":{
+				"#text":"hello", // simple element value gets "#text" tag
+				"_seq":"3"
+				}
+			}
+		}
+*/
+func IncludeTagSeqNum(b ...bool) {
+	if len(b) == 0 {
+		includeTagSeqNum = !includeTagSeqNum
+	} else if len(b) == 1 {
+		includeTagSeqNum = b[0]
+	}
+}
+
+// all keys will be "lower case"
+var lowerCase bool
+
+// Coerce all tag values to keys in lower case.  This is useful if you've got sources with variable
+// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec
+// in lower case.
+//	CoerceKeysToLower() will toggle the coercion flag true|false - on|off
+//	CoerceKeysToLower(true|false) will set the coercion flag on|off
+//
+//	NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as
+//	      the associated HandleXmlReader and HandleXmlReaderRaw.
+func CoerceKeysToLower(b ...bool) {
+	if len(b) == 0 {
+		lowerCase = !lowerCase
+	} else if len(b) == 1 {
+		lowerCase = b[0]
+	}
+}
+
+// disableTrimWhiteSpace sets if the white space should be removed or not
+var disableTrimWhiteSpace bool
+var trimRunes = "\t\r\b\n "
+
+// DisableTrimWhiteSpace sets whether white space should be trimmed from values. By default white
+// space is always trimmed. Calling with no argument disables trimming.
+func DisableTrimWhiteSpace(b ...bool) {
+	if len(b) == 0 {
+		disableTrimWhiteSpace = true
+	} else {
+		disableTrimWhiteSpace = b[0]
+	}
+
+	if disableTrimWhiteSpace {
+		trimRunes = "\t\r\b\n"
+	} else {
+		trimRunes = "\t\r\b\n "
+	}
+}
+
+// 25jun16: Allow user to specify the "prefix" character for XML attribute key labels.
+// We do this by replacing the '-' constant with the attrPrefix var, replacing useHyphen with attrPrefix = "",
+// and adding a SetAttrPrefix(s string) function.
+
+var attrPrefix string = `-` // the default
+var lenAttrPrefix int = 1   // the default
+
+// SetAttrPrefix changes the default, "-", to the specified value, s.
+// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false).
+// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.)
+func SetAttrPrefix(s string) {
+	attrPrefix = s
+	lenAttrPrefix = len(attrPrefix)
+}
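+
+// Editor's illustrative sketch, not part of the upstream source: switching to an "@"
+// attribute prefix, a convention some XML-to-JSON mappings use.
+//
+//	mxj.SetAttrPrefix("@")
+//	m, _ := mxj.NewMapXml([]byte(`<note id="7">hi</note>`))
+//	// m is map["note"]map[string]interface{}{"@id":"7", "#text":"hi"}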
+
+// 18jan17: Allows user to specify if the map keys should be in snake case instead
+// of the default hyphenated notation.
+var snakeCaseKeys bool
+
+// CoerceKeysToSnakeCase changes the default, false, to the specified value, b.
+// Note: the attribute prefix will be a hyphen, '-', or whatever string value has
+// been specified using SetAttrPrefix.
+func CoerceKeysToSnakeCase(b ...bool) {
+	if len(b) == 0 {
+		snakeCaseKeys = !snakeCaseKeys
+	} else if len(b) == 1 {
+		snakeCaseKeys = b[0]
+	}
+}
+
+// 10jan19: use of pull request #57 should be conditional - legacy code assumes
+// numeric values are float64.
+var castToInt bool
+
+// CastValuesToInt tries to coerce numeric values to int64 or uint64 instead of the
+// default float64. Repeated calls with no argument will toggle this on/off, or this
+// handling will be set with the value of 'b'.
+func CastValuesToInt(b ...bool) {
+	if len(b) == 0 {
+		castToInt = !castToInt
+	} else if len(b) == 1 {
+		castToInt = b[0]
+	}
+}
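+
+// Editor's illustrative sketch, not part of the upstream source: requesting integer
+// casting instead of the legacy float64 behavior.
+//
+//	mxj.CastValuesToInt(true)
+//	m, _ := mxj.NewMapXml([]byte(`<doc><n>42</n></doc>`), true) // cast == true
+//	// m["doc"].(map[string]interface{})["n"] is int64(42) rather than float64(42)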
+
+// 05feb17: support processing XMPP streams (issue #36)
+var handleXMPPStreamTag bool
+
+// HandleXMPPStreamTag causes decoder to parse XMPP <stream:stream> elements.
+// If called with no argument, XMPP stream element handling is toggled on/off.
+// (See xmppStream_test.go for example.)
+//	If called with NewMapXml, NewMapXmlReader, or NewMapXmlReaderRaw the "stream"
+//	element will be returned as:
+//		map["stream"]interface{}{map[-<attrs>]interface{}}.
+//	If called with NewMapXmlSeq, NewMapXmlSeqReader, or NewMapXmlSeqReaderRaw the "stream"
+//	element will be returned as:
+//		map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}}
+//		where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.)
+func HandleXMPPStreamTag(b ...bool) {
+	if len(b) == 0 {
+		handleXMPPStreamTag = !handleXMPPStreamTag
+	} else if len(b) == 1 {
+		handleXMPPStreamTag = b[0]
+	}
+}
+
+// 21jan18 - decode all values as map["#text":value] (issue #56)
+var decodeSimpleValuesAsMap bool
+
+// DecodeSimpleValuesAsMap forces all values to be decoded as map["#text":<value>].
+// If called with no argument, the decoding is toggled on/off.
+//
+// By default the NewMapXml functions decode simple values without attributes as
+// map[<tag>:<value>]. This function causes simple values without attributes to be
+// decoded the same as simple values with attributes - map[<tag>:map["#text":<value>]].
+func DecodeSimpleValuesAsMap(b ...bool) {
+	if len(b) == 0 {
+		decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap
+	} else if len(b) == 1 {
+		decodeSimpleValuesAsMap = b[0]
+	}
+}
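+
+// Editor's illustrative sketch, not part of the upstream source: the effect of
+// DecodeSimpleValuesAsMap on a simple element without attributes.
+//
+//	data := []byte(`<doc><msg>hello</msg></doc>`)
+//	m1, _ := mxj.NewMapXml(data)
+//	// m1: map["doc"]map["msg"]"hello"
+//	mxj.DecodeSimpleValuesAsMap(true)
+//	m2, _ := mxj.NewMapXml(data)
+//	// m2: map["doc"]map["msg"]map["#text"]"hello"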
+
+// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly.
+// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one.
+// We've removed the intermediate *node tree with the allocation and subsequent rescanning.
+func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
+	if lowerCase {
+		skey = strings.ToLower(skey)
+	}
+	if snakeCaseKeys {
+		skey = strings.Replace(skey, "-", "_", -1)
+	}
+
+	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
+	// Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value.
+	var n, na map[string]interface{}
+	var seq int // for includeTagSeqNum
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	//       to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	//       where we begin allocating map[string]interface{} values 'n' and 'na'.
+	if skey != "" {
+		n = make(map[string]interface{})  // old n
+		na = make(map[string]interface{}) // old n.nodes
+		if len(a) > 0 {
+			for _, v := range a {
+				if snakeCaseKeys {
+					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
+				}
+				var key string
+				key = attrPrefix + v.Name.Local
+				if lowerCase {
+					key = strings.ToLower(key)
+				}
+				if xmlEscapeCharsDecoder { // per issue#84
+					v.Value = escapeChars(v.Value)
+				}
+				na[key] = cast(v.Value, r, key)
+			}
+		}
+	}
+	// Return XMPP <stream:stream> message.
+	if handleXMPPStreamTag && skey == "stream" {
+		n[skey] = na
+		return n, nil
+	}
+
+	for {
+		t, err := p.Token()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
+			}
+			return nil, err
+		}
+		switch t.(type) {
+		case xml.StartElement:
+			tt := t.(xml.StartElement)
+
+			// First call to xmlToMapParser() doesn't pass xml.StartElement - the map key.
+			// So when the loop is first entered, the first token is the root tag along
+			// with any attributes, which we process here.
+			//
+			// Subsequent calls to xmlToMapParser() will pass in tag+attributes for
+			// processing before getting the next token which is the element value,
+			// which is done above.
+			if skey == "" {
+				return xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
+			}
+
+			// If not initializing the map, parse the element.
+			// len(nn) == 1, necessarily - it is just an 'n'.
+			nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
+			if err != nil {
+				return nil, err
+			}
+
+			// The nn map[string]interface{} value is a na[nn_key] value.
+			// We need to see if nn_key already exists - means we're parsing a list.
+			// This may require converting na[nn_key] value into []interface{} type.
+			// First, extract the key:val for the map - it's a singleton.
+			// Note:
+			// * if CoerceKeysToLower() called, then key will be lower case.
+			// * if CoerceKeysToSnakeCase() called, then key will be converted to snake case.
+			var key string
+			var val interface{}
+			for key, val = range nn {
+				break
+			}
+
+			// IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element.
+			// In theory, we don't need this if len(na) == 1. But, we don't know what might
+			// come next - we're only parsing forward.  So if you ask for 'includeTagSeqNum' you
+			// get it on every element. (Personally, I never liked this, but I added it on request
+			// and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!)
+			if includeTagSeqNum {
+				switch val.(type) {
+				case []interface{}:
+					// noop - There's no clean way to handle this w/o changing message structure.
+				case map[string]interface{}:
+					val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag
+					seq++
+				case interface{}: // a non-nil simple element: string, float64, bool
+					v := map[string]interface{}{"#text": val}
+					v["_seq"] = seq
+					seq++
+					val = v
+				}
+			}
+
+			// 'na' holding sub-elements of n.
+			// See if 'key' already exists.
+			// If 'key' exists, then this is a list, if not just add key:val to na.
+			if v, ok := na[key]; ok {
+				var a []interface{}
+				switch v.(type) {
+				case []interface{}:
+					a = v.([]interface{})
+				default: // anything else - note: v.(type) != nil
+					a = []interface{}{v}
+				}
+				a = append(a, val)
+				na[key] = a
+			} else {
+				na[key] = val // save it as a singleton
+			}
+		case xml.EndElement:
+			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
+			if len(n) == 0 {
+				// If len(na)==0 we have an empty element == "";
+				// it has no xml.Attr nor xml.CharData.
+				// Note: in original node-tree parser, val defaulted to "";
+				// so we always had the default if len(node.nodes) == 0.
+				if len(na) > 0 {
+					n[skey] = na
+				} else {
+					n[skey] = "" // empty element
+				}
+			} else if len(n) == 1 && len(na) > 0 {
+				// it's a simple element w/ no attributes w/ subelements
+				for _, v := range n {
+					na["#text"] = v
+				}
+				n[skey] = na
+			}
+			return n, nil
+		case xml.CharData:
+			// clean up possible noise
+			tt := strings.Trim(string(t.(xml.CharData)), trimRunes)
+			if xmlEscapeCharsDecoder { // issue#84
+				tt = escapeChars(tt)
+			}
+			if len(tt) > 0 {
+				if len(na) > 0 || decodeSimpleValuesAsMap {
+					na["#text"] = cast(tt, r, "#text")
+				} else if skey != "" {
+					n[skey] = cast(tt, r, skey)
+				} else {
+					// per Adrian (http://www.adrianlungu.com/) catch stray text
+					// in decoder stream -
+					// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+					// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+					// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+					continue
+				}
+			}
+		default:
+			// noop
+		}
+	}
+}
+
+var castNanInf bool
+
+// Cast "Nan", "Inf", "-Inf" XML values to 'float64'.
+// By default, these values will be decoded as 'string'.
+func CastNanInf(b ...bool) {
+	if len(b) == 0 {
+		castNanInf = !castNanInf
+	} else if len(b) == 1 {
+		castNanInf = b[0]
+	}
+}
+
+// cast - try to cast string values to bool or float64
+// 't' is the tag key that can be checked for 'not-casting'
+func cast(s string, r bool, t string) interface{} {
+	if checkTagToSkip != nil && t != "" && checkTagToSkip(t) {
+		// the registered check function says values for this tag should not be cast
+		return s
+	}
+
+	if r {
+		// handle nan and inf
+		if !castNanInf {
+			switch strings.ToLower(s) {
+			case "nan", "inf", "-inf":
+				return s
+			}
+		}
+
+		// handle numeric strings ahead of boolean
+		if castToInt {
+			if f, err := strconv.ParseInt(s, 10, 64); err == nil {
+				return f
+			}
+			if f, err := strconv.ParseUint(s, 10, 64); err == nil {
+				return f
+			}
+		}
+
+		if castToFloat {
+			if f, err := strconv.ParseFloat(s, 64); err == nil {
+				return f
+			}
+		}
+
+		// ParseBool treats "1"==true & "0"==false, we've already scanned those
+		// values as float64. See if value has 't' or 'f' as initial screen to
+		// minimize calls to ParseBool; also, see if len(s) < 6.
+		if castToBool {
+			if len(s) > 0 && len(s) < 6 {
+				switch s[:1] {
+				case "t", "T", "f", "F":
+					if b, err := strconv.ParseBool(s); err == nil {
+						return b
+					}
+				}
+			}
+		}
+	}
+	return s
+}
+
+// pull request, #59
+var castToFloat = true
+
+// CastValuesToFloat can be used to skip casting to float64 when
+// "cast" argument is 'true' in NewMapXml, etc.
+// Default is true.
+func CastValuesToFloat(b ...bool) {
+	if len(b) == 0 {
+		castToFloat = !castToFloat
+	} else if len(b) == 1 {
+		castToFloat = b[0]
+	}
+}
+
+var castToBool = true
+
+// CastValuesToBool can be used to skip casting to bool when
+// "cast" argument is 'true' in NewMapXml, etc.
+// Default is true.
+func CastValuesToBool(b ...bool) {
+	if len(b) == 0 {
+		castToBool = !castToBool
+	} else if len(b) == 1 {
+		castToBool = b[0]
+	}
+}
+
+// checkTagToSkip - switch to address Issue #58
+
+var checkTagToSkip func(string) bool
+
+// SetCheckTagToSkipFunc registers function to test whether the value
+// for a tag should be cast to bool or float64 when "cast" argument is 'true'.
+// (Dot tag path notation is not supported.)
+// NOTE: key may be "#text" if it's a simple element with attributes
+//       or "decodeSimpleValuesAsMap == true".
+// NOTE: does not apply to NewMapXmlSeq... functions.
+func SetCheckTagToSkipFunc(fn func(string) bool) {
+	checkTagToSkip = fn
+}
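+
+// Editor's illustrative sketch, not part of the upstream source: keeping "zip" values
+// as strings even when casting is requested, so leading zeros survive.
+//
+//	mxj.SetCheckTagToSkipFunc(func(tag string) bool { return tag == "zip" })
+//	m, _ := mxj.NewMapXml([]byte(`<addr><zip>01234</zip></addr>`), true)
+//	// m["addr"].(map[string]interface{})["zip"] is the string "01234", not float64(1234)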
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------
+
+const (
+	DefaultRootTag = "doc"
+)
+
+var useGoXmlEmptyElemSyntax bool
+
+// XmlGoEmptyElemSyntax() - <tag ...></tag> rather than <tag .../>.
+//	Go's encoding/xml package marshals empty XML elements as <tag ...></tag>.  By default this package
+//	encodes empty elements as <tag .../>.  If you're marshaling Map values that include structures
+//	(which are passed to xml.Marshal for encoding), this will let you conform to the standard package.
+func XmlGoEmptyElemSyntax() {
+	useGoXmlEmptyElemSyntax = true
+}
+
+// XmlDefaultEmptyElemSyntax() - <tag .../> rather than <tag ...></tag>.
+// Return XML encoding for empty elements to the default package setting.
+// Reverses effect of XmlGoEmptyElemSyntax().
+func XmlDefaultEmptyElemSyntax() {
+	useGoXmlEmptyElemSyntax = false
+}
+
+// ------- issue #88 ----------
+// xmlCheckIsValid set switch to force decoding the encoded XML to
+// see if it is valid XML.
+var xmlCheckIsValid bool
+
+// XmlCheckIsValid forces the encoded XML to be checked for validity.
+func XmlCheckIsValid(b ...bool) {
+	if len(b) == 1 {
+		xmlCheckIsValid = b[0]
+		return
+	}
+	xmlCheckIsValid = !xmlCheckIsValid
+}
+
+// Encode a Map as XML.  The companion of NewMapXml().
+// The following rules apply.
+//    - The key label "#text" is treated as the value for a simple element with attributes.
+//    - Map keys that begin with a hyphen, '-', are interpreted as attributes.
+//      It is an error if the attribute doesn't have a []byte, string, number, or boolean value.
+//    - Map value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements that have only attribute values or are null are terminated using "/>".
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+//    - To encode empty elements in a syntax consistent with encoding/xml call XmlGoEmptyElemSyntax().
+// The attributes tag=value pairs are alphabetized by "tag".  Also, when encoding map[string]interface{} values -
+// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted.
+func (mv Map) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	b := new(bytes.Buffer)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = marshalMapToXmlIndent(false, b, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = marshalMapToXmlIndent(false, b, rootTag[0], m, p)
+	} else {
+		err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+	}
+done:
+	if xmlCheckIsValid {
+		d := xml.NewDecoder(bytes.NewReader(b.Bytes()))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return b.Bytes(), err
+}
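+
+// Editor's illustrative sketch, not part of the upstream source: encoding a Map that
+// uses an attribute key and a "#text" value, per the rules above.
+//
+//	m := mxj.Map{
+//		"note": map[string]interface{}{
+//			"-id":   7,
+//			"#text": "hello",
+//		},
+//	}
+//	x, err := m.Xml()
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(string(x)) // <note id="7">hello</note>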
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as  XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// Writes the Map as pretty XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// --------------  Handle XML stream by processing Map value --------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for input.
+var xhandlerPollInterval = time.Millisecond
+
+// Bulk process XML using handlers that process a Map value.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass the argument to a goroutine in the handler and return 'true'.
+func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapXmlReader(xmlReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			time.Sleep(xhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
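+
+// Editor's illustrative sketch, not part of the upstream source: counting messages on
+// an XML stream; returning 'true' from both handlers keeps the reader going.
+//
+//	var count int
+//	err := mxj.HandleXmlReader(os.Stdin,
+//		func(m mxj.Map) bool { count++; return true },
+//		func(err error) bool { log.Println(err); return true })
+//	if err != nil {
+//		// handle terminal error
+//	}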
+
+// Bulk process XML using handlers that process a Map value and the raw XML.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass the argument(s) to a goroutine in the handler and return 'true'.
+//	See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader.
+func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapXmlReaderRaw(xmlReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			time.Sleep(xhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// ----------------- END: Handle XML stream by processing Map value --------------
+
+// --------  a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ----------
+
+// This is a clone of io.TeeReader with the additional method t.ReadByte().
+// Thus, this TeeReader is also an io.ByteReader.
+// This is necessary because xml.NewDecoder wants an io.ByteReader, not just an io.Reader. It appears to have been written
+// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to implement ReadByte().
+// If NewDecoder is passed a Reader that does not satisfy io.ByteReader, it wraps the Reader with
+// bufio.NewReader and uses that wrapper's ReadByte rather than the Read method that runs the TeeReader pipe logic.
+
+type teeReader struct {
+	r io.Reader
+	w io.Writer
+	b []byte
+}
+
+func myTeeReader(r io.Reader, w io.Writer) io.Reader {
+	b := make([]byte, 1)
+	return &teeReader{r, w, b}
+}
+
+// need for io.Reader - but we don't use it ...
+func (t *teeReader) Read(p []byte) (int, error) {
+	return 0, nil
+}
+
+func (t *teeReader) ReadByte() (byte, error) {
+	n, err := t.r.Read(t.b)
+	if n > 0 {
+		if _, err := t.w.Write(t.b[:1]); err != nil {
+			return t.b[0], err
+		}
+	}
+	return t.b[0], err
+}
+
+// For use with NewMapXmlReader & NewMapXmlSeqReader.
+type byteReader struct {
+	r io.Reader
+	b []byte
+}
+
+func myByteReader(r io.Reader) io.Reader {
+	b := make([]byte, 1)
+	return &byteReader{r, b}
+}
+
+// Need for io.Reader interface ...
+// Needed if reading a malformed http.Request.Body - issue #38.
+func (b *byteReader) Read(p []byte) (int, error) {
+	return b.r.Read(p)
+}
+
+func (b *byteReader) ReadByte() (byte, error) {
+	n, err := b.r.Read(b.b)
+	if n > 0 {
+		// also return 'err' so an io.EOF from the underlying reader is not
+		// masked and the caller doesn't keep re-reading a stale byte
+		return b.b[0], err
+	}
+	var c byte
+	return c, err
+}
+
+// ----------------------- END: io.TeeReader hack -----------------------------------
+
+// ---------------------- XmlIndent - from j2x package ----------------------------
+
+// Encode a map[string]interface{} as a pretty XML string.
+// See Xml for encoding rules.
+func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	b := new(bytes.Buffer)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p)
+			} else {
+				err = marshalMapToXmlIndent(true, b, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = marshalMapToXmlIndent(true, b, rootTag[0], m, p)
+	} else {
+		err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p)
+	}
+	if xmlCheckIsValid {
+		d := xml.NewDecoder(bytes.NewReader(b.Bytes()))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return b.Bytes(), err
+}
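+
+// Editor's illustrative sketch, not part of the upstream source: pretty-printing a Map
+// with a two-space indent.
+//
+//	m := mxj.Map{"doc": map[string]interface{}{"a": 1, "b": "two"}}
+//	x, _ := m.XmlIndent("", "  ")
+//	fmt.Println(string(x))
+//	// output (approximately):
+//	// <doc>
+//	//   <a>1</a>
+//	//   <b>two</b>
+//	// </doc>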
+
+type pretty struct {
+	indent   string
+	cnt      int
+	padding  string
+	mapDepth int
+	start    int
+}
+
+func (p *pretty) Indent() {
+	p.padding += p.indent
+	p.cnt++
+}
+
+func (p *pretty) Outdent() {
+	if p.cnt > 0 {
+		p.padding = p.padding[:len(p.padding)-len(p.indent)]
+		p.cnt--
+	}
+}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+// NOTE: 01may20 - replaces mapToXmlIndent(); uses bytes.Buffer instead for string appends.
+func marshalMapToXmlIndent(doIndent bool, b *bytes.Buffer, key string, value interface{}, pp *pretty) error {
+	var err error
+	var endTag bool
+	var isSimple bool
+	var elen int
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	// per issue #48, 18apr18 - try and coerce maps to map[string]interface{}
+	// Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq().
+	if reflect.ValueOf(value).Kind() == reflect.Map {
+		switch value.(type) {
+		case map[string]interface{}:
+		default:
+			val := make(map[string]interface{})
+			vv := reflect.ValueOf(value)
+			keys := vv.MapKeys()
+			for _, k := range keys {
+				val[fmt.Sprint(k)] = vv.MapIndex(k).Interface()
+			}
+			value = val
+		}
+	}
+
+	// 14jul20.  The following block of code has become something of a catch all for odd stuff
+	// that might be passed in as a result of casting an arbitrary map[<T>]<T> to an mxj.Map
+	// value and then call m.Xml or m.XmlIndent. See issue #71 (and #73) for such edge cases.
+	switch value.(type) {
+	// these types are handled during encoding
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number:
+	case []map[string]interface{}, []string, []float64, []bool, []int, []int32, []int64, []float32, []json.Number:
+	case []interface{}:
+	case nil:
+		value = ""
+	default:
+		// see if value is a struct, if so marshal using encoding/xml package
+		if reflect.ValueOf(value).Kind() == reflect.Struct {
+			if v, err := xml.Marshal(value); err != nil {
+				return err
+			} else {
+				value = string(v)
+			}
+		} else {
+			// coerce everything else into a string value
+			value = fmt.Sprint(value)
+		}
+	}
+
+	// start the XML tag with required indentation and padding
+	if doIndent {
+		if _, err = b.WriteString(p.padding); err != nil {
+			return err
+		}
+	}
+	switch value.(type) {
+	case []interface{}:
+	default:
+		if _, err = b.WriteString(`<` + key); err != nil {
+			return err
+		}
+	}
+
+	switch value.(type) {
+	case map[string]interface{}:
+		vv := value.(map[string]interface{})
+		lenvv := len(vv)
+		// scan out attributes - attribute keys have prepended attrPrefix
+		attrlist := make([][2]string, len(vv))
+		var n int
+		var ss string
+		for k, v := range vv {
+			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
+				switch v.(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(v.(string))
+					} else {
+						ss = v.(string)
+					}
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = ss
+				case float64, bool, int, int32, int64, float32, json.Number:
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = fmt.Sprintf("%v", v)
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(v.([]byte)))
+					} else {
+						ss = string(v.([]byte))
+					}
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = ss
+				default:
+					return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v)
+				}
+				n++
+			}
+		}
+		if n > 0 {
+			attrlist = attrlist[:n]
+			sort.Sort(attrList(attrlist))
+			for _, v := range attrlist {
+				if _, err = b.WriteString(` ` + v[0] + `="` + v[1] + `"`); err != nil {
+					return err
+				}
+			}
+		}
+		// only attributes?
+		if n == lenvv {
+			if useGoXmlEmptyElemSyntax {
+				if _, err = b.WriteString(`</` + key + ">"); err != nil {
+					return err
+				}
+			} else {
+				if _, err = b.WriteString(`/>`); err != nil {
+					return err
+				}
+			}
+			break
+		}
+
+		// simple element? Note: "#text" is an invalid XML tag.
+		isComplex := false
+		if v, ok := vv["#text"]; ok && n+1 == lenvv {
+			// just the value and attributes
+			switch v.(type) {
+			case string:
+				if xmlEscapeChars {
+					v = escapeChars(v.(string))
+				} else {
+					v = v.(string)
+				}
+			case []byte:
+				if xmlEscapeChars {
+					v = escapeChars(string(v.([]byte)))
+				} else {
+					v = string(v.([]byte))
+				}
+			}
+			if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil {
+				return err
+			}
+			endTag = true
+			elen = 1
+			isSimple = true
+			break
+		} else if ok {
+			// need to handle when there are subelements in addition to the simple element value
+			// issue #90
+			switch v.(type) {
+			case string:
+				if xmlEscapeChars {
+					v = escapeChars(v.(string))
+				} else {
+					v = v.(string)
+				}
+			case []byte:
+				if xmlEscapeChars {
+					v = escapeChars(string(v.([]byte)))
+				} else {
+					v = string(v.([]byte))
+				}
+			}
+			if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil {
+				return err
+			}
+			isComplex = true
+		}
+
+		// close tag with possible attributes
+		if !isComplex {
+			if _, err = b.WriteString(">"); err != nil {
+				return err
+			}
+		}
+		if doIndent {
+			// *s += "\n"
+			if _, err = b.WriteString("\n"); err != nil {
+				return err
+			}
+		}
+		// something more complex
+		p.mapDepth++
+		// extract the map k:v pairs and sort on key
+		elemlist := make([][2]interface{}, len(vv))
+		n = 0
+		for k, v := range vv {
+			if k == "#text" {
+				// simple element handled above
+				continue
+			}
+			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
+				continue
+			}
+			elemlist[n][0] = k
+			elemlist[n][1] = v
+			n++
+		}
+		elemlist = elemlist[:n]
+		sort.Sort(elemList(elemlist))
+		var i int
+		for _, v := range elemlist {
+			switch v[1].(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := marshalMapToXmlIndent(doIndent, b, v[0].(string), v[1], p); err != nil {
+				return err
+			}
+			switch v[1].(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content ...
+	case []interface{}:
+		// special case - found during implementing Issue #23
+		if len(value.([]interface{})) == 0 {
+			if doIndent {
+				if _, err = b.WriteString(p.padding + p.indent); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString("<" + key); err != nil {
+				return err
+			}
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case []string:
+		// This was added by https://github.com/slotix ... not a type that
+		// would be encountered if mv generated from NewMapXml, NewMapJson.
+		// Could be encountered in AnyXml(), so we'll let it stay, though
+		// it should be merged with case []interface{}, above.
+		// quick fix for []string type
+		// []string should be treated exactly as []interface{}
+		if len(value.([]string)) == 0 {
+			if doIndent {
+				if _, err = b.WriteString(p.padding + p.indent); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString("<" + key); err != nil {
+				return err
+			}
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]string) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			// *s += p.padding
+			if _, err = b.WriteString(p.padding); err != nil {
+				return err
+			}
+		}
+		if _, err = b.WriteString("<" + key); err != nil {
+			return err
+		}
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			v := value.(string)
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				// *s += ">" + v
+				if _, err = b.WriteString(">" + v); err != nil {
+					return err
+				}
+			}
+		case float64, bool, int, int32, int64, float32, json.Number:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v) // always > 0
+			if _, err = b.WriteString(">" + v); err != nil {
+				return err
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			v := string(value.([]byte))
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				// *s += ">" + v
+				if _, err = b.WriteString(">" + v); err != nil {
+					return err
+				}
+			}
+		default:
+			if _, err = b.WriteString(">"); err != nil {
+				return err
+			}
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				if _, err = b.WriteString(">UNKNOWN"); err != nil {
+					return err
+				}
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					if _, err = b.Write(v); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag {
+		if doIndent {
+			if !isSimple {
+				if _, err = b.WriteString(p.padding); err != nil {
+					return err
+				}
+			}
+		}
+		if elen > 0 || useGoXmlEmptyElemSyntax {
+			if elen == 0 {
+				if _, err = b.WriteString(">"); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString(`</` + key + ">"); err != nil {
+				return err
+			}
+		} else {
+			if _, err = b.WriteString(`/>`); err != nil {
+				return err
+			}
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			if _, err = b.WriteString("\n"); err != nil {
+				return err
+			}
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// ============================ sort interface implementation =================
+
+type attrList [][2]string
+
+func (a attrList) Len() int {
+	return len(a)
+}
+
+func (a attrList) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a attrList) Less(i, j int) bool {
+	return a[i][0] <= a[j][0]
+}
+
+type elemList [][2]interface{}
+
+func (e elemList) Len() int {
+	return len(e)
+}
+
+func (e elemList) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemList) Less(i, j int) bool {
+	return e[i][0].(string) <= e[j][0].(string)
+}

+ 877 - 0
vendor/github.com/clbanning/mxj/v2/xmlseq.go

@@ -0,0 +1,877 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+)
+
+// MapSeq is like Map but contains sequencing indices to allow recovering the original order of
+// the XML elements when the map[string]interface{} is marshaled. Element attributes are
+// stored as a map["#attr"]map[<attr_key>]map[string]interface{}{"#text":"<value>", "#seq":<attr_index>}
+// value instead of denoting the keys with a prefix character.  Also, comments, directives and
+// process instructions are preserved.
+type MapSeq map[string]interface{}
+
+// NoRoot is returned by NewXmlSeq, etc., when a comment, directive or procinstr element is parsed
+// in the XML data stream and the element is not contained in an XML object with a root element.
+var NoRoot = errors.New("no root key")
+var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// NewMapXmlSeq converts a XML doc into a MapSeq value with elements id'd with decoding sequence key represented
+// as map["#seq"]<int value>.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with msv.Xml() / msv.XmlIndent().
+//	• attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<aval>, "#seq":<num>}
+//	• all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+//	• lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//	  include a "#seq" k:v pair based on sequence they are decoded.  Thus, XML like:
+//	      <doc>
+//	         <ltag>value 1</ltag>
+//	         <newtag>value 2</newtag>
+//	         <ltag>value 3</ltag>
+//	      </doc>
+//	  is decoded as:
+//	    doc :
+//	      ltag :[[]interface{}]
+//	        [item: 0]
+//	          #seq :[int] 0
+//	          #text :[string] value 1
+//	        [item: 1]
+//	          #seq :[int] 2
+//	          #text :[string] value 3
+//	      newtag :
+//	        #seq :[int] 1
+//	        #text :[string] value 2
+//	  It will encode in proper sequence even though the MapSeq representation merges all "ltag" elements in an array.
+//	• comments - "<!--comment-->" -  are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+//	• directives - "<!text>" - are decoded as map["#directive"]map[#text"]"directive_text" with a "#seq" k:v pair.
+//	• process instructions  - "<?instr?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//	  is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+//	• comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//	  map[string]interface{} and the error value 'NoRoot'.
+//	• note: "<![CDATA[" syntax is lost in xml.Decode parser - and is not handled here, either.
+//	   and: "\r\n" is converted to "\n"
+//
+//	NOTES:
+//	   1. The 'xmlVal' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+//	NAME SPACES:
+//	   1. Keys in the MapSeq value that are parsed from a <name space prefix>:<local name> tag preserve the
+//	      "<prefix>:" notation rather than stripping it as with NewMapXml().
+//	   2. Attribute keys for name space prefix declarations preserve "xmlns:<prefix>" notation.
+//
+//	ERRORS:
+//	   1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment",
+//	      "#directive" or #procinst" key.
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
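+
+// Editor's illustrative sketch, not part of the upstream source: a MapSeq round trip
+// that preserves the original element order (the "#seq" keys are removed on encoding,
+// per the NOTE above).
+//
+//	msv, err := mxj.NewMapXmlSeq([]byte(`<doc><b>2</b><a>1</a></doc>`))
+//	if err != nil {
+//		// handle error; may be mxj.NoRoot for rootless comments/directives
+//	}
+//	x, _ := msv.Xml()
+//	fmt.Println(string(x)) // <doc><b>2</b><a>1</a></doc> - "b" still precedes "a"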
+
+// NewMapXmlSeqReader returns the next XML doc from an io.Reader as a MapSeq value.
+//	NOTES:
+//	   1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+//	ERRORS:
+//	   1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment",
+//	      "#directive" or #procinst" key.
+func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlSeqReaderToMap(xmlReader, r)
+}
+
+// NewMapXmlSeqReaderRaw returns the next XML doc from an io.Reader as a MapSeq value.
+// Returns MapSeq value, slice with the raw XML, and any error.
+//	NOTES:
+//	   1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	    2. The 'raw' return value may be larger than the XML text value.
+//	    3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	       extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	    4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	       re-encode the message in its original structure.
+//	    5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+//	ERRORS:
+//	    1. If a NoRoot error, "no root key," is returned, check if the initial map key is "#comment",
+//	       "#directive" or #procinst" key.
+func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (MapSeq, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb)
+
+	m, err := xmlSeqReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	// err may be NoRoot
+	return m, b, err
+}
+
+// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// xmlSeqToMap - convert a XML doc into map[string]interface{} value
+func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly.
+// Add #seq tag value for each element decoded - to be used for Encoding later.
+func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
+	if snakeCaseKeys {
+		skey = strings.Replace(skey, "-", "_", -1)
+	}
+
+	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
+	var n, na map[string]interface{}
+	var seq int // for including seq num when decoding
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	//       to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	//       where we begin allocating map[string]interface{} values 'n' and 'na'.
+	if skey != "" {
+		// 'n' only needs one slot - save call to runtime•hashGrow()
+		// 'na' we don't know
+		n = make(map[string]interface{}, 1)
+		na = make(map[string]interface{})
+		if len(a) > 0 {
+			// xml.Attr is decoded into: map["#attr"]map[<attr_label>]interface{}
+			// where interface{} is map[string]interface{}{"#text":<attr_val>, "#seq":<attr_seq>}
+			aa := make(map[string]interface{}, len(a))
+			for i, v := range a {
+				if snakeCaseKeys {
+					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
+				}
+				if xmlEscapeCharsDecoder { // per issue#84
+					v.Value = escapeChars(v.Value)
+				}
+				if len(v.Name.Space) > 0 {
+					aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i}
+				} else {
+					aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i}
+				}
+			}
+			na["#attr"] = aa
+		}
+	}
+
+	// Return XMPP <stream:stream> message.
+	if handleXMPPStreamTag && skey == "stream:stream" {
+		n[skey] = na
+		return n, nil
+	}
+
+	for {
+		t, err := p.RawToken()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
+			}
+			return nil, err
+		}
+		switch t.(type) {
+		case xml.StartElement:
+			tt := t.(xml.StartElement)
+
+			// First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key.
+			// So when the loop is first entered, the first token is the root tag along
+			// with any attributes, which we process here.
+			//
+			// Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for
+			// processing before getting the next token which is the element value,
+			// which is done above.
+			if skey == "" {
+				if len(tt.Name.Space) > 0 {
+					return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+				} else {
+					return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+				}
+			}
+
+			// If not initializing the map, parse the element.
+			// len(nn) == 1, necessarily - it is just an 'n'.
+			var nn map[string]interface{}
+			if len(tt.Name.Space) > 0 {
+				nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+			} else {
+				nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			// The nn map[string]interface{} value is a na[nn_key] value.
+			// We need to see if nn_key already exists - means we're parsing a list.
+			// This may require converting na[nn_key] value into []interface{} type.
+			// First, extract the key:val for the map - it's a singleton.
+			var key string
+			var val interface{}
+			for key, val = range nn {
+				break
+			}
+
+			// add "#seq" k:v pair -
+			// Sequence number included even in list elements - this should allow us
+			// to properly resequence even something goofy like:
+			//     <list>item 1</list>
+			//     <subelement>item 2</subelement>
+			//     <list>item 3</list>
+			// where all the "list" subelements are decoded into an array.
+			switch val.(type) {
+			case map[string]interface{}:
+				val.(map[string]interface{})["#seq"] = seq
+				seq++
+			case interface{}: // a non-nil simple element: string, float64, bool
+				v := map[string]interface{}{"#text": val, "#seq": seq}
+				seq++
+				val = v
+			}
+
+			// 'na' holding sub-elements of n.
+			// See if 'key' already exists.
+			// If 'key' exists, then this is a list, if not just add key:val to na.
+			if v, ok := na[key]; ok {
+				var a []interface{}
+				switch v.(type) {
+				case []interface{}:
+					a = v.([]interface{})
+				default: // anything else - note: v.(type) != nil
+					a = []interface{}{v}
+				}
+				a = append(a, val)
+				na[key] = a
+			} else {
+				na[key] = val // save it as a singleton
+			}
+		case xml.EndElement:
+			if skey != "" {
+				tt := t.(xml.EndElement)
+				if snakeCaseKeys {
+					tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1)
+				}
+				var name string
+				if len(tt.Name.Space) > 0 {
+					name = tt.Name.Space + `:` + tt.Name.Local
+				} else {
+					name = tt.Name.Local
+				}
+				if skey != name {
+					return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d",
+						skey, name, p.InputOffset())
+				}
+			}
+			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
+			if len(n) == 0 {
+				// If len(na)==0 we have an empty element == "";
+				// it has no xml.Attr nor xml.CharData.
+				// Empty element content will be  map["etag"]map["#text"]""
+				// after #seq injection - map["etag"]map["#seq"]seq - after return.
+				if len(na) > 0 {
+					n[skey] = na
+				} else {
+					n[skey] = "" // empty element
+				}
+			}
+			return n, nil
+		case xml.CharData:
+			// clean up possible noise
+			tt := strings.Trim(string(t.(xml.CharData)), trimRunes)
+			if xmlEscapeCharsDecoder { // issue#84
+				tt = escapeChars(tt)
+			}
+			if skey == "" {
+				// per Adrian (http://www.adrianlungu.com/) catch stray text
+				// in decoder stream -
+				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+				continue
+			}
+			if len(tt) > 0 {
+				// every simple element is a #text and has #seq associated with it
+				na["#text"] = cast(tt, r, "")
+				na["#seq"] = seq
+				seq++
+			}
+		case xml.Comment:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#comment": string(t.(xml.Comment))}
+				return n, NoRoot
+			}
+			cm := make(map[string]interface{}, 2)
+			cm["#text"] = string(t.(xml.Comment))
+			cm["#seq"] = seq
+			seq++
+			na["#comment"] = cm
+		case xml.Directive:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#directive": string(t.(xml.Directive))}
+				return n, NoRoot
+			}
+			dm := make(map[string]interface{}, 2)
+			dm["#text"] = string(t.(xml.Directive))
+			dm["#seq"] = seq
+			seq++
+			na["#directive"] = dm
+		case xml.ProcInst:
+			if n == nil {
+				na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)}
+				n = map[string]interface{}{"#procinst": na}
+				return n, NoRoot
+			}
+			pm := make(map[string]interface{}, 3)
+			pm["#target"] = t.(xml.ProcInst).Target
+			pm["#inst"] = string(t.(xml.ProcInst).Inst)
+			pm["#seq"] = seq
+			seq++
+			na["#procinst"] = pm
+		default:
+			// noop - shouldn't ever get here, now, since we handle all token types
+		}
+	}
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// --------------------- mv.XmlSeq & mv.XmlSeqWriter -------------------------
+
+// Xml encodes a MapSeq as XML with elements sorted on #seq.  The companion of NewMapXmlSeq().
+// The following rules apply.
+//    - The "#seq" key value is used to sequence the subelements or attributes only.
+//    - The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+//    - The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+//    - The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+//    - The "#procinst" map key identifies a processing instruction in the value "#target" and "#inst"
+//      map entries - <?target inst?>.
+//    - Value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements that have only attribute values or are null are terminated with "/>" unless XmlGoEmptyElemSyntax() has been called.
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+func (mv MapSeq) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlSeqIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	if xmlCheckIsValid {
+		d := xml.NewDecoder(bytes.NewReader([]byte(*s)))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return []byte(*s), err
+}
+
+// The following implementations are provided only for symmetry with NewMapXmlReader[Raw].
+// The names also provide a cue to the number of return arguments.
+
+// XmlWriter writes the MapSeq value as XML to the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlWriterRaw writes the MapSeq value as XML to the Writer. []byte is the raw XML that was written.
+// See MapSeq.Xml() for encoding rules.
+/*
+func (mv MapSeq) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// XmlIndentWriter writes the MapSeq value as pretty XML on the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlIndentWriterRaw writes the MapSeq value as pretty XML to the Writer. []byte is the raw XML that was written.
+// See MapSeq.Xml() for encoding rules.
+/*
+func (mv MapSeq) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// ---------------------- XmlSeqIndent ----------------------------
+
+// XmlIndent encodes a MapSeq as a pretty XML string.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	if xmlCheckIsValid {
+		if _, err = NewMapXml([]byte(*s)); err != nil {
+			return nil, err
+		}
+		d := xml.NewDecoder(bytes.NewReader([]byte(*s)))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return []byte(*s), err
+}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
+	var endTag bool
+	var isSimple bool
+	var noEndTag bool
+	var elen int
+	var ss string
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	switch value.(type) {
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+		if doIndent {
+			*s += p.padding
+		}
+		if key != "#comment" && key != "#directive" && key != "#procinst" {
+			*s += `<` + key
+		}
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		val := value.(map[string]interface{})
+
+		if key == "#comment" {
+			*s += `<!--` + val["#text"].(string) + `-->`
+			noEndTag = true
+			break
+		}
+
+		if key == "#directive" {
+			*s += `<!` + val["#text"].(string) + `>`
+			noEndTag = true
+			break
+		}
+
+		if key == "#procinst" {
+			*s += `<?` + val["#target"].(string) + ` ` + val["#inst"].(string) + `?>`
+			noEndTag = true
+			break
+		}
+
+		haveAttrs := false
+		// process attributes first
+		if v, ok := val["#attr"].(map[string]interface{}); ok {
+			// First, unroll the map[string]interface{} into a []keyval array.
+			// Then sequence it.
+			kv := make([]keyval, len(v))
+			n := 0
+			for ak, av := range v {
+				kv[n] = keyval{ak, av}
+				n++
+			}
+			sort.Sort(elemListSeq(kv))
+			// Now encode the attributes in original decoding sequence, using keyval array.
+			for _, a := range kv {
+				vv := a.v.(map[string]interface{})
+				switch vv["#text"].(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(vv["#text"].(string))
+					} else {
+						ss = vv["#text"].(string)
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				case float64, bool, int, int32, int64, float32:
+					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"`
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(vv["#text"].([]byte)))
+					} else {
+						ss = string(vv["#text"].([]byte))
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				default:
+					return fmt.Errorf("invalid attribute value for: %s", a.k)
+				}
+			}
+			haveAttrs = true
+		}
+
+		// simple element?
+		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
+		_, seqOK := val["#seq"] // have key
+		if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
+			if stmp, ok := v.(string); ok && stmp != "" {
+				if xmlEscapeChars {
+					stmp = escapeChars(stmp)
+				}
+				*s += ">" + stmp
+				endTag = true
+				elen = 1
+			}
+			isSimple = true
+			break
+		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
+			// here no #text but have #seq or #seq+#attr
+			endTag = false
+			break
+		}
+
+		// we now need to sequence everything except attributes
+		// 'kv' will hold everything that needs to be written
+		kv := make([]keyval, 0)
+		for k, v := range val {
+			if k == "#attr" { // already processed
+				continue
+			}
+			if k == "#seq" { // ignore - just for sorting
+				continue
+			}
+			switch v.(type) {
+			case []interface{}:
+				// unwind the array as separate entries
+				for _, vv := range v.([]interface{}) {
+					kv = append(kv, keyval{k, vv})
+				}
+			default:
+				kv = append(kv, keyval{k, v})
+			}
+		}
+
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		sort.Sort(elemListSeq(kv))
+		i := 0
+		for _, v := range kv {
+			switch v.v.(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil {
+				return err
+			}
+			switch v.v.(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content other than attrs
+	case []interface{}:
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			if xmlEscapeChars {
+				ss = escapeChars(value.(string))
+			} else {
+				ss = value.(string)
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		case float64, bool, int, int32, int64, float32:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			if xmlEscapeChars {
+				ss = escapeChars(string(value.([]byte)))
+			} else {
+				ss = string(value.([]byte))
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag && !noEndTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		switch value.(type) {
+		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+			if elen > 0 || useGoXmlEmptyElemSyntax {
+				if elen == 0 {
+					*s += ">"
+				}
+				*s += `</` + key + ">"
+			} else {
+				*s += `/>`
+			}
+		}
+	} else if !noEndTag {
+		if useGoXmlEmptyElemSyntax {
+			*s += `</` + key + ">"
+			// *s += "></" + key + ">"
+		} else {
+			*s += "/>"
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// the element sort implementation
+
+type keyval struct {
+	k string
+	v interface{}
+}
+type elemListSeq []keyval
+
+func (e elemListSeq) Len() int {
+	return len(e)
+}
+
+func (e elemListSeq) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemListSeq) Less(i, j int) bool {
+	var iseq, jseq int
+	var fiseq, fjseq float64
+	var ok bool
+	if iseq, ok = e[i].v.(map[string]interface{})["#seq"].(int); !ok {
+		if fiseq, ok = e[i].v.(map[string]interface{})["#seq"].(float64); ok {
+			iseq = int(fiseq)
+		} else {
+			iseq = 9999999
+		}
+	}
+
+	if jseq, ok = e[j].v.(map[string]interface{})["#seq"].(int); !ok {
+		if fjseq, ok = e[j].v.(map[string]interface{})["#seq"].(float64); ok {
+			jseq = int(fjseq)
+		} else {
+			jseq = 9999999
+		}
+	}
+
+	return iseq <= jseq
+}
+
+// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio
+
+// BeautifyXml (re)formats an XML doc similarly to Map.XmlIndent().
+// It preserves comments, directives, and processing instructions.
+func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) {
+	x, err := NewMapXmlSeq(b)
+	if err != nil {
+		return nil, err
+	}
+	return x.XmlIndent(prefix, indent)
+}
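The sequenced decode/encode pair defined in this file (NewMapXmlSeq, MapSeq.Xml/XmlIndent, and BeautifyXml) is easiest to see end to end. Below is a minimal round-trip sketch, assuming the vendored clbanning/mxj/v2 package as shown above; the sample XML and tag names are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj/v2"
)

func main() {
	// Hypothetical document, used only to exercise the sequenced API.
	doc := []byte(`<order id="7"><!--rush--><item>widget</item><item>gadget</item></order>`)

	// Decode while recording document order in "#seq" keys.
	mseq, err := mxj.NewMapXmlSeq(doc)
	if err != nil {
		panic(err)
	}

	// Re-encode with subelements sorted on "#seq", pretty-printed.
	out, err := mseq.XmlIndent("", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	// BeautifyXml does the same round trip in a single call.
	pretty, err := mxj.BeautifyXml(doc, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pretty))
}
```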

+ 18 - 0
vendor/github.com/clbanning/mxj/v2/xmlseq2.go

@@ -0,0 +1,18 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+// ---------------- expose Map methods to MapSeq type ---------------------------
+
+// StringIndent pretty-prints a MapSeq.
+func (msv MapSeq) StringIndent(offset ...int) string {
+	return writeMap(map[string]interface{}(msv), true, true, offset...)
+}
+
+// StringIndentNoTypeInfo pretty-prints a MapSeq without the value type information - just key:value entries.
+func (msv MapSeq) StringIndentNoTypeInfo(offset ...int) string {
+	return writeMap(map[string]interface{}(msv), false, true, offset...)
+}
+

+ 21 - 0
vendor/github.com/dgryski/go-rendezvous/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 79 - 0
vendor/github.com/dgryski/go-rendezvous/rdv.go

@@ -0,0 +1,79 @@
+package rendezvous
+
+type Rendezvous struct {
+	nodes map[string]int
+	nstr  []string
+	nhash []uint64
+	hash  Hasher
+}
+
+type Hasher func(s string) uint64
+
+func New(nodes []string, hash Hasher) *Rendezvous {
+	r := &Rendezvous{
+		nodes: make(map[string]int, len(nodes)),
+		nstr:  make([]string, len(nodes)),
+		nhash: make([]uint64, len(nodes)),
+		hash:  hash,
+	}
+
+	for i, n := range nodes {
+		r.nodes[n] = i
+		r.nstr[i] = n
+		r.nhash[i] = hash(n)
+	}
+
+	return r
+}
+
+func (r *Rendezvous) Lookup(k string) string {
+	// short-circuit if we're empty
+	if len(r.nodes) == 0 {
+		return ""
+	}
+
+	khash := r.hash(k)
+
+	var midx int
+	var mhash = xorshiftMult64(khash ^ r.nhash[0])
+
+	for i, nhash := range r.nhash[1:] {
+		if h := xorshiftMult64(khash ^ nhash); h > mhash {
+			midx = i + 1
+			mhash = h
+		}
+	}
+
+	return r.nstr[midx]
+}
+
+func (r *Rendezvous) Add(node string) {
+	r.nodes[node] = len(r.nstr)
+	r.nstr = append(r.nstr, node)
+	r.nhash = append(r.nhash, r.hash(node))
+}
+
+func (r *Rendezvous) Remove(node string) {
+	// find index of the node to remove and drop it from the map
+	nidx := r.nodes[node]
+	delete(r.nodes, node)
+
+	// swap the last entry into the removed slot, then shrink the slices
+	l := len(r.nstr) - 1
+	if nidx != l {
+		moved := r.nstr[l]
+		r.nstr[nidx] = moved
+		r.nhash[nidx] = r.nhash[l]
+		r.nodes[moved] = nidx
+	}
+	r.nstr = r.nstr[:l]
+	r.nhash = r.nhash[:l]
+}
+
+func xorshiftMult64(x uint64) uint64 {
+	x ^= x >> 12 // a
+	x ^= x << 25 // b
+	x ^= x >> 27 // c
+	return x * 2685821657736338717
+}
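The rendezvous (highest-random-weight) hashing above is consumed internally by the go-redis Ring, but it can also be used directly. A minimal usage sketch, assuming the vendored package above together with the vendored cespare/xxhash/v2 as the 64-bit Hasher; the node addresses and key are placeholders.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
	rendezvous "github.com/dgryski/go-rendezvous"
)

func main() {
	// Build the table once with the full node set and a 64-bit hash function.
	ring := rendezvous.New(
		[]string{"redis-0:6379", "redis-1:6379", "redis-2:6379"},
		xxhash.Sum64String,
	)

	// Every caller that hashes the same key gets the same node back.
	fmt.Println(ring.Lookup("user:42"))

	// Adding a node only remaps the keys that now rank it highest.
	ring.Add("redis-3:6379")
	fmt.Println(ring.Lookup("user:42"))
}
```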

+ 3 - 0
vendor/github.com/go-redis/redis/v8/.gitignore

@@ -0,0 +1,3 @@
+*.rdb
+testdata/*/
+.idea/

+ 4 - 0
vendor/github.com/go-redis/redis/v8/.golangci.yml

@@ -0,0 +1,4 @@
+run:
+  concurrency: 8
+  deadline: 5m
+  tests: false

+ 4 - 0
vendor/github.com/go-redis/redis/v8/.prettierrc.yml

@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100

+ 177 - 0
vendor/github.com/go-redis/redis/v8/CHANGELOG.md

@@ -0,0 +1,177 @@
+## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
+
+
+### Bug Fixes
+
+* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
+* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
+* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
+* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
+* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
+* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
+* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
+* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
+* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
+
+
+### Features
+
+* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
+* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
+* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
+* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
+* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
+* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
+* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
+
+
+
+## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
+
+
+### Features
+
+* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
+* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
+* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
+
+
+
+## v8.11
+
+- Remove OpenTelemetry metrics.
+- Support for more Redis commands and options.
+
+## v8.10
+
+- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
+  single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
+  decision:
+
+  - Traces become smaller and less noisy.
+  - It may be costly to process those 3 extra spans for each query.
+  - go-redis no longer depends on OpenTelemetry.
+
+  Eventually we hope to replace the information that we no longer collect with OpenTelemetry
+  Metrics.
+
+## v8.9
+
+- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
+  `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
+
+## v8.8
+
+- To make updating easier, extra modules now have the same version as go-redis does. That means that
+  you need to update your imports:
+
+```
+github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
+github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
+```
+
+## v8.5
+
+- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
+  struct:
+
+```go
+err := rdb.HGetAll(ctx, "hash").Scan(&data)
+
+err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
+```
+
+- Please check [redismock](https://github.com/go-redis/redismock) by
+  [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
+
+## v8
+
+- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
+  using `context.Context` yet, the simplest option is to define global package variable
+  `var ctx = context.TODO()` and use it when `ctx` is required.
+
+- Full support for `context.Context` canceling.
+
+- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
+
+- Added `redisext.OpenTelemetryHook`, which adds
+  [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
+
+- Redis slow log support.
+
+- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
+  existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
+
+```go
+import "github.com/golang/groupcache/consistenthash"
+
+ring := redis.NewRing(&redis.RingOptions{
+    NewConsistentHash: func() {
+        return consistenthash.New(100, crc32.ChecksumIEEE)
+    },
+})
+```
+
+- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
+- `Options.MaxRetries` default value is changed from 0 to 3.
+
+- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
+
+## v7.3
+
+- New option `Options.Username`, which causes the client to use `AuthACL`. Be aware of this if your
+  connection URL contains a username.
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
+  interface.
+
+## v7
+
+- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
+  transactional pipeline.
+- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
+- WithContext can no longer be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error
+  when the context is cancelled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
+  detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
+  the time.
+- `SetLimiter` is removed; use `Options.Limiter` instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## 6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
+  `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a timeout
+  occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots`, which allows building a cluster of normal Redis
+  servers that don't have cluster mode enabled. See
+  https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
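The v8.5 entry in this changelog shows `HGetAll(ctx, "hash").Scan(&data)` without the receiving struct. A minimal sketch of what that hash-to-struct scan might look like, assuming go-redis v8 as vendored in this commit; the key, field names, and `Profile` struct are invented for illustration.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// The `redis` struct tags map hash fields onto struct fields.
type Profile struct {
	Name  string `redis:"name"`
	Score int    `redis:"score"`
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Populate a hash, then scan it straight into the struct.
	if err := rdb.HSet(ctx, "profile:1", "name", "ada", "score", 7).Err(); err != nil {
		panic(err)
	}

	var p Profile
	if err := rdb.HGetAll(ctx, "profile:1").Scan(&p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p)
}
```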

+ 25 - 0
vendor/github.com/go-redis/redis/v8/LICENSE

@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 35 - 0
vendor/github.com/go-redis/redis/v8/Makefile

@@ -0,0 +1,35 @@
+PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
+	go test ./...
+	go test ./... -short -race
+	go test ./... -run=NONE -bench=. -benchmem
+	env GOOS=linux GOARCH=386 go test ./...
+	go vet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+	go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+	mkdir -p $@
+	wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+	cd $< && make all
+
+fmt:
+	gofmt -w -s ./
+	goimports -w  -local github.com/go-redis/redis ./
+
+go_mod_tidy:
+	go get -u && go mod tidy
+	set -e; for dir in $(PACKAGE_DIRS); do \
+	  echo "go mod tidy in $${dir}"; \
+	  (cd "$${dir}" && \
+	    go get -u && \
+	    go mod tidy); \
+	done

+ 175 - 0
vendor/github.com/go-redis/redis/v8/README.md

@@ -0,0 +1,175 @@
+# Redis client for Go
+
+![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+
+go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
+OpenTelemetry and ClickHouse. Give it a star as well!
+
+## Resources
+
+- [Discussions](https://github.com/go-redis/redis/discussions)
+- [Documentation](https://redis.uptrace.dev)
+- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
+- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
+
+Other projects you may like:
+
+- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
+- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+## Features
+
+- Redis 3 commands except QUIT, MONITOR, and SYNC.
+- Automatic connection pooling with
+  [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
+- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
+- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
+  [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
+- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
+- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
+- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
+- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
+  without using cluster mode and Redis Sentinel.
+- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
+
+## Installation
+
+go-redis supports the two most recent Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
+
+```shell
+go get github.com/go-redis/redis/v8
+```
+
+## Quickstart
+
+```go
+import (
+    "context"
+    "github.com/go-redis/redis/v8"
+    "fmt"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+    rdb := redis.NewClient(&redis.Options{
+        Addr:     "localhost:6379",
+        Password: "", // no password set
+        DB:       0,  // use default DB
+    })
+
+    err := rdb.Set(ctx, "key", "value", 0).Err()
+    if err != nil {
+        panic(err)
+    }
+
+    val, err := rdb.Get(ctx, "key").Result()
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println("key", val)
+
+    val2, err := rdb.Get(ctx, "key2").Result()
+    if err == redis.Nil {
+        fmt.Println("key2 does not exist")
+    } else if err != nil {
+        panic(err)
+    } else {
+        fmt.Println("key2", val2)
+    }
+    // Output: key value
+    // key2 does not exist
+}
+```
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+    Min: "-inf",
+    Max: "+inf",
+    Offset: 0,
+    Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+    Keys: []string{"zset1", "zset2"},
+    Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## Run the test
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the Redis config file are defined in `main_test.go`:
+
+```
+var (
+	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
+
+```
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```
+go test
+```
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+<a href="https://github.com/go-redis/redis/graphs/contributors">
+  <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
+</a>
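The feature list in this README links to the cluster client that the vendored cluster.go below implements. A minimal connection sketch, assuming go-redis v8 as vendored here; the seed addresses and key are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Seed addresses only; the client discovers the rest of the cluster itself.
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{":7000", ":7001", ":7002"},
	})
	defer rdb.Close()

	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
	val, err := rdb.Get(ctx, "key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(val)
}
```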

+ 15 - 0
vendor/github.com/go-redis/redis/v8/RELEASING.md

@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run the `release.sh` script, which updates versions in go.mod files and pushes a new branch to GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. Merge the pull request and run `tag.sh` to create tags for packages:
+
+```shell
+TAG=v1.0.0 ./scripts/tag.sh
+```

+ 1750 - 0
vendor/github.com/go-redis/redis/v8/cluster.go

@@ -0,0 +1,1750 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"math"
+	"net"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hashtag"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+	// A seed list of host:port addresses of cluster nodes.
+	Addrs []string
+
+	// NewClient creates a cluster node client with the provided options.
+	NewClient func(opt *Options) *Client
+
+	// The maximum number of retries before giving up. Command is retried
+	// on network errors and MOVED/ASK redirects.
+	// Default is 3 retries.
+	MaxRedirects int
+
+	// Enables read-only commands on slave nodes.
+	ReadOnly bool
+	// Allows routing read-only commands to the closest master or slave node.
+	// It automatically enables ReadOnly.
+	RouteByLatency bool
+	// Allows routing read-only commands to a random master or slave node.
+	// It automatically enables ReadOnly.
+	RouteRandomly bool
+
+	// Optional function that returns cluster slots information.
+	// It is useful for manually creating a cluster of standalone Redis servers
+	// and load-balancing read/write operations between master and slaves.
+	// It can use a service like ZooKeeper to maintain configuration information
+	// and Cluster.ReloadState to manually trigger state reloading.
+	ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+	// Following options are copied from Options struct.
+
+	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username string
+	Password string
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	// PoolSize applies per cluster node and not for the whole cluster.
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+	if opt.MaxRedirects == -1 {
+		opt.MaxRedirects = 0
+	} else if opt.MaxRedirects == 0 {
+		opt.MaxRedirects = 3
+	}
+
+	if opt.RouteByLatency || opt.RouteRandomly {
+		opt.ReadOnly = true
+	}
+
+	if opt.PoolSize == 0 {
+		opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
+	}
+
+	switch opt.ReadTimeout {
+	case -1:
+		opt.ReadTimeout = 0
+	case 0:
+		opt.ReadTimeout = 3 * time.Second
+	}
+	switch opt.WriteTimeout {
+	case -1:
+		opt.WriteTimeout = 0
+	case 0:
+		opt.WriteTimeout = opt.ReadTimeout
+	}
+
+	if opt.MaxRetries == 0 {
+		opt.MaxRetries = -1
+	}
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+
+	if opt.NewClient == nil {
+		opt.NewClient = NewClient
+	}
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+	const disableIdleCheck = -1
+
+	return &Options{
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		Username: opt.Username,
+		Password: opt.Password,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: disableIdleCheck,
+
+		TLSConfig: opt.TLSConfig,
+		// If ClusterSlots is populated, then we probably have an artificial
+		// cluster whose nodes are not in clustering mode (otherwise there isn't
+		// much use for ClusterSlots config).  This means we cannot execute the
+		// READONLY command against that node -- setting readOnly to false in such
+		// situations in the options below will prevent that from happening.
+		readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+	Client *Client
+
+	latency    uint32 // atomic
+	generation uint32 // atomic
+	failing    uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+	opt := clOpt.clientOptions()
+	opt.Addr = addr
+	node := clusterNode{
+		Client: clOpt.NewClient(opt),
+	}
+
+	node.latency = math.MaxUint32
+	if clOpt.RouteByLatency {
+		go node.updateLatency()
+	}
+
+	return &node
+}
+
+func (n *clusterNode) String() string {
+	return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+	return n.Client.Close()
+}
+
+func (n *clusterNode) updateLatency() {
+	const numProbe = 10
+	var dur uint64
+
+	for i := 0; i < numProbe; i++ {
+		time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
+
+		start := time.Now()
+		n.Client.Ping(context.TODO())
+		dur += uint64(time.Since(start) / time.Microsecond)
+	}
+
+	latency := float64(dur) / float64(numProbe)
+	atomic.StoreUint32(&n.latency, uint32(latency+0.5))
+}
+
+func (n *clusterNode) Latency() time.Duration {
+	latency := atomic.LoadUint32(&n.latency)
+	return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+	atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+	const timeout = 15 // 15 seconds
+
+	failing := atomic.LoadUint32(&n.failing)
+	if failing == 0 {
+		return false
+	}
+	if time.Now().Unix()-int64(failing) < timeout {
+		return true
+	}
+	atomic.StoreUint32(&n.failing, 0)
+	return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+	return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+	for {
+		v := atomic.LoadUint32(&n.generation)
+		if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+			break
+		}
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+	opt *ClusterOptions
+
+	mu          sync.RWMutex
+	addrs       []string
+	nodes       map[string]*clusterNode
+	activeAddrs []string
+	closed      bool
+
+	_generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+	return &clusterNodes{
+		opt: opt,
+
+		addrs: opt.Addrs,
+		nodes: make(map[string]*clusterNode),
+	}
+}
+
+func (c *clusterNodes) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
+
+	var firstErr error
+	for _, node := range c.nodes {
+		if err := node.Client.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	c.nodes = nil
+	c.activeAddrs = nil
+
+	return firstErr
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+	var addrs []string
+
+	c.mu.RLock()
+	closed := c.closed //nolint:ifshort
+	if !closed {
+		if len(c.activeAddrs) > 0 {
+			addrs = c.activeAddrs
+		} else {
+			addrs = c.addrs
+		}
+	}
+	c.mu.RUnlock()
+
+	if closed {
+		return nil, pool.ErrClosed
+	}
+	if len(addrs) == 0 {
+		return nil, errClusterNoNodes
+	}
+	return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+	return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+	//nolint:prealloc
+	var collected []*clusterNode
+
+	c.mu.Lock()
+
+	c.activeAddrs = c.activeAddrs[:0]
+	for addr, node := range c.nodes {
+		if node.Generation() >= generation {
+			c.activeAddrs = append(c.activeAddrs, addr)
+			if c.opt.RouteByLatency {
+				go node.updateLatency()
+			}
+			continue
+		}
+
+		delete(c.nodes, addr)
+		collected = append(collected, node)
+	}
+
+	c.mu.Unlock()
+
+	for _, node := range collected {
+		_ = node.Client.Close()
+	}
+}
+
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
+	node, err := c.get(addr)
+	if err != nil {
+		return nil, err
+	}
+	if node != nil {
+		return node, nil
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	node, ok := c.nodes[addr]
+	if ok {
+		return node, nil
+	}
+
+	node = newClusterNode(c.opt, addr)
+
+	c.addrs = appendIfNotExists(c.addrs, addr)
+	c.nodes[addr] = node
+
+	return node, nil
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+	var node *clusterNode
+	var err error
+	c.mu.RLock()
+	if c.closed {
+		err = pool.ErrClosed
+	} else {
+		node = c.nodes[addr]
+	}
+	c.mu.RUnlock()
+	return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	cp := make([]*clusterNode, 0, len(c.nodes))
+	for _, node := range c.nodes {
+		cp = append(cp, node)
+	}
+	return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+	addrs, err := c.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	n := rand.Intn(len(addrs))
+	return c.GetOrCreate(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+	start, end int
+	nodes      []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+	return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+	return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+	p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+	nodes   *clusterNodes
+	Masters []*clusterNode
+	Slaves  []*clusterNode
+
+	slots []*clusterSlot
+
+	generation uint32
+	createdAt  time.Time
+}
+
+func newClusterState(
+	nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+	c := clusterState{
+		nodes: nodes,
+
+		slots: make([]*clusterSlot, 0, len(slots)),
+
+		generation: nodes.NextGeneration(),
+		createdAt:  time.Now(),
+	}
+
+	originHost, _, _ := net.SplitHostPort(origin)
+	isLoopbackOrigin := isLoopback(originHost)
+
+	for _, slot := range slots {
+		var nodes []*clusterNode
+		for i, slotNode := range slot.Nodes {
+			addr := slotNode.Addr
+			if !isLoopbackOrigin {
+				addr = replaceLoopbackHost(addr, originHost)
+			}
+
+			node, err := c.nodes.GetOrCreate(addr)
+			if err != nil {
+				return nil, err
+			}
+
+			node.SetGeneration(c.generation)
+			nodes = append(nodes, node)
+
+			if i == 0 {
+				c.Masters = appendUniqueNode(c.Masters, node)
+			} else {
+				c.Slaves = appendUniqueNode(c.Slaves, node)
+			}
+		}
+
+		c.slots = append(c.slots, &clusterSlot{
+			start: slot.Start,
+			end:   slot.End,
+			nodes: nodes,
+		})
+	}
+
+	sort.Sort(clusterSlotSlice(c.slots))
+
+	time.AfterFunc(time.Minute, func() {
+		nodes.GC(c.generation)
+	})
+
+	return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+	nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+	if err != nil {
+		return nodeAddr
+	}
+
+	nodeIP := net.ParseIP(nodeHost)
+	if nodeIP == nil {
+		return nodeAddr
+	}
+
+	if !nodeIP.IsLoopback() {
+		return nodeAddr
+	}
+
+	// Use origin host which is not loopback and node port.
+	return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+	ip := net.ParseIP(host)
+	if ip == nil {
+		return true
+	}
+	return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) > 0 {
+		return nodes[0], nil
+	}
+	return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	switch len(nodes) {
+	case 0:
+		return c.nodes.Random()
+	case 1:
+		return nodes[0], nil
+	case 2:
+		if slave := nodes[1]; !slave.Failing() {
+			return slave, nil
+		}
+		return nodes[0], nil
+	default:
+		var slave *clusterNode
+		for i := 0; i < 10; i++ {
+			n := rand.Intn(len(nodes)-1) + 1
+			slave = nodes[n]
+			if !slave.Failing() {
+				return slave, nil
+			}
+		}
+
+		// All slaves are loading - use master.
+		return nodes[0], nil
+	}
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) == 0 {
+		return c.nodes.Random()
+	}
+
+	var node *clusterNode
+	for _, n := range nodes {
+		if n.Failing() {
+			continue
+		}
+		if node == nil || n.Latency() < node.Latency() {
+			node = n
+		}
+	}
+	if node != nil {
+		return node, nil
+	}
+
+	// If all nodes are failing - return random node
+	return c.nodes.Random()
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) == 0 {
+		return c.nodes.Random()
+	}
+	if len(nodes) == 1 {
+		return nodes[0], nil
+	}
+	randomNodes := rand.Perm(len(nodes))
+	for _, idx := range randomNodes {
+		if node := nodes[idx]; !node.Failing() {
+			return node, nil
+		}
+	}
+	return nodes[randomNodes[0]], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+	i := sort.Search(len(c.slots), func(i int) bool {
+		return c.slots[i].end >= slot
+	})
+	if i >= len(c.slots) {
+		return nil
+	}
+	x := c.slots[i]
+	if slot >= x.start && slot <= x.end {
+		return x.nodes
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+	load func(ctx context.Context) (*clusterState, error)
+
+	state     atomic.Value
+	reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
+	return &clusterStateHolder{
+		load: fn,
+	}
+}
+
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+	state, err := c.load(ctx)
+	if err != nil {
+		return nil, err
+	}
+	c.state.Store(state)
+	return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload() {
+	if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+		return
+	}
+	go func() {
+		defer atomic.StoreUint32(&c.reloading, 0)
+
+		_, err := c.Reload(context.Background())
+		if err != nil {
+			return
+		}
+		time.Sleep(200 * time.Millisecond)
+	}()
+}
+
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
+	v := c.state.Load()
+	if v == nil {
+		return c.Reload(ctx)
+	}
+
+	state := v.(*clusterState)
+	if time.Since(state.createdAt) > 10*time.Second {
+		c.LazyReload()
+	}
+	return state, nil
+}
+
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+	state, err := c.Reload(ctx)
+	if err == nil {
+		return state, nil
+	}
+	return c.Get(ctx)
+}
+
+//------------------------------------------------------------------------------
+
+type clusterClient struct {
+	opt           *ClusterOptions
+	nodes         *clusterNodes
+	state         *clusterStateHolder //nolint:structcheck
+	cmdsInfoCache *cmdsInfoCache      //nolint:structcheck
+}
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+	*clusterClient
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+	opt.init()
+
+	c := &ClusterClient{
+		clusterClient: &clusterClient{
+			opt:   opt,
+			nodes: newClusterNodes(opt),
+		},
+		ctx: context.Background(),
+	}
+	c.state = newClusterStateHolder(c.loadState)
+	c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+	c.cmdable = c.Process
+
+	if opt.IdleCheckFrequency > 0 {
+		go c.reaper(opt.IdleCheckFrequency)
+	}
+
+	return c
+}
+
+func (c *ClusterClient) Context() context.Context {
+	return c.ctx
+}
+
+func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+	return c.opt
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+	c.state.LazyReload()
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+	return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.process)
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+	cmdInfo := c.cmdInfo(cmd.Name())
+	slot := c.cmdSlot(cmd)
+
+	var node *clusterNode
+	var ask bool
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		if node == nil {
+			var err error
+			node, err = c.cmdNode(ctx, cmdInfo, slot)
+			if err != nil {
+				return err
+			}
+		}
+
+		if ask {
+			pipe := node.Client.Pipeline()
+			_ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+			_ = pipe.Process(ctx, cmd)
+			_, lastErr = pipe.Exec(ctx)
+			_ = pipe.Close()
+			ask = false
+		} else {
+			lastErr = node.Client.Process(ctx, cmd)
+		}
+
+		// If there is no error - we are done.
+		if lastErr == nil {
+			return nil
+		}
+		if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+			if isReadOnly {
+				c.state.LazyReload()
+			}
+			node = nil
+			continue
+		}
+
+		// If slave is loading - pick another node.
+		if c.opt.ReadOnly && isLoadingError(lastErr) {
+			node.MarkAsFailing()
+			node = nil
+			continue
+		}
+
+		var moved bool
+		var addr string
+		moved, ask, addr = isMovedError(lastErr)
+		if moved || ask {
+			c.state.LazyReload()
+
+			var err error
+			node, err = c.nodes.GetOrCreate(addr)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if shouldRetry(lastErr, cmd.readTimeout() == nil) {
+			// First retry the same node.
+			if attempt == 0 {
+				continue
+			}
+
+			// Second try another node.
+			node.MarkAsFailing()
+			node = nil
+			continue
+		}
+
+		return lastErr
+	}
+	return lastErr
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+
+	for _, master := range state.Masters {
+		wg.Add(1)
+		go func(node *clusterNode) {
+			defer wg.Done()
+			err := fn(ctx, node.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(master)
+	}
+
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+
+	for _, slave := range state.Slaves {
+		wg.Add(1)
+		go func(node *clusterNode) {
+			defer wg.Done()
+			err := fn(ctx, node.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(slave)
+	}
+
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// ForEachShard concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachShard(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+
+	worker := func(node *clusterNode) {
+		defer wg.Done()
+		err := fn(ctx, node.Client)
+		if err != nil {
+			select {
+			case errCh <- err:
+			default:
+			}
+		}
+	}
+
+	for _, node := range state.Masters {
+		wg.Add(1)
+		go worker(node)
+	}
+	for _, node := range state.Slaves {
+		wg.Add(1)
+		go worker(node)
+	}
+
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+	var acc PoolStats
+
+	state, _ := c.state.Get(context.TODO())
+	if state == nil {
+		return &acc
+	}
+
+	for _, node := range state.Masters {
+		s := node.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+		acc.StaleConns += s.StaleConns
+	}
+
+	for _, node := range state.Slaves {
+		s := node.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+		acc.StaleConns += s.StaleConns
+	}
+
+	return &acc
+}
+
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
+	if c.opt.ClusterSlots != nil {
+		slots, err := c.opt.ClusterSlots(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return newClusterState(c.nodes, slots, "")
+	}
+
+	addrs, err := c.nodes.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	var firstErr error
+
+	for _, idx := range rand.Perm(len(addrs)) {
+		addr := addrs[idx]
+
+		node, err := c.nodes.GetOrCreate(addr)
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		slots, err := node.Client.ClusterSlots(ctx).Result()
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+	}
+
+	/*
+	 * No node is reachable. It's possible that every node's IP address has
+	 * changed. Clear activeAddrs so the client can reconnect using the initial
+	 * address list (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), which
+	 * gives it a chance to re-resolve the host names and pick up the new IPs.
+	 */
+	c.nodes.mu.Lock()
+	c.nodes.activeAddrs = nil
+	c.nodes.mu.Unlock()
+
+	return nil, firstErr
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+	ticker := time.NewTicker(idleCheckFrequency)
+	defer ticker.Stop()
+
+	for range ticker.C {
+		nodes, err := c.nodes.All()
+		if err != nil {
+			break
+		}
+
+		for _, node := range nodes {
+			_, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+			if err != nil {
+				internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
+			}
+		}
+	}
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
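+
+// Editorial sketch (not part of the upstream sources): queuing several commands
+// in one cluster pipeline. Commands are grouped by node and sent concurrently.
+// `rdb` and `ctx` are assumed to exist as in the ForEachShard sketch above.
+//
+//	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+//		pipe.Set(ctx, "user:1:name", "alice", 0)
+//		pipe.Incr(ctx, "user:1:visits")
+//		return nil
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	visits := cmds[1].(*redis.IntCmd).Val() // result of the queued INCR
+//	_ = visits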
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
+}
+
+func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
+	cmdsMap := newCmdsMap()
+	err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				setCmdsErr(cmds, err)
+				return err
+			}
+		}
+
+		failedCmds := newCmdsMap()
+		var wg sync.WaitGroup
+
+		for node, cmds := range cmdsMap.m {
+			wg.Add(1)
+			go func(node *clusterNode, cmds []Cmder) {
+				defer wg.Done()
+
+				err := c._processPipelineNode(ctx, node, cmds, failedCmds)
+				if err == nil {
+					return
+				}
+				if attempt < c.opt.MaxRedirects {
+					if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+						setCmdsErr(cmds, err)
+					}
+				} else {
+					setCmdsErr(cmds, err)
+				}
+			}(node, cmds)
+		}
+
+		wg.Wait()
+		if len(failedCmds.m) == 0 {
+			break
+		}
+		cmdsMap = failedCmds
+	}
+
+	return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return err
+	}
+
+	if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+		for _, cmd := range cmds {
+			slot := c.cmdSlot(cmd)
+			node, err := c.slotReadOnlyNode(state, slot)
+			if err != nil {
+				return err
+			}
+			cmdsMap.Add(node, cmd)
+		}
+		return nil
+	}
+
+	for _, cmd := range cmds {
+		slot := c.cmdSlot(cmd)
+		node, err := state.slotMasterNode(slot)
+		if err != nil {
+			return err
+		}
+		cmdsMap.Add(node, cmd)
+	}
+	return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+	for _, cmd := range cmds {
+		cmdInfo := c.cmdInfo(cmd.Name())
+		if cmdInfo == nil || !cmdInfo.ReadOnly {
+			return false
+		}
+	}
+	return true
+}
+
+func (c *ClusterClient) _processPipelineNode(
+	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			})
+			if err != nil {
+				return err
+			}
+
+			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
+			})
+		})
+	})
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+	ctx context.Context,
+	node *clusterNode,
+	rd *proto.Reader,
+	cmds []Cmder,
+	failedCmds *cmdsMap,
+) error {
+	for _, cmd := range cmds {
+		err := cmd.readReply(rd)
+		cmd.SetErr(err)
+
+		if err == nil {
+			continue
+		}
+
+		if c.checkMovedErr(ctx, cmd, err, failedCmds) {
+			continue
+		}
+
+		if c.opt.ReadOnly && isLoadingError(err) {
+			node.MarkAsFailing()
+			return err
+		}
+		if isRedisError(err) {
+			continue
+		}
+		return err
+	}
+	return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+	ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+	moved, ask, addr := isMovedError(err)
+	if !moved && !ask {
+		return false
+	}
+
+	node, err := c.nodes.GetOrCreate(addr)
+	if err != nil {
+		return false
+	}
+
+	if moved {
+		c.state.LazyReload()
+		failedCmds.Add(node, cmd)
+		return true
+	}
+
+	if ask {
+		failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+		return true
+	}
+
+	panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
+}
+
+func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	// Trim multi .. exec.
+	cmds = cmds[1 : len(cmds)-1]
+
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	cmdsMap := c.mapCmdsBySlot(cmds)
+	for slot, cmds := range cmdsMap {
+		node, err := state.slotMasterNode(slot)
+		if err != nil {
+			setCmdsErr(cmds, err)
+			continue
+		}
+
+		cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+		for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+			if attempt > 0 {
+				if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+					setCmdsErr(cmds, err)
+					return err
+				}
+			}
+
+			failedCmds := newCmdsMap()
+			var wg sync.WaitGroup
+
+			for node, cmds := range cmdsMap {
+				wg.Add(1)
+				go func(node *clusterNode, cmds []Cmder) {
+					defer wg.Done()
+
+					err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
+					if err == nil {
+						return
+					}
+
+					if attempt < c.opt.MaxRedirects {
+						if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+							setCmdsErr(cmds, err)
+						}
+					} else {
+						setCmdsErr(cmds, err)
+					}
+				}(node, cmds)
+			}
+
+			wg.Wait()
+			if len(failedCmds.m) == 0 {
+				break
+			}
+			cmdsMap = failedCmds.m
+		}
+	}
+
+	return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+	cmdsMap := make(map[int][]Cmder)
+	for _, cmd := range cmds {
+		slot := c.cmdSlot(cmd)
+		cmdsMap[slot] = append(cmdsMap[slot], cmd)
+	}
+	return cmdsMap
+}
+
+func (c *ClusterClient) _processTxPipelineNode(
+	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			})
+			if err != nil {
+				return err
+			}
+
+			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				statusCmd := cmds[0].(*StatusCmd)
+				// Trim multi and exec.
+				cmds = cmds[1 : len(cmds)-1]
+
+				err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
+				if err != nil {
+					moved, ask, addr := isMovedError(err)
+					if moved || ask {
+						return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
+					}
+					return err
+				}
+
+				return pipelineReadCmds(rd, cmds)
+			})
+		})
+	})
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+	ctx context.Context,
+	rd *proto.Reader,
+	statusCmd *StatusCmd,
+	cmds []Cmder,
+	failedCmds *cmdsMap,
+) error {
+	// Parse queued replies.
+	if err := statusCmd.readReply(rd); err != nil {
+		return err
+	}
+
+	for _, cmd := range cmds {
+		err := statusCmd.readReply(rd)
+		if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
+			continue
+		}
+		return err
+	}
+
+	// Parse number of replies.
+	line, err := rd.ReadLine()
+	if err != nil {
+		if err == Nil {
+			err = TxFailedErr
+		}
+		return err
+	}
+
+	switch line[0] {
+	case proto.ErrorReply:
+		return proto.ParseErrorReply(line)
+	case proto.ArrayReply:
+		// ok
+	default:
+		return fmt.Errorf("redis: expected '*', but got line %q", line)
+	}
+
+	return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+	ctx context.Context, cmds []Cmder,
+	moved, ask bool,
+	addr string,
+	failedCmds *cmdsMap,
+) error {
+	node, err := c.nodes.GetOrCreate(addr)
+	if err != nil {
+		return err
+	}
+
+	if moved {
+		c.state.LazyReload()
+		for _, cmd := range cmds {
+			failedCmds.Add(node, cmd)
+		}
+		return nil
+	}
+
+	if ask {
+		for _, cmd := range cmds {
+			failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+		}
+		return nil
+	}
+
+	return nil
+}
+
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+	if len(keys) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one key")
+	}
+
+	slot := hashtag.Slot(keys[0])
+	for _, key := range keys[1:] {
+		if hashtag.Slot(key) != slot {
+			err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+			return err
+		}
+	}
+
+	node, err := c.slotMasterNode(ctx, slot)
+	if err != nil {
+		return err
+	}
+
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		err = node.Client.Watch(ctx, fn, keys...)
+		if err == nil {
+			break
+		}
+
+		moved, ask, addr := isMovedError(err)
+		if moved || ask {
+			node, err = c.nodes.GetOrCreate(addr)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+			if isReadOnly {
+				c.state.LazyReload()
+			}
+			node, err = c.slotMasterNode(ctx, slot)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if shouldRetry(err, true) {
+			continue
+		}
+
+		return err
+	}
+
+	return err
+}
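+
+// Editorial sketch (not part of the upstream sources): an optimistic increment
+// using Watch with a transactional pipeline, retried when the watched key is
+// modified concurrently. `rdb` and `ctx` are assumed as above; the key name is
+// illustrative.
+//
+//	const key = "counter"
+//	txf := func(tx *redis.Tx) error {
+//		n, err := tx.Get(ctx, key).Int()
+//		if err != nil && err != redis.Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+//			pipe.Set(ctx, key, n+1, 0)
+//			return nil
+//		})
+//		return err
+//	}
+//	for i := 0; i < 10; i++ {
+//		err := rdb.Watch(ctx, txf, key)
+//		if err != redis.TxFailedErr {
+//			break // success, or an error that retrying will not fix
+//		}
+//		// Another client changed the key between GET and EXEC; try again.
+//	}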
+
+func (c *ClusterClient) pubSub() *PubSub {
+	var node *clusterNode
+	pubsub := &PubSub{
+		opt: c.opt.clientOptions(),
+
+		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+			if node != nil {
+				panic("node != nil")
+			}
+
+			var err error
+			if len(channels) > 0 {
+				slot := hashtag.Slot(channels[0])
+				node, err = c.slotMasterNode(ctx, slot)
+			} else {
+				node, err = c.nodes.Random()
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			cn, err := node.Client.newConn(context.TODO())
+			if err != nil {
+				node = nil
+
+				return nil, err
+			}
+
+			return cn, nil
+		},
+		closeConn: func(cn *pool.Conn) error {
+			err := node.Client.connPool.CloseConn(cn)
+			node = nil
+			return err
+		},
+	}
+	pubsub.init()
+
+	return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(ctx, channels...)
+	}
+	return pubsub
+}
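+
+// Editorial sketch (not part of the upstream sources): consuming messages from a
+// cluster subscription. `rdb` and `ctx` are assumed as above; the channel name
+// is illustrative.
+//
+//	pubsub := rdb.Subscribe(ctx, "events")
+//	defer pubsub.Close()
+//
+//	for msg := range pubsub.Channel() {
+//		fmt.Println(msg.Channel, msg.Payload)
+//	}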
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+	// Try 3 random nodes.
+	const nodeLimit = 3
+
+	addrs, err := c.nodes.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	var firstErr error
+
+	perm := rand.Perm(len(addrs))
+	if len(perm) > nodeLimit {
+		perm = perm[:nodeLimit]
+	}
+
+	for _, idx := range perm {
+		addr := addrs[idx]
+
+		node, err := c.nodes.GetOrCreate(addr)
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		info, err := node.Client.Command(ctx).Result()
+		if err == nil {
+			return info, nil
+		}
+		if firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	if firstErr == nil {
+		panic("not reached")
+	}
+	return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+	cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+	if err != nil {
+		return nil
+	}
+
+	info := cmdsInfo[name]
+	if info == nil {
+		internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+	}
+	return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+	args := cmd.Args()
+	if args[0] == "cluster" && args[1] == "getkeysinslot" {
+		return args[2].(int)
+	}
+
+	cmdInfo := c.cmdInfo(cmd.Name())
+	return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+	if pos == 0 {
+		return hashtag.RandomSlot()
+	}
+	firstKey := cmd.stringArg(pos)
+	return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(
+	ctx context.Context,
+	cmdInfo *CommandInfo,
+	slot int,
+) (*clusterNode, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
+		return c.slotReadOnlyNode(state, slot)
+	}
+	return state.slotMasterNode(slot)
+}
+
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+	if c.opt.RouteByLatency {
+		return state.slotClosestNode(slot)
+	}
+	if c.opt.RouteRandomly {
+		return state.slotRandomNode(slot)
+	}
+	return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return state.slotMasterNode(slot)
+}
+
+// SlaveForKey gets a client for a replica node that can run any command for the
+// given key. This is especially useful for running a Lua script that contains
+// only read-only commands on a replica: regular read-only Redis commands carry a
+// flag and are routed to replicas automatically when ClusterOptions.ReadOnly is
+// set to true, but scripts are not.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	slot := hashtag.Slot(key)
+	node, err := c.slotReadOnlyNode(state, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
+
+// MasterForKey returns a client to the master node for a particular key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+	slot := hashtag.Slot(key)
+	node, err := c.slotMasterNode(ctx, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
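+
+// Editorial sketch (not part of the upstream sources): picking an explicit node
+// for a key, for example to run a read-only Lua script on a replica. `rdb`,
+// `ctx`, the key and the script are illustrative assumptions.
+//
+//	replica, err := rdb.SlaveForKey(ctx, "user:1")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	val, err := replica.Eval(ctx, "return redis.call('GET', KEYS[1])", []string{"user:1"}).Result()
+//	if err != nil && err != redis.Nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(val)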
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+	for _, n := range nodes {
+		if n == node {
+			return nodes
+		}
+	}
+	return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+	for _, e := range es {
+		for _, s := range ss {
+			if s == e {
+				continue loop
+			}
+		}
+		ss = append(ss, e)
+	}
+	return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+	mu sync.Mutex
+	m  map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+	return &cmdsMap{
+		m: make(map[*clusterNode][]Cmder),
+	}
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+	m.mu.Lock()
+	m.m[node] = append(m.m[node], cmds...)
+	m.mu.Unlock()
+}

+ 109 - 0
vendor/github.com/go-redis/redis/v8/cluster_commands.go

@@ -0,0 +1,109 @@
+package redis
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+)
+
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "dbsize")
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		var size int64
+		err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+			n, err := master.DBSize(ctx).Result()
+			if err != nil {
+				return err
+			}
+			atomic.AddInt64(&size, n)
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		} else {
+			cmd.val = size
+		}
+		return nil
+	})
+	return cmd
+}
+
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+	cmd := NewStringCmd(ctx, "script", "load", script)
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		mu := &sync.Mutex{}
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			val, err := shard.ScriptLoad(ctx, script).Result()
+			if err != nil {
+				return err
+			}
+
+			mu.Lock()
+			if cmd.Val() == "" {
+				cmd.val = val
+			}
+			mu.Unlock()
+
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		}
+		return nil
+	})
+	return cmd
+}
+
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "script", "flush")
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			return shard.ScriptFlush(ctx).Err()
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		}
+		return nil
+	})
+	return cmd
+}
+
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+	args := make([]interface{}, 2+len(hashes))
+	args[0] = "script"
+	args[1] = "exists"
+	for i, hash := range hashes {
+		args[2+i] = hash
+	}
+	cmd := NewBoolSliceCmd(ctx, args...)
+
+	result := make([]bool, len(hashes))
+	for i := range result {
+		result[i] = true
+	}
+
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		mu := &sync.Mutex{}
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			val, err := shard.ScriptExists(ctx, hashes...).Result()
+			if err != nil {
+				return err
+			}
+
+			mu.Lock()
+			for i, v := range val {
+				result[i] = result[i] && v
+			}
+			mu.Unlock()
+
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		} else {
+			cmd.val = result
+		}
+		return nil
+	})
+	return cmd
+}
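+
+// Editorial sketch (not part of the upstream sources): loading a script on every
+// shard and calling it by SHA. `rdb` and `ctx` are assumed as in the earlier
+// sketches; the script body is illustrative.
+//
+//	sha, err := rdb.ScriptLoad(ctx, "return 1").Result()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	exists, err := rdb.ScriptExists(ctx, sha).Result() // true only if cached on every shard
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if exists[0] {
+//		n, _ := rdb.EvalSha(ctx, sha, nil).Int()
+//		fmt.Println(n) // 1
+//	}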

+ 3478 - 0
vendor/github.com/go-redis/redis/v8/command.go

@@ -0,0 +1,3478 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hscan"
+	"github.com/go-redis/redis/v8/internal/proto"
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+type Cmder interface {
+	Name() string
+	FullName() string
+	Args() []interface{}
+	String() string
+	stringArg(int) string
+	firstKeyPos() int8
+	SetFirstKeyPos(int8)
+
+	readTimeout() *time.Duration
+	readReply(rd *proto.Reader) error
+
+	SetErr(error)
+	Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+	for _, cmd := range cmds {
+		if cmd.Err() == nil {
+			cmd.SetErr(e)
+		}
+	}
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+	for _, cmd := range cmds {
+		if err := cmd.Err(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+	for _, cmd := range cmds {
+		if err := writeCmd(wr, cmd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+	return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+	if pos := cmd.firstKeyPos(); pos != 0 {
+		return int(pos)
+	}
+
+	switch cmd.Name() {
+	case "eval", "evalsha":
+		if cmd.stringArg(2) != "0" {
+			return 3
+		}
+
+		return 0
+	case "publish":
+		return 1
+	case "memory":
+		// https://github.com/redis/redis/issues/7493
+		if cmd.stringArg(1) == "usage" {
+			return 2
+		}
+	}
+
+	if info != nil {
+		return int(info.FirstKeyPos)
+	}
+	return 0
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+	b := make([]byte, 0, 64)
+
+	for i, arg := range cmd.Args() {
+		if i > 0 {
+			b = append(b, ' ')
+		}
+		b = internal.AppendArg(b, arg)
+	}
+
+	if err := cmd.Err(); err != nil {
+		b = append(b, ": "...)
+		b = append(b, err.Error()...)
+	} else if val != nil {
+		b = append(b, ": "...)
+		b = internal.AppendArg(b, val)
+	}
+
+	return internal.String(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+	ctx    context.Context
+	args   []interface{}
+	err    error
+	keyPos int8
+
+	_readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+	if len(cmd.args) == 0 {
+		return ""
+	}
+	// Cmd name must be lower cased.
+	return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+	switch name := cmd.Name(); name {
+	case "cluster", "command":
+		if len(cmd.args) == 1 {
+			return name
+		}
+		if s2, ok := cmd.args[1].(string); ok {
+			return name + " " + s2
+		}
+		return name
+	default:
+		return name
+	}
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+	return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+	if pos < 0 || pos >= len(cmd.args) {
+		return ""
+	}
+	arg := cmd.args[pos]
+	switch v := arg.(type) {
+	case string:
+		return v
+	default:
+		// TODO: consider using appendArg
+		return fmt.Sprint(v)
+	}
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+	return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+	cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+	cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+	return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+	return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+	cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+	baseCmd
+
+	val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+	return &Cmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *Cmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+	cmd.val = val
+}
+
+func (cmd *Cmd) Val() interface{} {
+	return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+	if cmd.err != nil {
+		return "", cmd.err
+	}
+	return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+	switch val := val.(type) {
+	case string:
+		return val, nil
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for String", val)
+		return "", err
+	}
+}
+
+func (cmd *Cmd) Int() (int, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return int(val), nil
+	case string:
+		return strconv.Atoi(val)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+	switch val := val.(type) {
+	case int64:
+		return val, nil
+	case string:
+		return strconv.ParseInt(val, 10, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+	switch val := val.(type) {
+	case int64:
+		return uint64(val), nil
+	case string:
+		return strconv.ParseUint(val, 10, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+	switch val := val.(type) {
+	case int64:
+		return float32(val), nil
+	case string:
+		f, err := strconv.ParseFloat(val, 32)
+		if err != nil {
+			return 0, err
+		}
+		return float32(f), nil
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+	switch val := val.(type) {
+	case int64:
+		return float64(val), nil
+	case string:
+		return strconv.ParseFloat(val, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+	if cmd.err != nil {
+		return false, cmd.err
+	}
+	return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+	switch val := val.(type) {
+	case int64:
+		return val != 0, nil
+	case string:
+		return strconv.ParseBool(val)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+		return false, err
+	}
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+	if cmd.err != nil {
+		return nil, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case []interface{}:
+		return val, nil
+	default:
+		return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+	}
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := make([]string, len(slice))
+	for i, iface := range slice {
+		val, err := toString(iface)
+		if err != nil {
+			return nil, err
+		}
+		ss[i] = val
+	}
+	return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	nums := make([]int64, len(slice))
+	for i, iface := range slice {
+		val, err := toInt64(iface)
+		if err != nil {
+			return nil, err
+		}
+		nums[i] = val
+	}
+	return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	nums := make([]uint64, len(slice))
+	for i, iface := range slice {
+		val, err := toUint64(iface)
+		if err != nil {
+			return nil, err
+		}
+		nums[i] = val
+	}
+	return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	floats := make([]float32, len(slice))
+	for i, iface := range slice {
+		val, err := toFloat32(iface)
+		if err != nil {
+			return nil, err
+		}
+		floats[i] = val
+	}
+	return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	floats := make([]float64, len(slice))
+	for i, iface := range slice {
+		val, err := toFloat64(iface)
+		if err != nil {
+			return nil, err
+		}
+		floats[i] = val
+	}
+	return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	bools := make([]bool, len(slice))
+	for i, iface := range slice {
+		val, err := toBool(iface)
+		if err != nil {
+			return nil, err
+		}
+		bools[i] = val
+	}
+	return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadReply(sliceParser)
+	return err
+}
+
+// sliceParser implements proto.MultiBulkParse.
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	vals := make([]interface{}, n)
+	for i := 0; i < len(vals); i++ {
+		v, err := rd.ReadReply(sliceParser)
+		if err != nil {
+			if err == Nil {
+				vals[i] = nil
+				continue
+			}
+			if err, ok := err.(proto.RedisError); ok {
+				vals[i] = err
+				continue
+			}
+			return nil, err
+		}
+		vals[i] = v
+	}
+	return vals, nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+	baseCmd
+
+	val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+	return &SliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+	cmd.val = val
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+	return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the reply into a destination struct. The keys
+// (taken from the command arguments) are matched to struct fields by the
+// `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+
+	// Pass the list of keys and values.
+	// Skip the first two args for: HMGET key
+	var args []interface{}
+	if cmd.args[0] == "hmget" {
+		args = cmd.args[2:]
+	} else {
+		// Otherwise, it's: MGET field field ...
+		args = cmd.args[1:]
+	}
+
+	return hscan.Scan(dst, args, cmd.val)
+}
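+
+// Editorial sketch (not part of the upstream sources): scanning an HMGET reply
+// into a struct via SliceCmd.Scan. `rdb` and `ctx` are assumed as elsewhere; the
+// key and field names are illustrative.
+//
+//	type user struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//
+//	var u user
+//	if err := rdb.HMGet(ctx, "user:1", "name", "age").Scan(&u); err != nil {
+//		log.Fatal(err)
+//	}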
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+	v, err := rd.ReadArrayReply(sliceParser)
+	if err != nil {
+		return err
+	}
+	cmd.val = v.([]interface{})
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+	baseCmd
+
+	val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+	return &StatusCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StatusCmd) SetVal(val string) {
+	cmd.val = val
+}
+
+func (cmd *StatusCmd) Val() string {
+	return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadString()
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+	baseCmd
+
+	val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+	return &IntCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *IntCmd) SetVal(val int64) {
+	cmd.val = val
+}
+
+func (cmd *IntCmd) Val() int64 {
+	return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+	return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadIntReply()
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+	baseCmd
+
+	val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+	return &IntSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+	cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+	return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]int64, n)
+		for i := 0; i < len(cmd.val); i++ {
+			num, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[i] = num
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+	baseCmd
+
+	val       time.Duration
+	precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+	return &DurationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		precision: precision,
+	}
+}
+
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+	cmd.val = val
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+	return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadIntReply()
+	if err != nil {
+		return err
+	}
+	switch n {
+	// -2 if the key does not exist
+	// -1 if the key exists but has no associated expire
+	case -2, -1:
+		cmd.val = time.Duration(n)
+	default:
+		cmd.val = time.Duration(n) * cmd.precision
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+	baseCmd
+
+	val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+	return &TimeCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *TimeCmd) SetVal(val time.Time) {
+	cmd.val = val
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+	return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d elements, expected 2", n)
+		}
+
+		sec, err := rd.ReadInt()
+		if err != nil {
+			return nil, err
+		}
+
+		microsec, err := rd.ReadInt()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val = time.Unix(sec, microsec*1000)
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+	baseCmd
+
+	val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+	return &BoolCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *BoolCmd) SetVal(val bool) {
+	cmd.val = val
+}
+
+func (cmd *BoolCmd) Val() bool {
+	return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
+	v, err := rd.ReadReply(nil)
+	// `SET key value NX` returns nil when key already exists. But
+	// `SETNX key value` returns bool (0/1). So convert nil to bool.
+	if err == Nil {
+		cmd.val = false
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+	switch v := v.(type) {
+	case int64:
+		cmd.val = v == 1
+		return nil
+	case string:
+		cmd.val = v == "OK"
+		return nil
+	default:
+		return fmt.Errorf("got %T, wanted int64 or string", v)
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+	baseCmd
+
+	val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+	return &StringCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringCmd) SetVal(val string) {
+	cmd.val = val
+}
+
+func (cmd *StringCmd) Val() string {
+	return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+	return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+	return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Bool() (bool, error) {
+	if cmd.err != nil {
+		return false, cmd.err
+	}
+	return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	f, err := strconv.ParseFloat(cmd.Val(), 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+	if cmd.err != nil {
+		return time.Time{}, cmd.err
+	}
+	return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+	return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadString()
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+	baseCmd
+
+	val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+	return &FloatCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *FloatCmd) SetVal(val float64) {
+	cmd.val = val
+}
+
+func (cmd *FloatCmd) Val() float64 {
+	return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadFloatReply()
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+	baseCmd
+
+	val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+	return &FloatSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+	cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+	return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]float64, n)
+		for i := 0; i < len(cmd.val); i++ {
+			switch num, err := rd.ReadFloatReply(); {
+			case err == Nil:
+				cmd.val[i] = 0
+			case err != nil:
+				return nil, err
+			default:
+				cmd.val[i] = num
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+	baseCmd
+
+	val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+	return &StringSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringSliceCmd) SetVal(val []string) {
+	cmd.val = val
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+	return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+	return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]string, n)
+		for i := 0; i < len(cmd.val); i++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmd.val[i] = ""
+			case err != nil:
+				return nil, err
+			default:
+				cmd.val[i] = s
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+	baseCmd
+
+	val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+	return &BoolSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+	cmd.val = val
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+	return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]bool, n)
+		for i := 0; i < len(cmd.val); i++ {
+			n, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[i] = n == 1
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+	baseCmd
+
+	val map[string]string
+}
+
+var _ Cmder = (*StringStringMapCmd)(nil)
+
+func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
+	return &StringStringMapCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
+	cmd.val = val
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+	return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to struct fields by the `redis:"field"` tag.
+func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+
+	strct, err := hscan.Struct(dest)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range cmd.val {
+		if err := strct.Scan(k, v); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
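+
+// Editorial sketch (not part of the upstream sources): scanning an HGETALL reply
+// into a struct via StringStringMapCmd.Scan. Assumptions as in the HMGET sketch
+// above.
+//
+//	type user struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//
+//	var u user
+//	if err := rdb.HGetAll(ctx, "user:1").Scan(&u); err != nil {
+//		log.Fatal(err)
+//	}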
+
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]string, n/2)
+		for i := int64(0); i < n; i += 2 {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			value, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[key] = value
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+	baseCmd
+
+	val map[string]int64
+}
+
+var _ Cmder = (*StringIntMapCmd)(nil)
+
+func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
+	return &StringIntMapCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
+	cmd.val = val
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+	return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]int64, n/2)
+		for i := int64(0); i < n; i += 2 {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			n, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[key] = n
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+	baseCmd
+
+	val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+	return &StringStructMapCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+	cmd.val = val
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+	return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]struct{}, n)
+		for i := int64(0); i < n; i++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[key] = struct{}{}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+	ID     string
+	Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+	baseCmd
+
+	val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+	return &XMessageSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+	cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+	return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+	var err error
+	cmd.val, err = readXMessageSlice(rd)
+	return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	msgs := make([]XMessage, n)
+	for i := 0; i < n; i++ {
+		var err error
+		msgs[i], err = readXMessage(rd)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return XMessage{}, err
+	}
+	if n != 2 {
+		return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
+	}
+
+	id, err := rd.ReadString()
+	if err != nil {
+		return XMessage{}, err
+	}
+
+	var values map[string]interface{}
+
+	v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+	if err != nil {
+		if err != proto.Nil {
+			return XMessage{}, err
+		}
+	} else {
+		values = v.(map[string]interface{})
+	}
+
+	return XMessage{
+		ID:     id,
+		Values: values,
+	}, nil
+}
+
+// stringInterfaceMapParser implements proto.MultiBulkParse.
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+	m := make(map[string]interface{}, n/2)
+	for i := int64(0); i < n; i += 2 {
+		key, err := rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		value, err := rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		m[key] = value
+	}
+	return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+	Stream   string
+	Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+	baseCmd
+
+	val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+	return &XStreamSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+	cmd.val = val
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+	return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]XStream, n)
+		for i := 0; i < len(cmd.val); i++ {
+			i := i
+			_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				if n != 2 {
+					return nil, fmt.Errorf("got %d, wanted 2", n)
+				}
+
+				stream, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				msgs, err := readXMessageSlice(rd)
+				if err != nil {
+					return nil, err
+				}
+
+				cmd.val[i] = XStream{
+					Stream:   stream,
+					Messages: msgs,
+				}
+				return nil, nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+	Count     int64
+	Lower     string
+	Higher    string
+	Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+	baseCmd
+	val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+	return &XPendingCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+	cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+	return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 4 {
+			return nil, fmt.Errorf("got %d, wanted 4", n)
+		}
+
+		count, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		lower, err := rd.ReadString()
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		higher, err := rd.ReadString()
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		cmd.val = &XPending{
+			Count:  count,
+			Lower:  lower,
+			Higher: higher,
+		}
+		_, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+			for i := int64(0); i < n; i++ {
+				_, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+					if n != 2 {
+						return nil, fmt.Errorf("got %d, wanted 2", n)
+					}
+
+					consumerName, err := rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+
+					consumerPending, err := rd.ReadInt()
+					if err != nil {
+						return nil, err
+					}
+
+					if cmd.val.Consumers == nil {
+						cmd.val.Consumers = make(map[string]int64)
+					}
+					cmd.val.Consumers[consumerName] = consumerPending
+
+					return nil, nil
+				})
+				if err != nil {
+					return nil, err
+				}
+			}
+			return nil, nil
+		})
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+	ID         string
+	Consumer   string
+	Idle       time.Duration
+	RetryCount int64
+}
+
+type XPendingExtCmd struct {
+	baseCmd
+	val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+	return &XPendingExtCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+	cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+	return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]XPendingExt, 0, n)
+		for i := int64(0); i < n; i++ {
+			_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				if n != 4 {
+					return nil, fmt.Errorf("got %d, wanted 4", n)
+				}
+
+				id, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				consumer, err := rd.ReadString()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				idle, err := rd.ReadIntReply()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				retryCount, err := rd.ReadIntReply()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				cmd.val = append(cmd.val, XPendingExt{
+					ID:         id,
+					Consumer:   consumer,
+					Idle:       time.Duration(idle) * time.Millisecond,
+					RetryCount: retryCount,
+				})
+				return nil, nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+	baseCmd
+
+	start string
+	val   []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+	return &XAutoClaimCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+	cmd.val = val
+	cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+	return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+	return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d, wanted 2", n)
+		}
+		var err error
+
+		cmd.start, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val, err = readXMessageSlice(rd)
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+	baseCmd
+
+	start string
+	val   []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+	return &XAutoClaimJustIDCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+	cmd.val = val
+	cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+	return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+	return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d, wanted 2", n)
+		}
+		var err error
+
+		cmd.start, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val = make([]string, nn)
+		for i := 0; i < nn; i++ {
+			cmd.val[i], err = rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+	baseCmd
+	val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+	Name    string
+	Pending int64
+	Idle    int64
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+	return &XInfoConsumersCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "consumers", stream, group},
+		},
+	}
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+	cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+	return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]XInfoConsumer, n)
+
+	for i := 0; i < n; i++ {
+		cmd.val[i], err = readXConsumerInfo(rd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
+	var consumer XInfoConsumer
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return consumer, err
+	}
+	if n != 6 {
+		return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
+	}
+
+	for i := 0; i < 3; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return consumer, err
+		}
+
+		val, err := rd.ReadString()
+		if err != nil {
+			return consumer, err
+		}
+
+		switch key {
+		case "name":
+			consumer.Name = val
+		case "pending":
+			consumer.Pending, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return consumer, err
+			}
+		case "idle":
+			consumer.Idle, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return consumer, err
+			}
+		default:
+			return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+		}
+	}
+
+	return consumer, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+	baseCmd
+	val []XInfoGroup
+}
+
+type XInfoGroup struct {
+	Name            string
+	Consumers       int64
+	Pending         int64
+	LastDeliveredID string
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+	return &XInfoGroupsCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "groups", stream},
+		},
+	}
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+	cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+	return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]XInfoGroup, n)
+
+	for i := 0; i < n; i++ {
+		cmd.val[i], err = readXGroupInfo(rd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
+	var group XInfoGroup
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return group, err
+	}
+	if n != 8 {
+		return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
+	}
+
+	for i := 0; i < 4; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return group, err
+		}
+
+		val, err := rd.ReadString()
+		if err != nil {
+			return group, err
+		}
+
+		switch key {
+		case "name":
+			group.Name = val
+		case "consumers":
+			group.Consumers, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return group, err
+			}
+		case "pending":
+			group.Pending, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return group, err
+			}
+		case "last-delivered-id":
+			group.LastDeliveredID = val
+		default:
+			return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
+		}
+	}
+
+	return group, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+	baseCmd
+	val *XInfoStream
+}
+
+type XInfoStream struct {
+	Length          int64
+	RadixTreeKeys   int64
+	RadixTreeNodes  int64
+	Groups          int64
+	LastGeneratedID string
+	FirstEntry      XMessage
+	LastEntry       XMessage
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+	return &XInfoStreamCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "stream", stream},
+		},
+	}
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+	cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+	return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+	v, err := rd.ReadReply(xStreamInfoParser)
+	if err != nil {
+		return err
+	}
+	cmd.val = v.(*XInfoStream)
+	return nil
+}
+
+func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+	if n != 14 {
+		return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply, "+
+			"wanted 14", n)
+	}
+	var info XInfoStream
+	for i := 0; i < 7; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		switch key {
+		case "length":
+			info.Length, err = rd.ReadIntReply()
+		case "radix-tree-keys":
+			info.RadixTreeKeys, err = rd.ReadIntReply()
+		case "radix-tree-nodes":
+			info.RadixTreeNodes, err = rd.ReadIntReply()
+		case "groups":
+			info.Groups, err = rd.ReadIntReply()
+		case "last-generated-id":
+			info.LastGeneratedID, err = rd.ReadString()
+		case "first-entry":
+			info.FirstEntry, err = readXMessage(rd)
+			if err == Nil {
+				err = nil
+			}
+		case "last-entry":
+			info.LastEntry, err = readXMessage(rd)
+			if err == Nil {
+				err = nil
+			}
+		default:
+			return nil, fmt.Errorf("redis: unexpected content %s "+
+				"in XINFO STREAM reply", key)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &info, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+	baseCmd
+	val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+	Length          int64
+	RadixTreeKeys   int64
+	RadixTreeNodes  int64
+	LastGeneratedID string
+	Entries         []XMessage
+	Groups          []XInfoStreamGroup
+}
+
+type XInfoStreamGroup struct {
+	Name            string
+	LastDeliveredID string
+	PelCount        int64
+	Pending         []XInfoStreamGroupPending
+	Consumers       []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+	ID            string
+	Consumer      string
+	DeliveryTime  time.Time
+	DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+	Name     string
+	SeenTime time.Time
+	PelCount int64
+	Pending  []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+	ID            string
+	DeliveryTime  time.Time
+	DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+	return &XInfoStreamFullCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+	cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+	return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	if n != 12 {
+		return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+			"wanted 12", n)
+	}
+
+	cmd.val = &XInfoStreamFull{}
+
+	for i := 0; i < 6; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return err
+		}
+
+		switch key {
+		case "length":
+			cmd.val.Length, err = rd.ReadIntReply()
+		case "radix-tree-keys":
+			cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
+		case "radix-tree-nodes":
+			cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
+		case "last-generated-id":
+			cmd.val.LastGeneratedID, err = rd.ReadString()
+		case "entries":
+			cmd.val.Entries, err = readXMessageSlice(rd)
+		case "groups":
+			cmd.val.Groups, err = readStreamGroups(rd)
+		default:
+			return fmt.Errorf("redis: unexpected content %s "+
+				"in XINFO STREAM reply", key)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+	groups := make([]XInfoStreamGroup, 0, n)
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 10 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 10", nn)
+		}
+
+		group := XInfoStreamGroup{}
+
+		for f := 0; f < 5; f++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch key {
+			case "name":
+				group.Name, err = rd.ReadString()
+			case "last-delivered-id":
+				group.LastDeliveredID, err = rd.ReadString()
+			case "pel-count":
+				group.PelCount, err = rd.ReadIntReply()
+			case "pending":
+				group.Pending, err = readXInfoStreamGroupPending(rd)
+			case "consumers":
+				group.Consumers, err = readXInfoStreamConsumers(rd)
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM reply", key)
+			}
+
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		groups = append(groups, group)
+	}
+
+	return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	pending := make([]XInfoStreamGroupPending, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 4 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 4", nn)
+		}
+
+		p := XInfoStreamGroupPending{}
+
+		p.ID, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		p.Consumer, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		delivery, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+		p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+		p.DeliveryCount, err = rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		pending = append(pending, p)
+	}
+
+	return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	consumers := make([]XInfoStreamConsumer, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 8 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 8", nn)
+		}
+
+		c := XInfoStreamConsumer{}
+
+		for f := 0; f < 4; f++ {
+			cKey, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch cKey {
+			case "name":
+				c.Name, err = rd.ReadString()
+			case "seen-time":
+				seen, err := rd.ReadIntReply()
+				if err != nil {
+					return nil, err
+				}
+				c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
+			case "pel-count":
+				c.PelCount, err = rd.ReadIntReply()
+			case "pending":
+				pendingNumber, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+
+				c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+				for pn := 0; pn < pendingNumber; pn++ {
+					nn, err := rd.ReadArrayLen()
+					if err != nil {
+						return nil, err
+					}
+					if nn != 3 {
+						return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply, "+
+							"wanted 3", nn)
+					}
+
+					p := XInfoStreamConsumerPending{}
+
+					p.ID, err = rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+
+					delivery, err := rd.ReadIntReply()
+					if err != nil {
+						return nil, err
+					}
+					p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+					p.DeliveryCount, err = rd.ReadIntReply()
+					if err != nil {
+						return nil, err
+					}
+
+					c.Pending = append(c.Pending, p)
+				}
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM reply", cKey)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+		consumers = append(consumers, c)
+	}
+
+	return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+	baseCmd
+
+	val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+	return &ZSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+	cmd.val = val
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+	return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]Z, n/2)
+		for i := 0; i < len(cmd.val); i++ {
+			member, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			score, err := rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[i] = Z{
+				Member: member,
+				Score:  score,
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+	baseCmd
+
+	val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+	return &ZWithKeyCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+	cmd.val = val
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+	return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 3 {
+			return nil, fmt.Errorf("got %d elements, expected 3", n)
+		}
+
+		cmd.val = &ZWithKey{}
+		var err error
+
+		cmd.val.Key, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val.Member, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val.Score, err = rd.ReadFloatReply()
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+	baseCmd
+
+	page   []string
+	cursor uint64
+
+	process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+	return &ScanCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		process: process,
+	}
+}
+
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+	cmd.page = page
+	cmd.cursor = cursor
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+	return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+	return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+	return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.page, cmd.cursor, err = rd.ReadScanReply()
+	return err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+	return &ScanIterator{
+		cmd: cmd,
+	}
+}
+
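For reference, a minimal usage sketch of the iterator above (not part of the vendored file; rdb and ctx stand for an already-initialized *redis.Client and a context.Context):

	iter := rdb.Scan(ctx, 0, "user:*", 0).Iterator()
	for iter.Next(ctx) {
		fmt.Println(iter.Val()) // each key matching the pattern
	}
	if err := iter.Err(); err != nil {
		panic(err)
	}

The iterator issues further SCAN calls transparently as the cursor advances, so the loop sees every matching key without manual cursor handling.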
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+	ID   string
+	Addr string
+}
+
+type ClusterSlot struct {
+	Start int
+	End   int
+	Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+	baseCmd
+
+	val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+	return &ClusterSlotsCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+	cmd.val = val
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+	return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]ClusterSlot, n)
+		for i := 0; i < len(cmd.val); i++ {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n < 2 {
+				err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+				return nil, err
+			}
+
+			start, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			end, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			nodes := make([]ClusterNode, n-2)
+			for j := 0; j < len(nodes); j++ {
+				n, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+				if n != 2 && n != 3 {
+					err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+					return nil, err
+				}
+
+				ip, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				port, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				nodes[j].Addr = net.JoinHostPort(ip, port)
+
+				if n == 3 {
+					id, err := rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+					nodes[j].ID = id
+				}
+			}
+
+			cmd.val[i] = ClusterSlot{
+				Start: int(start),
+				End:   int(end),
+				Nodes: nodes,
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add a geospatial location.
+type GeoLocation struct {
+	Name                      string
+	Longitude, Latitude, Dist float64
+	GeoHash                   int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query a geospatial index.
+type GeoRadiusQuery struct {
+	Radius float64
+	// Can be m, km, ft, or mi. Default is km.
+	Unit        string
+	WithCoord   bool
+	WithDist    bool
+	WithGeoHash bool
+	Count       int
+	// Can be ASC or DESC. Default is no sort order.
+	Sort      string
+	Store     string
+	StoreDist string
+}
+
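An illustrative sketch of how the two types above are used together (rdb, ctx, the key "drivers" and the coordinates are placeholders):

	rdb.GeoAdd(ctx, "drivers",
		&redis.GeoLocation{Name: "d1", Longitude: 13.361389, Latitude: 38.115556},
	)
	locs, err := rdb.GeoRadius(ctx, "drivers", 13.361389, 38.115556, &redis.GeoRadiusQuery{
		Radius:    200,
		Unit:      "km",
		WithCoord: true,
		WithDist:  true,
		Sort:      "ASC",
	}).Result()

locs is a []GeoLocation whose Dist and Longitude/Latitude fields are populated because WithDist and WithCoord were requested.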
+type GeoLocationCmd struct {
+	baseCmd
+
+	q         *GeoRadiusQuery
+	locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+	return &GeoLocationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: geoLocationArgs(q, args...),
+		},
+		q: q,
+	}
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+	args = append(args, q.Radius)
+	if q.Unit != "" {
+		args = append(args, q.Unit)
+	} else {
+		args = append(args, "km")
+	}
+	if q.WithCoord {
+		args = append(args, "withcoord")
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+	}
+	if q.WithGeoHash {
+		args = append(args, "withhash")
+	}
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+	}
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+	if q.Store != "" {
+		args = append(args, "store")
+		args = append(args, q.Store)
+	}
+	if q.StoreDist != "" {
+		args = append(args, "storedist")
+		args = append(args, q.StoreDist)
+	}
+	return args
+}
+
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+	cmd.locations = locations
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+	return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+	return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+	v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+	if err != nil {
+		return err
+	}
+	cmd.locations = v.([]GeoLocation)
+	return nil
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+	return func(rd *proto.Reader, n int64) (interface{}, error) {
+		locs := make([]GeoLocation, 0, n)
+		for i := int64(0); i < n; i++ {
+			v, err := rd.ReadReply(newGeoLocationParser(q))
+			if err != nil {
+				return nil, err
+			}
+			switch vv := v.(type) {
+			case string:
+				locs = append(locs, GeoLocation{
+					Name: vv,
+				})
+			case *GeoLocation:
+				// TODO: avoid copying
+				locs = append(locs, *vv)
+			default:
+				return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+			}
+		}
+		return locs, nil
+	}
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+	return func(rd *proto.Reader, n int64) (interface{}, error) {
+		var loc GeoLocation
+		var err error
+
+		loc.Name, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		if q.WithDist {
+			loc.Dist, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		if q.WithGeoHash {
+			loc.GeoHash, err = rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		if q.WithCoord {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n != 2 {
+				return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+			}
+
+			loc.Longitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+			loc.Latitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return &loc, nil
+	}
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is used to build the query for the GEOSEARCH and GEOSEARCHSTORE commands.
+type GeoSearchQuery struct {
+	Member string
+
+	// Longitude and Latitude when using the FromLonLat option.
+	Longitude float64
+	Latitude  float64
+
+	// Distance and unit when using ByRadius option.
+	// Can use m, km, ft, or mi. Default is km.
+	Radius     float64
+	RadiusUnit string
+
+	// Width, height and unit when using the ByBox option.
+	// Can be m, km, ft, or mi. Default is km.
+	BoxWidth  float64
+	BoxHeight float64
+	BoxUnit   string
+
+	// Can be ASC or DESC. Default is no sort order.
+	Sort     string
+	Count    int
+	CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+	GeoSearchQuery
+
+	WithCoord bool
+	WithDist  bool
+	WithHash  bool
+}
+
+type GeoSearchStoreQuery struct {
+	GeoSearchQuery
+
+	// When using the StoreDist option, the command stores the items in a
+	// sorted set populated with their distance from the center of the circle or box,
+	// as a floating-point number, in the same unit specified for that shape.
+	StoreDist bool
+}
+
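A sketch of GeoSearchStore with StoreDist enabled (requires Redis >= 6.2; rdb, ctx and the key names "src"/"dst" are placeholders):

	stored, err := rdb.GeoSearchStore(ctx, "src", "dst", &redis.GeoSearchStoreQuery{
		GeoSearchQuery: redis.GeoSearchQuery{
			Longitude:  15.0,
			Latitude:   37.0,
			Radius:     200,
			RadiusUnit: "km",
		},
		StoreDist: true, // destination members are scored by distance, not by geohash
	}).Result()

stored reports how many members were written to "dst".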
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+	if q.WithCoord {
+		args = append(args, "withcoord")
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+	}
+	if q.WithHash {
+		args = append(args, "withhash")
+	}
+
+	return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+	if q.Member != "" {
+		args = append(args, "frommember", q.Member)
+	} else {
+		args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+	}
+
+	if q.Radius > 0 {
+		if q.RadiusUnit == "" {
+			q.RadiusUnit = "km"
+		}
+		args = append(args, "byradius", q.Radius, q.RadiusUnit)
+	} else {
+		if q.BoxUnit == "" {
+			q.BoxUnit = "km"
+		}
+		args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+	}
+
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+		if q.CountAny {
+			args = append(args, "any")
+		}
+	}
+
+	return args
+}
+
+type GeoSearchLocationCmd struct {
+	baseCmd
+
+	opt *GeoSearchLocationQuery
+	val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+	ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+	return &GeoSearchLocationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		opt: opt,
+	}
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+	cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+	return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]GeoLocation, n)
+	for i := 0; i < n; i++ {
+		_, err = rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+
+		var loc GeoLocation
+
+		loc.Name, err = rd.ReadString()
+		if err != nil {
+			return err
+		}
+		if cmd.opt.WithDist {
+			loc.Dist, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithHash {
+			loc.GeoHash, err = rd.ReadIntReply()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithCoord {
+			nn, err := rd.ReadArrayLen()
+			if err != nil {
+				return err
+			}
+			if nn != 2 {
+				return fmt.Errorf("got %d coordinates, expected 2", nn)
+			}
+
+			loc.Longitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+			loc.Latitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+		}
+
+		cmd.val[i] = loc
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+	Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+	baseCmd
+
+	val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+	return &GeoPosCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+	cmd.val = val
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+	return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]*GeoPos, n)
+		for i := 0; i < len(cmd.val); i++ {
+			i := i
+			_, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				longitude, err := rd.ReadFloatReply()
+				if err != nil {
+					return nil, err
+				}
+
+				latitude, err := rd.ReadFloatReply()
+				if err != nil {
+					return nil, err
+				}
+
+				cmd.val[i] = &GeoPos{
+					Longitude: longitude,
+					Latitude:  latitude,
+				}
+				return nil, nil
+			})
+			if err != nil {
+				if err == Nil {
+					cmd.val[i] = nil
+					continue
+				}
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+	Name        string
+	Arity       int8
+	Flags       []string
+	ACLFlags    []string
+	FirstKeyPos int8
+	LastKeyPos  int8
+	StepCount   int8
+	ReadOnly    bool
+}
+
+type CommandsInfoCmd struct {
+	baseCmd
+
+	val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+	return &CommandsInfoCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+	cmd.val = val
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+	return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]*CommandInfo, n)
+		for i := int64(0); i < n; i++ {
+			v, err := rd.ReadReply(commandInfoParser)
+			if err != nil {
+				return nil, err
+			}
+			vv := v.(*CommandInfo)
+			cmd.val[vv.Name] = vv
+		}
+		return nil, nil
+	})
+	return err
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+	const numArgRedis5 = 6
+	const numArgRedis6 = 7
+
+	switch n {
+	case numArgRedis5, numArgRedis6:
+		// continue
+	default:
+		return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6 or 7", n)
+	}
+
+	var cmd CommandInfo
+	var err error
+
+	cmd.Name, err = rd.ReadString()
+	if err != nil {
+		return nil, err
+	}
+
+	arity, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.Arity = int8(arity)
+
+	_, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.Flags = make([]string, n)
+		for i := 0; i < len(cmd.Flags); i++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmd.Flags[i] = ""
+			case err != nil:
+				return nil, err
+			default:
+				cmd.Flags[i] = s
+			}
+		}
+		return nil, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	firstKeyPos, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.FirstKeyPos = int8(firstKeyPos)
+
+	lastKeyPos, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.LastKeyPos = int8(lastKeyPos)
+
+	stepCount, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.StepCount = int8(stepCount)
+
+	for _, flag := range cmd.Flags {
+		if flag == "readonly" {
+			cmd.ReadOnly = true
+			break
+		}
+	}
+
+	if n == numArgRedis5 {
+		return &cmd, nil
+	}
+
+	_, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.ACLFlags = make([]string, n)
+		for i := 0; i < len(cmd.ACLFlags); i++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmd.ACLFlags[i] = ""
+			case err != nil:
+				return nil, err
+			default:
+				cmd.ACLFlags[i] = s
+			}
+		}
+		return nil, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &cmd, nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+	fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+	once internal.Once
+	cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+	return &cmdsInfoCache{
+		fn: fn,
+	}
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+	err := c.once.Do(func() error {
+		cmds, err := c.fn(ctx)
+		if err != nil {
+			return err
+		}
+
+		// Extensions have cmd names in upper case. Convert them to lower case.
+		for k, v := range cmds {
+			lower := internal.ToLower(k)
+			if lower != k {
+				cmds[lower] = v
+			}
+		}
+
+		c.cmds = cmds
+		return nil
+	})
+	return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+	ID       int64
+	Time     time.Time
+	Duration time.Duration
+	Args     []string
+	// The following optional fields are emitted only by Redis 4.0 or greater:
+	// https://redis.io/commands/slowlog#output-format
+	ClientAddr string
+	ClientName string
+}
+
+type SlowLogCmd struct {
+	baseCmd
+
+	val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+	return &SlowLogCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+	cmd.val = val
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+	return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *SlowLogCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]SlowLog, n)
+		for i := 0; i < len(cmd.val); i++ {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n < 4 {
+				err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
+				return nil, err
+			}
+
+			id, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			createdAt, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			createdAtTime := time.Unix(createdAt, 0)
+
+			costs, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			costsDuration := time.Duration(costs) * time.Microsecond
+
+			cmdLen, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if cmdLen < 1 {
+				err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+				return nil, err
+			}
+
+			cmdString := make([]string, cmdLen)
+			for i := 0; i < cmdLen; i++ {
+				cmdString[i], err = rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			var address, name string
+			for i := 4; i < n; i++ {
+				str, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+				if i == 4 {
+					address = str
+				} else if i == 5 {
+					name = str
+				}
+			}
+
+			cmd.val[i] = SlowLog{
+				ID:         id,
+				Time:       createdAtTime,
+				Duration:   costsDuration,
+				Args:       cmdString,
+				ClientAddr: address,
+				ClientName: name,
+			}
+		}
+		return nil, nil
+	})
+	return err
+}

+ 3475 - 0
vendor/github.com/go-redis/redis/v8/commands.go

@@ -0,0 +1,3475 @@
+package redis
+
+import (
+	"context"
+	"errors"
+	"io"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+)
+
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires redis-server version >= 6.0;
+// otherwise you will receive an error: (error) ERR syntax error.
+// For example:
+//
+//    rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
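A self-contained sketch of KeepTTL in use (illustrative only; the address, key and values are placeholders, and error handling is omitted):

	package main

	import (
		"context"
		"time"

		"github.com/go-redis/redis/v8"
	)

	func main() {
		ctx := context.Background()
		rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

		// The first write sets the value with a 10 minute TTL.
		rdb.Set(ctx, "session:42", "payload", 10*time.Minute)

		// A later overwrite keeps whatever TTL remains (Redis >= 6.0).
		rdb.Set(ctx, "session:42", "updated payload", redis.KeepTTL)
	}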
+func usePrecise(dur time.Duration) bool {
+	return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+	if dur > 0 && dur < time.Millisecond {
+		internal.Logger.Printf(
+			ctx,
+			"specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+			dur, time.Millisecond,
+		)
+		return 1
+	}
+	return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+	if dur > 0 && dur < time.Second {
+		internal.Logger.Printf(
+			ctx,
+			"specified duration is %s, but minimal supported value is %s - truncating to 1s",
+			dur, time.Second,
+		)
+		return 1
+	}
+	return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+	if len(src) == 1 {
+		return appendArg(dst, src[0])
+	}
+
+	dst = append(dst, src...)
+	return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+	switch arg := arg.(type) {
+	case []string:
+		for _, s := range arg {
+			dst = append(dst, s)
+		}
+		return dst
+	case []interface{}:
+		dst = append(dst, arg...)
+		return dst
+	case map[string]interface{}:
+		for k, v := range arg {
+			dst = append(dst, k, v)
+		}
+		return dst
+	case map[string]string:
+		for k, v := range arg {
+			dst = append(dst, k, v)
+		}
+		return dst
+	default:
+		return append(dst, arg)
+	}
+}
+
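Because appendArgs/appendArg flatten a single map or slice argument into the command, variadic commands such as HSet accept a map directly (sketch; rdb and ctx are assumed):

	// Equivalent to rdb.HSet(ctx, "user:1", "name", "Ada", "age", 36),
	// though the field order depends on map iteration order.
	rdb.HSet(ctx, "user:1", map[string]interface{}{"name": "Ada", "age": 36})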
+type Cmdable interface {
+	Pipeline() Pipeliner
+	Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+	TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+	TxPipeline() Pipeliner
+
+	Command(ctx context.Context) *CommandsInfoCmd
+	ClientGetName(ctx context.Context) *StringCmd
+	Echo(ctx context.Context, message interface{}) *StringCmd
+	Ping(ctx context.Context) *StatusCmd
+	Quit(ctx context.Context) *StatusCmd
+	Del(ctx context.Context, keys ...string) *IntCmd
+	Unlink(ctx context.Context, keys ...string) *IntCmd
+	Dump(ctx context.Context, key string) *StringCmd
+	Exists(ctx context.Context, keys ...string) *IntCmd
+	Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+	ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	Keys(ctx context.Context, pattern string) *StringSliceCmd
+	Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+	Move(ctx context.Context, key string, db int) *BoolCmd
+	ObjectRefCount(ctx context.Context, key string) *IntCmd
+	ObjectEncoding(ctx context.Context, key string) *StringCmd
+	ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+	Persist(ctx context.Context, key string) *BoolCmd
+	PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+	PTTL(ctx context.Context, key string) *DurationCmd
+	RandomKey(ctx context.Context) *StringCmd
+	Rename(ctx context.Context, key, newkey string) *StatusCmd
+	RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+	Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+	RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+	Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+	SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+	SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+	Touch(ctx context.Context, keys ...string) *IntCmd
+	TTL(ctx context.Context, key string) *DurationCmd
+	Type(ctx context.Context, key string) *StatusCmd
+	Append(ctx context.Context, key, value string) *IntCmd
+	Decr(ctx context.Context, key string) *IntCmd
+	DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+	Get(ctx context.Context, key string) *StringCmd
+	GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+	GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+	GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+	GetDel(ctx context.Context, key string) *StringCmd
+	Incr(ctx context.Context, key string) *IntCmd
+	IncrBy(ctx context.Context, key string, value int64) *IntCmd
+	IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+	MGet(ctx context.Context, keys ...string) *SliceCmd
+	MSet(ctx context.Context, values ...interface{}) *StatusCmd
+	MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+	Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+	// TODO: rename to SetEx
+	SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+	StrLen(ctx context.Context, key string) *IntCmd
+	Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
+
+	GetBit(ctx context.Context, key string, offset int64) *IntCmd
+	SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+	BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+	BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+	BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+	BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+	BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+	BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+	BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
+
+	Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+	ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
+	SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+	HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+	ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+
+	HDel(ctx context.Context, key string, fields ...string) *IntCmd
+	HExists(ctx context.Context, key, field string) *BoolCmd
+	HGet(ctx context.Context, key, field string) *StringCmd
+	HGetAll(ctx context.Context, key string) *StringStringMapCmd
+	HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+	HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+	HKeys(ctx context.Context, key string) *StringSliceCmd
+	HLen(ctx context.Context, key string) *IntCmd
+	HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+	HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+	HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+	HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+	HVals(ctx context.Context, key string) *StringSliceCmd
+	HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
+
+	BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+	LIndex(ctx context.Context, key string, index int64) *StringCmd
+	LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+	LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+	LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+	LLen(ctx context.Context, key string) *IntCmd
+	LPop(ctx context.Context, key string) *StringCmd
+	LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+	LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+	LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+	LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+	LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+	LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+	LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+	LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+	LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+	RPop(ctx context.Context, key string) *StringCmd
+	RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+	RPopLPush(ctx context.Context, source, destination string) *StringCmd
+	RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+	RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+	LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+	BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
+
+	SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+	SCard(ctx context.Context, key string) *IntCmd
+	SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+	SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+	SInter(ctx context.Context, keys ...string) *StringSliceCmd
+	SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+	SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+	SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
+	SMembers(ctx context.Context, key string) *StringSliceCmd
+	SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+	SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+	SPop(ctx context.Context, key string) *StringCmd
+	SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+	SRandMember(ctx context.Context, key string) *StringCmd
+	SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+	SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+	SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+	SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+	XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+	XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+	XLen(ctx context.Context, stream string) *IntCmd
+	XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+	XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+	XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+	XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+	XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+	XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+	XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+	XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+	XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+	XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+	XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+	XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+	XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+	XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+	XPending(ctx context.Context, stream, group string) *XPendingCmd
+	XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+	XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+	XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+	XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+	XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+
+	// TODO: remove XTrim and XTrimApprox in v9.
+	XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+	XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+	XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
+	XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+	XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+	XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+	XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
+
+	BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+	BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+
+	// TODO: remove
+	//		ZAddCh
+	//		ZIncr
+	//		ZAddNXCh
+	//		ZAddXXCh
+	//		ZIncrNX
+	//		ZIncrXX
+	// 	in v9.
+	// 	use ZAddArgs and ZAddArgsIncr.
+
+	ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+	ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
+	ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
+	ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
+	ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
+	ZCard(ctx context.Context, key string) *IntCmd
+	ZCount(ctx context.Context, key, min, max string) *IntCmd
+	ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+	ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+	ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+	ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+	ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+	ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
+	ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+	ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+	ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+	ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+	ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+	ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+	ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+	ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+	ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+	ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
+	ZRank(ctx context.Context, key, member string) *IntCmd
+	ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+	ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+	ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+	ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+	ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+	ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+	ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+	ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+	ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+	ZRevRank(ctx context.Context, key, member string) *IntCmd
+	ZScore(ctx context.Context, key, member string) *FloatCmd
+	ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+	ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+	ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+	ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
+	ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+	ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+	ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+	PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+	PFCount(ctx context.Context, keys ...string) *IntCmd
+	PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+
+	BgRewriteAOF(ctx context.Context) *StatusCmd
+	BgSave(ctx context.Context) *StatusCmd
+	ClientKill(ctx context.Context, ipPort string) *StatusCmd
+	ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+	ClientList(ctx context.Context) *StringCmd
+	ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+	ClientID(ctx context.Context) *IntCmd
+	ConfigGet(ctx context.Context, parameter string) *SliceCmd
+	ConfigResetStat(ctx context.Context) *StatusCmd
+	ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+	ConfigRewrite(ctx context.Context) *StatusCmd
+	DBSize(ctx context.Context) *IntCmd
+	FlushAll(ctx context.Context) *StatusCmd
+	FlushAllAsync(ctx context.Context) *StatusCmd
+	FlushDB(ctx context.Context) *StatusCmd
+	FlushDBAsync(ctx context.Context) *StatusCmd
+	Info(ctx context.Context, section ...string) *StringCmd
+	LastSave(ctx context.Context) *IntCmd
+	Save(ctx context.Context) *StatusCmd
+	Shutdown(ctx context.Context) *StatusCmd
+	ShutdownSave(ctx context.Context) *StatusCmd
+	ShutdownNoSave(ctx context.Context) *StatusCmd
+	SlaveOf(ctx context.Context, host, port string) *StatusCmd
+	Time(ctx context.Context) *TimeCmd
+	DebugObject(ctx context.Context, key string) *StringCmd
+	ReadOnly(ctx context.Context) *StatusCmd
+	ReadWrite(ctx context.Context) *StatusCmd
+	MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+	ScriptFlush(ctx context.Context) *StatusCmd
+	ScriptKill(ctx context.Context) *StatusCmd
+	ScriptLoad(ctx context.Context, script string) *StringCmd
+
+	Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+	PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+	PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
+	PubSubNumPat(ctx context.Context) *IntCmd
+
+	ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+	ClusterNodes(ctx context.Context) *StringCmd
+	ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+	ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+	ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+	ClusterResetSoft(ctx context.Context) *StatusCmd
+	ClusterResetHard(ctx context.Context) *StatusCmd
+	ClusterInfo(ctx context.Context) *StringCmd
+	ClusterKeySlot(ctx context.Context, key string) *IntCmd
+	ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+	ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+	ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+	ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+	ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+	ClusterSaveConfig(ctx context.Context) *StatusCmd
+	ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+	ClusterFailover(ctx context.Context) *StatusCmd
+	ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+	ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+
+	GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+	GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+	GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+	GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+	GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+	GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+	GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
+	GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+	GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+type StatefulCmdable interface {
+	Cmdable
+	Auth(ctx context.Context, password string) *StatusCmd
+	AuthACL(ctx context.Context, username, password string) *StatusCmd
+	Select(ctx context.Context, index int) *StatusCmd
+	SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+	ClientSetName(ctx context.Context, name string) *BoolCmd
+}
+
+var (
+	_ Cmdable = (*Client)(nil)
+	_ Cmdable = (*Tx)(nil)
+	_ Cmdable = (*Ring)(nil)
+	_ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "auth", password)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection as one of the users defined in the ACL list
+// when connecting to a Redis 6.0 or greater instance that is using the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "auth", username, password)
+	_ = c(ctx, cmd)
+	return cmd
+}
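+
+// A minimal usage sketch (illustrative only, not upstream documentation), assuming a
+// connection value named conn that implements StatefulCmdable (for example one obtained
+// via client.Conn) and a hypothetical ACL user "app-user" with password "app-pass":
+//
+//	if err := conn.AuthACL(ctx, "app-user", "app-pass").Err(); err != nil {
+//		// authentication failed: wrong credentials or no ACL support on the server
+//	}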
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+	cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "select", index)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "client", "setname", name)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+	cmd := NewCommandsInfoCmd(ctx, "command")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "client", "getname")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+	cmd := NewStringCmd(ctx, "echo", message)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "ping")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
+	panic("not implemented")
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "del"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "unlink"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "dump", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "exists"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "")
+}
+
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+	ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+	args := make([]interface{}, 3, 4)
+	args[0] = "expire"
+	args[1] = key
+	args[2] = formatSec(ctx, expiration)
+	if mode != "" {
+		args = append(args, mode)
+	}
+
+	cmd := NewBoolCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "keys", pattern)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"migrate",
+		host,
+		port,
+		key,
+		db,
+		formatMs(ctx, timeout),
+	)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "move", key, db)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "object", "refcount", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "object", "encoding", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+	cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "persist", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd(
+		ctx,
+		"pexpireat",
+		key,
+		tm.UnixNano()/int64(time.Millisecond),
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+	cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "randomkey")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "rename", key, newkey)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"restore",
+		key,
+		formatMs(ctx, ttl),
+		value,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"restore",
+		key,
+		formatMs(ctx, ttl),
+		value,
+		"replace",
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type Sort struct {
+	By            string
+	Offset, Count int64
+	Get           []string
+	Order         string
+	Alpha         bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+	args := []interface{}{"sort", key}
+	if sort.By != "" {
+		args = append(args, "by", sort.By)
+	}
+	if sort.Offset != 0 || sort.Count != 0 {
+		args = append(args, "limit", sort.Offset, sort.Count)
+	}
+	for _, get := range sort.Get {
+		args = append(args, "get", get)
+	}
+	if sort.Order != "" {
+		args = append(args, sort.Order)
+	}
+	if sort.Alpha {
+		args = append(args, "alpha")
+	}
+	return args
+}
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, sort.args(key)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
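+
+// A small usage sketch (assumptions: an initialized client named client, a list "mylist"
+// and hypothetical weight_* keys used for the BY pattern):
+//
+//	vals, err := client.Sort(ctx, "mylist", &Sort{
+//		By:     "weight_*",
+//		Offset: 0,
+//		Count:  10,
+//		Order:  "ASC",
+//	}).Result()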
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+	args := sort.args(key)
+	if store != "" {
+		args = append(args, "store", store)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+	cmd := NewSliceCmd(ctx, sort.args(key)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, len(keys)+1)
+	args[0] = "touch"
+	for i, key := range keys {
+		args[i+1] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+	cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "type", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+	cmd := NewIntCmd(ctx, "append", key, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "decr", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "decrby", key, decrement)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Get Redis `GET key` command. It returns the redis.Nil error when the key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "get", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+	cmd := NewStringCmd(ctx, "getrange", key, start, end)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+	cmd := NewStringCmd(ctx, "getset", key, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+	args := make([]interface{}, 0, 4)
+	args = append(args, "getex", key)
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == 0 {
+		args = append(args, "persist")
+	}
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
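+
+// Usage sketch (assuming an initialized client named client; the key name is illustrative):
+//
+//	// refresh the TTL while reading the value
+//	val, err := client.GetEx(ctx, "session:123", 30*time.Minute).Result()
+//	// or drop the TTL entirely (GETEX session:123 PERSIST)
+//	val, err = client.GetEx(ctx, "session:123", 0).Result()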
+
+// GetDel gets the value of key and deletes the key. Requires redis-server >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "getdel", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "incr", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "incrby", key, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "mget"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+//   - MSet("key1", "value1", "key2", "value2")
+//   - MSet([]string{"key1", "value1", "key2", "value2"})
+//   - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+	args := make([]interface{}, 1, 1+len(values))
+	args[0] = "mset"
+	args = appendArgs(args, values)
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// MSetNX is like SetNX but accepts multiple values:
+//   - MSetNX("key1", "value1", "key2", "value2")
+//   - MSetNX([]string{"key1", "value1", "key2", "value2"})
+//   - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+	args := make([]interface{}, 1, 1+len(values))
+	args[0] = "msetnx"
+	args = appendArgs(args, values)
+	cmd := NewBoolCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Set Redis `SET key value [expiration]` command.
+// Use expiration for `SETEX`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+	args := make([]interface{}, 3, 5)
+	args[0] = "set"
+	args[1] = key
+	args[2] = value
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
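+
+// Usage sketch (assuming an initialized client named client; key and value are illustrative):
+//
+//	// plain SET with a 10 second TTL
+//	err := client.Set(ctx, "key", "value", 10*time.Second).Err()
+//	// keep whatever TTL the key already has (redis-server >= 6.0)
+//	err = client.Set(ctx, "key", "value", KeepTTL).Err()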
+
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+	// Mode can be `NX` or `XX` or empty.
+	Mode string
+
+	// Zero `TTL` or `Expiration` means that the key has no expiration time.
+	TTL      time.Duration
+	ExpireAt time.Time
+
+	// When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+	Get bool
+
+	// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server >= 6.0,
+	// otherwise you will receive an error: (error) ERR syntax error.
+	KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+	args := []interface{}{"set", key, value}
+
+	if a.KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	if !a.ExpireAt.IsZero() {
+		args = append(args, "exat", a.ExpireAt.Unix())
+	}
+	if a.TTL > 0 {
+		if usePrecise(a.TTL) {
+			args = append(args, "px", formatMs(ctx, a.TTL))
+		} else {
+			args = append(args, "ex", formatSec(ctx, a.TTL))
+		}
+	}
+
+	if a.Mode != "" {
+		args = append(args, a.Mode)
+	}
+
+	if a.Get {
+		args = append(args, "get")
+	}
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
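+
+// Usage sketch (assuming an initialized client named client; key and value are illustrative):
+//
+//	// SET key value EX 3600 NX
+//	err := client.SetArgs(ctx, "key", "value", SetArgs{
+//		Mode: "NX",
+//		TTL:  time.Hour,
+//	}).Err()
+//	// a redis.Nil error here means NX did not apply because the key already existed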
+
+// SetEX Redis `SETEX key expiration value` command.
+func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	switch expiration {
+	case 0:
+		// Use old `SETNX` to support old Redis versions.
+		cmd = NewBoolCmd(ctx, "setnx", key, value)
+	case KeepTTL:
+		cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+	default:
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+		} else {
+			cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+		}
+	}
+
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	switch expiration {
+	case 0:
+		cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+	case KeepTTL:
+		cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+	default:
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+		} else {
+			cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+		}
+	}
+
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+	cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "strlen", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+	args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+	if replace {
+		args = append(args, "REPLACE")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "getbit", key, offset)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+	cmd := NewIntCmd(
+		ctx,
+		"setbit",
+		key,
+		offset,
+		value,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type BitCount struct {
+	Start, End int64
+}
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+	args := []interface{}{"bitcount", key}
+	if bitCount != nil {
+		args = append(
+			args,
+			bitCount.Start,
+			bitCount.End,
+		)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
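+
+// Usage sketch (assuming an initialized client named client; the key is illustrative):
+//
+//	// count the set bits in the whole string
+//	n, err := client.BitCount(ctx, "mykey", nil).Result()
+//	// count the set bits in bytes 0..5 only
+//	n, err = client.BitCount(ctx, "mykey", &BitCount{Start: 0, End: 5}).Result()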
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "bitop"
+	args[1] = op
+	args[2] = destKey
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+	return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+	return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+	return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+	return c.bitOp(ctx, "not", destKey, key)
+}
+
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+	args := make([]interface{}, 3+len(pos))
+	args[0] = "bitpos"
+	args[1] = key
+	args[2] = bit
+	switch len(pos) {
+	case 0:
+	case 1:
+		args[3] = pos[0]
+	case 2:
+		args[3] = pos[0]
+		args[4] = pos[1]
+	default:
+		panic("too many arguments")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
+	a := make([]interface{}, 0, 2+len(args))
+	a = append(a, "bitfield")
+	a = append(a, key)
+	a = append(a, args...)
+	cmd := NewIntSliceCmd(ctx, a...)
+	_ = c(ctx, cmd)
+	return cmd
+}
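+
+// Usage sketch (assuming an initialized client named client; the key is illustrative).
+// The arguments are passed through verbatim as BITFIELD sub-commands:
+//
+//	// BITFIELD mykey INCRBY u8 0 1 GET u8 0
+//	vals, err := client.BitField(ctx, "mykey", "incrby", "u8", 0, 1, "get", "u8", 0).Result()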
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
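+
+// Typical cursor loop (a sketch, assuming an initialized client named client and an
+// illustrative "prefix:*" match pattern):
+//
+//	var cursor uint64
+//	for {
+//		keys, next, err := client.Scan(ctx, cursor, "prefix:*", 100).Result()
+//		if err != nil {
+//			break
+//		}
+//		_ = keys // process this batch
+//		if next == 0 {
+//			break // iteration finished
+//		}
+//		cursor = next
+//	}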
+
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	if keyType != "" {
+		args = append(args, "type", keyType)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"sscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"hscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"zscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hdel"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "hexists", key, field)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+	cmd := NewStringCmd(ctx, "hget", key, field)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd(ctx, "hgetall", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "hkeys", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "hlen", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns an interface{} to distinguish between empty string and nil value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hmget"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HSet accepts values in following formats:
+//   - HSet("myhash", "key1", "value1", "key2", "value2")
+//   - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//   - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that Redis v4 or newer is required for multiple field/value pair support.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "hset"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "hmset"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewBoolCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "hvals", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HRandField returns random fields from the hash stored at key. Requires redis-server >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "hrandfield", key, count)
+	if withValues {
+		args = append(args, "withvalues")
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
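+
+// Usage sketch (assuming an initialized client named client and an illustrative hash "myhash"):
+//
+//	// two random field names
+//	fields, err := client.HRandField(ctx, "myhash", 2, false).Result()
+//	// two random field/value pairs, returned as a flat field, value, field, value slice
+//	pairs, err := client.HRandField(ctx, "myhash", 2, true).Result()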
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "blpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(ctx, timeout)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "brpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(keys)+1] = formatSec(ctx, timeout)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+	cmd := NewStringCmd(
+		ctx,
+		"brpoplpush",
+		source,
+		destination,
+		formatSec(ctx, timeout),
+	)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+	cmd := NewStringCmd(ctx, "lindex", key, index)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "llen", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "lpop", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type LPosArgs struct {
+	Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+	args := []interface{}{"lpos", key, value}
+	if a.Rank != 0 {
+		args = append(args, "rank", a.Rank)
+	}
+	if a.MaxLen != 0 {
+		args = append(args, "maxlen", a.MaxLen)
+	}
+
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+	args := []interface{}{"lpos", key, value, "count", count}
+	if a.Rank != 0 {
+		args = append(args, "rank", a.Rank)
+	}
+	if a.MaxLen != 0 {
+		args = append(args, "maxlen", a.MaxLen)
+	}
+	cmd := NewIntSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
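+
+// Usage sketch (assuming an initialized client named client and an illustrative list "mylist"):
+//
+//	// index of the first occurrence of "b"
+//	idx, err := client.LPos(ctx, "mylist", "b", LPosArgs{}).Result()
+//	// indexes of up to 3 occurrences, searching from the tail (negative rank)
+//	idxs, err := client.LPosCount(ctx, "mylist", "b", 3, LPosArgs{Rank: -1}).Result()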
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "lpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "lpushx"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(
+		ctx,
+		"lrange",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "lrem", key, count, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "lset", key, index, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"ltrim",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "rpop", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+	cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpushx"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+	cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BLMove(
+	ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+	cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "sadd"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "scard", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sdiff"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sdiffstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sinter"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sinterstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "sismember", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "smismember"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewBoolSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "smembers", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SMembersMap Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+	cmd := NewStringStructMapCmd(ctx, "smembers", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SPop Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "spop", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SPopN Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "spop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SRandMember Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "srandmember", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SRandMemberN Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "srem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sunion"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sunionstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+//   - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+//   - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+//   - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
+type XAddArgs struct {
+	Stream     string
+	NoMkStream bool
+	MaxLen     int64 // MAXLEN N
+
+	// Deprecated: use MaxLen+Approx, remove in v9.
+	MaxLenApprox int64 // MAXLEN ~ N
+
+	MinID string
+	// Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+	Approx bool
+	Limit  int64
+	ID     string
+	Values interface{}
+}
+
+// XAdd Note: the a.Limit option is affected by a known redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+	args := make([]interface{}, 0, 11)
+	args = append(args, "xadd", a.Stream)
+	if a.NoMkStream {
+		args = append(args, "nomkstream")
+	}
+	switch {
+	case a.MaxLen > 0:
+		if a.Approx {
+			args = append(args, "maxlen", "~", a.MaxLen)
+		} else {
+			args = append(args, "maxlen", a.MaxLen)
+		}
+	case a.MaxLenApprox > 0:
+		// TODO remove in v9.
+		args = append(args, "maxlen", "~", a.MaxLenApprox)
+	case a.MinID != "":
+		if a.Approx {
+			args = append(args, "minid", "~", a.MinID)
+		} else {
+			args = append(args, "minid", a.MinID)
+		}
+	}
+	if a.Limit > 0 {
+		args = append(args, "limit", a.Limit)
+	}
+	if a.ID != "" {
+		args = append(args, a.ID)
+	} else {
+		args = append(args, "*")
+	}
+	args = appendArg(args, a.Values)
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
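+
+// Usage sketch (assuming an initialized client named client; stream and field names are illustrative):
+//
+//	// XADD events MAXLEN ~ 1000 * type click user 42
+//	id, err := client.XAdd(ctx, &XAddArgs{
+//		Stream: "events",
+//		MaxLen: 1000,
+//		Approx: true,
+//		Values: map[string]interface{}{"type": "click", "user": "42"},
+//	}).Result()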
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+	args := []interface{}{"xdel", stream}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xlen", stream)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XReadArgs struct {
+	Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+	Count   int64
+	Block   time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+	args := make([]interface{}, 0, 6+len(a.Streams))
+	args = append(args, "xread")
+
+	keyPos := int8(1)
+	if a.Count > 0 {
+		args = append(args, "count")
+		args = append(args, a.Count)
+		keyPos += 2
+	}
+	if a.Block >= 0 {
+		args = append(args, "block")
+		args = append(args, int64(a.Block/time.Millisecond))
+		keyPos += 2
+	}
+	args = append(args, "streams")
+	keyPos++
+	for _, s := range a.Streams {
+		args = append(args, s)
+	}
+
+	cmd := NewXStreamSliceCmd(ctx, args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
+	cmd.SetFirstKeyPos(keyPos)
+	_ = c(ctx, cmd)
+	return cmd
+}
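+
+// Usage sketch (assuming an initialized client named client and an illustrative stream "events").
+// Streams holds the stream names followed by the matching start IDs:
+//
+//	res, err := client.XRead(ctx, &XReadArgs{
+//		Streams: []string{"events", "$"}, // "$" = only entries added after the call
+//		Count:   10,
+//		Block:   5 * time.Second,
+//	}).Result()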
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+	return c.XRead(ctx, &XReadArgs{
+		Streams: streams,
+		Block:   -1,
+	})
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XReadGroupArgs struct {
+	Group    string
+	Consumer string
+	Streams  []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+	Count    int64
+	Block    time.Duration
+	NoAck    bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+	args := make([]interface{}, 0, 10+len(a.Streams))
+	args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+	keyPos := int8(4)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+		keyPos += 2
+	}
+	if a.Block >= 0 {
+		args = append(args, "block", int64(a.Block/time.Millisecond))
+		keyPos += 2
+	}
+	if a.NoAck {
+		args = append(args, "noack")
+		keyPos++
+	}
+	args = append(args, "streams")
+	keyPos++
+	for _, s := range a.Streams {
+		args = append(args, s)
+	}
+
+	cmd := NewXStreamSliceCmd(ctx, args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
+	cmd.SetFirstKeyPos(keyPos)
+	_ = c(ctx, cmd)
+	return cmd
+}
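+
+// Usage sketch (assuming an initialized client named client and a hypothetical consumer
+// group "workers" that already exists on stream "events"):
+//
+//	res, err := client.XReadGroup(ctx, &XReadGroupArgs{
+//		Group:    "workers",
+//		Consumer: "worker-1",
+//		Streams:  []string{"events", ">"}, // ">" = entries never delivered to this group
+//		Count:    10,
+//		Block:    5 * time.Second,
+//	}).Result()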
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+	args := []interface{}{"xack", stream, group}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+	cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XPendingExtArgs struct {
+	Stream   string
+	Group    string
+	Idle     time.Duration
+	Start    string
+	End      string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "xpending", a.Stream, a.Group)
+	if a.Idle != 0 {
+		args = append(args, "idle", formatMs(ctx, a.Idle))
+	}
+	args = append(args, a.Start, a.End, a.Count)
+	if a.Consumer != "" {
+		args = append(args, a.Consumer)
+	}
+	cmd := NewXPendingExtCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
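+
+// Usage sketch (same illustrative stream and group as above, client initialized as before):
+//
+//	pending, err := client.XPendingExt(ctx, &XPendingExtArgs{
+//		Stream: "events",
+//		Group:  "workers",
+//		Start:  "-",
+//		End:    "+",
+//		Count:  10,
+//		Idle:   time.Minute, // only entries idle for at least one minute
+//	}).Result()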
+
+type XAutoClaimArgs struct {
+	Stream   string
+	Group    string
+	MinIdle  time.Duration
+	Start    string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+	args := xAutoClaimArgs(ctx, a)
+	cmd := NewXAutoClaimCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+	args := xAutoClaimArgs(ctx, a)
+	args = append(args, "justid")
+	cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 8)
+	args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+	}
+	return args
+}
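+
+// Usage sketch (same illustrative stream and group; the consumer name is hypothetical):
+//
+//	// claim up to 25 entries that have been idle for 30s, scanning from "0-0";
+//	// the second return value is the cursor to use as Start on the next call
+//	msgs, start, err := client.XAutoClaim(ctx, &XAutoClaimArgs{
+//		Stream:   "events",
+//		Group:    "workers",
+//		Consumer: "worker-2",
+//		MinIdle:  30 * time.Second,
+//		Start:    "0-0",
+//		Count:    25,
+//	}).Result()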
+
+type XClaimArgs struct {
+	Stream   string
+	Group    string
+	Consumer string
+	MinIdle  time.Duration
+	Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+	args := xClaimArgs(a)
+	cmd := NewXMessageSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+	args := xClaimArgs(a)
+	args = append(args, "justid")
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 5+len(a.Messages))
+	args = append(args,
+		"xclaim",
+		a.Stream,
+		a.Group, a.Consumer,
+		int64(a.MinIdle/time.Millisecond))
+	for _, id := range a.Messages {
+		args = append(args, id)
+	}
+	return args
+}
+
+// xTrim If approx is true, the "~" matcher is added; otherwise the default "=" matcher (redis default) is used.
+// example:
+//		XTRIM key MAXLEN/MINID threshold LIMIT limit.
+//		XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+// If the redis-server version is lower than 6.2, limit must be set to 0.
+func (c cmdable) xTrim(
+	ctx context.Context, key, strategy string,
+	approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+	args := make([]interface{}, 0, 7)
+	args = append(args, "xtrim", key, strategy)
+	if approx {
+		args = append(args, "~")
+	}
+	args = append(args, threshold)
+	if limit > 0 {
+		args = append(args, "limit", limit)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Deprecated: use XTrimMaxLen, remove in v9.
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// Deprecated: use XTrimMaxLenApprox, remove in v9.
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
+}
+
+// XTrimMaxLen uses the exact MAXLEN match (no `~`); `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// XTrimMaxLenApprox Note: the LIMIT option is affected by a known redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+// XTrimMinID uses the exact MINID match (no `~`); `limit` cannot be used.
+// cmd: XTRIM key MINID minID
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+	return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+// XTrimMinIDApprox Note: the LIMIT option is affected by a known redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MINID ~ minID LIMIT limit
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
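+
+// Usage sketch (assuming an initialized client named client and the illustrative stream "events"):
+//
+//	// XTRIM events MAXLEN 1000
+//	n, err := client.XTrimMaxLen(ctx, "events", 1000).Result()
+//	// XTRIM events MINID ~ 1526985054069 LIMIT 100 (redis-server >= 6.2)
+//	n, err = client.XTrimMinIDApprox(ctx, "events", "1526985054069", 100).Result()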
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+	cmd := NewXInfoConsumersCmd(ctx, key, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+	cmd := NewXInfoGroupsCmd(ctx, key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+	cmd := NewXInfoStreamCmd(ctx, key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+	args := make([]interface{}, 0, 6)
+	args = append(args, "xinfo", "stream", key, "full")
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewXInfoStreamFullCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+	Score  float64
+	Member interface{}
+}
+
+// ZWithKey represents a sorted set member together with the name of the key it was popped from.
+type ZWithKey struct {
+	Z
+	Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+	Keys    []string
+	Weights []float64
+	// Can be SUM, MIN or MAX.
+	Aggregate string
+}
+
+func (z ZStore) len() (n int) {
+	n = len(z.Keys)
+	if len(z.Weights) > 0 {
+		n += 1 + len(z.Weights)
+	}
+	if z.Aggregate != "" {
+		n += 2
+	}
+	return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+	for _, key := range z.Keys {
+		args = append(args, key)
+	}
+	if len(z.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weights := range z.Weights {
+			args = append(args, weights)
+		}
+	}
+	if z.Aggregate != "" {
+		args = append(args, "aggregate", z.Aggregate)
+	}
+	return args
+}
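+
+// Usage sketch (assuming an initialized client named client and two illustrative sorted
+// sets zset1 and zset2):
+//
+//	// ZINTERSTORE dest 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE MAX
+//	n, err := client.ZInterStore(ctx, "dest", &ZStore{
+//		Keys:      []string{"zset1", "zset2"},
+//		Weights:   []float64{2, 3},
+//		Aggregate: "MAX",
+//	}).Result()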
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmax"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(ctx, timeout)
+	cmd := NewZWithKeyCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmin"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(ctx, timeout)
+	cmd := NewZWithKeyCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+	NX      bool
+	XX      bool
+	LT      bool
+	GT      bool
+	Ch      bool
+	Members []Z
+}
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+	a := make([]interface{}, 0, 6+2*len(args.Members))
+	a = append(a, "zadd", key)
+
+	// The GT, LT and NX options are mutually exclusive.
+	if args.NX {
+		a = append(a, "nx")
+	} else {
+		if args.XX {
+			a = append(a, "xx")
+		}
+		if args.GT {
+			a = append(a, "gt")
+		} else if args.LT {
+			a = append(a, "lt")
+		}
+	}
+	if args.Ch {
+		a = append(a, "ch")
+	}
+	if incr {
+		a = append(a, "incr")
+	}
+	for _, m := range args.Members {
+		a = append(a, m.Score)
+		a = append(a, m.Member)
+	}
+	return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+	cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+	cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
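+
+// Usage sketch (assuming an initialized client named client; key and members are illustrative,
+// and the GT option requires redis-server >= 6.2):
+//
+//	// ZADD key GT CH 1 member1 2 member2
+//	n, err := client.ZAddArgs(ctx, "key", ZAddArgs{
+//		GT: true,
+//		Ch: true,
+//		Members: []Z{
+//			{Score: 1, Member: "member1"},
+//			{Score: 2, Member: "member2"},
+//		},
+//	}).Result()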
+
+// TODO: Compatible with v8 api, will be removed in v9.
+func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
+	args.Members = make([]Z, len(members))
+	for i, m := range members {
+		args.Members[i] = *m
+	}
+	cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{}, members...)
+}
+
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		NX: true,
+	}, members...)
+}
+
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		XX: true,
+	}, members...)
+}
+
+// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		Ch: true,
+	}, members...)
+}
+
+// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			NX: true,
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		NX: true,
+		Ch: true,
+	}, members...)
+}
+
+// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			XX: true,
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		XX: true,
+		Ch: true,
+	}, members...)
+}
+
+// ZIncr Redis `ZADD key INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		Members: []Z{*member},
+	})
+}
+
+// ZIncrNX Redis `ZADD key NX INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			NX: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		NX:      true,
+		Members: []Z{*member},
+	})
+}
+
+// ZIncrXX Redis `ZADD key XX INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			XX: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		XX:      true,
+		Members: []Z{*member},
+	})
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zcard", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zcount", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zinterstore", destination, len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+	args := make([]interface{}, 0, 2+store.len())
+	args = append(args, "zinter", len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zinter", len(store.Keys))
+	args = store.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "zmscore"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewFloatSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmax",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmin",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZRangeArgs holds all the options of the ZRange command.
+// Starting with Redis 6.2.0, ZRANGE with these options can replace the following commands:
+//		ZREVRANGE,
+//		ZRANGEBYSCORE,
+//		ZREVRANGEBYSCORE,
+//		ZRANGEBYLEX,
+//		ZREVRANGEBYLEX.
+// Please pay attention to your redis-server version.
+//
+// The Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 or higher.
+type ZRangeArgs struct {
+	Key string
+
+	// When the ByScore option is provided, <Start> and <Stop> may use a "(" prefix to make the bound open (exclusive).
+	// By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+	// This mirrors the ZRangeByScore command, which is deprecated as of Redis 6.2.0.
+	// For example:
+	//		ZRangeArgs{
+	//			Key: 				"example-key",
+	//	 		Start: 				"(3",
+	//	 		Stop: 				8,
+	//			ByScore:			true,
+	//	 	}
+	// 	 	cmd: "ZRange example-key (3 8 ByScore"  (3 < score <= 8).
+	//
+	// The ByLex option mirrors the ZRangeByLex command, which is deprecated as of Redis 6.2.0.
+	// You can set the <Start> and <Stop> options as follows:
+	//		ZRangeArgs{
+	//			Key: 				"example-key",
+	//	 		Start: 				"[abc",
+	//	 		Stop: 				"(def",
+	//			ByLex:				true,
+	//	 	}
+	//		cmd: "ZRange example-key [abc (def ByLex"
+	//
+	// For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+	// You can read the documentation for more information: https://redis.io/commands/zrange
+	Start interface{}
+	Stop  interface{}
+
+	// The ByScore and ByLex options are mutually exclusive.
+	ByScore bool
+	ByLex   bool
+
+	Rev bool
+
+	// limit offset count.
+	Offset int64
+	Count  int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+	// For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+	if z.Rev && (z.ByScore || z.ByLex) {
+		args = append(args, z.Key, z.Stop, z.Start)
+	} else {
+		args = append(args, z.Key, z.Start, z.Stop)
+	}
+
+	if z.ByScore {
+		args = append(args, "byscore")
+	} else if z.ByLex {
+		args = append(args, "bylex")
+	}
+	if z.Rev {
+		args = append(args, "rev")
+	}
+	if z.Offset != 0 || z.Count != 0 {
+		args = append(args, "limit", z.Offset, z.Count)
+	}
+	return args
+}
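+
+// For illustration (a sketch; the key name is hypothetical), the query
+//
+//	ZRangeArgs{
+//		Key:     "example-key",
+//		Start:   "(3",
+//		Stop:    8,
+//		ByScore: true,
+//		Rev:     true,
+//		Count:   10,
+//	}
+//
+// is assembled by appendArgs into the wire command
+//
+//	zrange example-key 8 (3 byscore rev limit 0 10
+//
+// Note that <Start> and <Stop> are swapped because Rev is combined with ByScore.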
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	return c.ZRangeArgs(ctx, ZRangeArgs{
+		Key:   key,
+		Start: start,
+		Stop:  stop,
+	})
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+	return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+		Key:   key,
+		Start: start,
+		Stop:  stop,
+	})
+}
+
+type ZRangeBy struct {
+	Min, Max      string
+	Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Min, opt.Max}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrangestore", dst)
+	args = z.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zrank", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "zrem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+	cmd := NewIntCmd(
+		ctx,
+		"zremrangebyrank",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Max, opt.Min}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zrevrank", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "zscore", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+	args := make([]interface{}, 0, 2+store.len())
+	args = append(args, "zunion", len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zunion", len(store.Keys))
+	args = store.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zunionstore", dest, len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZRandMember redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "zrandmember", key, count)
+	if withScores {
+		args = append(args, "withscores")
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
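+
+// A minimal usage sketch (assuming an existing *Client named rdb; the key
+// name is hypothetical):
+//
+//	members, err := rdb.ZRandMember(ctx, "example-zset", 2, false).Result()
+//
+// Per ZRANDMEMBER semantics, a negative count may return the same member
+// more than once.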
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+	args[len(keys)+2] = "withscores"
+
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffStore redis-server version >=6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 0, 3+len(keys))
+	args = append(args, "zdiffstore", destination, len(keys))
+	for _, key := range keys {
+		args = append(args, key)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(els))
+	args[0] = "pfadd"
+	args[1] = key
+	args = appendArgs(args, els)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "pfcount"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "pfmerge"
+	args[1] = dest
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
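+
+// A minimal HyperLogLog sketch (assuming an existing *Client named rdb; the
+// key names are hypothetical):
+//
+//	rdb.PFAdd(ctx, "visitors:today", "user-1", "user-2")
+//	rdb.PFAdd(ctx, "visitors:yesterday", "user-2", "user-3")
+//	rdb.PFMerge(ctx, "visitors:total", "visitors:today", "visitors:yesterday")
+//	total, err := rdb.PFCount(ctx, "visitors:total").Result()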
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgrewriteaof")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgsave")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientKillByFilter uses the new-style syntax, while ClientKill uses the old one:
+//
+//   CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "client"
+	args[1] = "kill"
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "client", "list")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "client", "id")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "client", "unblock", id)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
+	cmd := NewSliceCmd(ctx, "config", "get", parameter)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "config", "resetstat")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "config", "rewrite")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "dbsize")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "flushall")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "flushall", "async")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "flushdb")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "flushdb", "async")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
+	args := []interface{}{"info"}
+	if len(section) > 0 {
+		args = append(args, section[0])
+	}
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "lastsave")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "save")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+	var args []interface{}
+	if modifier == "" {
+		args = []interface{}{"shutdown"}
+	} else {
+		args = []interface{}{"shutdown", modifier}
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	if err := cmd.Err(); err != nil {
+		if err == io.EOF {
+			// Server quit as expected.
+			cmd.err = nil
+		}
+	} else {
+		// Server did not quit. String reply contains the reason.
+		cmd.err = errors.New(cmd.val)
+		cmd.val = ""
+	}
+	return cmd
+}
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+	return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+	return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+	return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "slaveof", host, port)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+	cmd := NewSlowLogCmd(context.Background(), "slowlog", "get", num)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Sync(_ context.Context) {
+	panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+	cmd := NewTimeCmd(ctx, "time")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "debug", "object", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "readonly")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "readwrite")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+	args := []interface{}{"memory", "usage", key}
+	if len(samples) > 0 {
+		if len(samples) != 1 {
+			panic("MemoryUsage expects single sample count")
+		}
+		args = append(args, "SAMPLES", samples[0])
+	}
+	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+	cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+	cmdArgs[0] = "eval"
+	cmdArgs[1] = script
+	cmdArgs[2] = len(keys)
+	for i, key := range keys {
+		cmdArgs[3+i] = key
+	}
+	cmdArgs = appendArgs(cmdArgs, args)
+	cmd := NewCmd(ctx, cmdArgs...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+	cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+	cmdArgs[0] = "evalsha"
+	cmdArgs[1] = sha1
+	cmdArgs[2] = len(keys)
+	for i, key := range keys {
+		cmdArgs[3+i] = key
+	}
+	cmdArgs = appendArgs(cmdArgs, args)
+	cmd := NewCmd(ctx, cmdArgs...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+	args := make([]interface{}, 2+len(hashes))
+	args[0] = "script"
+	args[1] = "exists"
+	for i, hash := range hashes {
+		args[2+i] = hash
+	}
+	cmd := NewBoolSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "script", "flush")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "script", "kill")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+	cmd := NewStringCmd(ctx, "script", "load", script)
+	_ = c(ctx, cmd)
+	return cmd
+}
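+
+// A minimal scripting sketch (assuming an existing *Client named rdb; the
+// script, key and value below are hypothetical):
+//
+//	script := "return redis.call('SET', KEYS[1], ARGV[1])"
+//	res, err := rdb.Eval(ctx, script, []string{"example-key"}, "example-value").Result()
+//
+// For repeated calls, ScriptLoad plus EvalSha avoids resending the script body:
+//
+//	sha, err := rdb.ScriptLoad(ctx, script).Result()
+//	res, err = rdb.EvalSha(ctx, sha, []string{"example-key"}, "example-value").Result()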
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "publish", channel, message)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+	args := []interface{}{"pubsub", "channels"}
+	if pattern != "*" {
+		args = append(args, pattern)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
+	args := make([]interface{}, 2+len(channels))
+	args[0] = "pubsub"
+	args[1] = "numsub"
+	for i, channel := range channels {
+		args[2+i] = channel
+	}
+	cmd := NewStringIntMapCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "pubsub", "numpat")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+	cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "cluster", "nodes")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "cluster", "info")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+	cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+	cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+	args := make([]interface{}, 2+len(slots))
+	args[0] = "cluster"
+	args[1] = "delslots"
+	for i, slot := range slots {
+		args[2+i] = slot
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+	size := max - min + 1
+	slots := make([]int, size)
+	for i := 0; i < size; i++ {
+		slots[i] = min + i
+	}
+	return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "failover")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+	args := make([]interface{}, 2+len(slots))
+	args[0] = "cluster"
+	args[1] = "addslots"
+	for i, num := range slots {
+		args[2+i] = num
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+	size := max - min + 1
+	slots := make([]int, size)
+	for i := 0; i < size; i++ {
+		slots[i] = min + i
+	}
+	return c.ClusterAddSlots(ctx, slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+	args := make([]interface{}, 2+3*len(geoLocation))
+	args[0] = "geoadd"
+	args[1] = key
+	for i, eachLoc := range geoLocation {
+		args[2+3*i] = eachLoc.Longitude
+		args[2+3*i+1] = eachLoc.Latitude
+		args[2+3*i+2] = eachLoc.Name
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+	ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+	cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+	if query.Store != "" || query.StoreDist != "" {
+		cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+		return cmd
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+	ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+	args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+	cmd := NewIntCmd(ctx, args...)
+	if query.Store == "" && query.StoreDist == "" {
+		cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+		return cmd
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
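+
+// A minimal GeoRadius sketch (assuming an existing *Client named rdb; the key
+// and coordinates are hypothetical):
+//
+//	locs, err := rdb.GeoRadius(ctx, "stations", 13.361389, 38.115556, &GeoRadiusQuery{
+//		Radius:    200,
+//		Unit:      "km",
+//		WithCoord: true,
+//		WithDist:  true,
+//		Count:     10,
+//	}).Result()
+//
+// Queries that set Store or StoreDist must go through GeoRadiusStore instead.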
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+	ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+	cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+	if query.Store != "" || query.StoreDist != "" {
+		cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+		return cmd
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+	ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+	args := geoLocationArgs(query, "georadiusbymember", key, member)
+	cmd := NewIntCmd(ctx, args...)
+	if query.Store == "" && query.StoreDist == "" {
+		cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+		return cmd
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+	args := make([]interface{}, 0, 13)
+	args = append(args, "geosearch", key)
+	args = geoSearchArgs(q, args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+	ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+	args := make([]interface{}, 0, 16)
+	args = append(args, "geosearch", key)
+	args = geoSearchLocationArgs(q, args)
+	cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+	args := make([]interface{}, 0, 15)
+	args = append(args, "geosearchstore", store, key)
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+	if q.StoreDist {
+		args = append(args, "storedist")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoDist(
+	ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+	if unit == "" {
+		unit = "km"
+	}
+	cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "geohash"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "geopos"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewGeoPosCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}

+ 4 - 0
vendor/github.com/go-redis/redis/v8/doc.go

@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis

+ 144 - 0
vendor/github.com/go-redis/redis/v8/error.go

@@ -0,0 +1,144 @@
+package redis
+
+import (
+	"context"
+	"io"
+	"net"
+	"strings"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// ErrClosed is returned when any operation is performed on a closed client.
+var ErrClosed = pool.ErrClosed
+
+type Error interface {
+	error
+
+	// RedisError is a no-op function but
+	// serves to distinguish types that are Redis
+	// errors from ordinary errors: a type is a
+	// Redis error if it has a RedisError method.
+	RedisError()
+}
+
+var _ Error = proto.RedisError("")
+
+func shouldRetry(err error, retryTimeout bool) bool {
+	switch err {
+	case io.EOF, io.ErrUnexpectedEOF:
+		return true
+	case nil, context.Canceled, context.DeadlineExceeded:
+		return false
+	}
+
+	if v, ok := err.(timeoutError); ok {
+		if v.Timeout() {
+			return retryTimeout
+		}
+		return true
+	}
+
+	s := err.Error()
+	if s == "ERR max number of clients reached" {
+		return true
+	}
+	if strings.HasPrefix(s, "LOADING ") {
+		return true
+	}
+	if strings.HasPrefix(s, "READONLY ") {
+		return true
+	}
+	if strings.HasPrefix(s, "CLUSTERDOWN ") {
+		return true
+	}
+	if strings.HasPrefix(s, "TRYAGAIN ") {
+		return true
+	}
+
+	return false
+}
+
+func isRedisError(err error) bool {
+	_, ok := err.(proto.RedisError)
+	return ok
+}
+
+func isBadConn(err error, allowTimeout bool, addr string) bool {
+	switch err {
+	case nil:
+		return false
+	case context.Canceled, context.DeadlineExceeded:
+		return true
+	}
+
+	if isRedisError(err) {
+		switch {
+		case isReadOnlyError(err):
+			// Close connections in read-only state in case a domain addr is used
+			// and the domain resolves to a different Redis server. See #790.
+			return true
+		case isMovedSameConnAddr(err, addr):
+			// Close connections when we are asked to move to the same addr
+			// as the current connection's. This forces a DNS resolution once
+			// all connections of the pool are recycled.
+			return true
+		default:
+			return false
+		}
+	}
+
+	if allowTimeout {
+		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+			return !netErr.Temporary()
+		}
+	}
+
+	return true
+}
+
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+	if !isRedisError(err) {
+		return
+	}
+
+	s := err.Error()
+	switch {
+	case strings.HasPrefix(s, "MOVED "):
+		moved = true
+	case strings.HasPrefix(s, "ASK "):
+		ask = true
+	default:
+		return
+	}
+
+	ind := strings.LastIndex(s, " ")
+	if ind == -1 {
+		return false, false, ""
+	}
+	addr = s[ind+1:]
+	return
+}
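+
+// For example, a cluster redirection error such as the (hypothetical) reply
+//
+//	MOVED 3999 127.0.0.1:6381
+//
+// yields moved=true, ask=false, addr="127.0.0.1:6381".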
+
+func isLoadingError(err error) bool {
+	return strings.HasPrefix(err.Error(), "LOADING ")
+}
+
+func isReadOnlyError(err error) bool {
+	return strings.HasPrefix(err.Error(), "READONLY ")
+}
+
+func isMovedSameConnAddr(err error, addr string) bool {
+	redisError := err.Error()
+	if !strings.HasPrefix(redisError, "MOVED ") {
+		return false
+	}
+	return strings.HasSuffix(redisError, " "+addr)
+}
+
+//------------------------------------------------------------------------------
+
+type timeoutError interface {
+	Timeout() bool
+}

+ 56 - 0
vendor/github.com/go-redis/redis/v8/internal/arg.go

@@ -0,0 +1,56 @@
+package internal
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+)
+
+func AppendArg(b []byte, v interface{}) []byte {
+	switch v := v.(type) {
+	case nil:
+		return append(b, "<nil>"...)
+	case string:
+		return appendUTF8String(b, Bytes(v))
+	case []byte:
+		return appendUTF8String(b, v)
+	case int:
+		return strconv.AppendInt(b, int64(v), 10)
+	case int8:
+		return strconv.AppendInt(b, int64(v), 10)
+	case int16:
+		return strconv.AppendInt(b, int64(v), 10)
+	case int32:
+		return strconv.AppendInt(b, int64(v), 10)
+	case int64:
+		return strconv.AppendInt(b, v, 10)
+	case uint:
+		return strconv.AppendUint(b, uint64(v), 10)
+	case uint8:
+		return strconv.AppendUint(b, uint64(v), 10)
+	case uint16:
+		return strconv.AppendUint(b, uint64(v), 10)
+	case uint32:
+		return strconv.AppendUint(b, uint64(v), 10)
+	case uint64:
+		return strconv.AppendUint(b, v, 10)
+	case float32:
+		return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
+	case float64:
+		return strconv.AppendFloat(b, v, 'f', -1, 64)
+	case bool:
+		if v {
+			return append(b, "true"...)
+		}
+		return append(b, "false"...)
+	case time.Time:
+		return v.AppendFormat(b, time.RFC3339Nano)
+	default:
+		return append(b, fmt.Sprint(v)...)
+	}
+}
+
+func appendUTF8String(dst []byte, src []byte) []byte {
+	dst = append(dst, src...)
+	return dst
+}

+ 78 - 0
vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go

@@ -0,0 +1,78 @@
+package hashtag
+
+import (
+	"strings"
+
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+const slotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+	if s := strings.IndexByte(key, '{'); s > -1 {
+		if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+			return key[s+1 : s+e+1]
+		}
+	}
+	return key
+}
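+
+// For example (the key names are hypothetical):
+//
+//	Key("{user1000}.following") // "user1000"
+//	Key("{user1000}.followers") // "user1000" (hashes to the same slot)
+//	Key("plain-key")            // "plain-key" (no hash tag, returned unchanged)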
+
+func RandomSlot() int {
+	return rand.Intn(slotNumber)
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+	if key == "" {
+		return RandomSlot()
+	}
+	key = Key(key)
+	return int(crc16sum(key)) % slotNumber
+}
+
+func crc16sum(key string) (crc uint16) {
+	for i := 0; i < len(key); i++ {
+		crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+	}
+	return
+}

+ 201 - 0
vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go

@@ -0,0 +1,201 @@
+package hscan
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// decoderFunc represents decoding functions for default built-in types.
+type decoderFunc func(reflect.Value, string) error
+
+var (
+	// List of built-in decoders indexed by their numeric constant values (e.g. reflect.Bool = 1).
+	decoders = []decoderFunc{
+		reflect.Bool:          decodeBool,
+		reflect.Int:           decodeInt,
+		reflect.Int8:          decodeInt8,
+		reflect.Int16:         decodeInt16,
+		reflect.Int32:         decodeInt32,
+		reflect.Int64:         decodeInt64,
+		reflect.Uint:          decodeUint,
+		reflect.Uint8:         decodeUint8,
+		reflect.Uint16:        decodeUint16,
+		reflect.Uint32:        decodeUint32,
+		reflect.Uint64:        decodeUint64,
+		reflect.Float32:       decodeFloat32,
+		reflect.Float64:       decodeFloat64,
+		reflect.Complex64:     decodeUnsupported,
+		reflect.Complex128:    decodeUnsupported,
+		reflect.Array:         decodeUnsupported,
+		reflect.Chan:          decodeUnsupported,
+		reflect.Func:          decodeUnsupported,
+		reflect.Interface:     decodeUnsupported,
+		reflect.Map:           decodeUnsupported,
+		reflect.Ptr:           decodeUnsupported,
+		reflect.Slice:         decodeSlice,
+		reflect.String:        decodeString,
+		reflect.Struct:        decodeUnsupported,
+		reflect.UnsafePointer: decodeUnsupported,
+	}
+
+	// Global map of struct field specs that is populated once for every new
+	// struct type that is scanned. This caches the field types and the corresponding
+	// decoder functions to avoid iterating through struct fields on subsequent scans.
+	globalStructMap = newStructMap()
+)
+
+func Struct(dst interface{}) (StructValue, error) {
+	v := reflect.ValueOf(dst)
+
+	// The destination to scan into should be a struct pointer.
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
+	}
+
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
+	}
+
+	return StructValue{
+		spec:  globalStructMap.get(v.Type()),
+		value: v,
+	}, nil
+}
+
+// Scan scans the results from a key-value Redis map result set into a destination struct.
+// The Redis keys are matched to the struct fields via the `redis` tag.
+func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
+	if len(keys) != len(vals) {
+		return errors.New("args should have the same number of keys and vals")
+	}
+
+	strct, err := Struct(dst)
+	if err != nil {
+		return err
+	}
+
+	// Iterate through the (key, value) sequence.
+	for i := 0; i < len(vals); i++ {
+		key, ok := keys[i].(string)
+		if !ok {
+			continue
+		}
+
+		val, ok := vals[i].(string)
+		if !ok {
+			continue
+		}
+
+		if err := strct.Scan(key, val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
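+
+// A minimal usage sketch (the struct and field names below are hypothetical):
+//
+//	type user struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//
+//	var u user
+//	keys := []interface{}{"name", "age"}
+//	vals := []interface{}{"alice", "30"}
+//	if err := Scan(&u, keys, vals); err != nil {
+//		// handle the error
+//	}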
+
+func decodeBool(f reflect.Value, s string) error {
+	b, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	f.SetBool(b)
+	return nil
+}
+
+func decodeInt8(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 8)
+}
+
+func decodeInt16(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 16)
+}
+
+func decodeInt32(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 32)
+}
+
+func decodeInt64(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 64)
+}
+
+func decodeInt(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 0)
+}
+
+func decodeNumber(f reflect.Value, s string, bitSize int) error {
+	v, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return err
+	}
+	f.SetInt(v)
+	return nil
+}
+
+func decodeUint8(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 8)
+}
+
+func decodeUint16(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 16)
+}
+
+func decodeUint32(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 32)
+}
+
+func decodeUint64(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 64)
+}
+
+func decodeUint(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 0)
+}
+
+func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
+	v, err := strconv.ParseUint(s, 10, bitSize)
+	if err != nil {
+		return err
+	}
+	f.SetUint(v)
+	return nil
+}
+
+func decodeFloat32(f reflect.Value, s string) error {
+	v, err := strconv.ParseFloat(s, 32)
+	if err != nil {
+		return err
+	}
+	f.SetFloat(v)
+	return nil
+}
+
+// decodeFloat64 is defined explicitly even though float64 is the default float kind.
+func decodeFloat64(f reflect.Value, s string) error {
+	v, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return err
+	}
+	f.SetFloat(v)
+	return nil
+}
+
+func decodeString(f reflect.Value, s string) error {
+	f.SetString(s)
+	return nil
+}
+
+func decodeSlice(f reflect.Value, s string) error {
+	// []byte slice ([]uint8).
+	if f.Type().Elem().Kind() == reflect.Uint8 {
+		f.SetBytes([]byte(s))
+	}
+	return nil
+}
+
+func decodeUnsupported(v reflect.Value, s string) error {
+	return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
+}

+ 93 - 0
vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go

@@ -0,0 +1,93 @@
+package hscan
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// structMap contains the map of struct fields for target structs
+// indexed by the struct type.
+type structMap struct {
+	m sync.Map
+}
+
+func newStructMap() *structMap {
+	return new(structMap)
+}
+
+func (s *structMap) get(t reflect.Type) *structSpec {
+	if v, ok := s.m.Load(t); ok {
+		return v.(*structSpec)
+	}
+
+	spec := newStructSpec(t, "redis")
+	s.m.Store(t, spec)
+	return spec
+}
+
+//------------------------------------------------------------------------------
+
+// structSpec contains the list of all fields in a target struct.
+type structSpec struct {
+	m map[string]*structField
+}
+
+func (s *structSpec) set(tag string, sf *structField) {
+	s.m[tag] = sf
+}
+
+func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
+	numField := t.NumField()
+	out := &structSpec{
+		m: make(map[string]*structField, numField),
+	}
+
+	for i := 0; i < numField; i++ {
+		f := t.Field(i)
+
+		tag := f.Tag.Get(fieldTag)
+		if tag == "" || tag == "-" {
+			continue
+		}
+
+		tag = strings.Split(tag, ",")[0]
+		if tag == "" {
+			continue
+		}
+
+		// Use the built-in decoder.
+		out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+	}
+
+	return out
+}
+
+//------------------------------------------------------------------------------
+
+// structField represents a single field in a target struct.
+type structField struct {
+	index int
+	fn    decoderFunc
+}
+
+//------------------------------------------------------------------------------
+
+type StructValue struct {
+	spec  *structSpec
+	value reflect.Value
+}
+
+func (s StructValue) Scan(key string, value string) error {
+	field, ok := s.spec.m[key]
+	if !ok {
+		return nil
+	}
+	if err := field.fn(s.value.Field(field.index), value); err != nil {
+		t := s.value.Type()
+		return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
+			value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
+	}
+	return nil
+}

+ 29 - 0
vendor/github.com/go-redis/redis/v8/internal/internal.go

@@ -0,0 +1,29 @@
+package internal
+
+import (
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+	if retry < 0 {
+		panic("not reached")
+	}
+	if minBackoff == 0 {
+		return 0
+	}
+
+	d := minBackoff << uint(retry)
+	if d < minBackoff {
+		return maxBackoff
+	}
+
+	d = minBackoff + time.Duration(rand.Int63n(int64(d)))
+
+	if d > maxBackoff || d < minBackoff {
+		d = maxBackoff
+	}
+
+	return d
+}
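+
+// For example, with minBackoff=8ms and maxBackoff=512ms: retry 0 yields a
+// duration in [8ms, 16ms), retry 3 yields one in [8ms, 72ms), and once the
+// jittered value would exceed 512ms it is capped at maxBackoff.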

+ 26 - 0
vendor/github.com/go-redis/redis/v8/internal/log.go

@@ -0,0 +1,26 @@
+package internal
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+)
+
+type Logging interface {
+	Printf(ctx context.Context, format string, v ...interface{})
+}
+
+type logger struct {
+	log *log.Logger
+}
+
+func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
+	_ = l.log.Output(2, fmt.Sprintf(format, v...))
+}
+
+// Logger calls Output to print to stderr.
+// Arguments are handled in the manner of fmt.Printf.
+var Logger Logging = &logger{
+	log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
+}

+ 60 - 0
vendor/github.com/go-redis/redis/v8/internal/once.go

@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+	m    sync.Mutex
+	done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once.  In other words, given
+// 	var once Once
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation, unless
+// f returns an error.  A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once.  Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+// 	err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+	if atomic.LoadUint32(&o.done) == 1 {
+		return nil
+	}
+	// Slow-path.
+	o.m.Lock()
+	defer o.m.Unlock()
+	var err error
+	if o.done == 0 {
+		err = f()
+		if err == nil {
+			atomic.StoreUint32(&o.done, 1)
+		}
+	}
+	return err
+}

+ 121 - 0
vendor/github.com/go-redis/redis/v8/internal/pool/conn.go

@@ -0,0 +1,121 @@
+package pool
+
+import (
+	"bufio"
+	"context"
+	"net"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+	usedAt  int64 // atomic
+	netConn net.Conn
+
+	rd *proto.Reader
+	bw *bufio.Writer
+	wr *proto.Writer
+
+	Inited    bool
+	pooled    bool
+	createdAt time.Time
+}
+
+func NewConn(netConn net.Conn) *Conn {
+	cn := &Conn{
+		netConn:   netConn,
+		createdAt: time.Now(),
+	}
+	cn.rd = proto.NewReader(netConn)
+	cn.bw = bufio.NewWriter(netConn)
+	cn.wr = proto.NewWriter(cn.bw)
+	cn.SetUsedAt(time.Now())
+	return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+	unix := atomic.LoadInt64(&cn.usedAt)
+	return time.Unix(unix, 0)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+	atomic.StoreInt64(&cn.usedAt, tm.Unix())
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+	cn.netConn = netConn
+	cn.rd.Reset(netConn)
+	cn.bw.Reset(netConn)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+	return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+	if cn.netConn != nil {
+		return cn.netConn.RemoteAddr()
+	}
+	return nil
+}
+
+func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
+	if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+		return err
+	}
+	return fn(cn.rd)
+}
+
+func (cn *Conn) WithWriter(
+	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
+) error {
+	if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+		return err
+	}
+
+	if cn.bw.Buffered() > 0 {
+		cn.bw.Reset(cn.netConn)
+	}
+
+	if err := fn(cn.wr); err != nil {
+		return err
+	}
+
+	return cn.bw.Flush()
+}
+
+func (cn *Conn) Close() error {
+	return cn.netConn.Close()
+}
+
+func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
+	tm := time.Now()
+	cn.SetUsedAt(tm)
+
+	if timeout > 0 {
+		tm = tm.Add(timeout)
+	}
+
+	if ctx != nil {
+		deadline, ok := ctx.Deadline()
+		if ok {
+			if timeout == 0 {
+				return deadline
+			}
+			if deadline.Before(tm) {
+				return deadline
+			}
+			return tm
+		}
+	}
+
+	if timeout > 0 {
+		return tm
+	}
+
+	return noDeadline
+}

+ 557 - 0
vendor/github.com/go-redis/redis/v8/internal/pool/pool.go

@@ -0,0 +1,557 @@
+package pool
+
+import (
+	"context"
+	"errors"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+)
+
+var (
+	// ErrClosed is returned when any operation is performed on a closed client.
+	ErrClosed = errors.New("redis: client is closed")
+
+	// ErrPoolTimeout is returned when the pool times out waiting for a free connection.
+	ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)
+
+var timers = sync.Pool{
+	New: func() interface{} {
+		t := time.NewTimer(time.Hour)
+		t.Stop()
+		return t
+	},
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+	Hits     uint32 // number of times free connection was found in the pool
+	Misses   uint32 // number of times free connection was NOT found in the pool
+	Timeouts uint32 // number of times a wait timeout occurred
+
+	TotalConns uint32 // number of total connections in the pool
+	IdleConns  uint32 // number of idle connections in the pool
+	StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+	NewConn(context.Context) (*Conn, error)
+	CloseConn(*Conn) error
+
+	Get(context.Context) (*Conn, error)
+	Put(context.Context, *Conn)
+	Remove(context.Context, *Conn, error)
+
+	Len() int
+	IdleLen() int
+	Stats() *Stats
+
+	Close() error
+}
+
+type Options struct {
+	Dialer  func(context.Context) (net.Conn, error)
+	OnClose func(*Conn) error
+
+	PoolFIFO           bool
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+}
+
+type lastDialErrorWrap struct {
+	err error
+}
+
+type ConnPool struct {
+	opt *Options
+
+	dialErrorsNum uint32 // atomic
+
+	lastDialError atomic.Value
+
+	queue chan struct{}
+
+	connsMu      sync.Mutex
+	conns        []*Conn
+	idleConns    []*Conn
+	poolSize     int
+	idleConnsLen int
+
+	stats Stats
+
+	_closed  uint32 // atomic
+	closedCh chan struct{}
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+	p := &ConnPool{
+		opt: opt,
+
+		queue:     make(chan struct{}, opt.PoolSize),
+		conns:     make([]*Conn, 0, opt.PoolSize),
+		idleConns: make([]*Conn, 0, opt.PoolSize),
+		closedCh:  make(chan struct{}),
+	}
+
+	p.connsMu.Lock()
+	p.checkMinIdleConns()
+	p.connsMu.Unlock()
+
+	if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+		go p.reaper(opt.IdleCheckFrequency)
+	}
+
+	return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+	if p.opt.MinIdleConns == 0 {
+		return
+	}
+	for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+		p.poolSize++
+		p.idleConnsLen++
+
+		go func() {
+			err := p.addIdleConn()
+			if err != nil && err != ErrClosed {
+				p.connsMu.Lock()
+				p.poolSize--
+				p.idleConnsLen--
+				p.connsMu.Unlock()
+			}
+		}()
+	}
+}
+
+func (p *ConnPool) addIdleConn() error {
+	cn, err := p.dialConn(context.TODO(), true)
+	if err != nil {
+		return err
+	}
+
+	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	// It is not allowed to add new connections to the closed connection pool.
+	if p.closed() {
+		_ = cn.Close()
+		return ErrClosed
+	}
+
+	p.conns = append(p.conns, cn)
+	p.idleConns = append(p.idleConns, cn)
+	return nil
+}
+
+func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
+	return p.newConn(ctx, false)
+}
+
+func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+	cn, err := p.dialConn(ctx, pooled)
+	if err != nil {
+		return nil, err
+	}
+
+	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	// It is not allowed to add new connections to the closed connection pool.
+	if p.closed() {
+		_ = cn.Close()
+		return nil, ErrClosed
+	}
+
+	p.conns = append(p.conns, cn)
+	if pooled {
+		// If the pool is full, remove the cn on the next Put.
+		if p.poolSize >= p.opt.PoolSize {
+			cn.pooled = false
+		} else {
+			p.poolSize++
+		}
+	}
+
+	return cn, nil
+}
+
+func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+
+	if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+		return nil, p.getLastDialError()
+	}
+
+	netConn, err := p.opt.Dialer(ctx)
+	if err != nil {
+		p.setLastDialError(err)
+		if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+			go p.tryDial()
+		}
+		return nil, err
+	}
+
+	cn := NewConn(netConn)
+	cn.pooled = pooled
+	return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+	for {
+		if p.closed() {
+			return
+		}
+
+		conn, err := p.opt.Dialer(context.Background())
+		if err != nil {
+			p.setLastDialError(err)
+			time.Sleep(time.Second)
+			continue
+		}
+
+		atomic.StoreUint32(&p.dialErrorsNum, 0)
+		_ = conn.Close()
+		return
+	}
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+	p.lastDialError.Store(&lastDialErrorWrap{err: err})
+}
+
+func (p *ConnPool) getLastDialError() error {
+	err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
+	if err != nil {
+		return err.err
+	}
+	return nil
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+
+	if err := p.waitTurn(ctx); err != nil {
+		return nil, err
+	}
+
+	for {
+		p.connsMu.Lock()
+		cn, err := p.popIdle()
+		p.connsMu.Unlock()
+
+		if err != nil {
+			return nil, err
+		}
+
+		if cn == nil {
+			break
+		}
+
+		if p.isStaleConn(cn) {
+			_ = p.CloseConn(cn)
+			continue
+		}
+
+		atomic.AddUint32(&p.stats.Hits, 1)
+		return cn, nil
+	}
+
+	atomic.AddUint32(&p.stats.Misses, 1)
+
+	newcn, err := p.newConn(ctx, true)
+	if err != nil {
+		p.freeTurn()
+		return nil, err
+	}
+
+	return newcn, nil
+}
+
+func (p *ConnPool) getTurn() {
+	p.queue <- struct{}{}
+}
+
+func (p *ConnPool) waitTurn(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	select {
+	case p.queue <- struct{}{}:
+		return nil
+	default:
+	}
+
+	timer := timers.Get().(*time.Timer)
+	timer.Reset(p.opt.PoolTimeout)
+
+	select {
+	case <-ctx.Done():
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timers.Put(timer)
+		return ctx.Err()
+	case p.queue <- struct{}{}:
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timers.Put(timer)
+		return nil
+	case <-timer.C:
+		timers.Put(timer)
+		atomic.AddUint32(&p.stats.Timeouts, 1)
+		return ErrPoolTimeout
+	}
+}
+
+func (p *ConnPool) freeTurn() {
+	<-p.queue
+}
+
+func (p *ConnPool) popIdle() (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+	n := len(p.idleConns)
+	if n == 0 {
+		return nil, nil
+	}
+
+	var cn *Conn
+	if p.opt.PoolFIFO {
+		cn = p.idleConns[0]
+		copy(p.idleConns, p.idleConns[1:])
+		p.idleConns = p.idleConns[:n-1]
+	} else {
+		idx := n - 1
+		cn = p.idleConns[idx]
+		p.idleConns = p.idleConns[:idx]
+	}
+	p.idleConnsLen--
+	p.checkMinIdleConns()
+	return cn, nil
+}
+
+func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
+	if cn.rd.Buffered() > 0 {
+		internal.Logger.Printf(ctx, "Conn has unread data")
+		p.Remove(ctx, cn, BadConnError{})
+		return
+	}
+
+	if !cn.pooled {
+		p.Remove(ctx, cn, nil)
+		return
+	}
+
+	p.connsMu.Lock()
+	p.idleConns = append(p.idleConns, cn)
+	p.idleConnsLen++
+	p.connsMu.Unlock()
+	p.freeTurn()
+}
+
+func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+	p.removeConnWithLock(cn)
+	p.freeTurn()
+	_ = p.closeConn(cn)
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+	p.removeConnWithLock(cn)
+	return p.closeConn(cn)
+}
+
+func (p *ConnPool) removeConnWithLock(cn *Conn) {
+	p.connsMu.Lock()
+	p.removeConn(cn)
+	p.connsMu.Unlock()
+}
+
+func (p *ConnPool) removeConn(cn *Conn) {
+	for i, c := range p.conns {
+		if c == cn {
+			p.conns = append(p.conns[:i], p.conns[i+1:]...)
+			if cn.pooled {
+				p.poolSize--
+				p.checkMinIdleConns()
+			}
+			return
+		}
+	}
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+	if p.opt.OnClose != nil {
+		_ = p.opt.OnClose(cn)
+	}
+	return cn.Close()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+	p.connsMu.Lock()
+	n := len(p.conns)
+	p.connsMu.Unlock()
+	return n
+}
+
+// IdleLen returns number of idle connections.
+func (p *ConnPool) IdleLen() int {
+	p.connsMu.Lock()
+	n := p.idleConnsLen
+	p.connsMu.Unlock()
+	return n
+}
+
+func (p *ConnPool) Stats() *Stats {
+	idleLen := p.IdleLen()
+	return &Stats{
+		Hits:     atomic.LoadUint32(&p.stats.Hits),
+		Misses:   atomic.LoadUint32(&p.stats.Misses),
+		Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
+		TotalConns: uint32(p.Len()),
+		IdleConns:  uint32(idleLen),
+		StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
+	}
+}
+
+func (p *ConnPool) closed() bool {
+	return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	var firstErr error
+	for _, cn := range p.conns {
+		if fn(cn) {
+			if err := p.closeConn(cn); err != nil && firstErr == nil {
+				firstErr = err
+			}
+		}
+	}
+	return firstErr
+}
+
+func (p *ConnPool) Close() error {
+	if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+		return ErrClosed
+	}
+	close(p.closedCh)
+
+	var firstErr error
+	p.connsMu.Lock()
+	for _, cn := range p.conns {
+		if err := p.closeConn(cn); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	p.conns = nil
+	p.poolSize = 0
+	p.idleConns = nil
+	p.idleConnsLen = 0
+	p.connsMu.Unlock()
+
+	return firstErr
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+	ticker := time.NewTicker(frequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			// It is possible that the ticker and closedCh fire together and
+			// select pseudo-randomly picks the ticker case, so double-check
+			// here to avoid running after the pool has been closed.
+			if p.closed() {
+				return
+			}
+			_, err := p.ReapStaleConns()
+			if err != nil {
+				internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
+				continue
+			}
+		case <-p.closedCh:
+			return
+		}
+	}
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+	var n int
+	for {
+		p.getTurn()
+
+		p.connsMu.Lock()
+		cn := p.reapStaleConn()
+		p.connsMu.Unlock()
+
+		p.freeTurn()
+
+		if cn != nil {
+			_ = p.closeConn(cn)
+			n++
+		} else {
+			break
+		}
+	}
+	atomic.AddUint32(&p.stats.StaleConns, uint32(n))
+	return n, nil
+}
+
+func (p *ConnPool) reapStaleConn() *Conn {
+	if len(p.idleConns) == 0 {
+		return nil
+	}
+
+	cn := p.idleConns[0]
+	if !p.isStaleConn(cn) {
+		return nil
+	}
+
+	p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
+	p.idleConnsLen--
+	p.removeConn(cn)
+
+	return cn
+}
+
+func (p *ConnPool) isStaleConn(cn *Conn) bool {
+	if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+		return false
+	}
+
+	now := time.Now()
+	if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
+		return true
+	}
+	if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
+		return true
+	}
+
+	return false
+}
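
The pool above gates checkouts with a buffered "turn" channel (waitTurn/freeTurn) plus a reusable timer, which is what turns PoolSize into a hard concurrency cap. Below is a minimal standalone sketch of that pattern; it is not part of this vendored diff and all names in it are illustrative.

package main

import (
	"context"
	"errors"
	"time"
)

var errPoolTimeout = errors.New("pool: timed out waiting for a free turn")

// turnQueue caps how many callers may hold a connection at once.
type turnQueue chan struct{}

// acquire blocks until a slot is free, the context is done, or the timeout fires.
func (q turnQueue) acquire(ctx context.Context, timeout time.Duration) error {
	select {
	case q <- struct{}{}: // fast path: a slot is available right now
		return nil
	default:
	}

	timer := time.NewTimer(timeout)
	defer timer.Stop()

	select {
	case q <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-timer.C:
		return errPoolTimeout
	}
}

// release frees a slot for the next waiter.
func (q turnQueue) release() { <-q }

func main() {
	q := make(turnQueue, 10) // analogous to PoolSize = 10
	if err := q.acquire(context.Background(), time.Second); err != nil {
		panic(err)
	}
	// ... dial or reuse a connection while holding the turn ...
	q.release()
}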

+ 58 - 0
vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go

@@ -0,0 +1,58 @@
+package pool
+
+import "context"
+
+type SingleConnPool struct {
+	pool      Pooler
+	cn        *Conn
+	stickyErr error
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
+	return &SingleConnPool{
+		pool: pool,
+		cn:   cn,
+	}
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+	return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+	return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+	if p.stickyErr != nil {
+		return nil, p.stickyErr
+	}
+	return p.cn, nil
+}
+
+func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
+
+func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+	p.cn = nil
+	p.stickyErr = reason
+}
+
+func (p *SingleConnPool) Close() error {
+	p.cn = nil
+	p.stickyErr = ErrClosed
+	return nil
+}
+
+func (p *SingleConnPool) Len() int {
+	return 0
+}
+
+func (p *SingleConnPool) IdleLen() int {
+	return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+	return &Stats{}
+}

+ 201 - 0
vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go

@@ -0,0 +1,201 @@
+package pool
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync/atomic"
+)
+
+const (
+	stateDefault = 0
+	stateInited  = 1
+	stateClosed  = 2
+)
+
+type BadConnError struct {
+	wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+	s := "redis: Conn is in a bad state"
+	if e.wrapped != nil {
+		s += ": " + e.wrapped.Error()
+	}
+	return s
+}
+
+func (e BadConnError) Unwrap() error {
+	return e.wrapped
+}
+
+//------------------------------------------------------------------------------
+
+type StickyConnPool struct {
+	pool   Pooler
+	shared int32 // atomic
+
+	state uint32 // atomic
+	ch    chan *Conn
+
+	_badConnError atomic.Value
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool Pooler) *StickyConnPool {
+	p, ok := pool.(*StickyConnPool)
+	if !ok {
+		p = &StickyConnPool{
+			pool: pool,
+			ch:   make(chan *Conn, 1),
+		}
+	}
+	atomic.AddInt32(&p.shared, 1)
+	return p
+}
+
+func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
+	return p.pool.NewConn(ctx)
+}
+
+func (p *StickyConnPool) CloseConn(cn *Conn) error {
+	return p.pool.CloseConn(cn)
+}
+
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
+	// In the worst case this races with Close, which is not a very common operation.
+	for i := 0; i < 1000; i++ {
+		switch atomic.LoadUint32(&p.state) {
+		case stateDefault:
+			cn, err := p.pool.Get(ctx)
+			if err != nil {
+				return nil, err
+			}
+			if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+				return cn, nil
+			}
+			p.pool.Remove(ctx, cn, ErrClosed)
+		case stateInited:
+			if err := p.badConnError(); err != nil {
+				return nil, err
+			}
+			cn, ok := <-p.ch
+			if !ok {
+				return nil, ErrClosed
+			}
+			return cn, nil
+		case stateClosed:
+			return nil, ErrClosed
+		default:
+			panic("not reached")
+		}
+	}
+	return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
+}
+
+func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
+	defer func() {
+		if recover() != nil {
+			p.freeConn(ctx, cn)
+		}
+	}()
+	p.ch <- cn
+}
+
+func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
+	if err := p.badConnError(); err != nil {
+		p.pool.Remove(ctx, cn, err)
+	} else {
+		p.pool.Put(ctx, cn)
+	}
+}
+
+func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+	defer func() {
+		if recover() != nil {
+			p.pool.Remove(ctx, cn, ErrClosed)
+		}
+	}()
+	p._badConnError.Store(BadConnError{wrapped: reason})
+	p.ch <- cn
+}
+
+func (p *StickyConnPool) Close() error {
+	if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
+		return nil
+	}
+
+	for i := 0; i < 1000; i++ {
+		state := atomic.LoadUint32(&p.state)
+		if state == stateClosed {
+			return ErrClosed
+		}
+		if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+			close(p.ch)
+			cn, ok := <-p.ch
+			if ok {
+				p.freeConn(context.TODO(), cn)
+			}
+			return nil
+		}
+	}
+
+	return errors.New("redis: StickyConnPool.Close: infinite loop")
+}
+
+func (p *StickyConnPool) Reset(ctx context.Context) error {
+	if p.badConnError() == nil {
+		return nil
+	}
+
+	select {
+	case cn, ok := <-p.ch:
+		if !ok {
+			return ErrClosed
+		}
+		p.pool.Remove(ctx, cn, ErrClosed)
+		p._badConnError.Store(BadConnError{wrapped: nil})
+	default:
+		return errors.New("redis: StickyConnPool does not have a Conn")
+	}
+
+	if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+		state := atomic.LoadUint32(&p.state)
+		return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
+	}
+
+	return nil
+}
+
+func (p *StickyConnPool) badConnError() error {
+	if v := p._badConnError.Load(); v != nil {
+		if err := v.(BadConnError); err.wrapped != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *StickyConnPool) Len() int {
+	switch atomic.LoadUint32(&p.state) {
+	case stateDefault:
+		return 0
+	case stateInited:
+		return 1
+	case stateClosed:
+		return 0
+	default:
+		panic("not reached")
+	}
+}
+
+func (p *StickyConnPool) IdleLen() int {
+	return len(p.ch)
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+	return &Stats{}
+}
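
StickyConnPool above parks one connection in a single-slot channel so that transactional usage keeps hitting the same connection, and Put/Remove use recover() to survive sends on a closed channel. The following reduced sketch of the single-slot hand-off idea is illustrative only and does not use the library's types.

package main

import (
	"errors"
	"fmt"
)

var errClosed = errors.New("sticky: closed")

type conn struct{ id int }

// sticky hands a single connection back and forth through a 1-slot channel,
// so every get after a put sees the same connection.
type sticky struct{ ch chan *conn }

func newSticky(cn *conn) *sticky {
	s := &sticky{ch: make(chan *conn, 1)}
	s.ch <- cn
	return s
}

func (s *sticky) get() (*conn, error) {
	cn, ok := <-s.ch
	if !ok {
		return nil, errClosed
	}
	return cn, nil
}

func (s *sticky) put(cn *conn) { s.ch <- cn }

func main() {
	s := newSticky(&conn{id: 1})
	cn, _ := s.get()
	fmt.Println("using conn", cn.id)
	s.put(cn) // the next get returns the very same connection
}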

+ 332 - 0
vendor/github.com/go-redis/redis/v8/internal/proto/reader.go

@@ -0,0 +1,332 @@
+package proto
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+// Redis RESP protocol data types.
+const (
+	ErrorReply  = '-'
+	StatusReply = '+'
+	IntReply    = ':'
+	StringReply = '$'
+	ArrayReply  = '*'
+)
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil") // nolint:errname
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+//------------------------------------------------------------------------------
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+	rd   *bufio.Reader
+	_buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+	return &Reader{
+		rd:   bufio.NewReader(rd),
+		_buf: make([]byte, 64),
+	}
+}
+
+func (r *Reader) Buffered() int {
+	return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+	return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+	r.rd.Reset(rd)
+}
+
+func (r *Reader) ReadLine() ([]byte, error) {
+	line, err := r.readLine()
+	if err != nil {
+		return nil, err
+	}
+	if isNilReply(line) {
+		return nil, Nil
+	}
+	return line, nil
+}
+
+// readLine returns an error if:
+//   - there is a pending read error;
+//   - or the line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+	b, err := r.rd.ReadSlice('\n')
+	if err != nil {
+		if err != bufio.ErrBufferFull {
+			return nil, err
+		}
+
+		full := make([]byte, len(b))
+		copy(full, b)
+
+		b, err = r.rd.ReadBytes('\n')
+		if err != nil {
+			return nil, err
+		}
+
+		full = append(full, b...) //nolint:makezero
+		b = full
+	}
+	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+		return nil, fmt.Errorf("redis: invalid reply: %q", b)
+	}
+	return b[:len(b)-2], nil
+}
+
+func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case StatusReply:
+		return string(line[1:]), nil
+	case IntReply:
+		return util.ParseInt(line[1:], 10, 64)
+	case StringReply:
+		return r.readStringReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return nil, err
+		}
+		if m == nil {
+			err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
+			return nil, err
+		}
+		return m(r, n)
+	}
+	return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) ReadIntReply() (int64, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return 0, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return 0, ParseErrorReply(line)
+	case IntReply:
+		return util.ParseInt(line[1:], 10, 64)
+	default:
+		return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadString() (string, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return "", err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return "", ParseErrorReply(line)
+	case StringReply:
+		return r.readStringReply(line)
+	case StatusReply:
+		return string(line[1:]), nil
+	case IntReply:
+		return string(line[1:]), nil
+	default:
+		return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+	}
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+	if isNilReply(line) {
+		return "", Nil
+	}
+
+	replyLen, err := util.Atoi(line[1:])
+	if err != nil {
+		return "", err
+	}
+
+	b := make([]byte, replyLen+2)
+	_, err = io.ReadFull(r.rd, b)
+	if err != nil {
+		return "", err
+	}
+
+	return util.BytesToString(b[:replyLen]), nil
+}
+
+func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return nil, err
+		}
+		return m(r, n)
+	default:
+		return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadArrayLen() (int, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return 0, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return 0, ParseErrorReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return 0, err
+		}
+		return int(n), nil
+	default:
+		return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadScanReply() ([]string, uint64, error) {
+	n, err := r.ReadArrayLen()
+	if err != nil {
+		return nil, 0, err
+	}
+	if n != 2 {
+		return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+	}
+
+	cursor, err := r.ReadUint()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	n, err = r.ReadArrayLen()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	keys := make([]string, n)
+
+	for i := 0; i < n; i++ {
+		key, err := r.ReadString()
+		if err != nil {
+			return nil, 0, err
+		}
+		keys[i] = key
+	}
+
+	return keys, cursor, err
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseUint(b, 10, 64)
+}
+
+func (r *Reader) ReadFloatReply() (float64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseFloat(b, 64)
+}
+
+func (r *Reader) readTmpBytesReply() ([]byte, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case StringReply:
+		return r._readTmpBytesReply(line)
+	case StatusReply:
+		return line[1:], nil
+	default:
+		return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+	}
+}
+
+func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
+	if isNilReply(line) {
+		return nil, Nil
+	}
+
+	replyLen, err := util.Atoi(line[1:])
+	if err != nil {
+		return nil, err
+	}
+
+	buf := r.buf(replyLen + 2)
+	_, err = io.ReadFull(r.rd, buf)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf[:replyLen], nil
+}
+
+func (r *Reader) buf(n int) []byte {
+	if n <= cap(r._buf) {
+		return r._buf[:n]
+	}
+	d := n - cap(r._buf)
+	r._buf = append(r._buf, make([]byte, d)...)
+	return r._buf
+}
+
+func isNilReply(b []byte) bool {
+	return len(b) == 3 &&
+		(b[0] == StringReply || b[0] == ArrayReply) &&
+		b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+	return RedisError(string(line[1:]))
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+	if isNilReply(line) {
+		return 0, Nil
+	}
+	return util.ParseInt(line[1:], 10, 64)
+}
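
The reader above dispatches on the first byte of each line ('+', '-', ':', '$', '*') to decide how to parse a reply. The proto package is internal and cannot be imported from application code, so the sketch below re-implements only the bulk-string case by hand to show the framing; it is illustrative, not the library's parser.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// readBulkString parses a RESP bulk string such as "$5\r\nhello\r\n".
func readBulkString(rd *bufio.Reader) (string, error) {
	line, err := rd.ReadString('\n')
	if err != nil {
		return "", err
	}
	line = strings.TrimSuffix(line, "\r\n")
	if len(line) == 0 || line[0] != '$' {
		return "", fmt.Errorf("not a bulk string: %q", line)
	}
	n, err := strconv.Atoi(line[1:])
	if err != nil {
		return "", err
	}
	if n == -1 {
		return "", fmt.Errorf("nil reply") // the library returns proto.Nil here
	}
	buf := make([]byte, n+2) // payload plus trailing \r\n
	if _, err := io.ReadFull(rd, buf); err != nil {
		return "", err
	}
	return string(buf[:n]), nil
}

func main() {
	rd := bufio.NewReader(bytes.NewReader([]byte("$5\r\nhello\r\n")))
	s, err := readBulkString(rd)
	fmt.Println(s, err) // hello <nil>
}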

+ 180 - 0
vendor/github.com/go-redis/redis/v8/internal/proto/scan.go

@@ -0,0 +1,180 @@
+package proto
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+// Scan parses bytes `b` into `v` using the appropriate type.
+//nolint:gocyclo
+func Scan(b []byte, v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return fmt.Errorf("redis: Scan(nil)")
+	case *string:
+		*v = util.BytesToString(b)
+		return nil
+	case *[]byte:
+		*v = b
+		return nil
+	case *int:
+		var err error
+		*v, err = util.Atoi(b)
+		return err
+	case *int8:
+		n, err := util.ParseInt(b, 10, 8)
+		if err != nil {
+			return err
+		}
+		*v = int8(n)
+		return nil
+	case *int16:
+		n, err := util.ParseInt(b, 10, 16)
+		if err != nil {
+			return err
+		}
+		*v = int16(n)
+		return nil
+	case *int32:
+		n, err := util.ParseInt(b, 10, 32)
+		if err != nil {
+			return err
+		}
+		*v = int32(n)
+		return nil
+	case *int64:
+		n, err := util.ParseInt(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = n
+		return nil
+	case *uint:
+		n, err := util.ParseUint(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = uint(n)
+		return nil
+	case *uint8:
+		n, err := util.ParseUint(b, 10, 8)
+		if err != nil {
+			return err
+		}
+		*v = uint8(n)
+		return nil
+	case *uint16:
+		n, err := util.ParseUint(b, 10, 16)
+		if err != nil {
+			return err
+		}
+		*v = uint16(n)
+		return nil
+	case *uint32:
+		n, err := util.ParseUint(b, 10, 32)
+		if err != nil {
+			return err
+		}
+		*v = uint32(n)
+		return nil
+	case *uint64:
+		n, err := util.ParseUint(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = n
+		return nil
+	case *float32:
+		n, err := util.ParseFloat(b, 32)
+		if err != nil {
+			return err
+		}
+		*v = float32(n)
+		return err
+	case *float64:
+		var err error
+		*v, err = util.ParseFloat(b, 64)
+		return err
+	case *bool:
+		*v = len(b) == 1 && b[0] == '1'
+		return nil
+	case *time.Time:
+		var err error
+		*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
+		return err
+	case *time.Duration:
+		n, err := util.ParseInt(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = time.Duration(n)
+		return nil
+	case encoding.BinaryUnmarshaler:
+		return v.UnmarshalBinary(b)
+	default:
+		return fmt.Errorf(
+			"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+	}
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+	v := reflect.ValueOf(slice)
+	if !v.IsValid() {
+		return fmt.Errorf("redis: ScanSlice(nil)")
+	}
+	if v.Kind() != reflect.Ptr {
+		return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Slice {
+		return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+	}
+
+	next := makeSliceNextElemFunc(v)
+	for i, s := range data {
+		elem := next()
+		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
+	elemType := v.Type().Elem()
+
+	if elemType.Kind() == reflect.Ptr {
+		elemType = elemType.Elem()
+		return func() reflect.Value {
+			if v.Len() < v.Cap() {
+				v.Set(v.Slice(0, v.Len()+1))
+				elem := v.Index(v.Len() - 1)
+				if elem.IsNil() {
+					elem.Set(reflect.New(elemType))
+				}
+				return elem.Elem()
+			}
+
+			elem := reflect.New(elemType)
+			v.Set(reflect.Append(v, elem))
+			return elem.Elem()
+		}
+	}
+
+	zero := reflect.Zero(elemType)
+	return func() reflect.Value {
+		if v.Len() < v.Cap() {
+			v.Set(v.Slice(0, v.Len()+1))
+			return v.Index(v.Len() - 1)
+		}
+
+		v.Set(reflect.Append(v, zero))
+		return v.Index(v.Len() - 1)
+	}
+}
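
This internal Scan is what backs the public Cmd.Scan helpers, which convert a raw reply into a Go value. A hedged usage sketch through the public v8 API follows; it assumes a Redis server reachable at localhost:6379 and an illustrative key name.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	if err := rdb.Set(ctx, "visits", 42, 0).Err(); err != nil {
		panic(err)
	}

	var visits int
	// StringCmd.Scan uses the byte-to-type conversion shown above.
	if err := rdb.Get(ctx, "visits").Scan(&visits); err != nil {
		panic(err)
	}
	fmt.Println(visits) // 42
}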

+ 155 - 0
vendor/github.com/go-redis/redis/v8/internal/proto/writer.go

@@ -0,0 +1,155 @@
+package proto
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+type writer interface {
+	io.Writer
+	io.ByteWriter
+	// io.StringWriter
+	WriteString(s string) (n int, err error)
+}
+
+type Writer struct {
+	writer
+
+	lenBuf []byte
+	numBuf []byte
+}
+
+func NewWriter(wr writer) *Writer {
+	return &Writer{
+		writer: wr,
+
+		lenBuf: make([]byte, 64),
+		numBuf: make([]byte, 64),
+	}
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+	if err := w.WriteByte(ArrayReply); err != nil {
+		return err
+	}
+
+	if err := w.writeLen(len(args)); err != nil {
+		return err
+	}
+
+	for _, arg := range args {
+		if err := w.WriteArg(arg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (w *Writer) writeLen(n int) error {
+	w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+	w.lenBuf = append(w.lenBuf, '\r', '\n')
+	_, err := w.Write(w.lenBuf)
+	return err
+}
+
+func (w *Writer) WriteArg(v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return w.string("")
+	case string:
+		return w.string(v)
+	case []byte:
+		return w.bytes(v)
+	case int:
+		return w.int(int64(v))
+	case int8:
+		return w.int(int64(v))
+	case int16:
+		return w.int(int64(v))
+	case int32:
+		return w.int(int64(v))
+	case int64:
+		return w.int(v)
+	case uint:
+		return w.uint(uint64(v))
+	case uint8:
+		return w.uint(uint64(v))
+	case uint16:
+		return w.uint(uint64(v))
+	case uint32:
+		return w.uint(uint64(v))
+	case uint64:
+		return w.uint(v)
+	case float32:
+		return w.float(float64(v))
+	case float64:
+		return w.float(v)
+	case bool:
+		if v {
+			return w.int(1)
+		}
+		return w.int(0)
+	case time.Time:
+		w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+		return w.bytes(w.numBuf)
+	case time.Duration:
+		return w.int(v.Nanoseconds())
+	case encoding.BinaryMarshaler:
+		b, err := v.MarshalBinary()
+		if err != nil {
+			return err
+		}
+		return w.bytes(b)
+	default:
+		return fmt.Errorf(
+			"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+	}
+}
+
+func (w *Writer) bytes(b []byte) error {
+	if err := w.WriteByte(StringReply); err != nil {
+		return err
+	}
+
+	if err := w.writeLen(len(b)); err != nil {
+		return err
+	}
+
+	if _, err := w.Write(b); err != nil {
+		return err
+	}
+
+	return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+	return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+	w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+	w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+	w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+	if err := w.WriteByte('\r'); err != nil {
+		return err
+	}
+	return w.WriteByte('\n')
+}
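
The writer above serialises every command as a RESP array of bulk strings. The package is internal, but the wire format itself is simple; the hand-rolled sketch below (illustrative only) produces the same bytes WriteArgs would emit for SET key value.

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// encodeCommand renders args as a RESP array of bulk strings, e.g.
// ["SET","key","value"] -> "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n".
func encodeCommand(args ...string) []byte {
	var buf bytes.Buffer
	buf.WriteByte('*')
	buf.WriteString(strconv.Itoa(len(args)))
	buf.WriteString("\r\n")
	for _, a := range args {
		buf.WriteByte('$')
		buf.WriteString(strconv.Itoa(len(a)))
		buf.WriteString("\r\n")
		buf.WriteString(a)
		buf.WriteString("\r\n")
	}
	return buf.Bytes()
}

func main() {
	fmt.Printf("%q\n", encodeCommand("SET", "key", "value"))
}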

+ 50 - 0
vendor/github.com/go-redis/redis/v8/internal/rand/rand.go

@@ -0,0 +1,50 @@
+package rand
+
+import (
+	"math/rand"
+	"sync"
+)
+
+// Int returns a non-negative pseudo-random int.
+func Int() int { return pseudo.Int() }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Intn(n int) int { return pseudo.Intn(n) }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return pseudo.Int63n(n) }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func Perm(n int) []int { return pseudo.Perm(n) }
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as if
+// seeded by Seed(1).
+func Seed(n int64) { pseudo.Seed(n) }
+
+var pseudo = rand.New(&source{src: rand.NewSource(1)})
+
+type source struct {
+	src rand.Source
+	mu  sync.Mutex
+}
+
+func (s *source) Int63() int64 {
+	s.mu.Lock()
+	n := s.src.Int63()
+	s.mu.Unlock()
+	return n
+}
+
+func (s *source) Seed(seed int64) {
+	s.mu.Lock()
+	s.src.Seed(seed)
+	s.mu.Unlock()
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }

+ 12 - 0
vendor/github.com/go-redis/redis/v8/internal/safe.go

@@ -0,0 +1,12 @@
+//go:build appengine
+// +build appengine
+
+package internal
+
+func String(b []byte) string {
+	return string(b)
+}
+
+func Bytes(s string) []byte {
+	return []byte(s)
+}

+ 21 - 0
vendor/github.com/go-redis/redis/v8/internal/unsafe.go

@@ -0,0 +1,21 @@
+//go:build !appengine
+// +build !appengine
+
+package internal
+
+import "unsafe"
+
+// String converts byte slice to string.
+func String(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+// Bytes converts string to byte slice.
+func Bytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(
+		&struct {
+			string
+			Cap int
+		}{s, len(s)},
+	))
+}

+ 46 - 0
vendor/github.com/go-redis/redis/v8/internal/util.go

@@ -0,0 +1,46 @@
+package internal
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+func ToLower(s string) string {
+	if isLower(s) {
+		return s
+	}
+
+	b := make([]byte, len(s))
+	for i := range b {
+		c := s[i]
+		if c >= 'A' && c <= 'Z' {
+			c += 'a' - 'A'
+		}
+		b[i] = c
+	}
+	return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= 'A' && c <= 'Z' {
+			return false
+		}
+	}
+	return true
+}

+ 12 - 0
vendor/github.com/go-redis/redis/v8/internal/util/safe.go

@@ -0,0 +1,12 @@
+//go:build appengine
+// +build appengine
+
+package util
+
+func BytesToString(b []byte) string {
+	return string(b)
+}
+
+func StringToBytes(s string) []byte {
+	return []byte(s)
+}

+ 19 - 0
vendor/github.com/go-redis/redis/v8/internal/util/strconv.go

@@ -0,0 +1,19 @@
+package util
+
+import "strconv"
+
+func Atoi(b []byte) (int, error) {
+	return strconv.Atoi(BytesToString(b))
+}
+
+func ParseInt(b []byte, base int, bitSize int) (int64, error) {
+	return strconv.ParseInt(BytesToString(b), base, bitSize)
+}
+
+func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
+	return strconv.ParseUint(BytesToString(b), base, bitSize)
+}
+
+func ParseFloat(b []byte, bitSize int) (float64, error) {
+	return strconv.ParseFloat(BytesToString(b), bitSize)
+}

+ 23 - 0
vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go

@@ -0,0 +1,23 @@
+//go:build !appengine
+// +build !appengine
+
+package util
+
+import (
+	"unsafe"
+)
+
+// BytesToString converts byte slice to string.
+func BytesToString(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(
+		&struct {
+			string
+			Cap int
+		}{s, len(s)},
+	))
+}
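
BytesToString and StringToBytes above avoid allocations by reinterpreting the same memory, so the caller must never mutate the bytes while the converted string is in use. A small sketch contrasting that contract with the safe, copying conversion:

package main

import "fmt"

func main() {
	b := []byte("hello")

	// Safe conversion: string(b) copies, so later writes to b do not change s.
	s := string(b)
	b[0] = 'H'
	fmt.Println(s, string(b)) // hello Hello

	// The zero-copy BytesToString above skips that copy; it is only safe when
	// b is never modified for as long as the resulting string is alive.
}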

+ 77 - 0
vendor/github.com/go-redis/redis/v8/iterator.go

@@ -0,0 +1,77 @@
+package redis
+
+import (
+	"context"
+	"sync"
+)
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+	mu  sync.Mutex // protects Scanner and pos
+	cmd *ScanCmd
+	pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+	it.mu.Lock()
+	err := it.cmd.Err()
+	it.mu.Unlock()
+	return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next(ctx context.Context) bool {
+	it.mu.Lock()
+	defer it.mu.Unlock()
+
+	// Return immediately on errors.
+	if it.cmd.Err() != nil {
+		return false
+	}
+
+	// Advance cursor, check if we are still within range.
+	if it.pos < len(it.cmd.page) {
+		it.pos++
+		return true
+	}
+
+	for {
+		// Return if there is no more data to fetch.
+		if it.cmd.cursor == 0 {
+			return false
+		}
+
+		// Fetch next page.
+		switch it.cmd.args[0] {
+		case "scan", "qscan":
+			it.cmd.args[1] = it.cmd.cursor
+		default:
+			it.cmd.args[2] = it.cmd.cursor
+		}
+
+		err := it.cmd.process(ctx, it.cmd)
+		if err != nil {
+			return false
+		}
+
+		it.pos = 1
+
+		// Redis can occasionally return an empty page.
+		if len(it.cmd.page) > 0 {
+			return true
+		}
+	}
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+	var v string
+	it.mu.Lock()
+	if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+		v = it.cmd.page[it.pos-1]
+	}
+	it.mu.Unlock()
+	return v
+}
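
A ScanIterator is obtained from ScanCmd.Iterator and drives the cursor loop above one page at a time. A hedged usage sketch follows; it assumes a reachable Redis server and keys matching the illustrative pattern "user:*".

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// Iterator fetches SCAN pages lazily as Next advances the cursor.
	iter := rdb.Scan(ctx, 0, "user:*", 100).Iterator()
	for iter.Next(ctx) {
		fmt.Println("key:", iter.Val())
	}
	if err := iter.Err(); err != nil {
		panic(err)
	}
}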

+ 429 - 0
vendor/github.com/go-redis/redis/v8/options.go

@@ -0,0 +1,429 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+)
+
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+	// Allow returns nil if the operation is allowed, or an error otherwise.
+	// If the operation is allowed, the client must call ReportResult with the
+	// outcome of the operation, whether it is a success or a failure.
+	Allow() error
+	// ReportResult reports the result of the previously allowed operation.
+	// nil indicates a success, non-nil error usually indicates a failure.
+	ReportResult(result error)
+}
+
+// Options keeps the settings to set up a Redis connection.
+type Options struct {
+	// The network type, either tcp or unix.
+	// Default is tcp.
+	Network string
+	// host:port address.
+	Addr string
+
+	// Dialer creates a new network connection and has priority over
+	// the Network and Addr options.
+	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+	// Hook that is called when a new connection is established.
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	// Use the specified Username to authenticate the current connection
+	// with one of the connections defined in the ACL list when connecting
+	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+	Username string
+	// Optional password. Must match the password specified in the
+	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
+	// or the User Password when connecting to a Redis 6.0 instance, or greater,
+	// that is using the Redis ACL system.
+	Password string
+
+	// Database to be selected after connecting to the server.
+	DB int
+
+	// Maximum number of retries before giving up.
+	// Default is 3 retries; -1 (not 0) disables retries.
+	MaxRetries int
+	// Minimum backoff between each retry.
+	// Default is 8 milliseconds; -1 disables backoff.
+	MinRetryBackoff time.Duration
+	// Maximum backoff between each retry.
+	// Default is 512 milliseconds; -1 disables backoff.
+	MaxRetryBackoff time.Duration
+
+	// Dial timeout for establishing new connections.
+	// Default is 5 seconds.
+	DialTimeout time.Duration
+	// Timeout for socket reads. If reached, commands will fail
+	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
+	// Default is 3 seconds.
+	ReadTimeout time.Duration
+	// Timeout for socket writes. If reached, commands will fail
+	// with a timeout instead of blocking.
+	// Default is ReadTimeout.
+	WriteTimeout time.Duration
+
+	// Type of connection pool.
+	// true for FIFO pool, false for LIFO pool.
+	// Note that fifo has higher overhead compared to lifo.
+	PoolFIFO bool
+	// Maximum number of socket connections.
+	// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
+	PoolSize int
+	// Minimum number of idle connections which is useful when establishing
+	// new connection is slow.
+	MinIdleConns int
+	// Connection age at which client retires (closes) the connection.
+	// Default is to not close aged connections.
+	MaxConnAge time.Duration
+	// Amount of time client waits for connection if all connections
+	// are busy before returning an error.
+	// Default is ReadTimeout + 1 second.
+	PoolTimeout time.Duration
+	// Amount of time after which client closes idle connections.
+	// Should be less than server's timeout.
+	// Default is 5 minutes. -1 disables idle timeout check.
+	IdleTimeout time.Duration
+	// Frequency of idle checks made by idle connections reaper.
+	// Default is 1 minute. -1 disables idle connections reaper,
+	// but idle connections are still discarded by the client
+	// if IdleTimeout is set.
+	IdleCheckFrequency time.Duration
+
+	// Enables read only queries on slave nodes.
+	readOnly bool
+
+	// TLS Config to use. When set TLS will be negotiated.
+	TLSConfig *tls.Config
+
+	// Limiter interface used to implement a circuit breaker or rate limiter.
+	Limiter Limiter
+}
+
+func (opt *Options) init() {
+	if opt.Addr == "" {
+		opt.Addr = "localhost:6379"
+	}
+	if opt.Network == "" {
+		if strings.HasPrefix(opt.Addr, "/") {
+			opt.Network = "unix"
+		} else {
+			opt.Network = "tcp"
+		}
+	}
+	if opt.DialTimeout == 0 {
+		opt.DialTimeout = 5 * time.Second
+	}
+	if opt.Dialer == nil {
+		opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
+			netDialer := &net.Dialer{
+				Timeout:   opt.DialTimeout,
+				KeepAlive: 5 * time.Minute,
+			}
+			if opt.TLSConfig == nil {
+				return netDialer.DialContext(ctx, network, addr)
+			}
+			return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+		}
+	}
+	if opt.PoolSize == 0 {
+		opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
+	}
+	switch opt.ReadTimeout {
+	case -1:
+		opt.ReadTimeout = 0
+	case 0:
+		opt.ReadTimeout = 3 * time.Second
+	}
+	switch opt.WriteTimeout {
+	case -1:
+		opt.WriteTimeout = 0
+	case 0:
+		opt.WriteTimeout = opt.ReadTimeout
+	}
+	if opt.PoolTimeout == 0 {
+		opt.PoolTimeout = opt.ReadTimeout + time.Second
+	}
+	if opt.IdleTimeout == 0 {
+		opt.IdleTimeout = 5 * time.Minute
+	}
+	if opt.IdleCheckFrequency == 0 {
+		opt.IdleCheckFrequency = time.Minute
+	}
+
+	if opt.MaxRetries == -1 {
+		opt.MaxRetries = 0
+	} else if opt.MaxRetries == 0 {
+		opt.MaxRetries = 3
+	}
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+}
+
+func (opt *Options) clone() *Options {
+	clone := *opt
+	return &clone
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+// Scheme is required.
+// There are two connection types: by tcp socket and by unix socket.
+// Tcp connection:
+//		redis://<user>:<password>@<host>:<port>/<db_number>
+// Unix connection:
+//		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+// Most Option fields can be set using query parameters, with the following restrictions:
+//	- field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+//	- only scalar type fields are supported (bool, int, time.Duration)
+//	- for time.Duration fields, values must be a valid input for time.ParseDuration();
+//	  additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+//	- to disable a duration field, use value less than or equal to 0; to use the default
+//	  value, leave the value blank or remove the parameter
+//	- only the last value is interpreted if a parameter is given multiple times
+//	- fields "network", "addr", "username" and "password" can only be set using other
+//	  URL attributes (scheme, host, userinfo, resp.), query paremeters using these
+//	  URL attributes (scheme, host, userinfo, resp.), query parameters using these
+//	- unknown parameter names will result in an error
+// Examples:
+//		redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+//		is equivalent to:
+//		&Options{
+//			Network:     "tcp",
+//			Addr:        "localhost:6789",
+//			DB:          1,               // path "/3" was overridden by "&db=1"
+//			DialTimeout: 3 * time.Second, // no time unit = seconds
+//			ReadTimeout: 6 * time.Second,
+//			MaxRetries:  2,
+//		}
+func ParseURL(redisURL string) (*Options, error) {
+	u, err := url.Parse(redisURL)
+	if err != nil {
+		return nil, err
+	}
+
+	switch u.Scheme {
+	case "redis", "rediss":
+		return setupTCPConn(u)
+	case "unix":
+		return setupUnixConn(u)
+	default:
+		return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+	}
+}
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+	o := &Options{Network: "tcp"}
+
+	o.Username, o.Password = getUserPassword(u)
+
+	h, p, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		h = u.Host
+	}
+	if h == "" {
+		h = "localhost"
+	}
+	if p == "" {
+		p = "6379"
+	}
+	o.Addr = net.JoinHostPort(h, p)
+
+	f := strings.FieldsFunc(u.Path, func(r rune) bool {
+		return r == '/'
+	})
+	switch len(f) {
+	case 0:
+		o.DB = 0
+	case 1:
+		if o.DB, err = strconv.Atoi(f[0]); err != nil {
+			return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+		}
+	default:
+		return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+	}
+
+	if u.Scheme == "rediss" {
+		o.TLSConfig = &tls.Config{ServerName: h}
+	}
+
+	return setupConnParams(u, o)
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+	o := &Options{
+		Network: "unix",
+	}
+
+	if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+		return nil, errors.New("redis: empty unix socket path")
+	}
+	o.Addr = u.Path
+	o.Username, o.Password = getUserPassword(u)
+	return setupConnParams(u, o)
+}
+
+type queryOptions struct {
+	q   url.Values
+	err error
+}
+
+func (o *queryOptions) string(name string) string {
+	vs := o.q[name]
+	if len(vs) == 0 {
+		return ""
+	}
+	delete(o.q, name) // enable detection of unknown parameters
+	return vs[len(vs)-1]
+}
+
+func (o *queryOptions) int(name string) int {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	i, err := strconv.Atoi(s)
+	if err == nil {
+		return i
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	// try plain number first
+	if i, err := strconv.Atoi(s); err == nil {
+		if i <= 0 {
+			// disable timeouts
+			return -1
+		}
+		return time.Duration(i) * time.Second
+	}
+	dur, err := time.ParseDuration(s)
+	if err == nil {
+		return dur
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+	switch s := o.string(name); s {
+	case "true", "1":
+		return true
+	case "false", "0", "":
+		return false
+	default:
+		if o.err == nil {
+			o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+		}
+		return false
+	}
+}
+
+func (o *queryOptions) remaining() []string {
+	if len(o.q) == 0 {
+		return nil
+	}
+	keys := make([]string, 0, len(o.q))
+	for k := range o.q {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// setupConnParams converts query parameters in u to option value in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+	q := queryOptions{q: u.Query()}
+
+	// compat: a future major release may use q.int("db")
+	if tmp := q.string("db"); tmp != "" {
+		db, err := strconv.Atoi(tmp)
+		if err != nil {
+			return nil, fmt.Errorf("redis: invalid database number: %w", err)
+		}
+		o.DB = db
+	}
+
+	o.MaxRetries = q.int("max_retries")
+	o.MinRetryBackoff = q.duration("min_retry_backoff")
+	o.MaxRetryBackoff = q.duration("max_retry_backoff")
+	o.DialTimeout = q.duration("dial_timeout")
+	o.ReadTimeout = q.duration("read_timeout")
+	o.WriteTimeout = q.duration("write_timeout")
+	o.PoolFIFO = q.bool("pool_fifo")
+	o.PoolSize = q.int("pool_size")
+	o.MinIdleConns = q.int("min_idle_conns")
+	o.MaxConnAge = q.duration("max_conn_age")
+	o.PoolTimeout = q.duration("pool_timeout")
+	o.IdleTimeout = q.duration("idle_timeout")
+	o.IdleCheckFrequency = q.duration("idle_check_frequency")
+	if q.err != nil {
+		return nil, q.err
+	}
+
+	// any parameters left?
+	if r := q.remaining(); len(r) > 0 {
+		return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+	}
+
+	return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+	var user, password string
+	if u.User != nil {
+		user = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			password = p
+		}
+	}
+	return user, password
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+	return pool.NewConnPool(&pool.Options{
+		Dialer: func(ctx context.Context) (net.Conn, error) {
+			return opt.Dialer(ctx, opt.Network, opt.Addr)
+		},
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+	})
+}
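
ParseURL above accepts redis://, rediss:// and unix:// URLs plus the snake_case query parameters listed in its comment, and the resulting Options can be passed straight to NewClient. A short usage sketch; the URL and its values are illustrative.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	opt, err := redis.ParseURL("redis://user:secret@localhost:6379/2?dial_timeout=3&read_timeout=6s")
	if err != nil {
		panic(err)
	}
	fmt.Println(opt.Addr, opt.DB, opt.DialTimeout, opt.ReadTimeout)
	// localhost:6379 2 3s 6s

	rdb := redis.NewClient(opt) // the parsed options plug straight into NewClient
	defer rdb.Close()
}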

+ 8 - 0
vendor/github.com/go-redis/redis/v8/package.json

@@ -0,0 +1,8 @@
+{
+  "name": "redis",
+  "version": "8.11.5",
+  "main": "index.js",
+  "repository": "git@github.com:go-redis/redis.git",
+  "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
+  "license": "BSD-2-clause"
+}

+ 147 - 0
vendor/github.com/go-redis/redis/v8/pipeline.go

@@ -0,0 +1,147 @@
+package redis
+
+import (
+	"context"
+	"sync"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism to realise the Redis Pipeline technique.
+//
+// Pipelining is a technique that greatly speeds up processing by packing
+// operations into batches, sending them to Redis at once, and reading the
+// replies in a single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that Pipeline is not a transaction, so you can get unexpected
+// results with big pipelines and small read/write timeouts.
+// The Redis client retries on timeouts, so a pipeline can be retransmitted
+// and commands can be executed more than once.
+// To avoid this, use read/write timeouts large enough for your batch size,
+// and/or use TxPipeline.
+type Pipeliner interface {
+	StatefulCmdable
+	Len() int
+	Do(ctx context.Context, args ...interface{}) *Cmd
+	Process(ctx context.Context, cmd Cmder) error
+	Close() error
+	Discard() error
+	Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+	cmdable
+	statefulCmdable
+
+	ctx  context.Context
+	exec pipelineExecer
+
+	mu     sync.Mutex
+	cmds   []Cmder
+	closed bool
+}
+
+func (c *Pipeline) init() {
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+}
+
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+	c.mu.Lock()
+	ln := len(c.cmds)
+	c.mu.Unlock()
+	return ln
+}
+
+// Do queues the custom command for later execution.
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+	c.mu.Lock()
+	c.cmds = append(c.cmds, cmd)
+	c.mu.Unlock()
+	return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+	c.mu.Lock()
+	_ = c.discard()
+	c.closed = true
+	c.mu.Unlock()
+	return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+	c.mu.Lock()
+	err := c.discard()
+	c.mu.Unlock()
+	return err
+}
+
+func (c *Pipeline) discard() error {
+	if c.closed {
+		return pool.ErrClosed
+	}
+	c.cmds = c.cmds[:0]
+	return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	if len(c.cmds) == 0 {
+		return nil, nil
+	}
+
+	cmds := c.cmds
+	c.cmds = nil
+
+	return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	if err := fn(c); err != nil {
+		return nil, err
+	}
+	cmds, err := c.Exec(ctx)
+	_ = c.Close()
+	return cmds, err
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+	return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+	return c
+}
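
Pipeline queues commands locally and flushes them in one round trip when Exec runs; most callers use the Pipelined helper on a client rather than managing the pipeline by hand. A hedged usage sketch follows, assuming a reachable Redis server and an illustrative key name.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	var incr *redis.IntCmd
	// Everything queued inside the closure is sent in a single round trip by Exec.
	_, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		incr = pipe.Incr(ctx, "pipeline_counter")
		pipe.Expire(ctx, "pipeline_counter", time.Hour)
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(incr.Val()) // the command result is populated after Exec runs
}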

+ 668 - 0
vendor/github.com/go-redis/redis/v8/pubsub.go

@@ -0,0 +1,668 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to Redis Server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+	opt *Options
+
+	newConn   func(ctx context.Context, channels []string) (*pool.Conn, error)
+	closeConn func(*pool.Conn) error
+
+	mu       sync.Mutex
+	cn       *pool.Conn
+	channels map[string]struct{}
+	patterns map[string]struct{}
+
+	closed bool
+	exit   chan struct{}
+
+	cmd *Cmd
+
+	chOnce sync.Once
+	msgCh  *channel
+	allCh  *channel
+}
+
+func (c *PubSub) init() {
+	c.exit = make(chan struct{})
+}
+
+func (c *PubSub) String() string {
+	channels := mapKeys(c.channels)
+	channels = append(channels, mapKeys(c.patterns)...)
+	return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
+	c.mu.Lock()
+	cn, err := c.conn(ctx, nil)
+	c.mu.Unlock()
+	return cn, err
+}
+
+func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+	if c.cn != nil {
+		return c.cn, nil
+	}
+
+	channels := mapKeys(c.channels)
+	channels = append(channels, newChannels...)
+
+	cn, err := c.newConn(ctx, channels)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := c.resubscribe(ctx, cn); err != nil {
+		_ = c.closeConn(cn)
+		return nil, err
+	}
+
+	c.cn = cn
+	return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+	return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmd(wr, cmd)
+	})
+}
+
+func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
+	var firstErr error
+
+	if len(c.channels) > 0 {
+		firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
+	}
+
+	if len(c.patterns) > 0 {
+		err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+	s := make([]string, len(m))
+	i := 0
+	for k := range m {
+		s[i] = k
+		i++
+	}
+	return s
+}
+
+func (c *PubSub) _subscribe(
+	ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
+) error {
+	args := make([]interface{}, 0, 1+len(channels))
+	args = append(args, redisCmd)
+	for _, channel := range channels {
+		args = append(args, channel)
+	}
+	cmd := NewSliceCmd(ctx, args...)
+	return c.writeCmd(ctx, cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(
+	ctx context.Context,
+	cn *pool.Conn,
+	err error,
+	allowTimeout bool,
+) {
+	c.mu.Lock()
+	c.releaseConn(ctx, cn, err, allowTimeout)
+	c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
+	if c.cn != cn {
+		return
+	}
+	if isBadConn(err, allowTimeout, c.opt.Addr) {
+		c.reconnect(ctx, err)
+	}
+}
+
+func (c *PubSub) reconnect(ctx context.Context, reason error) {
+	_ = c.closeTheCn(reason)
+	_, _ = c.conn(ctx, nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+	if c.cn == nil {
+		return nil
+	}
+	if !c.closed {
+		internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
+	}
+	err := c.closeConn(c.cn)
+	c.cn = nil
+	return err
+}
+
+func (c *PubSub) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return pool.ErrClosed
+	}
+	c.closed = true
+	close(c.exit)
+
+	return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe the client to the specified channels. It returns
+// an empty subscription if there are no channels.
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	err := c.subscribe(ctx, "subscribe", channels...)
+	if c.channels == nil {
+		c.channels = make(map[string]struct{})
+	}
+	for _, s := range channels {
+		c.channels[s] = struct{}{}
+	}
+	return err
+}
+
+// PSubscribe the client to the given patterns. It returns
+// an empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	err := c.subscribe(ctx, "psubscribe", patterns...)
+	if c.patterns == nil {
+		c.patterns = make(map[string]struct{})
+	}
+	for _, s := range patterns {
+		c.patterns[s] = struct{}{}
+	}
+	return err
+}
+
+// Unsubscribe the client from the given channels, or from all of
+// them if none is given.
+func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	for _, channel := range channels {
+		delete(c.channels, channel)
+	}
+	err := c.subscribe(ctx, "unsubscribe", channels...)
+	return err
+}
+
+// PUnsubscribe the client from the given patterns, or from all of
+// them if none is given.
+func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	for _, pattern := range patterns {
+		delete(c.patterns, pattern)
+	}
+	err := c.subscribe(ctx, "punsubscribe", patterns...)
+	return err
+}
+
+func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
+	cn, err := c.conn(ctx, channels)
+	if err != nil {
+		return err
+	}
+
+	err = c._subscribe(ctx, cn, redisCmd, channels)
+	c.releaseConn(ctx, cn, err, false)
+	return err
+}
+
+func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
+	args := []interface{}{"ping"}
+	if len(payload) == 1 {
+		args = append(args, payload[0])
+	}
+	cmd := NewCmd(ctx, args...)
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	cn, err := c.conn(ctx, nil)
+	if err != nil {
+		return err
+	}
+
+	err = c.writeCmd(ctx, cn, cmd)
+	c.releaseConn(ctx, cn, err, false)
+	return err
+}
+
+// Subscription received after a successful subscription to a channel.
+type Subscription struct {
+	// Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+	Kind string
+	// Channel name we have subscribed to.
+	Channel string
+	// Number of channels we are currently subscribed to.
+	Count int
+}
+
+func (m *Subscription) String() string {
+	return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message received as result of a PUBLISH command issued by another client.
+type Message struct {
+	Channel      string
+	Pattern      string
+	Payload      string
+	PayloadSlice []string
+}
+
+func (m *Message) String() string {
+	return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong received as result of a PING command issued by another client.
+type Pong struct {
+	Payload string
+}
+
+func (p *Pong) String() string {
+	if p.Payload != "" {
+		return fmt.Sprintf("Pong<%s>", p.Payload)
+	}
+	return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+	switch reply := reply.(type) {
+	case string:
+		return &Pong{
+			Payload: reply,
+		}, nil
+	case []interface{}:
+		switch kind := reply[0].(string); kind {
+		case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+			// Can be nil in case of "unsubscribe".
+			channel, _ := reply[1].(string)
+			return &Subscription{
+				Kind:    kind,
+				Channel: channel,
+				Count:   int(reply[2].(int64)),
+			}, nil
+		case "message":
+			switch payload := reply[2].(type) {
+			case string:
+				return &Message{
+					Channel: reply[1].(string),
+					Payload: payload,
+				}, nil
+			case []interface{}:
+				ss := make([]string, len(payload))
+				for i, s := range payload {
+					ss[i] = s.(string)
+				}
+				return &Message{
+					Channel:      reply[1].(string),
+					PayloadSlice: ss,
+				}, nil
+			default:
+				return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+			}
+		case "pmessage":
+			return &Message{
+				Pattern: reply[1].(string),
+				Channel: reply[2].(string),
+				Payload: reply[3].(string),
+			}, nil
+		case "pong":
+			return &Pong{
+				Payload: reply[1].(string),
+			}, nil
+		default:
+			return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+		}
+	default:
+		return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+	}
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+	if c.cmd == nil {
+		c.cmd = NewCmd(ctx)
+	}
+
+	// Don't hold the lock to allow subscriptions and pings.
+
+	cn, err := c.connWithLock(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+		return c.cmd.readReply(rd)
+	})
+
+	c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+	return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message or error ignoring Subscription and Pong
+// messages. This is low-level API and in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+	for {
+		msg, err := c.Receive(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		switch msg := msg.(type) {
+		case *Subscription:
+			// Ignore.
+		case *Pong:
+			// Ignore.
+		case *Message:
+			return msg, nil
+		default:
+			err := fmt.Errorf("redis: unknown message: %T", msg)
+			return nil, err
+		}
+	}
+}
+
+func (c *PubSub) getContext() context.Context {
+	if c.cmd != nil {
+		return c.cmd.ctx
+	}
+	return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// stays full for 30 seconds, the message is dropped.
+// Receive* APIs can not be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping is not received for 30 seconds.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+	c.chOnce.Do(func() {
+		c.msgCh = newChannel(c, opts...)
+		c.msgCh.initMsgChan()
+	})
+	if c.msgCh == nil {
+		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+		panic(err)
+	}
+	return c.msgCh.msgCh
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+//
+// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+	return c.Channel(WithChannelSize(size))
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+	c.chOnce.Do(func() {
+		c.allCh = newChannel(c, WithChannelSize(size))
+		c.allCh.initAllChan()
+	})
+	if c.allCh == nil {
+		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+		panic(err)
+	}
+	return c.allCh.allCh
+}
+
+type ChannelOption func(c *channel)
+
+// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
+//
+// The default is 100 messages.
+func WithChannelSize(size int) ChannelOption {
+	return func(c *channel) {
+		c.chanSize = size
+	}
+}
+
+// WithChannelHealthCheckInterval specifies the health check interval.
+// PubSub will ping Redis Server if it does not receive any messages within the interval.
+// To disable health check, use zero interval.
+//
+// The default is 3 seconds.
+func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
+	return func(c *channel) {
+		c.checkInterval = d
+	}
+}
+
+// WithChannelSendTimeout specifies the channel send timeout after which
+// the message is dropped.
+//
+// The default is 60 seconds.
+func WithChannelSendTimeout(d time.Duration) ChannelOption {
+	return func(c *channel) {
+		c.chanSendTimeout = d
+	}
+}
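
The three ChannelOption helpers above tune the buffered Go channel returned by Channel. A minimal consumer-side sketch, illustrative only and not part of the vendored file (the address and channel name are assumptions, and the usual context, fmt, time and redis imports are assumed):

	// Illustrative sketch: consuming Pub/Sub messages through Channel with options.
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address

	pubsub := rdb.Subscribe(ctx, "news") // assumed channel name
	defer pubsub.Close()

	msgs := pubsub.Channel(
		redis.WithChannelSize(500),                          // buffer up to 500 messages
		redis.WithChannelHealthCheckInterval(3*time.Second), // ping when idle
		redis.WithChannelSendTimeout(time.Minute),           // drop messages blocked for 1m
	)
	for msg := range msgs {
		fmt.Println(msg.Channel, msg.Payload)
	}
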
+
+type channel struct {
+	pubSub *PubSub
+
+	msgCh chan *Message
+	allCh chan interface{}
+	ping  chan struct{}
+
+	chanSize        int
+	chanSendTimeout time.Duration
+	checkInterval   time.Duration
+}
+
+func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
+	c := &channel{
+		pubSub: pubSub,
+
+		chanSize:        100,
+		chanSendTimeout: time.Minute,
+		checkInterval:   3 * time.Second,
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.checkInterval > 0 {
+		c.initHealthCheck()
+	}
+	return c
+}
+
+func (c *channel) initHealthCheck() {
+	ctx := context.TODO()
+	c.ping = make(chan struct{}, 1)
+
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		timer.Stop()
+
+		for {
+			timer.Reset(c.checkInterval)
+			select {
+			case <-c.ping:
+				if !timer.Stop() {
+					<-timer.C
+				}
+			case <-timer.C:
+				if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
+					c.pubSub.mu.Lock()
+					c.pubSub.reconnect(ctx, pingErr)
+					c.pubSub.mu.Unlock()
+				}
+			case <-c.pubSub.exit:
+				return
+			}
+		}
+	}()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *channel) initMsgChan() {
+	ctx := context.TODO()
+	c.msgCh = make(chan *Message, c.chanSize)
+
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		timer.Stop()
+
+		var errCount int
+		for {
+			msg, err := c.pubSub.Receive(ctx)
+			if err != nil {
+				if err == pool.ErrClosed {
+					close(c.msgCh)
+					return
+				}
+				if errCount > 0 {
+					time.Sleep(100 * time.Millisecond)
+				}
+				errCount++
+				continue
+			}
+
+			errCount = 0
+
+			// Any message is as good as a ping.
+			select {
+			case c.ping <- struct{}{}:
+			default:
+			}
+
+			switch msg := msg.(type) {
+			case *Subscription:
+				// Ignore.
+			case *Pong:
+				// Ignore.
+			case *Message:
+				timer.Reset(c.chanSendTimeout)
+				select {
+				case c.msgCh <- msg:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					internal.Logger.Printf(
+						ctx, "redis: %s channel is full for %s (message is dropped)",
+						c, c.chanSendTimeout)
+				}
+			default:
+				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+			}
+		}
+	}()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *channel) initAllChan() {
+	ctx := context.TODO()
+	c.allCh = make(chan interface{}, c.chanSize)
+
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		timer.Stop()
+
+		var errCount int
+		for {
+			msg, err := c.pubSub.Receive(ctx)
+			if err != nil {
+				if err == pool.ErrClosed {
+					close(c.allCh)
+					return
+				}
+				if errCount > 0 {
+					time.Sleep(100 * time.Millisecond)
+				}
+				errCount++
+				continue
+			}
+
+			errCount = 0
+
+			// Any message is as good as a ping.
+			select {
+			case c.ping <- struct{}{}:
+			default:
+			}
+
+			switch msg := msg.(type) {
+			case *Pong:
+				// Ignore.
+			case *Subscription, *Message:
+				timer.Reset(c.chanSendTimeout)
+				select {
+				case c.allCh <- msg:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					internal.Logger.Printf(
+						ctx, "redis: %s channel is full for %s (message is dropped)",
+						c, c.chanSendTimeout)
+				}
+			default:
+				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+			}
+		}
+	}()
+}

+ 773 - 0
vendor/github.com/go-redis/redis/v8/redis.go

@@ -0,0 +1,773 @@
+package redis
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// Nil reply returned by Redis when key does not exist.
+const Nil = proto.Nil
+
+func SetLogger(logger internal.Logging) {
+	internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+	BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
+	AfterProcess(ctx context.Context, cmd Cmder) error
+
+	BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
+	AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
+}
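
The Hook interface above lets callers observe every command and pipeline. A hedged sketch of a timing hook (the timingHook and startKey names are assumptions, and the snippet assumes the usual context, log, time and redis imports):

	// Illustrative sketch: a hook that logs how long each command takes.
	type timingHook struct{}
	type startKey struct{}

	func (timingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
		return context.WithValue(ctx, startKey{}, time.Now()), nil
	}

	func (timingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
		if start, ok := ctx.Value(startKey{}).(time.Time); ok {
			log.Printf("%s took %s", cmd.Name(), time.Since(start))
		}
		return nil
	}

	func (timingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
		return ctx, nil
	}

	func (timingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
		return nil
	}

	// Register it on a client: rdb.AddHook(timingHook{})
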
+
+type hooks struct {
+	hooks []Hook
+}
+
+func (hs *hooks) lock() {
+	hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
+}
+
+func (hs hooks) clone() hooks {
+	clone := hs
+	clone.lock()
+	return clone
+}
+
+func (hs *hooks) AddHook(hook Hook) {
+	hs.hooks = append(hs.hooks, hook)
+}
+
+func (hs hooks) process(
+	ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
+) error {
+	if len(hs.hooks) == 0 {
+		err := fn(ctx, cmd)
+		cmd.SetErr(err)
+		return err
+	}
+
+	var hookIndex int
+	var retErr error
+
+	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+		ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
+		if retErr != nil {
+			cmd.SetErr(retErr)
+		}
+	}
+
+	if retErr == nil {
+		retErr = fn(ctx, cmd)
+		cmd.SetErr(retErr)
+	}
+
+	for hookIndex--; hookIndex >= 0; hookIndex-- {
+		if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
+			retErr = err
+			cmd.SetErr(retErr)
+		}
+	}
+
+	return retErr
+}
+
+func (hs hooks) processPipeline(
+	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+	if len(hs.hooks) == 0 {
+		err := fn(ctx, cmds)
+		return err
+	}
+
+	var hookIndex int
+	var retErr error
+
+	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+		ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
+		if retErr != nil {
+			setCmdsErr(cmds, retErr)
+		}
+	}
+
+	if retErr == nil {
+		retErr = fn(ctx, cmds)
+	}
+
+	for hookIndex--; hookIndex >= 0; hookIndex-- {
+		if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
+			retErr = err
+			setCmdsErr(cmds, retErr)
+		}
+	}
+
+	return retErr
+}
+
+func (hs hooks) processTxPipeline(
+	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+	cmds = wrapMultiExec(ctx, cmds)
+	return hs.processPipeline(ctx, cmds, fn)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+	opt      *Options
+	connPool pool.Pooler
+
+	onClose func() error // hook called when client is closed
+}
+
+func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
+	return &baseClient{
+		opt:      opt,
+		connPool: connPool,
+	}
+}
+
+func (c *baseClient) clone() *baseClient {
+	clone := *c
+	return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+	opt := c.opt.clone()
+	opt.ReadTimeout = timeout
+	opt.WriteTimeout = timeout
+
+	clone := c.clone()
+	clone.opt = opt
+
+	return clone
+}
+
+func (c *baseClient) String() string {
+	return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+	cn, err := c.connPool.NewConn(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	err = c.initConn(ctx, cn)
+	if err != nil {
+		_ = c.connPool.CloseConn(cn)
+		return nil, err
+	}
+
+	return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+	if c.opt.Limiter != nil {
+		err := c.opt.Limiter.Allow()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	cn, err := c._getConn(ctx)
+	if err != nil {
+		if c.opt.Limiter != nil {
+			c.opt.Limiter.ReportResult(err)
+		}
+		return nil, err
+	}
+
+	return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+	cn, err := c.connPool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if cn.Inited {
+		return cn, nil
+	}
+
+	if err := c.initConn(ctx, cn); err != nil {
+		c.connPool.Remove(ctx, cn, err)
+		if err := errors.Unwrap(err); err != nil {
+			return nil, err
+		}
+		return nil, err
+	}
+
+	return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+	if cn.Inited {
+		return nil
+	}
+	cn.Inited = true
+
+	if c.opt.Password == "" &&
+		c.opt.DB == 0 &&
+		!c.opt.readOnly &&
+		c.opt.OnConnect == nil {
+		return nil
+	}
+
+	connPool := pool.NewSingleConnPool(c.connPool, cn)
+	conn := newConn(ctx, c.opt, connPool)
+
+	_, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
+		if c.opt.Password != "" {
+			if c.opt.Username != "" {
+				pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
+			} else {
+				pipe.Auth(ctx, c.opt.Password)
+			}
+		}
+
+		if c.opt.DB > 0 {
+			pipe.Select(ctx, c.opt.DB)
+		}
+
+		if c.opt.readOnly {
+			pipe.ReadOnly(ctx)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	if c.opt.OnConnect != nil {
+		return c.opt.OnConnect(ctx, conn)
+	}
+	return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+	if c.opt.Limiter != nil {
+		c.opt.Limiter.ReportResult(err)
+	}
+
+	if isBadConn(err, false, c.opt.Addr) {
+		c.connPool.Remove(ctx, cn, err)
+	} else {
+		c.connPool.Put(ctx, cn)
+	}
+}
+
+func (c *baseClient) withConn(
+	ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+	cn, err := c.getConn(ctx)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		c.releaseConn(ctx, cn, err)
+	}()
+
+	done := ctx.Done() //nolint:ifshort
+
+	if done == nil {
+		err = fn(ctx, cn)
+		return err
+	}
+
+	errc := make(chan error, 1)
+	go func() { errc <- fn(ctx, cn) }()
+
+	select {
+	case <-done:
+		_ = cn.Close()
+		// Wait for the goroutine to finish and send something.
+		<-errc
+
+		err = ctx.Err()
+		return err
+	case err = <-errc:
+		return err
+	}
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		attempt := attempt
+
+		retry, err := c._process(ctx, cmd, attempt)
+		if err == nil || !retry {
+			return err
+		}
+
+		lastErr = err
+	}
+	return lastErr
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+	if attempt > 0 {
+		if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+			return false, err
+		}
+	}
+
+	retryTimeout := uint32(1)
+	err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+		err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+			return writeCmd(wr, cmd)
+		})
+		if err != nil {
+			return err
+		}
+
+		err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+		if err != nil {
+			if cmd.readTimeout() == nil {
+				atomic.StoreUint32(&retryTimeout, 1)
+			}
+			return err
+		}
+
+		return nil
+	})
+	if err == nil {
+		return false, nil
+	}
+
+	retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+	return retry, err
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+	if timeout := cmd.readTimeout(); timeout != nil {
+		t := *timeout
+		if t == 0 {
+			return 0
+		}
+		return t + 10*time.Second
+	}
+	return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+	var firstErr error
+	if c.onClose != nil {
+		if err := c.onClose(); err != nil {
+			firstErr = err
+		}
+	}
+	if err := c.connPool.Close(); err != nil && firstErr == nil {
+		firstErr = err
+	}
+	return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+	return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+	err := c._generalProcessPipeline(ctx, cmds, p)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+	return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) _generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		var canRetry bool
+		lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			var err error
+			canRetry, err = p(ctx, cn, cmds)
+			return err
+		})
+		if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+	ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmds(wr, cmds)
+	})
+	if err != nil {
+		return true, err
+	}
+
+	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+		return pipelineReadCmds(rd, cmds)
+	})
+	return true, err
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+	for _, cmd := range cmds {
+		err := cmd.readReply(rd)
+		cmd.SetErr(err)
+		if err != nil && !isRedisError(err) {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+	ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmds(wr, cmds)
+	})
+	if err != nil {
+		return true, err
+	}
+
+	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+		statusCmd := cmds[0].(*StatusCmd)
+		// Trim multi and exec.
+		cmds = cmds[1 : len(cmds)-1]
+
+		err := txPipelineReadQueued(rd, statusCmd, cmds)
+		if err != nil {
+			return err
+		}
+
+		return pipelineReadCmds(rd, cmds)
+	})
+	return false, err
+}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+	if len(cmds) == 0 {
+		panic("not reached")
+	}
+	cmdCopy := make([]Cmder, len(cmds)+2)
+	cmdCopy[0] = NewStatusCmd(ctx, "multi")
+	copy(cmdCopy[1:], cmds)
+	cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
+	return cmdCopy
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+	// Parse queued replies.
+	if err := statusCmd.readReply(rd); err != nil {
+		return err
+	}
+
+	for range cmds {
+		if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+			return err
+		}
+	}
+
+	// Parse number of replies.
+	line, err := rd.ReadLine()
+	if err != nil {
+		if err == Nil {
+			err = TxFailedErr
+		}
+		return err
+	}
+
+	switch line[0] {
+	case proto.ErrorReply:
+		return proto.ParseErrorReply(line)
+	case proto.ArrayReply:
+		// ok
+	default:
+		err := fmt.Errorf("redis: expected '*', but got line %q", line)
+		return err
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+	*baseClient
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+	opt.init()
+
+	c := Client{
+		baseClient: newBaseClient(opt, newConnPool(opt)),
+		ctx:        context.Background(),
+	}
+	c.cmdable = c.Process
+
+	return &c
+}
+
+func (c *Client) clone() *Client {
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.lock()
+	return &clone
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+	clone := c.clone()
+	clone.baseClient = c.baseClient.withTimeout(timeout)
+	return clone
+}
+
+func (c *Client) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := c.clone()
+	clone.ctx = ctx
+	return clone
+}
+
+func (c *Client) Conn(ctx context.Context) *Conn {
+	return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
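
Do accepts raw command arguments and is useful for commands that have no typed helper. A short hedged usage sketch (the address and key name are assumptions):

	// Illustrative sketch: issuing an arbitrary command through Do.
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address

	val, err := rdb.Do(ctx, "GET", "some-key").Result() // assumed key
	switch {
	case err == redis.Nil:
		fmt.Println("key does not exist")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println(val)
	}
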
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+	return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+	stats := c.connPool.Stats()
+	return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
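
Pipelined batches queued commands into one round trip, while TxPipelined additionally wraps them in MULTI/EXEC. A hedged sketch (key names are assumptions; rdb is a *redis.Client created with NewClient):

	// Illustrative sketch: plain pipeline vs. transactional pipeline.
	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Incr(ctx, "counter")
		pipe.Expire(ctx, "counter", time.Hour)
		return nil
	})
	_ = cmds // each returned Cmder carries its own result and error

	_, err = rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Set(ctx, "a", "1", 0)
		pipe.Set(ctx, "b", "2", 0)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
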
+
+func (c *Client) pubSub() *PubSub {
+	pubsub := &PubSub{
+		opt: c.opt,
+
+		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+			return c.newConn(ctx)
+		},
+		closeConn: c.connPool.CloseConn,
+	}
+	pubsub.init()
+	return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+//    sub := client.Subscribe(queryResp)
+//    iface, err := sub.Receive()
+//    if err != nil {
+//        // handle error
+//    }
+//
+//    // Should be *Subscription, but others are possible if other actions have been
+//    // taken on sub since it was created.
+//    switch iface.(type) {
+//    case *Subscription:
+//        // subscribe succeeded
+//    case *Message:
+//        // received first message
+//    case *Pong:
+//        // pong received
+//    default:
+//        // handle error
+//    }
+//
+//    ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+	baseClient
+	cmdable
+	statefulCmdable
+	hooks // TODO: inherit hooks
+}
+
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
+type Conn struct {
+	*conn
+	ctx context.Context
+}
+
+func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
+	c := Conn{
+		conn: &conn{
+			baseClient: baseClient{
+				opt:      opt,
+				connPool: connPool,
+			},
+		},
+		ctx: ctx,
+	}
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+	return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}

+ 180 - 0
vendor/github.com/go-redis/redis/v8/result.go

@@ -0,0 +1,180 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing.
+func NewCmdResult(val interface{}, err error) *Cmd {
+	var cmd Cmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+	var cmd SliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
+func NewStatusResult(val string, err error) *StatusCmd {
+	var cmd StatusCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing.
+func NewIntResult(val int64, err error) *IntCmd {
+	var cmd IntCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+	var cmd DurationCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
+func NewBoolResult(val bool, err error) *BoolCmd {
+	var cmd BoolCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing.
+func NewStringResult(val string, err error) *StringCmd {
+	var cmd StringCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
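
These *Result constructors exist so tests can fabricate command results without a running Redis server. A hedged sketch of a small unit test using NewStringResult (assumes the standard testing package and the redis import):

	// Illustrative sketch: stubbing command results in a test.
	func TestStubbedResult(t *testing.T) {
		cmd := redis.NewStringResult("hello", nil)
		if v, err := cmd.Result(); err != nil || v != "hello" {
			t.Fatalf("unexpected result: %q, %v", v, err)
		}

		errCmd := redis.NewStringResult("", redis.Nil)
		if _, err := errCmd.Result(); err != redis.Nil {
			t.Fatalf("expected redis.Nil, got %v", err)
		}
	}
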
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
+func NewFloatResult(val float64, err error) *FloatCmd {
+	var cmd FloatCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+	var cmd StringSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+	var cmd BoolSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+	var cmd StringStringMapCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+	var cmd StringIntMapCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
+func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
+	var cmd TimeCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+	var cmd ZSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+	var cmd ZWithKeyCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+	var cmd ScanCmd
+	cmd.page = keys
+	cmd.cursor = cursor
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+	var cmd ClusterSlotsCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+	var cmd GeoLocationCmd
+	cmd.locations = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+	var cmd GeoPosCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+	var cmd CommandsInfoCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing.
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+	var cmd XMessageSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+	var cmd XStreamSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}

+ 736 - 0
vendor/github.com/go-redis/redis/v8/ring.go

@@ -0,0 +1,736 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/cespare/xxhash/v2"
+	rendezvous "github.com/dgryski/go-rendezvous" //nolint
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hashtag"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+	Get(string) string
+}
+
+type rendezvousWrapper struct {
+	*rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+	return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+	return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
+
+//------------------------------------------------------------------------------
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+	// Map of name => host:port addresses of ring shards.
+	Addrs map[string]string
+
+	// NewClient creates a shard client with provided name and options.
+	NewClient func(name string, opt *Options) *Client
+
+	// Frequency of PING commands sent to check shard availability.
+	// A shard is considered down after 3 consecutive failed checks.
+	HeartbeatFrequency time.Duration
+
+	// NewConsistentHash returns a consistent hash that is used
+	// to distribute keys across the shards.
+	//
+	// See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+	// for consistent hashing algorithmic tradeoffs.
+	NewConsistentHash func(shards []string) ConsistentHash
+
+	// The following options are copied from the Options struct.
+
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username string
+	Password string
+	DB       int
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+	Limiter   Limiter
+}
+
+func (opt *RingOptions) init() {
+	if opt.NewClient == nil {
+		opt.NewClient = func(name string, opt *Options) *Client {
+			return NewClient(opt)
+		}
+	}
+
+	if opt.HeartbeatFrequency == 0 {
+		opt.HeartbeatFrequency = 500 * time.Millisecond
+	}
+
+	if opt.NewConsistentHash == nil {
+		opt.NewConsistentHash = newRendezvous
+	}
+
+	if opt.MaxRetries == -1 {
+		opt.MaxRetries = 0
+	} else if opt.MaxRetries == 0 {
+		opt.MaxRetries = 3
+	}
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+	return &Options{
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		Username: opt.Username,
+		Password: opt.Password,
+		DB:       opt.DB,
+
+		MaxRetries: -1,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+
+		TLSConfig: opt.TLSConfig,
+		Limiter:   opt.Limiter,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+	Client *Client
+	down   int32
+}
+
+func newRingShard(opt *RingOptions, name, addr string) *ringShard {
+	clopt := opt.clientOptions()
+	clopt.Addr = addr
+
+	return &ringShard{
+		Client: opt.NewClient(name, clopt),
+	}
+}
+
+func (shard *ringShard) String() string {
+	var state string
+	if shard.IsUp() {
+		state = "up"
+	} else {
+		state = "down"
+	}
+	return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+	const threshold = 3
+	return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+	return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+	if up {
+		changed := shard.IsDown()
+		atomic.StoreInt32(&shard.down, 0)
+		return changed
+	}
+
+	if shard.IsDown() {
+		return false
+	}
+
+	atomic.AddInt32(&shard.down, 1)
+	return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringShards struct {
+	opt *RingOptions
+
+	mu       sync.RWMutex
+	hash     ConsistentHash
+	shards   map[string]*ringShard // read only
+	list     []*ringShard          // read only
+	numShard int
+	closed   bool
+}
+
+func newRingShards(opt *RingOptions) *ringShards {
+	shards := make(map[string]*ringShard, len(opt.Addrs))
+	list := make([]*ringShard, 0, len(shards))
+
+	for name, addr := range opt.Addrs {
+		shard := newRingShard(opt, name, addr)
+		shards[name] = shard
+
+		list = append(list, shard)
+	}
+
+	c := &ringShards{
+		opt: opt,
+
+		shards: shards,
+		list:   list,
+	}
+	c.rebalance()
+
+	return c
+}
+
+func (c *ringShards) List() []*ringShard {
+	var list []*ringShard
+
+	c.mu.RLock()
+	if !c.closed {
+		list = c.list
+	}
+	c.mu.RUnlock()
+
+	return list
+}
+
+func (c *ringShards) Hash(key string) string {
+	key = hashtag.Key(key)
+
+	var hash string
+
+	c.mu.RLock()
+	if c.numShard > 0 {
+		hash = c.hash.Get(key)
+	}
+	c.mu.RUnlock()
+
+	return hash
+}
+
+func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+	key = hashtag.Key(key)
+
+	c.mu.RLock()
+
+	if c.closed {
+		c.mu.RUnlock()
+		return nil, pool.ErrClosed
+	}
+
+	if c.numShard == 0 {
+		c.mu.RUnlock()
+		return nil, errRingShardsDown
+	}
+
+	hash := c.hash.Get(key)
+	if hash == "" {
+		c.mu.RUnlock()
+		return nil, errRingShardsDown
+	}
+
+	shard := c.shards[hash]
+	c.mu.RUnlock()
+
+	return shard, nil
+}
+
+func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
+	if shardName == "" {
+		return c.Random()
+	}
+
+	c.mu.RLock()
+	shard := c.shards[shardName]
+	c.mu.RUnlock()
+	return shard, nil
+}
+
+func (c *ringShards) Random() (*ringShard, error) {
+	return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringShards) Heartbeat(frequency time.Duration) {
+	ticker := time.NewTicker(frequency)
+	defer ticker.Stop()
+
+	ctx := context.Background()
+	for range ticker.C {
+		var rebalance bool
+
+		for _, shard := range c.List() {
+			err := shard.Client.Ping(ctx).Err()
+			isUp := err == nil || err == pool.ErrPoolTimeout
+			if shard.Vote(isUp) {
+				internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
+				rebalance = true
+			}
+		}
+
+		if rebalance {
+			c.rebalance()
+		}
+	}
+}
+
+// rebalance removes dead shards from the Ring.
+func (c *ringShards) rebalance() {
+	c.mu.RLock()
+	shards := c.shards
+	c.mu.RUnlock()
+
+	liveShards := make([]string, 0, len(shards))
+
+	for name, shard := range shards {
+		if shard.IsUp() {
+			liveShards = append(liveShards, name)
+		}
+	}
+
+	hash := c.opt.NewConsistentHash(liveShards)
+
+	c.mu.Lock()
+	c.hash = hash
+	c.numShard = len(liveShards)
+	c.mu.Unlock()
+}
+
+func (c *ringShards) Len() int {
+	c.mu.RLock()
+	l := c.numShard
+	c.mu.RUnlock()
+	return l
+}
+
+func (c *ringShards) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
+
+	var firstErr error
+	for _, shard := range c.shards {
+		if err := shard.Client.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	c.hash = nil
+	c.shards = nil
+	c.list = nil
+
+	return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+type ring struct {
+	opt           *RingOptions
+	shards        *ringShards
+	cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+	*ring
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+func NewRing(opt *RingOptions) *Ring {
+	opt.init()
+
+	ring := Ring{
+		ring: &ring{
+			opt:    opt,
+			shards: newRingShards(opt),
+		},
+		ctx: context.Background(),
+	}
+
+	ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+	ring.cmdable = ring.Process
+
+	go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+
+	return &ring
+}
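
A hedged sketch of constructing a two-shard ring; the shard names and addresses are assumptions:

	// Illustrative sketch: a ring distributing keys across two shards.
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "localhost:6379",
			"shard2": "localhost:6380",
		},
	})
	defer ring.Close()

	ctx := context.Background()
	if err := ring.Set(ctx, "key", "value", 0).Err(); err != nil {
		log.Fatal(err)
	}
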
+
+func (c *Ring) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Ring) WithContext(ctx context.Context) *Ring {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.process)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+	return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+	shards := c.shards.List()
+	var acc PoolStats
+	for _, shard := range shards {
+		s := shard.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+	}
+	return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+	return c.shards.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
+	if len(channels) == 0 {
+		panic("at least one channel is required")
+	}
+
+	shard, err := c.shards.GetByKey(channels[0])
+	if err != nil {
+		// TODO: return PubSub with sticky error
+		panic(err)
+	}
+	return shard.Client.Subscribe(ctx, channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+	if len(channels) == 0 {
+		panic("at least one channel is required")
+	}
+
+	shard, err := c.shards.GetByKey(channels[0])
+	if err != nil {
+		// TODO: return PubSub with sticky error
+		panic(err)
+	}
+	return shard.Client.PSubscribe(ctx, channels...)
+}
+
+// ForEachShard concurrently calls the fn on each live shard in the ring.
+// It returns the first error if any.
+func (c *Ring) ForEachShard(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	shards := c.shards.List()
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+	for _, shard := range shards {
+		if shard.IsDown() {
+			continue
+		}
+
+		wg.Add(1)
+		go func(shard *ringShard) {
+			defer wg.Done()
+			err := fn(ctx, shard.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(shard)
+	}
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
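
ForEachShard fans fn out to every live shard and surfaces only the first error. A hedged usage sketch (ring is a *redis.Ring as in the sketch above):

	// Illustrative sketch: pinging every live shard in the ring.
	err := ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
		return shard.Ping(ctx).Err()
	})
	if err != nil {
		log.Printf("at least one shard is unreachable: %v", err)
	}
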
+
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+	shards := c.shards.List()
+	var firstErr error
+	for _, shard := range shards {
+		cmdsInfo, err := shard.Client.Command(ctx).Result()
+		if err == nil {
+			return cmdsInfo, nil
+		}
+		if firstErr == nil {
+			firstErr = err
+		}
+	}
+	if firstErr == nil {
+		return nil, errRingShardsDown
+	}
+	return nil, firstErr
+}
+
+func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
+	cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
+	if err != nil {
+		return nil
+	}
+	info := cmdsInfo[name]
+	if info == nil {
+		internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
+	}
+	return info
+}
+
+func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
+	cmdInfo := c.cmdInfo(ctx, cmd.Name())
+	pos := cmdFirstKeyPos(cmd, cmdInfo)
+	if pos == 0 {
+		return c.shards.Random()
+	}
+	firstKey := cmd.stringArg(pos)
+	return c.shards.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		shard, err := c.cmdShard(ctx, cmd)
+		if err != nil {
+			return err
+		}
+
+		lastErr = shard.Client.Process(ctx, cmd)
+		if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return c.generalProcessPipeline(ctx, cmds, false)
+	})
+}
+
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return c.generalProcessPipeline(ctx, cmds, true)
+	})
+}
+
+func (c *Ring) generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, tx bool,
+) error {
+	cmdsMap := make(map[string][]Cmder)
+	for _, cmd := range cmds {
+		cmdInfo := c.cmdInfo(ctx, cmd.Name())
+		hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+		if hash != "" {
+			hash = c.shards.Hash(hash)
+		}
+		cmdsMap[hash] = append(cmdsMap[hash], cmd)
+	}
+
+	var wg sync.WaitGroup
+	for hash, cmds := range cmdsMap {
+		wg.Add(1)
+		go func(hash string, cmds []Cmder) {
+			defer wg.Done()
+
+			_ = c.processShardPipeline(ctx, hash, cmds, tx)
+		}(hash, cmds)
+	}
+
+	wg.Wait()
+	return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) processShardPipeline(
+	ctx context.Context, hash string, cmds []Cmder, tx bool,
+) error {
+	// TODO: retry?
+	shard, err := c.shards.GetByName(hash)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	if tx {
+		return shard.Client.processTxPipeline(ctx, cmds)
+	}
+	return shard.Client.processPipeline(ctx, cmds)
+}
+
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+	if len(keys) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one key")
+	}
+
+	var shards []*ringShard
+	for _, key := range keys {
+		if key != "" {
+			shard, err := c.shards.GetByKey(hashtag.Key(key))
+			if err != nil {
+				return err
+			}
+
+			shards = append(shards, shard)
+		}
+	}
+
+	if len(shards) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one shard")
+	}
+
+	if len(shards) > 1 {
+		for _, shard := range shards[1:] {
+			if shard.Client != shards[0].Client {
+				err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+				return err
+			}
+		}
+	}
+
+	return shards[0].Client.Watch(ctx, fn, keys...)
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+	return c.shards.Close()
+}

+ 65 - 0
vendor/github.com/go-redis/redis/v8/script.go

@@ -0,0 +1,65 @@
+package redis
+
+import (
+	"context"
+	"crypto/sha1"
+	"encoding/hex"
+	"io"
+	"strings"
+)
+
+type Scripter interface {
+	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+	ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+	_ Scripter = (*Client)(nil)
+	_ Scripter = (*Ring)(nil)
+	_ Scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+	src, hash string
+}
+
+func NewScript(src string) *Script {
+	h := sha1.New()
+	_, _ = io.WriteString(h, src)
+	return &Script{
+		src:  src,
+		hash: hex.EncodeToString(h.Sum(nil)),
+	}
+}
+
+func (s *Script) Hash() string {
+	return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
+	return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
+	return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	r := s.EvalSha(ctx, c, keys, args...)
+	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+		return s.Eval(ctx, c, keys, args...)
+	}
+	return r
+}
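
Run lets callers treat a Lua script like any other command: EVALSHA is tried first and EVAL is used when the script is not yet cached on the server. A hedged sketch (the script body and key name are assumptions):

	// Illustrative sketch: an atomic increment-by via a cached Lua script.
	var incrBy = redis.NewScript(`
	return redis.call("INCRBY", KEYS[1], ARGV[1])
	`)

	func incrementCounter(ctx context.Context, rdb *redis.Client) (int64, error) {
		return incrBy.Run(ctx, rdb, []string{"counter"}, 10).Int64()
	}
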

+ 796 - 0
vendor/github.com/go-redis/redis/v8/sentinel.go

@@ -0,0 +1,796 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+	// The master name.
+	MasterName string
+	// A seed list of host:port addresses of sentinel nodes.
+	SentinelAddrs []string
+
+	// If specified with SentinelPassword, enables ACL-based authentication (via
+	// AUTH <user> <pass>).
+	SentinelUsername string
+	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+	// configuration, or, if SentinelUsername is also supplied, used for ACL-based
+	// authentication.
+	SentinelPassword string
+
+	// Allows routing read-only commands to the closest master or slave node.
+	// This option only works with NewFailoverClusterClient.
+	RouteByLatency bool
+	// Allows routing read-only commands to a random master or slave node.
+	// This option only works with NewFailoverClusterClient.
+	RouteRandomly bool
+
+	// Route all commands to slave read-only nodes.
+	SlaveOnly bool
+
+	// Use slaves that are disconnected from the master when no connected
+	// slaves are available. Currently, this option only works in the
+	// RandomSlaveAddr function.
+	UseDisconnectedSlaves bool
+
+	// The following options are copied from the Options struct.
+
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username string
+	Password string
+	DB       int
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+}
+
+func (opt *FailoverOptions) clientOptions() *Options {
+	return &Options{
+		Addr: "FailoverClient",
+
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		DB:       opt.DB,
+		Username: opt.Username,
+		Password: opt.Password,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+
+		TLSConfig: opt.TLSConfig,
+	}
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+	return &Options{
+		Addr: addr,
+
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		DB:       0,
+		Username: opt.SentinelUsername,
+		Password: opt.SentinelPassword,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+
+		TLSConfig: opt.TLSConfig,
+	}
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+	return &ClusterOptions{
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		Username: opt.Username,
+		Password: opt.Password,
+
+		MaxRedirects: opt.MaxRetries,
+
+		RouteByLatency: opt.RouteByLatency,
+		RouteRandomly:  opt.RouteRandomly,
+
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+
+		TLSConfig: opt.TLSConfig,
+	}
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+	if failoverOpt.RouteByLatency {
+		panic("to route commands by latency, use NewFailoverClusterClient")
+	}
+	if failoverOpt.RouteRandomly {
+		panic("to route commands randomly, use NewFailoverClusterClient")
+	}
+
+	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+	copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+	rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+		sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+	})
+
+	failover := &sentinelFailover{
+		opt:           failoverOpt,
+		sentinelAddrs: sentinelAddrs,
+	}
+
+	opt := failoverOpt.clientOptions()
+	opt.Dialer = masterSlaveDialer(failover)
+	opt.init()
+
+	connPool := newConnPool(opt)
+
+	failover.mu.Lock()
+	failover.onFailover = func(ctx context.Context, addr string) {
+		_ = connPool.Filter(func(cn *pool.Conn) bool {
+			return cn.RemoteAddr().String() != addr
+		})
+	}
+	failover.mu.Unlock()
+
+	c := Client{
+		baseClient: newBaseClient(opt, connPool),
+		ctx:        context.Background(),
+	}
+	c.cmdable = c.Process
+	c.onClose = failover.Close
+
+	return &c
+}
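
A hedged sketch of creating a Sentinel-backed client; the master name and sentinel addresses are assumptions:

	// Illustrative sketch: a failover client that discovers the master via Sentinel.
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:26379", "localhost:26380", "localhost:26381"},
	})
	defer rdb.Close()

	if err := rdb.Ping(context.Background()).Err(); err != nil {
		log.Fatal(err)
	}
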
+
+func masterSlaveDialer(
+	failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+	return func(ctx context.Context, network, _ string) (net.Conn, error) {
+		var addr string
+		var err error
+
+		if failover.opt.SlaveOnly {
+			addr, err = failover.RandomSlaveAddr(ctx)
+		} else {
+			addr, err = failover.MasterAddr(ctx)
+			if err == nil {
+				failover.trySwitchMaster(ctx, addr)
+			}
+		}
+		if err != nil {
+			return nil, err
+		}
+		if failover.opt.Dialer != nil {
+			return failover.opt.Dialer(ctx, network, addr)
+		}
+
+		netDialer := &net.Dialer{
+			Timeout:   failover.opt.DialTimeout,
+			KeepAlive: 5 * time.Minute,
+		}
+		if failover.opt.TLSConfig == nil {
+			return netDialer.DialContext(ctx, network, addr)
+		}
+		return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
+	}
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+	*baseClient
+	hooks
+	ctx context.Context
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+	opt.init()
+	c := &SentinelClient{
+		baseClient: &baseClient{
+			opt:      opt,
+			connPool: newConnPool(opt),
+		},
+		ctx: context.Background(),
+	}
+	return c
+}
+
+func (c *SentinelClient) Context() context.Context {
+	return c.ctx
+}
+
+func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.ctx = ctx
+	return &clone
+}
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+	pubsub := &PubSub{
+		opt: c.opt,
+
+		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+			return c.newConn(ctx)
+		},
+		closeConn: c.connPool.CloseConn,
+	}
+	pubsub.init()
+	return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "ping")
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
+	cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Failover forces a failover as if the master was not reachable, without
+// asking other Sentinels for agreement.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Reset resets all the masters with matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every slave and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+	cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+	cmd := NewSliceCmd(ctx, "sentinel", "masters")
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Slaves shows a list of slaves for the specified master and their state.
+func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
+	cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+	cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+	cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Set changes configuration parameters of a specific master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+	cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Remove removes the specified master: the master will no longer be monitored
+// and will be completely removed from the internal state of the Sentinel.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+	cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
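
For orientation, a minimal sketch of talking to a Sentinel directly with the SentinelClient commands above; the address "localhost:26379" and the master name "mymaster" are assumptions for illustration, not part of the diff:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	sentinel := redis.NewSentinelClient(&redis.Options{
		Addr: "localhost:26379", // assumed Sentinel address
	})
	defer sentinel.Close()

	// Resolve the current master address for the monitored master.
	addr, err := sentinel.GetMasterAddrByName(ctx, "mymaster").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("master at:", addr[0], addr[1])

	// Check whether the deployment can reach the quorum needed for a failover.
	fmt.Println(sentinel.CkQuorum(ctx, "mymaster").Result())
}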
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+	opt *FailoverOptions
+
+	sentinelAddrs []string
+
+	onFailover func(ctx context.Context, addr string)
+	onUpdate   func(ctx context.Context)
+
+	mu          sync.RWMutex
+	_masterAddr string
+	sentinel    *SentinelClient
+	pubsub      *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.sentinel != nil {
+		return c.closeSentinel()
+	}
+	return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+	firstErr := c.pubsub.Close()
+	c.pubsub = nil
+
+	err := c.sentinel.Close()
+	if err != nil && firstErr == nil {
+		firstErr = err
+	}
+	c.sentinel = nil
+
+	return firstErr
+}
+
+func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
+	if c.opt == nil {
+		return "", errors.New("opt is nil")
+	}
+
+	addresses, err := c.slaveAddrs(ctx, false)
+	if err != nil {
+		return "", err
+	}
+
+	if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
+		addresses, err = c.slaveAddrs(ctx, true)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if len(addresses) == 0 {
+		return c.MasterAddr(ctx)
+	}
+	return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+	c.mu.RLock()
+	sentinel := c.sentinel
+	c.mu.RUnlock()
+
+	if sentinel != nil {
+		addr := c.getMasterAddr(ctx, sentinel)
+		if addr != "" {
+			return addr, nil
+		}
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.sentinel != nil {
+		addr := c.getMasterAddr(ctx, c.sentinel)
+		if addr != "" {
+			return addr, nil
+		}
+		_ = c.closeSentinel()
+	}
+
+	for i, sentinelAddr := range c.sentinelAddrs {
+		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+		masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+		if err != nil {
+			internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
+				c.opt.MasterName, err)
+			_ = sentinel.Close()
+			continue
+		}
+
+		// Push working sentinel to the top.
+		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+		c.setSentinel(ctx, sentinel)
+
+		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+		return addr, nil
+	}
+
+	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
+	c.mu.RLock()
+	sentinel := c.sentinel
+	c.mu.RUnlock()
+
+	if sentinel != nil {
+		addrs := c.getSlaveAddrs(ctx, sentinel)
+		if len(addrs) > 0 {
+			return addrs, nil
+		}
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.sentinel != nil {
+		addrs := c.getSlaveAddrs(ctx, c.sentinel)
+		if len(addrs) > 0 {
+			return addrs, nil
+		}
+		_ = c.closeSentinel()
+	}
+
+	var sentinelReachable bool
+
+	for i, sentinelAddr := range c.sentinelAddrs {
+		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+		slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+		if err != nil {
+			internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
+				c.opt.MasterName, err)
+			_ = sentinel.Close()
+			continue
+		}
+		sentinelReachable = true
+		addrs := parseSlaveAddrs(slaves, useDisconnected)
+		if len(addrs) == 0 {
+			continue
+		}
+		// Push working sentinel to the top.
+		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+		c.setSentinel(ctx, sentinel)
+
+		return addrs, nil
+	}
+
+	if sentinelReachable {
+		return []string{}, nil
+	}
+	return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
+	addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+	if err != nil {
+		internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+			c.opt.MasterName, err)
+		return ""
+	}
+	return net.JoinHostPort(addr[0], addr[1])
+}
+
+func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
+	addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+	if err != nil {
+		internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+			c.opt.MasterName, err)
+		return []string{}
+	}
+	return parseSlaveAddrs(addrs, false)
+}
+
+func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
+	nodes := make([]string, 0, len(addrs))
+	for _, node := range addrs {
+		ip := ""
+		port := ""
+		flags := []string{}
+		lastkey := ""
+		isDown := false
+
+		for _, key := range node.([]interface{}) {
+			switch lastkey {
+			case "ip":
+				ip = key.(string)
+			case "port":
+				port = key.(string)
+			case "flags":
+				flags = strings.Split(key.(string), ",")
+			}
+			lastkey = key.(string)
+		}
+
+		for _, flag := range flags {
+			switch flag {
+			case "s_down", "o_down":
+				isDown = true
+			case "disconnected":
+				if !keepDisconnected {
+					isDown = true
+				}
+			}
+		}
+
+		if !isDown {
+			nodes = append(nodes, net.JoinHostPort(ip, port))
+		}
+	}
+
+	return nodes
+}
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+	c.mu.RLock()
+	currentAddr := c._masterAddr //nolint:ifshort
+	c.mu.RUnlock()
+
+	if addr == currentAddr {
+		return
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if addr == c._masterAddr {
+		return
+	}
+	c._masterAddr = addr
+
+	internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+		c.opt.MasterName, addr)
+	if c.onFailover != nil {
+		c.onFailover(ctx, addr)
+	}
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+	if c.sentinel != nil {
+		panic("not reached")
+	}
+	c.sentinel = sentinel
+	c.discoverSentinels(ctx)
+
+	c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
+	go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+	sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+	if err != nil {
+		internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+		return
+	}
+	for _, sentinel := range sentinels {
+		vals := sentinel.([]interface{})
+		var ip, port string
+		for i := 0; i < len(vals); i += 2 {
+			key := vals[i].(string)
+			switch key {
+			case "ip":
+				ip = vals[i+1].(string)
+			case "port":
+				port = vals[i+1].(string)
+			}
+		}
+		if ip != "" && port != "" {
+			sentinelAddr := net.JoinHostPort(ip, port)
+			if !contains(c.sentinelAddrs, sentinelAddr) {
+				internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+					sentinelAddr, c.opt.MasterName)
+				c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+			}
+		}
+	}
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+	ctx := context.TODO()
+
+	if c.onUpdate != nil {
+		c.onUpdate(ctx)
+	}
+
+	ch := pubsub.Channel()
+	for msg := range ch {
+		if msg.Channel == "+switch-master" {
+			parts := strings.Split(msg.Payload, " ")
+			if parts[0] != c.opt.MasterName {
+				internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+				continue
+			}
+			addr := net.JoinHostPort(parts[3], parts[4])
+			c.trySwitchMaster(pubsub.getContext(), addr)
+		}
+
+		if c.onUpdate != nil {
+			c.onUpdate(ctx)
+		}
+	}
+}
+
+func contains(slice []string, str string) bool {
+	for _, s := range slice {
+		if s == str {
+			return true
+		}
+	}
+	return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a slave node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+	copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+	failover := &sentinelFailover{
+		opt:           failoverOpt,
+		sentinelAddrs: sentinelAddrs,
+	}
+
+	opt := failoverOpt.clusterOptions()
+	opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+		masterAddr, err := failover.MasterAddr(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		nodes := []ClusterNode{{
+			Addr: masterAddr,
+		}}
+
+		slaveAddrs, err := failover.slaveAddrs(ctx, false)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, slaveAddr := range slaveAddrs {
+			nodes = append(nodes, ClusterNode{
+				Addr: slaveAddr,
+			})
+		}
+
+		slots := []ClusterSlot{
+			{
+				Start: 0,
+				End:   16383,
+				Nodes: nodes,
+			},
+		}
+		return slots, nil
+	}
+
+	c := NewClusterClient(opt)
+
+	failover.mu.Lock()
+	failover.onUpdate = func(ctx context.Context) {
+		c.ReloadState(ctx)
+	}
+	failover.mu.Unlock()
+
+	return c
+}
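
A minimal sketch of constructing the failover-backed cluster client defined above; the Sentinel address and master name are illustrative assumptions:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// NewFailoverClusterClient (defined above) can route read-only commands
	// to replica nodes discovered through Sentinel.
	rdb := redis.NewFailoverClusterClient(&redis.FailoverOptions{
		MasterName:    "mymaster",                  // assumed master name
		SentinelAddrs: []string{"localhost:26379"}, // assumed Sentinel address
	})
	defer rdb.Close()

	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
	fmt.Println(rdb.Get(ctx, "key").Result())
}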

+ 149 - 0
vendor/github.com/go-redis/redis/v8/tx.go

@@ -0,0 +1,149 @@
+package redis
+
+import (
+	"context"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction fails.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+//
+// If you don't need WATCH, use Pipeline instead.
+type Tx struct {
+	baseClient
+	cmdable
+	statefulCmdable
+	hooks
+	ctx context.Context
+}
+
+func (c *Client) newTx(ctx context.Context) *Tx {
+	tx := Tx{
+		baseClient: baseClient{
+			opt:      c.opt,
+			connPool: pool.NewStickyConnPool(c.connPool),
+		},
+		hooks: c.hooks.clone(),
+		ctx:   ctx,
+	}
+	tx.init()
+	return &tx
+}
+
+func (c *Tx) init() {
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+}
+
+func (c *Tx) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Tx) WithContext(ctx context.Context) *Tx {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.init()
+	clone.hooks.lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+// Watch prepares a transaction and, if any keys are given, marks them to be
+// watched for conditional execution.
+//
+// The transaction is automatically closed when fn exits.
+func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+	tx := c.newTx(ctx)
+	defer tx.Close(ctx)
+	if len(keys) > 0 {
+		if err := tx.Watch(ctx, keys...).Err(); err != nil {
+			return err
+		}
+	}
+	return fn(tx)
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close(ctx context.Context) error {
+	_ = c.Unwatch(ctx).Err()
+	return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "watch"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "unwatch"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
+func (c *Tx) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx: c.ctx,
+		exec: func(ctx context.Context, cmds []Cmder) error {
+			return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+		},
+	}
+	pipe.init()
+	return &pipe
+}
+
+// Pipelined executes commands queued in the fn outside of the transaction.
+// Use TxPipelined if you need transactional behavior.
+func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+// TxPipelined executes commands queued in the fn in the transaction.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the first
+// failed command, or nil.
+func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx: c.ctx,
+		exec: func(ctx context.Context, cmds []Cmder) error {
+			return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+		},
+	}
+	pipe.init()
+	return &pipe
+}

+ 215 - 0
vendor/github.com/go-redis/redis/v8/universal.go

@@ -0,0 +1,215 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+// UniversalOptions holds the options required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+	// Either a single address or a seed list of host:port addresses
+	// of cluster/sentinel nodes.
+	Addrs []string
+
+	// Database to be selected after connecting to the server.
+	// Only single-node and failover clients.
+	DB int
+
+	// Common options.
+
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username         string
+	Password         string
+	SentinelUsername string
+	SentinelPassword string
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+
+	// Only cluster clients.
+
+	MaxRedirects   int
+	ReadOnly       bool
+	RouteByLatency bool
+	RouteRandomly  bool
+
+	// The sentinel master name.
+	// Only failover clients.
+
+	MasterName string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+	if len(o.Addrs) == 0 {
+		o.Addrs = []string{"127.0.0.1:6379"}
+	}
+
+	return &ClusterOptions{
+		Addrs:     o.Addrs,
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		Username: o.Username,
+		Password: o.Password,
+
+		MaxRedirects:   o.MaxRedirects,
+		ReadOnly:       o.ReadOnly,
+		RouteByLatency: o.RouteByLatency,
+		RouteRandomly:  o.RouteRandomly,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:        o.DialTimeout,
+		ReadTimeout:        o.ReadTimeout,
+		WriteTimeout:       o.WriteTimeout,
+		PoolFIFO:           o.PoolFIFO,
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+	if len(o.Addrs) == 0 {
+		o.Addrs = []string{"127.0.0.1:26379"}
+	}
+
+	return &FailoverOptions{
+		SentinelAddrs: o.Addrs,
+		MasterName:    o.MasterName,
+
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		DB:               o.DB,
+		Username:         o.Username,
+		Password:         o.Password,
+		SentinelUsername: o.SentinelUsername,
+		SentinelPassword: o.SentinelPassword,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:  o.DialTimeout,
+		ReadTimeout:  o.ReadTimeout,
+		WriteTimeout: o.WriteTimeout,
+
+		PoolFIFO:           o.PoolFIFO,
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+	addr := "127.0.0.1:6379"
+	if len(o.Addrs) > 0 {
+		addr = o.Addrs[0]
+	}
+
+	return &Options{
+		Addr:      addr,
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		DB:       o.DB,
+		Username: o.Username,
+		Password: o.Password,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:  o.DialTimeout,
+		ReadTimeout:  o.ReadTimeout,
+		WriteTimeout: o.WriteTimeout,
+
+		PoolFIFO:           o.PoolFIFO,
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which - based on the provided options -
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally, or for
+// using different clients in different environments.
+type UniversalClient interface {
+	Cmdable
+	Context() context.Context
+	AddHook(Hook)
+	Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+	Do(ctx context.Context, args ...interface{}) *Cmd
+	Process(ctx context.Context, cmd Cmder) error
+	Subscribe(ctx context.Context, channels ...string) *PubSub
+	PSubscribe(ctx context.Context, channels ...string) *PubSub
+	Close() error
+	PoolStats() *PoolStats
+}
+
+var (
+	_ UniversalClient = (*Client)(nil)
+	_ UniversalClient = (*ClusterClient)(nil)
+	_ UniversalClient = (*Ring)(nil)
+)
+
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
+//
+// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+	if opts.MasterName != "" {
+		return NewFailoverClient(opts.Failover())
+	} else if len(opts.Addrs) > 1 {
+		return NewClusterClient(opts.Cluster())
+	}
+	return NewClient(opts.Simple())
+}

Not all files are shown because too many files changed in this diff