
Update API, remove the image RPC

lijian, 6 years ago
Parent commit: e4b36fc702
100 changed files with 18274 additions and 27 deletions
  + 3 - 0      .vscode/settings.json
  + 17 - 1     pkg/models/device.go
  + 4 - 1      services/fileaccess/fileaccess.go
  + 1 - 1      services/fileaccess/flags.go
  + 171 - 0    services/knowoapi/controllers/device.go
  + 21 - 19    services/knowoapi/main.go
  + 12 - 0     services/knowoapi/main_test.go
  + 2 - 0      services/knowoapi/model/all.go
  + 145 - 0    services/knowoapi/model/device.go
  + 119 - 0    services/knowoapi/model/device_test.go
  + 4 - 1      services/knowoapi/router.go
  + 51 - 0     services/knowoapi/services/device.go
  + 6 - 4      services/registry/registry.go
  + 27 - 0     vendor/github.com/ajg/form/LICENSE
  + 370 - 0    vendor/github.com/ajg/form/decode.go
  + 388 - 0    vendor/github.com/ajg/form/encode.go
  + 14 - 0     vendor/github.com/ajg/form/form.go
  + 152 - 0    vendor/github.com/ajg/form/node.go
  + 15 - 0     vendor/github.com/davecgh/go-spew/LICENSE
  + 152 - 0    vendor/github.com/davecgh/go-spew/spew/bypass.go
  + 38 - 0     vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
  + 341 - 0    vendor/github.com/davecgh/go-spew/spew/common.go
  + 306 - 0    vendor/github.com/davecgh/go-spew/spew/config.go
  + 211 - 0    vendor/github.com/davecgh/go-spew/spew/doc.go
  + 509 - 0    vendor/github.com/davecgh/go-spew/spew/dump.go
  + 419 - 0    vendor/github.com/davecgh/go-spew/spew/format.go
  + 148 - 0    vendor/github.com/davecgh/go-spew/spew/spew.go
  + 201 - 0    vendor/github.com/gavv/monotime/LICENSE
  + 36 - 0     vendor/github.com/gavv/monotime/monotime.go
  + 6 - 0      vendor/github.com/gavv/monotime/monotime.s
  + 27 - 0     vendor/github.com/google/go-querystring/LICENSE
  + 320 - 0    vendor/github.com/google/go-querystring/query/encode.go
  + 20 - 0     vendor/github.com/imkira/go-interpol/LICENSE
  + 171 - 0    vendor/github.com/imkira/go-interpol/interpol.go
  + 52 - 0     vendor/github.com/imkira/go-interpol/io.go
  + 68 - 0     vendor/github.com/imkira/go-interpol/options.go
  + 117 - 0    vendor/github.com/iris-contrib/httpexpect/Gopkg.lock
  + 66 - 0     vendor/github.com/iris-contrib/httpexpect/Gopkg.toml
  + 21 - 0     vendor/github.com/iris-contrib/httpexpect/LICENSE
  + 7 - 0      vendor/github.com/iris-contrib/httpexpect/README.md
  + 299 - 0    vendor/github.com/iris-contrib/httpexpect/array.go
  + 99 - 0     vendor/github.com/iris-contrib/httpexpect/binder.go
  + 82 - 0     vendor/github.com/iris-contrib/httpexpect/boolean.go
  + 38 - 0     vendor/github.com/iris-contrib/httpexpect/chain.go
  + 105 - 0    vendor/github.com/iris-contrib/httpexpect/cookie.go
  + 129 - 0    vendor/github.com/iris-contrib/httpexpect/datetime.go
  + 375 - 0    vendor/github.com/iris-contrib/httpexpect/expect.go
  + 184 - 0    vendor/github.com/iris-contrib/httpexpect/helpers.go
  + 198 - 0    vendor/github.com/iris-contrib/httpexpect/match.go
  + 244 - 0    vendor/github.com/iris-contrib/httpexpect/number.go
  + 329 - 0    vendor/github.com/iris-contrib/httpexpect/object.go
  + 100 - 0    vendor/github.com/iris-contrib/httpexpect/printer.go
  + 40 - 0     vendor/github.com/iris-contrib/httpexpect/reporter.go
  + 860 - 0    vendor/github.com/iris-contrib/httpexpect/request.go
  + 515 - 0    vendor/github.com/iris-contrib/httpexpect/response.go
  + 320 - 0    vendor/github.com/iris-contrib/httpexpect/string.go
  + 286 - 0    vendor/github.com/iris-contrib/httpexpect/value.go
  + 143 - 0    vendor/github.com/kataras/iris/httptest/httptest.go
  + 97 - 0     vendor/github.com/kataras/iris/httptest/netutils.go
  + 73 - 0     vendor/github.com/kataras/iris/httptest/status.go
  + 22 - 0     vendor/github.com/moul/http2curl/LICENSE
  + 50 - 0     vendor/github.com/moul/http2curl/Makefile
  + 71 - 0     vendor/github.com/moul/http2curl/http2curl.go
  + 27 - 0     vendor/github.com/pmezard/go-difflib/LICENSE
  + 772 - 0    vendor/github.com/pmezard/go-difflib/difflib/difflib.go
  + 20 - 0     vendor/github.com/sergi/go-diff/LICENSE
  + 1344 - 0   vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go
  + 46 - 0     vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
  + 160 - 0    vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
  + 23 - 0     vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
  + 556 - 0    vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
  + 88 - 0     vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go
  + 22 - 0     vendor/github.com/stretchr/testify/LICENSE
  + 387 - 0    vendor/github.com/stretchr/testify/assert/assertion_forward.go
  + 4 - 0      vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
  + 1052 - 0   vendor/github.com/stretchr/testify/assert/assertions.go
  + 45 - 0     vendor/github.com/stretchr/testify/assert/doc.go
  + 10 - 0     vendor/github.com/stretchr/testify/assert/errors.go
  + 16 - 0     vendor/github.com/stretchr/testify/assert/forward_assertions.go
  + 106 - 0    vendor/github.com/stretchr/testify/assert/http_assertions.go
  + 28 - 0     vendor/github.com/stretchr/testify/require/doc.go
  + 16 - 0     vendor/github.com/stretchr/testify/require/forward_requirements.go
  + 464 - 0    vendor/github.com/stretchr/testify/require/require.go
  + 6 - 0      vendor/github.com/stretchr/testify/require/require.go.tmpl
  + 388 - 0    vendor/github.com/stretchr/testify/require/require_forward.go
  + 4 - 0      vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
  + 9 - 0      vendor/github.com/stretchr/testify/require/requirements.go
  + 202 - 0    vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt
  + 190 - 0    vendor/github.com/xeipuuv/gojsonpointer/pointer.go
  + 202 - 0    vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt
  + 141 - 0    vendor/github.com/xeipuuv/gojsonreference/reference.go
  + 202 - 0    vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt
  + 283 - 0    vendor/github.com/xeipuuv/gojsonschema/errors.go
  + 203 - 0    vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
  + 37 - 0     vendor/github.com/xeipuuv/gojsonschema/internalLog.go
  + 72 - 0     vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
  + 341 - 0    vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go
  + 286 - 0    vendor/github.com/xeipuuv/gojsonschema/locales.go
  + 172 - 0    vendor/github.com/xeipuuv/gojsonschema/result.go
  + 933 - 0    vendor/github.com/xeipuuv/gojsonschema/schema.go

+ 3 - 0
.vscode/settings.json

@@ -0,0 +1,3 @@
+{
+    "go.testFlags": ["-v"]
+}

+ 17 - 1
pkg/models/device.go

@@ -1,9 +1,9 @@
-// device is a product instance, which is managed by our platform
 package models
 
 import "github.com/jinzhu/gorm"
 
 // Device device model
+// device is a product instance, which is managed by our platform
 type Device struct {
 	gorm.Model
 	// which product the device belongs to
@@ -20,4 +20,20 @@ type Device struct {
 	DeviceDescription string `sql:"type:text;not null;"`
 	// device version(the agent version)
 	DeviceVersion string `sql:"type:text;not null;"`
+	// vendor id
+	VendorID uint
+	// communication module name
+	ModuleName string
+}
+
+// DeviceQuery device query
+type DeviceQuery struct {
+	Device
+	ProductName string
+}
+
+// DeviceChartData device data chart point (date and count)
+type DeviceChartData struct {
+	Dt    string
+	Count int
 }

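The new VendorID and ModuleName fields only take effect once the devices table is migrated. A minimal sketch of that step, assuming the project relies on GORM's auto-migration; where migration actually runs is not part of this commit, and the package name below is a placeholder.

package migrations // placeholder package name

import (
	"sparrow/pkg/models"

	"github.com/jinzhu/gorm"
)

// migrateDevices adds the new vendor_id and module_name columns to the
// devices table. gorm v1 AutoMigrate only adds missing columns; it never
// drops or alters existing ones.
func migrateDevices(db *gorm.DB) error {
	return db.AutoMigrate(&models.Device{}).Error
}
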
+ 4 - 1
services/fileaccess/fileaccess.go

@@ -126,7 +126,6 @@ func (f *FileAccess) getTempFileFromRedis() error {
 func (f *FileAccess) checker() {
 	server.Log.Info("start temp file checker")
 	f.getTempFileFromRedis()
-	server.Log.Info("start temp file checker")
 	for {
 		for k, v := range f.tempFiles {
 			if time.Now().Sub(v.CreateTime) > checkTimeOut {
@@ -147,6 +146,10 @@ func (f *FileAccess) checker() {
 
 // DeleteFile 删除文件
 func (f *FileAccess) DeleteFile(args *rpcs.ArgsDeleteFile, reply *rpcs.ReplyEmptyResult) error {
+	reg := regexp.MustCompile(`upload/\$*.*`)
+	src := reg.FindString(args.FileName)
+	err := os.Remove(src)
+	server.Log.Error(err)
 	return nil
 }
 

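The new DeleteFile body logs err even when os.Remove succeeds and always reports success to the caller. A more defensive sketch of the same body, assuming regexp, os and fmt are imported in this file and that uploaded files really live under an upload/ prefix; the original pattern `upload/\$*.*` also tolerates literal $ characters, which the sketch drops.

// DeleteFile removes an uploaded file referenced by its public name/URL.
func (f *FileAccess) DeleteFile(args *rpcs.ArgsDeleteFile, reply *rpcs.ReplyEmptyResult) error {
	reg := regexp.MustCompile(`upload/.*`)
	src := reg.FindString(args.FileName)
	if src == "" {
		return fmt.Errorf("unexpected file name: %s", args.FileName)
	}
	if err := os.Remove(src); err != nil {
		server.Log.Errorf("remove %s failed: %v", src, err)
		return err
	}
	return nil
}
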
+ 1 - 1
services/fileaccess/flags.go

@@ -19,6 +19,6 @@ var (
 	conStaticPath = flag.String(flagStaticPath, defaultStaticPath, "static file path")
 	conMaxSize    = flag.Int(flagMaxSize, defaultMaxSize, "允许上传的最大文件尺寸")
 	conAllowExt   = flag.String(flagAllowExt, defaultAllowExt, "允许上传的文件格式")
-	conDomain     = flag.String(flagDomain, "", "文件服务器域名 http://xxxxxx.xxx")
+	conDomain     = flag.String(flagDomain, "", "文件服务器域名 http://dfsdf.com")
 	conRedisHost  = flag.String(flagRedis, defaultRedisHost, "redis 服务器地址 ip:host")
 )

+ 171 - 0
services/knowoapi/controllers/device.go

@@ -0,0 +1,171 @@
+package controllers
+
+import (
+	"errors"
+	"fmt"
+	"sparrow/services/knowoapi/services"
+	"strconv"
+	"time"
+
+	"github.com/kataras/iris"
+)
+
+// DeviceController api
+type DeviceController struct {
+	Ctx     iris.Context
+	Service services.DeviceService
+	Token
+}
+
+// Get returns the list of activated devices
+// GET /device?pi=&ps=&proid=&device_id=
+func (a *DeviceController) Get() {
+	pi, err := a.Ctx.URLParamInt("pi")
+	if err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	ps, err := a.Ctx.URLParamInt("ps")
+	if err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	proid, err := a.Ctx.URLParamInt("proid")
+	if err != nil {
+		proid = 0
+	}
+	deviceid := a.Ctx.URLParam("device_id")
+	vid := a.getVendorID(a.Ctx)
+	datas, total, err := a.Service.GetDevices(vid, proid, pi, ps, deviceid)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	done(a.Ctx, map[string]interface{}{
+		"list":  datas,
+		"total": total,
+	})
+}
+
+// GetBannerdata returns device activation and activity statistics
+// GET /bannerdata?proid=
+func (a *DeviceController) GetBannerdata() {
+	proid, err := a.Ctx.URLParamInt("proid")
+	if err != nil {
+		proid = 0
+	}
+	var (
+		// activated today
+		tact = 0
+		// active today
+		tlive = 0
+		// activated yesterday
+		yact = 0
+		// active yesterday
+		ylive = 0
+		// total activations
+		totolact = 0
+		// day-over-day change in activations (%)
+		comparedWithYesterdayActive = 0.00
+		// share of devices active today (%)
+		rateOfTodayLive = 0.00
+		// day-over-day change in active devices (%)
+		comparedWithYesterdayLive = 0.00
+	)
+	vendorid := a.getVendorID(a.Ctx)
+	totolact, err = a.Service.GetDeviceCount(vendorid)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	today := time.Now().Format("2006-01-02")
+	yestoday := time.Now().AddDate(0, 0, -1).Format("2006-01-02")
+	tact, err = a.Service.GetActiveNumberOfDate(vendorid, today)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	tlive, err = a.Service.GetLivelyCountOfDate(vendorid, today)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	yact, err = a.Service.GetActiveNumberOfDate(vendorid, yestoday)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	ylive, err = a.Service.GetLivelyCountOfDate(vendorid, yestoday)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	if tact != 0 {
+		comparedWithYesterdayActive = (float64(tact) / float64(yact)) * 100
+	}
+	if tlive != 0 {
+		comparedWithYesterdayLive = (float64(tlive) / float64(ylive)) * 100
+		rateOfTodayLive = float64(tlive) / float64(totolact) * 100
+	}
+	// TODO: productid should be removed here
+	result := map[string]interface{}{
+		"todayAct":                    tact,
+		"todayLive":                   tlive,
+		"yestodayAct":                 yact,
+		"yestodayLive":                ylive,
+		"totalAct":                    totolact,
+		"comparedWithYesterdayActive": fmt.Sprintf("%s%%", strconv.FormatFloat(comparedWithYesterdayActive, 'f', 2, 64)),
+		"comparedWithYesterdayLive":   fmt.Sprintf("%s%%", strconv.FormatFloat(comparedWithYesterdayLive, 'f', 2, 64)),
+		"rateOfTodayLive":             fmt.Sprintf("%s%%", strconv.FormatFloat(rateOfTodayLive, 'f', 2, 64)),
+		"productid":                   proid,
+	}
+	done(a.Ctx, result)
+}
+
+// GetActivechart returns the activation trend over the last N days
+func (a *DeviceController) GetActivechart() {
+	// proid, err := a.Ctx.URLParamInt("proid")
+	// if err != nil {
+	// 	proid = 0
+	// }
+	days, err := a.Ctx.URLParamInt("days")
+	if err != nil {
+		days = 7
+	}
+	if days > 30 {
+		badRequest(a.Ctx, errors.New("非法参数"))
+		return
+	}
+	vendorid := a.getVendorID(a.Ctx)
+	datas, err := a.Service.GetActiveOfNumDays(vendorid, days)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	done(a.Ctx, datas)
+}
+
+// GetLivechart returns activity data for the last N days
+func (a *DeviceController) GetLivechart() {
+	// proid, err := a.Ctx.URLParamInt("proid")
+	// if err != nil {
+	// 	proid = 0
+	// }
+	days, err := a.Ctx.URLParamInt("days")
+	if err != nil {
+		days = 7
+	}
+	if days > 30 {
+		badRequest(a.Ctx, errors.New("非法参数"))
+		return
+	}
+	vendorid := a.getVendorID(a.Ctx)
+	datas, err := a.Service.GetLivelyOfNumDays(vendorid, days)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	done(a.Ctx, map[string]interface{}{
+		"chart": datas,
+	})
+}

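GetBannerdata divides by yact, ylive and totolact, which are all zero for a vendor with no devices or no activity yesterday, so the response can contain +Inf or NaN. A hedged drop-in for the two if blocks above, using only the variables already declared in that handler:

	// Guard the ratios: skip them when the denominator is zero.
	if tact != 0 && yact != 0 {
		comparedWithYesterdayActive = float64(tact) / float64(yact) * 100
	}
	if tlive != 0 {
		if ylive != 0 {
			comparedWithYesterdayLive = float64(tlive) / float64(ylive) * 100
		}
		if totolact != 0 {
			rateOfTodayLive = float64(tlive) / float64(totolact) * 100
		}
	}
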
+ 21 - 19
services/knowoapi/main.go

@@ -16,20 +16,22 @@ func main() {
 		server.Log.Fatal(err)
 		return
 	}
-
-	//iris init
-
-	app := iris.New()
-	db, err := getDB()
+	app := newApp()
+	// register a http handler
+	err = server.RegisterHTTPHandler(app)
 	if err != nil {
-		server.Log.Fatal(err)
+		server.Log.Errorf("RegisterHTTPHandler Error: %s", err)
+		return
 	}
-	models := new(model.All).Init(db)
-
-	gen, err := generator.NewKeyGenerator(*confAESKey)
+	// go
+	err = server.Run()
 	if err != nil {
 		server.Log.Fatal(err)
 	}
+}
+
+func newApp() *iris.Application {
+	app := iris.New()
 	//cors
 	opts := cors.Options{
 		AllowedOrigins: []string{"*"},
@@ -41,18 +43,18 @@ func main() {
 	app.Use(cors.New(opts))
 	app.AllowMethods(iris.MethodOptions)
 	registerErrors(app)
-	//注册路由
-	registerRouters(app, models, gen)
-	app.Build()
-	// register a http handler
-	err = server.RegisterHTTPHandler(app)
+	db, err := getDB()
 	if err != nil {
-		server.Log.Errorf("RegisterHTTPHandler Error: %s", err)
-		return
+		//server.Log.Fatal(err)
 	}
-	// go
-	err = server.Run()
+	models := new(model.All).Init(db)
+	gen, err := generator.NewKeyGenerator(*confAESKey)
 	if err != nil {
-		server.Log.Fatal(err)
+		//server.Log.Fatal(err)
 	}
+
+	// register routes
+	registerRouters(app, models, gen)
+	app.Build()
+	return app
 }

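newApp() now swallows the getDB and NewKeyGenerator errors (the Fatal calls are commented out), presumably so the application can still be constructed for the httptest below. A sketch that keeps construction infallible but at least records the failures, reusing the server.Log.Errorf helper already used in this file; it would replace the corresponding lines inside newApp():

	db, err := getDB()
	if err != nil {
		// Do not abort: tests still need an app instance, but record the failure.
		server.Log.Errorf("getDB failed, continuing without a usable DB: %v", err)
	}
	models := new(model.All).Init(db)
	gen, err := generator.NewKeyGenerator(*confAESKey)
	if err != nil {
		server.Log.Errorf("NewKeyGenerator failed: %v", err)
	}
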
+ 12 - 0
services/knowoapi/main_test.go

@@ -0,0 +1,12 @@
+package main
+
+import (
+	"testing"
+
+	"github.com/kataras/iris/httptest"
+)
+
+func TestController(t *testing.T) {
+	e := httptest.New(t, newApp())
+	e.POST("/").Expect().Status(httptest.StatusOK)
+}

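httptest.New wraps the iris app in an httpexpect expectation object, so the new device routes can be smoke-tested through the same harness. A sketch only: the URL prefix of userRouter and its auth middleware are not visible in this diff, so the path and the expected status below are placeholders.

func TestDeviceList(t *testing.T) {
	e := httptest.New(t, newApp())
	// "/api/v1/user/device" is a guess at the mount point of the device
	// controller; adjust the path and the expected status to match
	// registerRouters and its auth middleware.
	e.GET("/api/v1/user/device").
		WithQuery("pi", 1).
		WithQuery("ps", 10).
		Expect().
		Status(httptest.StatusOK)
}
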
+ 2 - 0
services/knowoapi/model/all.go

@@ -10,6 +10,7 @@ type All struct {
 	Application *Application
 	Protocal    *Protocal
 	Alert       *Alert
+	Device      *Device
 }
 
 // Init 初始化所有model
@@ -20,5 +21,6 @@ func (a *All) Init(db *gorm.DB) *All {
 	a.Application = new(Application).Init(db)
 	a.Protocal = new(Protocal).Init(db)
 	a.Alert = new(Alert).Init(db)
+	a.Device = new(Device).Init(db)
 	return a
 }

+ 145 - 0
services/knowoapi/model/device.go

@@ -0,0 +1,145 @@
+package model
+
+import (
+	"sparrow/pkg/models"
+	"time"
+
+	"github.com/jinzhu/gorm"
+)
+
+// Device devices data
+type Device struct {
+	db *gorm.DB
+}
+
+// Init device
+// TODO: add product-ID filtering; none of the current queries filter by product ID
+func (a *Device) Init(db *gorm.DB) *Device {
+	a.db = db
+	return a
+}
+
+// GetDeviceCount returns the total number of activated devices under the given vendorid
+func (a *Device) GetDeviceCount(vendorid uint) (count int, err error) {
+	device := &models.Device{
+		VendorID: uint(vendorid),
+	}
+	err = a.db.Model(device).Where(device).Count(&count).Error
+	return
+}
+
+// GetActiveNumberOfDate returns the number of devices activated on the given date
+func (a *Device) GetActiveNumberOfDate(vendorid uint, datetime string) (count int, err error) {
+	device := &models.Device{
+		VendorID: uint(vendorid),
+	}
+	err = a.db.Model(device).
+		Where("vendor_id = ? and DATE_FORMAT(created_at, '%Y-%m-%d') = ?",
+			vendorid, datetime).
+		Count(&count).
+		Error
+	return
+}
+
+// GetLivelyCountOfDate returns the number of devices active on the given date
+func (a *Device) GetLivelyCountOfDate(vendorid uint, datetime string) (count int, err error) {
+	device := &models.Device{
+		VendorID: uint(vendorid),
+	}
+	err = a.db.Model(device).
+		Where("vendor_id = ? and DATE_FORMAT(updated_at, '%Y-%m-%d') = ?",
+			vendorid, datetime).Count(&count).Error
+	return
+}
+
+// // GetActiveOf7Days activation data for the last 7 days
+// func (a *Device) GetActiveOf7Days(vendorid int) (datas []models.DeviceChartData, err error) {
+// 	err = a.db.Model(&models.Device{}).
+// 		Select("DATE_FORMAT(created_at,'%Y-%m-%d') as dt, count(id) as count").
+// 		Where("DATE_SUB(CURDATE(), INTERVAL 7 DAY) <= date(created_at) and vendor_id = ?",
+// 			vendorid).
+// 		Group("dt").Order("dt").Scan(&datas).Error
+// 	if err != nil {
+// 		return
+// 	}
+// 	return
+// }
+
+// // GetActiveOf14Days activation data for the last 14 days
+// func (a *Device) GetActiveOf14Days(vendorid int) (datas []models.DeviceChartData, err error) {
+// 	err = a.db.Model(&models.Device{}).
+// 		Select("DATE_FORMAT(created_at,'%Y-%m-%d') as dt, count(id) as count").
+// 		Where("DATE_SUB(CURDATE(), INTERVAL 14 DAY) <= date(created_at) and vendor_id = ?",
+// 			vendorid).
+// 		Group("dt").Order("dt").Scan(&datas).Error
+// 	if err != nil {
+// 		return
+// 	}
+// 	return
+// }
+
+// // GetActiveOf30Days activation data for the last 30 days
+// func (a *Device) GetActiveOf30Days(vendorid int) (datas []models.DeviceChartData, err error) {
+// 	err = a.db.Model(&models.Device{}).
+// 		Select("DATE_FORMAT(created_at,'%Y-%m-%d') as dt, count(id) as count").
+// 		Where("DATE_SUB(CURDATE(), INTERVAL 30 DAY) <= date(created_at) and vendor_id = ?",
+// 			vendorid).
+// 		Group("dt").Order("dt").Scan(&datas).Error
+// 	if err != nil {
+// 		return
+// 	}
+// 	return
+// }
+
+// GetActiveOfNumDays returns the activation trend for the last N days
+// e.g. days=7 queries the last 7 days of data
+func (a *Device) GetActiveOfNumDays(vendorid uint, days int) ([]map[string]interface{}, error) {
+	i := 1
+	datas := make([]map[string]interface{}, 0)
+	for i <= days {
+		day := i - days
+		i = i + 1
+		datetime := time.Now().AddDate(0, 0, day).Format("2006-01-02")
+		count, err := a.GetActiveNumberOfDate(vendorid, datetime)
+		if err != nil {
+			return nil, err
+		}
+		data := make(map[string]interface{})
+		data[datetime] = count
+		datas = append(datas, data)
+	}
+	return datas, nil
+}
+
+// GetLivelyOfNumDays returns the device activity trend for the last N days
+func (a *Device) GetLivelyOfNumDays(vendorid uint, days int) ([]map[string]interface{}, error) {
+	i := 1
+	datas := make([]map[string]interface{}, 0)
+	for i <= days {
+		day := i - days
+		i = i + 1
+		datetime := time.Now().AddDate(0, 0, day).Format("2006-01-02")
+		count, err := a.GetLivelyCountOfDate(vendorid, datetime)
+		if err != nil {
+			return nil, err
+		}
+		data := make(map[string]interface{})
+		data[datetime] = count
+		datas = append(datas, data)
+	}
+	return datas, nil
+}
+
+// GetDevices returns the vendor's activated devices
+func (a *Device) GetDevices(vendorid uint, proid, pi, ps int, deviceid string) (datas []models.Device, total int, err error) {
+	tx := a.db.Where("vendor_id = ?", vendorid)
+	if proid != 0 {
+		tx = tx.Where("product_id = ?", proid)
+	}
+	if deviceid != "" {
+		tx = tx.Where("device_identifier = ?", deviceid)
+	}
+	err = tx.Limit(ps).Offset((pi - 1) * ps).Find(&datas).Error
+	tx.Model(&models.Device{}).Count(&total)
+	return
+}

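GetActiveOfNumDays and GetLivelyOfNumDays issue one COUNT query per day, i.e. up to 30 round trips per chart. The commented-out GetActiveOf7Days above already shows the grouped alternative; below is a hedged sketch of a parameterised version. The method name is new, and days with zero activations simply drop out of the result set, so callers would still need to fill the gaps.

// getActiveTrend returns per-day activation counts for the last N days in a
// single grouped query, reusing models.DeviceChartData.
func (a *Device) getActiveTrend(vendorid uint, days int) (datas []models.DeviceChartData, err error) {
	err = a.db.Model(&models.Device{}).
		Select("DATE_FORMAT(created_at,'%Y-%m-%d') as dt, count(id) as count").
		Where("DATE_SUB(CURDATE(), INTERVAL ? DAY) <= date(created_at) and vendor_id = ?",
			days, vendorid).
		Group("dt").Order("dt").Scan(&datas).Error
	return
}
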
+ 119 - 0
services/knowoapi/model/device_test.go

@@ -0,0 +1,119 @@
+package model
+
+import (
+	"sparrow/pkg/mysql"
+	"testing"
+
+	"github.com/jinzhu/gorm"
+)
+
+func TestGetDeviceCount(t *testing.T) {
+
+	device := newDevice(t)
+	count, err := device.GetDeviceCount(1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(count)
+}
+
+func TestGetActiveNumberOfDate(t *testing.T) {
+	d := newDevice(t)
+	count, err := d.GetActiveNumberOfDate(1, "2018-12-27")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(count)
+}
+
+func TestGetLivelyCountOfDate(t *testing.T) {
+	d := newDevice(t)
+	count, err := d.GetLivelyCountOfDate(1, "2019-01-02")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(count)
+}
+
+// func TestGetActiveOf7Days(t *testing.T) {
+// 	d := newDevice(t)
+// 	datas, err := d.GetActiveOf7Days(1)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	for k, v := range datas {
+// 		t.Logf("%d, %v", k, v)
+// 	}
+// }
+
+// func TestGetActiveOf14Days(t *testing.T) {
+// 	d := newDevice(t)
+// 	datas, err := d.GetActiveOf14Days(1)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	for k, v := range datas {
+// 		t.Logf("%d, %v", k, v)
+// 	}
+// }
+// func TestGetActiveOf30Days(t *testing.T) {
+// 	d := newDevice(t)
+// 	datas, err := d.GetActiveOf30Days(1)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	for k, v := range datas {
+// 		t.Logf("%d, %v", k, v)
+// 	}
+// }
+
+func TestGetActiveOfNumDays(t *testing.T) {
+	d := newDevice(t)
+	datas, err := d.GetActiveOfNumDays(1, 30)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for k, v := range datas {
+		t.Logf("%d, %v", k, v)
+	}
+}
+func TestGetLivelyOfNumDays(t *testing.T) {
+	d := newDevice(t)
+	datas, err := d.GetLivelyOfNumDays(1, 30)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for k, v := range datas {
+		t.Logf("%d, %v", k, v)
+	}
+}
+func TestGetDevices(t *testing.T) {
+	d := newDevice(t)
+	datas, total, err := d.GetDevices(1, 0, 1, 10, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("%v,%d", datas, total)
+}
+func newDevice(t *testing.T) *Device {
+	db, err := getDB()
+	if err != nil {
+		t.Fatal(err)
+	}
+	device := new(Device).Init(db)
+	return device
+}
+
+func getDB() (*gorm.DB, error) {
+	db, err := mysql.GetClient("192.168.175.60", "3306", "SparrowCloud", "SparrowCloud", "123456")
+	if err != nil {
+		return nil, err
+	}
+	gormdb, err := gorm.Open("mysql", db)
+	if err != nil {
+		return nil, err
+	}
+	gormdb.SingularTable(true)
+	gormdb.LogMode(true)
+	return gormdb, nil
+}

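getDB hard-codes a LAN address and real-looking credentials, so these tests only run on that one network. A sketch of an environment-driven variant: the SPARROW_TEST_DB_* names are placeholders, "os" would need importing, and the argument order of mysql.GetClient is assumed to match the call above.

func envOr(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func getDBFromEnv() (*gorm.DB, error) {
	db, err := mysql.GetClient(
		envOr("SPARROW_TEST_DB_HOST", "127.0.0.1"),
		envOr("SPARROW_TEST_DB_PORT", "3306"),
		envOr("SPARROW_TEST_DB_NAME", "SparrowCloud"),
		envOr("SPARROW_TEST_DB_USER", "SparrowCloud"),
		envOr("SPARROW_TEST_DB_PASS", ""),
	)
	if err != nil {
		return nil, err
	}
	gormdb, err := gorm.Open("mysql", db)
	if err != nil {
		return nil, err
	}
	gormdb.SingularTable(true)
	gormdb.LogMode(true)
	return gormdb, nil
}
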
+ 4 - 1
services/knowoapi/router.go

@@ -47,6 +47,7 @@ func registerRouters(srv *iris.Application, models *model.All, gen *generator.Ke
 	appService := services.NewAppService(models, gen)
 	protocalService := services.NewProtocalService(models)
 	alertService := services.NewAlertService(models)
+	deviceService := services.NewDeviceService(models)
 	v1router := srv.Party("/api/v1")
 
 	// 登陆,注册
@@ -71,5 +72,7 @@ func registerRouters(srv *iris.Application, models *model.All, gen *generator.Ke
 	//alert api
 	alertAPI := mvc.New(userRouter.Party("/alert"))
 	alertAPI.Register(alertService).Handle(new(controllers.AlertController))
-
+	//device api
+	deviceAPI := mvc.New(userRouter.Party("/device"))
+	deviceAPI.Register(deviceService).Handle(new(controllers.DeviceController))
 }

+ 51 - 0
services/knowoapi/services/device.go

@@ -0,0 +1,51 @@
+package services
+
+import (
+	"sparrow/pkg/models"
+	"sparrow/services/knowoapi/model"
+)
+
+// DeviceService device service interface
+type DeviceService interface {
+	// total number of devices the vendor has activated
+	GetDeviceCount(vendorid uint) (int, error)
+	// number of devices the vendor activated on a given day
+	GetActiveNumberOfDate(vendor uint, datetime string) (int, error)
+	// number of the vendor's devices active on a given day
+	GetLivelyCountOfDate(uint, string) (int, error)
+	// activation data for the last N days
+	GetActiveOfNumDays(uint, int) ([]map[string]interface{}, error)
+	// activity data for the last N days
+	GetLivelyOfNumDays(uint, int) ([]map[string]interface{}, error)
+	// list of the vendor's activated devices
+	GetDevices(vendorid uint, proid, pi, ps int, deviceid string) ([]models.Device, int, error)
+}
+
+type deviceservice struct {
+	models *model.All
+}
+
+// NewDeviceService create device service
+func NewDeviceService(models *model.All) DeviceService {
+	return deviceservice{
+		models: models,
+	}
+}
+func (a deviceservice) GetDevices(vendorid uint, proid, pi, ps int, deviceid string) ([]models.Device, int, error) {
+	return a.models.Device.GetDevices(vendorid, proid, pi, ps, deviceid)
+}
+func (a deviceservice) GetDeviceCount(vendorid uint) (int, error) {
+	return a.models.Device.GetDeviceCount(vendorid)
+}
+func (a deviceservice) GetActiveNumberOfDate(vendorid uint, datetime string) (int, error) {
+	return a.models.Device.GetActiveNumberOfDate(vendorid, datetime)
+}
+func (a deviceservice) GetLivelyCountOfDate(vendorid uint, datetime string) (int, error) {
+	return a.models.Device.GetLivelyCountOfDate(vendorid, datetime)
+}
+func (a deviceservice) GetActiveOfNumDays(vendorid uint, days int) ([]map[string]interface{}, error) {
+	return a.models.Device.GetActiveOfNumDays(vendorid, days)
+}
+func (a deviceservice) GetLivelyOfNumDays(vendorid uint, days int) ([]map[string]interface{}, error) {
+	return a.models.Device.GetLivelyOfNumDays(vendorid, days)
+}

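The service layer is a thin pass-through over model.Device, so it can be exercised without going through iris at all. A minimal usage sketch, assuming a *model.All initialised the same way as in newApp():

	svc := services.NewDeviceService(models) // models: *model.All from new(model.All).Init(db)
	count, err := svc.GetDeviceCount(1)      // total activated devices for vendor 1
	if err != nil {
		// handle or log the error
	}
	_ = count
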
+ 6 - 4
services/registry/registry.go

@@ -21,6 +21,7 @@ type Registry struct {
 	keygen *generator.KeyGenerator
 }
 
+// NewRegistry create new registry
 func NewRegistry() (*Registry, error) {
 	gen, err := generator.NewKeyGenerator(*confAESKey)
 	if err != nil {
@@ -234,7 +235,7 @@ func (r *Registry) GetApplications(noarg int, reply *[]models.Application) error
 	return db.Find(reply).Error
 }
 
-// FindAppliation will find product by specified ID
+// FindApplication will find product by specified ID
 func (r *Registry) FindApplication(id int32, reply *models.Application) error {
 	db, err := getDB()
 	if err != nil {
@@ -282,6 +283,7 @@ func (r *Registry) RegisterDevice(args *rpcs.ArgsDeviceRegister, reply *models.D
 		reply.DeviceName = product.ProductName // product name as default device name.
 		reply.DeviceDescription = product.ProductDescription
 		reply.DeviceVersion = args.DeviceVersion
+		reply.VendorID = uint(product.VendorID)
 		err = db.Save(reply).Error
 		if err != nil {
 			return err
@@ -384,7 +386,7 @@ func (r *Registry) ValidateDevice(key string, device *models.Device) error {
 	return nil
 }
 
-// UpdateDevice will update a device info by identifier
+// UpdateDeviceInfo will update a device info by identifier
 func (r *Registry) UpdateDeviceInfo(args *rpcs.ArgsDeviceUpdate, reply *models.Device) error {
 	db, err := getDB()
 	if err != nil {
@@ -414,7 +416,7 @@ func (r *Registry) UpdateDeviceInfo(args *rpcs.ArgsDeviceUpdate, reply *models.D
 	return nil
 }
 
-// createRule create a new rule with specified parameters.
+// CreateRule create a new rule with specified parameters.
 func (r *Registry) CreateRule(args *models.Rule, reply *rpcs.ReplyEmptyResult) error {
 	db, err := getDB()
 	if err != nil {
@@ -424,7 +426,7 @@ func (r *Registry) CreateRule(args *models.Rule, reply *rpcs.ReplyEmptyResult) e
 	return db.Save(args).Error
 }
 
-// queryRules queries rules by trigger and rule type.
+// QueryRules queries rules by trigger and rule type.
 func (r *Registry) QueryRules(args *models.Rule, reply *[]models.Rule) error {
 	db, err := getDB()
 	if err != nil {

+ 27 - 0
vendor/github.com/ajg/form/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2014 Alvaro J. Genial. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 370 - 0
vendor/github.com/ajg/form/decode.go

@@ -0,0 +1,370 @@
+// Copyright 2014 Alvaro J. Genial. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package form
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// NewDecoder returns a new form Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r, defaultDelimiter, defaultEscape, false, false}
+}
+
+// Decoder decodes data from a form (application/x-www-form-urlencoded).
+type Decoder struct {
+	r             io.Reader
+	d             rune
+	e             rune
+	ignoreUnknown bool
+	ignoreCase    bool
+}
+
+// DelimitWith sets r as the delimiter used for composite keys by Decoder d and returns the latter; it is '.' by default.
+func (d *Decoder) DelimitWith(r rune) *Decoder {
+	d.d = r
+	return d
+}
+
+// EscapeWith sets r as the escape used for delimiters (and to escape itself) by Decoder d and returns the latter; it is '\\' by default.
+func (d *Decoder) EscapeWith(r rune) *Decoder {
+	d.e = r
+	return d
+}
+
+// Decode reads in and decodes form-encoded data into dst.
+func (d Decoder) Decode(dst interface{}) error {
+	bs, err := ioutil.ReadAll(d.r)
+	if err != nil {
+		return err
+	}
+	vs, err := url.ParseQuery(string(bs))
+	if err != nil {
+		return err
+	}
+	v := reflect.ValueOf(dst)
+	return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
+}
+
+// IgnoreUnknownKeys if set to true it will make the Decoder ignore values
+// that are not found in the destination object instead of returning an error.
+func (d *Decoder) IgnoreUnknownKeys(ignoreUnknown bool) {
+	d.ignoreUnknown = ignoreUnknown
+}
+
+// IgnoreCase if set to true it will make the Decoder try to set values in the
+// destination object even if the case does not match.
+func (d *Decoder) IgnoreCase(ignoreCase bool) {
+	d.ignoreCase = ignoreCase
+}
+
+// DecodeString decodes src into dst.
+func (d Decoder) DecodeString(dst interface{}, src string) error {
+	vs, err := url.ParseQuery(src)
+	if err != nil {
+		return err
+	}
+	v := reflect.ValueOf(dst)
+	return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
+}
+
+// DecodeValues decodes vs into dst.
+func (d Decoder) DecodeValues(dst interface{}, vs url.Values) error {
+	v := reflect.ValueOf(dst)
+	return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
+}
+
+// DecodeString decodes src into dst.
+func DecodeString(dst interface{}, src string) error {
+	return NewDecoder(nil).DecodeString(dst, src)
+}
+
+// DecodeValues decodes vs into dst.
+func DecodeValues(dst interface{}, vs url.Values) error {
+	return NewDecoder(nil).DecodeValues(dst, vs)
+}
+
+func (d Decoder) decodeNode(v reflect.Value, n node) (err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			err = fmt.Errorf("%v", e)
+		}
+	}()
+
+	if v.Kind() == reflect.Slice {
+		return fmt.Errorf("could not decode directly into slice; use pointer to slice")
+	}
+	d.decodeValue(v, n)
+	return nil
+}
+
+func (d Decoder) decodeValue(v reflect.Value, x interface{}) {
+	t := v.Type()
+	k := v.Kind()
+
+	if k == reflect.Ptr && v.IsNil() {
+		v.Set(reflect.New(t.Elem()))
+	}
+
+	if unmarshalValue(v, x) {
+		return
+	}
+
+	empty := isEmpty(x)
+
+	switch k {
+	case reflect.Ptr:
+		d.decodeValue(v.Elem(), x)
+		return
+	case reflect.Interface:
+		if !v.IsNil() {
+			d.decodeValue(v.Elem(), x)
+			return
+
+		} else if empty {
+			return // Allow nil interfaces only if empty.
+		} else {
+			panic("form: cannot decode non-empty value into into nil interface")
+		}
+	}
+
+	if empty {
+		v.Set(reflect.Zero(t)) // Treat the empty string as the zero value.
+		return
+	}
+
+	switch k {
+	case reflect.Struct:
+		if t.ConvertibleTo(timeType) {
+			d.decodeTime(v, x)
+		} else if t.ConvertibleTo(urlType) {
+			d.decodeURL(v, x)
+		} else {
+			d.decodeStruct(v, x)
+		}
+	case reflect.Slice:
+		d.decodeSlice(v, x)
+	case reflect.Array:
+		d.decodeArray(v, x)
+	case reflect.Map:
+		d.decodeMap(v, x)
+	case reflect.Invalid, reflect.Uintptr, reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		panic(t.String() + " has unsupported kind " + k.String())
+	default:
+		d.decodeBasic(v, x)
+	}
+}
+
+func (d Decoder) decodeStruct(v reflect.Value, x interface{}) {
+	t := v.Type()
+	for k, c := range getNode(x) {
+		if f, ok := findField(v, k, d.ignoreCase); !ok && k == "" {
+			panic(getString(x) + " cannot be decoded as " + t.String())
+		} else if !ok {
+			if !d.ignoreUnknown {
+				panic(k + " doesn't exist in " + t.String())
+			}
+		} else if !f.CanSet() {
+			panic(k + " cannot be set in " + t.String())
+		} else {
+			d.decodeValue(f, c)
+		}
+	}
+}
+
+func (d Decoder) decodeMap(v reflect.Value, x interface{}) {
+	t := v.Type()
+	if v.IsNil() {
+		v.Set(reflect.MakeMap(t))
+	}
+	for k, c := range getNode(x) {
+		i := reflect.New(t.Key()).Elem()
+		d.decodeValue(i, k)
+
+		w := v.MapIndex(i)
+		if w.IsValid() { // We have an actual element value to decode into.
+			if w.Kind() == reflect.Interface {
+				w = w.Elem()
+			}
+			w = reflect.New(w.Type()).Elem()
+		} else if t.Elem().Kind() != reflect.Interface { // The map's element type is concrete.
+			w = reflect.New(t.Elem()).Elem()
+		} else {
+			// The best we can do here is to decode as either a string (for scalars) or a map[string]interface {} (for the rest).
+			// We could try to guess the type based on the string (e.g. true/false => bool) but that'll get ugly fast,
+			// especially if we have to guess the kind (slice vs. array vs. map) and index type (e.g. string, int, etc.)
+			switch c.(type) {
+			case node:
+				w = reflect.MakeMap(stringMapType)
+			case string:
+				w = reflect.New(stringType).Elem()
+			default:
+				panic("value is neither node nor string")
+			}
+		}
+
+		d.decodeValue(w, c)
+		v.SetMapIndex(i, w)
+	}
+}
+
+func (d Decoder) decodeArray(v reflect.Value, x interface{}) {
+	t := v.Type()
+	for k, c := range getNode(x) {
+		i, err := strconv.Atoi(k)
+		if err != nil {
+			panic(k + " is not a valid index for type " + t.String())
+		}
+		if l := v.Len(); i >= l {
+			panic("index is above array size")
+		}
+		d.decodeValue(v.Index(i), c)
+	}
+}
+
+func (d Decoder) decodeSlice(v reflect.Value, x interface{}) {
+	t := v.Type()
+	if t.Elem().Kind() == reflect.Uint8 {
+		// Allow, but don't require, byte slices to be encoded as a single string.
+		if s, ok := x.(string); ok {
+			v.SetBytes([]byte(s))
+			return
+		}
+	}
+
+	// NOTE: Implicit indexing is currently done at the parseValues level,
+	//       so if if an implicitKey reaches here it will always replace the last.
+	implicit := 0
+	for k, c := range getNode(x) {
+		var i int
+		if k == implicitKey {
+			i = implicit
+			implicit++
+		} else {
+			explicit, err := strconv.Atoi(k)
+			if err != nil {
+				panic(k + " is not a valid index for type " + t.String())
+			}
+			i = explicit
+			implicit = explicit + 1
+		}
+		// "Extend" the slice if it's too short.
+		if l := v.Len(); i >= l {
+			delta := i - l + 1
+			v.Set(reflect.AppendSlice(v, reflect.MakeSlice(t, delta, delta)))
+		}
+		d.decodeValue(v.Index(i), c)
+	}
+}
+
+func (d Decoder) decodeBasic(v reflect.Value, x interface{}) {
+	t := v.Type()
+	switch k, s := t.Kind(), getString(x); k {
+	case reflect.Bool:
+		if b, e := strconv.ParseBool(s); e == nil {
+			v.SetBool(b)
+		} else {
+			panic("could not parse bool from " + strconv.Quote(s))
+		}
+	case reflect.Int,
+		reflect.Int8,
+		reflect.Int16,
+		reflect.Int32,
+		reflect.Int64:
+		if i, e := strconv.ParseInt(s, 10, 64); e == nil {
+			v.SetInt(i)
+		} else {
+			panic("could not parse int from " + strconv.Quote(s))
+		}
+	case reflect.Uint,
+		reflect.Uint8,
+		reflect.Uint16,
+		reflect.Uint32,
+		reflect.Uint64:
+		if u, e := strconv.ParseUint(s, 10, 64); e == nil {
+			v.SetUint(u)
+		} else {
+			panic("could not parse uint from " + strconv.Quote(s))
+		}
+	case reflect.Float32,
+		reflect.Float64:
+		if f, e := strconv.ParseFloat(s, 64); e == nil {
+			v.SetFloat(f)
+		} else {
+			panic("could not parse float from " + strconv.Quote(s))
+		}
+	case reflect.Complex64,
+		reflect.Complex128:
+		var c complex128
+		if n, err := fmt.Sscanf(s, "%g", &c); n == 1 && err == nil {
+			v.SetComplex(c)
+		} else {
+			panic("could not parse complex from " + strconv.Quote(s))
+		}
+	case reflect.String:
+		v.SetString(s)
+	default:
+		panic(t.String() + " has unsupported kind " + k.String())
+	}
+}
+
+func (d Decoder) decodeTime(v reflect.Value, x interface{}) {
+	t := v.Type()
+	s := getString(x)
+	// TODO: Find a more efficient way to do this.
+	for _, f := range allowedTimeFormats {
+		if p, err := time.Parse(f, s); err == nil {
+			v.Set(reflect.ValueOf(p).Convert(v.Type()))
+			return
+		}
+	}
+	panic("cannot decode string `" + s + "` as " + t.String())
+}
+
+func (d Decoder) decodeURL(v reflect.Value, x interface{}) {
+	t := v.Type()
+	s := getString(x)
+	if u, err := url.Parse(s); err == nil {
+		v.Set(reflect.ValueOf(*u).Convert(v.Type()))
+		return
+	}
+	panic("cannot decode string `" + s + "` as " + t.String())
+}
+
+var allowedTimeFormats = []string{
+	"2006-01-02T15:04:05.999999999Z07:00",
+	"2006-01-02T15:04:05.999999999Z07",
+	"2006-01-02T15:04:05.999999999Z",
+	"2006-01-02T15:04:05.999999999",
+	"2006-01-02T15:04:05Z07:00",
+	"2006-01-02T15:04:05Z07",
+	"2006-01-02T15:04:05Z",
+	"2006-01-02T15:04:05",
+	"2006-01-02T15:04Z",
+	"2006-01-02T15:04",
+	"2006-01-02T15Z",
+	"2006-01-02T15",
+	"2006-01-02",
+	"2006-01",
+	"2006",
+	"15:04:05.999999999Z07:00",
+	"15:04:05.999999999Z07",
+	"15:04:05.999999999Z",
+	"15:04:05.999999999",
+	"15:04:05Z07:00",
+	"15:04:05Z07",
+	"15:04:05Z",
+	"15:04:05",
+	"15:04Z",
+	"15:04",
+	"15Z",
+	"15",
+}

+ 388 - 0
vendor/github.com/ajg/form/encode.go

@@ -0,0 +1,388 @@
+// Copyright 2014 Alvaro J. Genial. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package form
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// NewEncoder returns a new form Encoder.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{w, defaultDelimiter, defaultEscape, false}
+}
+
+// Encoder provides a way to encode to a Writer.
+type Encoder struct {
+	w io.Writer
+	d rune
+	e rune
+	z bool
+}
+
+// DelimitWith sets r as the delimiter used for composite keys by Encoder e and returns the latter; it is '.' by default.
+func (e *Encoder) DelimitWith(r rune) *Encoder {
+	e.d = r
+	return e
+}
+
+// EscapeWith sets r as the escape used for delimiters (and to escape itself) by Encoder e and returns the latter; it is '\\' by default.
+func (e *Encoder) EscapeWith(r rune) *Encoder {
+	e.e = r
+	return e
+}
+
+// KeepZeros sets whether Encoder e should keep zero (default) values in their literal form when encoding, and returns the former; by default zero values are not kept, but are rather encoded as the empty string.
+func (e *Encoder) KeepZeros(z bool) *Encoder {
+	e.z = z
+	return e
+}
+
+// Encode encodes dst as form and writes it out using the Encoder's Writer.
+func (e Encoder) Encode(dst interface{}) error {
+	v := reflect.ValueOf(dst)
+	n, err := encodeToNode(v, e.z)
+	if err != nil {
+		return err
+	}
+	s := n.values(e.d, e.e).Encode()
+	l, err := io.WriteString(e.w, s)
+	switch {
+	case err != nil:
+		return err
+	case l != len(s):
+		return errors.New("could not write data completely")
+	}
+	return nil
+}
+
+// EncodeToString encodes dst as a form and returns it as a string.
+func EncodeToString(dst interface{}) (string, error) {
+	v := reflect.ValueOf(dst)
+	n, err := encodeToNode(v, false)
+	if err != nil {
+		return "", err
+	}
+	vs := n.values(defaultDelimiter, defaultEscape)
+	return vs.Encode(), nil
+}
+
+// EncodeToValues encodes dst as a form and returns it as Values.
+func EncodeToValues(dst interface{}) (url.Values, error) {
+	v := reflect.ValueOf(dst)
+	n, err := encodeToNode(v, false)
+	if err != nil {
+		return nil, err
+	}
+	vs := n.values(defaultDelimiter, defaultEscape)
+	return vs, nil
+}
+
+func encodeToNode(v reflect.Value, z bool) (n node, err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			err = fmt.Errorf("%v", e)
+		}
+	}()
+	return getNode(encodeValue(v, z)), nil
+}
+
+func encodeValue(v reflect.Value, z bool) interface{} {
+	t := v.Type()
+	k := v.Kind()
+
+	if s, ok := marshalValue(v); ok {
+		return s
+	} else if !z && isEmptyValue(v) {
+		return "" // Treat the zero value as the empty string.
+	}
+
+	switch k {
+	case reflect.Ptr, reflect.Interface:
+		return encodeValue(v.Elem(), z)
+	case reflect.Struct:
+		if t.ConvertibleTo(timeType) {
+			return encodeTime(v)
+		} else if t.ConvertibleTo(urlType) {
+			return encodeURL(v)
+		}
+		return encodeStruct(v, z)
+	case reflect.Slice:
+		return encodeSlice(v, z)
+	case reflect.Array:
+		return encodeArray(v, z)
+	case reflect.Map:
+		return encodeMap(v, z)
+	case reflect.Invalid, reflect.Uintptr, reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		panic(t.String() + " has unsupported kind " + t.Kind().String())
+	default:
+		return encodeBasic(v)
+	}
+}
+
+func encodeStruct(v reflect.Value, z bool) interface{} {
+	t := v.Type()
+	n := node{}
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		k, oe := fieldInfo(f)
+
+		if k == "-" {
+			continue
+		} else if fv := v.Field(i); oe && isEmptyValue(fv) {
+			delete(n, k)
+		} else {
+			n[k] = encodeValue(fv, z)
+		}
+	}
+	return n
+}
+
+func encodeMap(v reflect.Value, z bool) interface{} {
+	n := node{}
+	for _, i := range v.MapKeys() {
+		k := getString(encodeValue(i, z))
+		n[k] = encodeValue(v.MapIndex(i), z)
+	}
+	return n
+}
+
+func encodeArray(v reflect.Value, z bool) interface{} {
+	n := node{}
+	for i := 0; i < v.Len(); i++ {
+		n[strconv.Itoa(i)] = encodeValue(v.Index(i), z)
+	}
+	return n
+}
+
+func encodeSlice(v reflect.Value, z bool) interface{} {
+	t := v.Type()
+	if t.Elem().Kind() == reflect.Uint8 {
+		return string(v.Bytes()) // Encode byte slices as a single string by default.
+	}
+	n := node{}
+	for i := 0; i < v.Len(); i++ {
+		n[strconv.Itoa(i)] = encodeValue(v.Index(i), z)
+	}
+	return n
+}
+
+func encodeTime(v reflect.Value) string {
+	t := v.Convert(timeType).Interface().(time.Time)
+	if t.Year() == 0 && (t.Month() == 0 || t.Month() == 1) && (t.Day() == 0 || t.Day() == 1) {
+		return t.Format("15:04:05.999999999Z07:00")
+	} else if t.Hour() == 0 && t.Minute() == 0 && t.Second() == 0 && t.Nanosecond() == 0 {
+		return t.Format("2006-01-02")
+	}
+	return t.Format("2006-01-02T15:04:05.999999999Z07:00")
+}
+
+func encodeURL(v reflect.Value) string {
+	u := v.Convert(urlType).Interface().(url.URL)
+	return u.String()
+}
+
+func encodeBasic(v reflect.Value) string {
+	t := v.Type()
+	switch k := t.Kind(); k {
+	case reflect.Bool:
+		return strconv.FormatBool(v.Bool())
+	case reflect.Int,
+		reflect.Int8,
+		reflect.Int16,
+		reflect.Int32,
+		reflect.Int64:
+		return strconv.FormatInt(v.Int(), 10)
+	case reflect.Uint,
+		reflect.Uint8,
+		reflect.Uint16,
+		reflect.Uint32,
+		reflect.Uint64:
+		return strconv.FormatUint(v.Uint(), 10)
+	case reflect.Float32:
+		return strconv.FormatFloat(v.Float(), 'g', -1, 32)
+	case reflect.Float64:
+		return strconv.FormatFloat(v.Float(), 'g', -1, 64)
+	case reflect.Complex64, reflect.Complex128:
+		s := fmt.Sprintf("%g", v.Complex())
+		return strings.TrimSuffix(strings.TrimPrefix(s, "("), ")")
+	case reflect.String:
+		return v.String()
+	}
+	panic(t.String() + " has unsupported kind " + t.Kind().String())
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch t := v.Type(); v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Struct:
+		if t.ConvertibleTo(timeType) {
+			return v.Convert(timeType).Interface().(time.Time).IsZero()
+		}
+		return reflect.DeepEqual(v, reflect.Zero(t))
+	}
+	return false
+}
+
+// canIndexOrdinally returns whether a value contains an ordered sequence of elements.
+func canIndexOrdinally(v reflect.Value) bool {
+	if !v.IsValid() {
+		return false
+	}
+	switch t := v.Type(); t.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return canIndexOrdinally(v.Elem())
+	case reflect.Slice, reflect.Array:
+		return true
+	}
+	return false
+}
+
+func fieldInfo(f reflect.StructField) (k string, oe bool) {
+	if f.PkgPath != "" { // Skip private fields.
+		return omittedKey, oe
+	}
+
+	k = f.Name
+	tag := f.Tag.Get("form")
+	if tag == "" {
+		return k, oe
+	}
+
+	ps := strings.SplitN(tag, ",", 2)
+	if ps[0] != "" {
+		k = ps[0]
+	}
+	if len(ps) == 2 {
+		oe = ps[1] == "omitempty"
+	}
+	return k, oe
+}
+
+func findField(v reflect.Value, n string, ignoreCase bool) (reflect.Value, bool) {
+	t := v.Type()
+	l := v.NumField()
+
+	var lowerN string
+	caseInsensitiveMatch := -1
+	if ignoreCase {
+		lowerN = strings.ToLower(n)
+	}
+
+	// First try named fields.
+	for i := 0; i < l; i++ {
+		f := t.Field(i)
+		k, _ := fieldInfo(f)
+		if k == omittedKey {
+			continue
+		} else if n == k {
+			return v.Field(i), true
+		} else if ignoreCase && lowerN == strings.ToLower(k) {
+			caseInsensitiveMatch = i
+		}
+	}
+
+	// If no exact match was found try case insensitive match.
+	if caseInsensitiveMatch != -1 {
+		return v.Field(caseInsensitiveMatch), true
+	}
+
+	// Then try anonymous (embedded) fields.
+	for i := 0; i < l; i++ {
+		f := t.Field(i)
+		k, _ := fieldInfo(f)
+		if k == omittedKey || !f.Anonymous { // || k != "" ?
+			continue
+		}
+		fv := v.Field(i)
+		fk := fv.Kind()
+		for fk == reflect.Ptr || fk == reflect.Interface {
+			fv = fv.Elem()
+			fk = fv.Kind()
+		}
+
+		if fk != reflect.Struct {
+			continue
+		}
+		if ev, ok := findField(fv, n, ignoreCase); ok {
+			return ev, true
+		}
+	}
+
+	return reflect.Value{}, false
+}
+
+var (
+	stringType    = reflect.TypeOf(string(""))
+	stringMapType = reflect.TypeOf(map[string]interface{}{})
+	timeType      = reflect.TypeOf(time.Time{})
+	timePtrType   = reflect.TypeOf(&time.Time{})
+	urlType       = reflect.TypeOf(url.URL{})
+)
+
+func skipTextMarshalling(t reflect.Type) bool {
+	/*// Skip time.Time because its text unmarshaling is overly rigid:
+	return t == timeType || t == timePtrType*/
+	// Skip time.Time & convertibles because its text unmarshaling is overly rigid:
+	return t.ConvertibleTo(timeType) || t.ConvertibleTo(timePtrType)
+}
+
+func unmarshalValue(v reflect.Value, x interface{}) bool {
+	if skipTextMarshalling(v.Type()) {
+		return false
+	}
+
+	tu, ok := v.Interface().(encoding.TextUnmarshaler)
+	if !ok && !v.CanAddr() {
+		return false
+	} else if !ok {
+		return unmarshalValue(v.Addr(), x)
+	}
+
+	s := getString(x)
+	if err := tu.UnmarshalText([]byte(s)); err != nil {
+		panic(err)
+	}
+	return true
+}
+
+func marshalValue(v reflect.Value) (string, bool) {
+	if skipTextMarshalling(v.Type()) {
+		return "", false
+	}
+
+	tm, ok := v.Interface().(encoding.TextMarshaler)
+	if !ok && !v.CanAddr() {
+		return "", false
+	} else if !ok {
+		return marshalValue(v.Addr())
+	}
+
+	bs, err := tm.MarshalText()
+	if err != nil {
+		panic(err)
+	}
+	return string(bs), true
+}

+ 14 - 0
vendor/github.com/ajg/form/form.go

@@ -0,0 +1,14 @@
+// Copyright 2014 Alvaro J. Genial. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package form implements encoding and decoding of application/x-www-form-urlencoded data.
+package form
+
+const (
+	implicitKey = "_"
+	omittedKey  = "-"
+
+	defaultDelimiter = '.'
+	defaultEscape    = '\\'
+)

+ 152 - 0
vendor/github.com/ajg/form/node.go

@@ -0,0 +1,152 @@
+// Copyright 2014 Alvaro J. Genial. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package form
+
+import (
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+type node map[string]interface{}
+
+func (n node) values(d, e rune) url.Values {
+	vs := url.Values{}
+	n.merge(d, e, "", &vs)
+	return vs
+}
+
+func (n node) merge(d, e rune, p string, vs *url.Values) {
+	for k, x := range n {
+		switch y := x.(type) {
+		case string:
+			vs.Add(p+escape(d, e, k), y)
+		case node:
+			y.merge(d, e, p+escape(d, e, k)+string(d), vs)
+		default:
+			panic("value is neither string nor node")
+		}
+	}
+}
+
+// TODO: Add tests for implicit indexing.
+func parseValues(d, e rune, vs url.Values, canIndexFirstLevelOrdinally bool) node {
+	// NOTE: Because of the flattening of potentially multiple strings to one key, implicit indexing works:
+	//    i. At the first level;   e.g. Foo.Bar=A&Foo.Bar=B     becomes 0.Foo.Bar=A&1.Foo.Bar=B
+	//   ii. At the last level;    e.g. Foo.Bar._=A&Foo.Bar._=B becomes Foo.Bar.0=A&Foo.Bar.1=B
+	// TODO: At in-between levels; e.g. Foo._.Bar=A&Foo._.Bar=B becomes Foo.0.Bar=A&Foo.1.Bar=B
+	//       (This last one requires that there only be one placeholder in order for it to be unambiguous.)
+
+	m := map[string]string{}
+	for k, ss := range vs {
+		indexLastLevelOrdinally := strings.HasSuffix(k, string(d)+implicitKey)
+
+		for i, s := range ss {
+			if canIndexFirstLevelOrdinally {
+				k = strconv.Itoa(i) + string(d) + k
+			} else if indexLastLevelOrdinally {
+				k = strings.TrimSuffix(k, implicitKey) + strconv.Itoa(i)
+			}
+
+			m[k] = s
+		}
+	}
+
+	n := node{}
+	for k, s := range m {
+		n = n.split(d, e, k, s)
+	}
+	return n
+}
+
+func splitPath(d, e rune, path string) (k, rest string) {
+	esc := false
+	for i, r := range path {
+		switch {
+		case !esc && r == e:
+			esc = true
+		case !esc && r == d:
+			return unescape(d, e, path[:i]), path[i+1:]
+		default:
+			esc = false
+		}
+	}
+	return unescape(d, e, path), ""
+}
+
+func (n node) split(d, e rune, path, s string) node {
+	k, rest := splitPath(d, e, path)
+	if rest == "" {
+		return add(n, k, s)
+	}
+	if _, ok := n[k]; !ok {
+		n[k] = node{}
+	}
+
+	c := getNode(n[k])
+	n[k] = c.split(d, e, rest, s)
+	return n
+}
+
+func add(n node, k, s string) node {
+	if n == nil {
+		return node{k: s}
+	}
+
+	if _, ok := n[k]; ok {
+		panic("key " + k + " already set")
+	}
+
+	n[k] = s
+	return n
+}
+
+func isEmpty(x interface{}) bool {
+	switch y := x.(type) {
+	case string:
+		return y == ""
+	case node:
+		if s, ok := y[""].(string); ok {
+			return s == ""
+		}
+		return false
+	}
+	panic("value is neither string nor node")
+}
+
+func getNode(x interface{}) node {
+	switch y := x.(type) {
+	case string:
+		return node{"": y}
+	case node:
+		return y
+	}
+	panic("value is neither string nor node")
+}
+
+func getString(x interface{}) string {
+	switch y := x.(type) {
+	case string:
+		return y
+	case node:
+		if s, ok := y[""].(string); ok {
+			return s
+		}
+		return ""
+	}
+	panic("value is neither string nor node")
+}
+
+func escape(d, e rune, s string) string {
+	s = strings.Replace(s, string(e), string(e)+string(e), -1) // Escape the escape    (\ => \\)
+	s = strings.Replace(s, string(d), string(e)+string(d), -1) // Escape the delimiter (. => \.)
+	return s
+}
+
+func unescape(d, e rune, s string) string {
+	s = strings.Replace(s, string(e)+string(d), string(d), -1) // Unescape the delimiter (\. => .)
+	s = strings.Replace(s, string(e)+string(e), string(e), -1) // Unescape the escape    (\\ => \)
+	return s
+}

+ 15 - 0
vendor/github.com/davecgh/go-spew/LICENSE

@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ 152 - 0
vendor/github.com/davecgh/go-spew/spew/bypass.go

@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+	// internal reflect.Value fields.  These values are valid before golang
+	// commit ecccf07e7f9d which changed the format.  The are also valid
+	// after commit 82f48826c6c7 which changed the format again to mirror
+	// the original format.  Code in the init function updates these offsets
+	// as necessary.
+	offsetPtr    = uintptr(ptrSize)
+	offsetScalar = uintptr(0)
+	offsetFlag   = uintptr(ptrSize * 2)
+
+	// flagKindWidth and flagKindShift indicate various bits that the
+	// reflect package uses internally to track kind information.
+	//
+	// flagRO indicates whether or not the value field of a reflect.Value is
+	// read-only.
+	//
+	// flagIndir indicates whether the value field of a reflect.Value is
+	// the actual data or a pointer to the data.
+	//
+	// These values are valid before golang commit 90a7c3c86944 which
+	// changed their positions.  Code in the init function updates these
+	// flags as necessary.
+	flagKindWidth = uintptr(5)
+	flagKindShift = uintptr(flagKindWidth - 1)
+	flagRO        = uintptr(1 << 0)
+	flagIndir     = uintptr(1 << 1)
+)
+
+func init() {
+	// Older versions of reflect.Value stored small integers directly in the
+	// ptr field (which is named val in the older versions).  Versions
+	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+	// scalar for this purpose which unfortunately came before the flag
+	// field, so the offset of the flag field is different for those
+	// versions.
+	//
+	// This code constructs a new reflect.Value from a known small integer
+	// and checks if the size of the reflect.Value struct indicates it has
+	// the scalar field. When it does, the offsets are updated accordingly.
+	vv := reflect.ValueOf(0xf00)
+	if unsafe.Sizeof(vv) == (ptrSize * 4) {
+		offsetScalar = ptrSize * 2
+		offsetFlag = ptrSize * 3
+	}
+
+	// Commit 90a7c3c86944 changed the flag positions such that the low
+	// order bits are the kind.  This code extracts the kind from the flags
+	// field and ensures it's the correct type.  When it's not, the flag
+	// order has been changed to the newer format, so the flags are updated
+	// accordingly.
+	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+	upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+		flagKindShift = 0
+		flagRO = 1 << 5
+		flagIndir = 1 << 6
+
+		// Commit adf9b30e5594 modified the flags to separate the
+		// flagRO flag into two bits which specifies whether or not the
+		// field is embedded.  This causes flagIndir to move over a bit
+		// and means that flagRO is the combination of either of the
+		// original flagRO bit and the new bit.
+		//
+		// This code detects the change by extracting what used to be
+		// the indirect bit to ensure it's set.  When it's not, the flag
+		// order has been changed to the newer format, so the flags are
+		// updated accordingly.
+		if upfv&flagIndir == 0 {
+			flagRO = 3 << 5
+			flagIndir = 1 << 7
+		}
+	}
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data.  It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+	indirects := 1
+	vt := v.Type()
+	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+	if rvf&flagIndir != 0 {
+		vt = reflect.PtrTo(v.Type())
+		indirects++
+	} else if offsetScalar != 0 {
+		// The value is in the scalar field when it's not one of the
+		// reference types.
+		switch vt.Kind() {
+		case reflect.Uintptr:
+		case reflect.Chan:
+		case reflect.Func:
+		case reflect.Map:
+		case reflect.Ptr:
+		case reflect.UnsafePointer:
+		default:
+			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+				offsetScalar)
+		}
+	}
+
+	pv := reflect.NewAt(vt, upv)
+	rv = pv
+	for i := 0; i < indirects; i++ {
+		rv = rv.Elem()
+	}
+	return rv
+}
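
A hedged usage sketch of what the bypass above enables (illustrative only, not part of the vendored file; the type and variable names are made up): with the unsafe path compiled in, spew's method lookups can reach Stringer implementations behind unexported fields, which the safe stub below skips.

	package main

	import (
		"net"

		"github.com/davecgh/go-spew/spew"
	)

	// wrapper hides a net.IP (which implements fmt.Stringer) behind an
	// unexported field, so callers cannot pull it out via Interface().
	type wrapper struct {
		addr net.IP
	}

	func main() {
		w := wrapper{addr: net.ParseIP("127.0.0.1")}
		// With the unsafe bypass available, spew can still reach addr's
		// String method; with the safe stub it skips that lookup and dumps
		// the underlying bytes instead.
		spew.Dump(w)
	}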

+ 38 - 0
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go

@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data.  However, doing this relies on access to
+// the unsafe package.  This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}

+ 341 - 0
vendor/github.com/davecgh/go-spew/spew/common.go

@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead.  This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface.  However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules.  We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.  It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+	vs := &valuesSorter{values: values, cs: cs}
+	if canSortSimply(vs.values[0].Kind()) {
+		return vs
+	}
+	if !cs.DisableMethods {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			b := bytes.Buffer{}
+			if !handleMethods(cs, &b, vs.values[i]) {
+				vs.strings = nil
+				break
+			}
+			vs.strings[i] = b.String()
+		}
+	}
+	if vs.strings == nil && cs.SpewKeys {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+		}
+	}
+	return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
+// Len returns the number of values in the slice.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+	return len(s.values)
+}
+
+// Swap swaps the values at the passed indices.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+	s.values[i], s.values[j] = s.values[j], s.values[i]
+	if s.strings != nil {
+		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+	}
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j.  It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+	if s.strings == nil {
+		return valueSortLess(s.values[i], s.values[j])
+	}
+	return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer.  Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
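
A short, hedged illustration of what sortValues provides in practice (illustrative only, not part of the vendored file): with SortKeys enabled, map keys are ordered through newValuesSorter, so dumps become deterministic and diff-friendly.

	package main

	import "github.com/davecgh/go-spew/spew"

	func main() {
		m := map[string]int{"b": 2, "a": 1, "c": 3}

		// Default global config: keys appear in Go's randomized map order.
		spew.Dump(m)

		// With SortKeys, keys are ordered via valueSortLess, so repeated
		// runs produce identical output.
		cfg := spew.ConfigState{Indent: " ", SortKeys: true}
		cfg.Dump(m)
	}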

+ 306 - 0
vendor/github.com/davecgh/go-spew/spew/config.go

@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level.  The
+	// global config instance that all top-level functions use set this to a
+	// single space by default.  If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures.  The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods.  As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked.  The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output.  Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings.  This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the formatted string as a value that satisfies error.  See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// 	Indent: " "
+// 	MaxDepth: 0
+// 	DisableMethods: false
+// 	DisablePointerMethods: false
+// 	ContinueOnMethod: false
+// 	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
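
A hedged sketch of the ConfigState API defined above (illustrative only, not part of the vendored file; the settings chosen are just an example): a dedicated instance keeps test-oriented options separate from the global spew.Config.

	package main

	import (
		"os"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		// Start from the documented defaults and tighten them for stable output.
		cfg := spew.NewDefaultConfig()
		cfg.Indent = "\t"
		cfg.SortKeys = true
		cfg.DisablePointerAddresses = true // addresses change between runs
		cfg.DisableCapacities = true

		data := map[string][]int{"fib": {1, 1, 2, 3, 5}}
		cfg.Fdump(os.Stderr, data)            // Dump-style output to any io.Writer
		_ = cfg.Sprintf("compact: %+v", data) // Formatter-style, same settings
	}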

+ 211 - 0
vendor/github.com/davecgh/go-spew/spew/doc.go

@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows concurrent configuration
+options.  See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default.  A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output.  Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability.  Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings.  This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew

+ 509 - 0
vendor/github.com/davecgh/go-spew/spew/dump.go

@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8.  It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char.  It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+	if d.ignoreNextIndent {
+		d.ignoreNextIndent = false
+		return
+	}
+	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range d.pointers {
+		if depth >= d.depth {
+			delete(d.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		d.pointers[addr] = d.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type information.
+	d.w.Write(openParenBytes)
+	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+	d.w.Write([]byte(ve.Type().String()))
+	d.w.Write(closeParenBytes)
+
+	// Display pointer information.
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+		d.w.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				d.w.Write(pointerChainBytes)
+			}
+			printHexPtr(d.w, addr)
+		}
+		d.w.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	d.w.Write(openParenBytes)
+	switch {
+	case nilFound == true:
+		d.w.Write(nilAngleBytes)
+
+	case cycleFound == true:
+		d.w.Write(circularBytes)
+
+	default:
+		d.ignoreNextType = true
+		d.dump(ve)
+	}
+	d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+	// Determine whether this type should be hex dumped or not.  Also,
+	// for types which should be hexdumped, try to use the underlying data
+	// first, then fall back to trying to convert them to a uint8 slice.
+	var buf []uint8
+	doConvert := false
+	doHexDump := false
+	numEntries := v.Len()
+	if numEntries > 0 {
+		vt := v.Index(0).Type()
+		vts := vt.String()
+		switch {
+		// C types that need to be converted.
+		case cCharRE.MatchString(vts):
+			fallthrough
+		case cUnsignedCharRE.MatchString(vts):
+			fallthrough
+		case cUint8tCharRE.MatchString(vts):
+			doConvert = true
+
+		// Try to use existing uint8 slices and fall back to converting
+		// and copying if that fails.
+		case vt.Kind() == reflect.Uint8:
+			// We need an addressable interface to convert the type
+			// to a byte slice.  However, the reflect package won't
+			// give us an interface on certain things like
+			// unexported struct fields in order to enforce
+			// visibility rules.  We use unsafe, when available, to
+			// bypass these restrictions since this package does not
+			// mutate the values.
+			vs := v
+			if !vs.CanInterface() || !vs.CanAddr() {
+				vs = unsafeReflectValue(vs)
+			}
+			if !UnsafeDisabled {
+				vs = vs.Slice(0, numEntries)
+
+				// Use the existing uint8 slice if it can be
+				// type asserted.
+				iface := vs.Interface()
+				if slice, ok := iface.([]uint8); ok {
+					buf = slice
+					doHexDump = true
+					break
+				}
+			}
+
+			// The underlying data needs to be converted if it can't
+			// be type asserted to a uint8 slice.
+			doConvert = true
+		}
+
+		// Copy and convert the underlying type if needed.
+		if doConvert && vt.ConvertibleTo(uint8Type) {
+			// Convert and copy each element into a uint8 byte
+			// slice.
+			buf = make([]uint8, numEntries)
+			for i := 0; i < numEntries; i++ {
+				vv := v.Index(i)
+				buf[i] = uint8(vv.Convert(uint8Type).Uint())
+			}
+			doHexDump = true
+		}
+	}
+
+	// Hexdump the entire slice as needed.
+	if doHexDump {
+		indent := strings.Repeat(d.cs.Indent, d.depth)
+		str := indent + hex.Dump(buf)
+		str = strings.Replace(str, "\n", "\n"+indent, -1)
+		str = strings.TrimRight(str, d.cs.Indent)
+		d.w.Write([]byte(str))
+		return
+	}
+
+	// Recursively call dump for each item.
+	for i := 0; i < numEntries; i++ {
+		d.dump(d.unpackValue(v.Index(i)))
+		if i < (numEntries - 1) {
+			d.w.Write(commaNewlineBytes)
+		} else {
+			d.w.Write(newlineBytes)
+		}
+	}
+}
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		d.w.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		d.indent()
+		d.dumpPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !d.ignoreNextType {
+		d.indent()
+		d.w.Write(openParenBytes)
+		d.w.Write([]byte(v.Type().String()))
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+	d.ignoreNextType = false
+
+	// Display length and capacity if the built-in len and cap functions
+	// work with the value's kind and the len/cap itself is non-zero.
+	valueLen, valueCap := 0, 0
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Chan:
+		valueLen, valueCap = v.Len(), v.Cap()
+	case reflect.Map, reflect.String:
+		valueLen = v.Len()
+	}
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+		d.w.Write(openParenBytes)
+		if valueLen != 0 {
+			d.w.Write(lenEqualsBytes)
+			printInt(d.w, int64(valueLen), 10)
+		}
+		if !d.cs.DisableCapacities && valueCap != 0 {
+			if valueLen != 0 {
+				d.w.Write(spaceBytes)
+			}
+			d.w.Write(capEqualsBytes)
+			printInt(d.w, int64(valueCap), 10)
+		}
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+
+	// Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled
+	if !d.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(d.cs, d.w, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(d.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(d.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(d.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(d.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(d.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(d.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(d.w, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			d.dumpSlice(v)
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.String:
+		d.w.Write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			numEntries := v.Len()
+			keys := v.MapKeys()
+			if d.cs.SortKeys {
+				sortValues(keys, d.cs)
+			}
+			for i, key := range keys {
+				d.dump(d.unpackValue(key))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.MapIndex(key)))
+				if i < (numEntries - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Struct:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			vt := v.Type()
+			numFields := v.NumField()
+			for i := 0; i < numFields; i++ {
+				d.indent()
+				vtf := vt.Field(i)
+				d.w.Write([]byte(vtf.Name))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.Field(i)))
+				if i < (numFields - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(d.w, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(d.w, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it in case any new
+	// types are added.
+	default:
+		if v.CanInterface() {
+			fmt.Fprintf(d.w, "%v", v.Interface())
+		} else {
+			fmt.Fprintf(d.w, "%v", v.String())
+		}
+	}
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+	for _, arg := range a {
+		if arg == nil {
+			w.Write(interfaceBytes)
+			w.Write(spaceBytes)
+			w.Write(nilAngleBytes)
+			w.Write(newlineBytes)
+			continue
+		}
+
+		d := dumpState{w: w, cs: cs}
+		d.pointers = make(map[uintptr]int)
+		d.dump(reflect.ValueOf(arg))
+		d.w.Write(newlineBytes)
+	}
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+	fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(&Config, &buf, a...)
+	return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+	fdump(&Config, os.Stdout, a...)
+}
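
A hedged illustration of the dumpSlice hexdump path above (illustrative only, not part of the vendored file): byte slices are rendered hexdump -C style, which the plain fmt verbs do not offer.

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		payload := []byte("example payload bytes")

		// dumpSlice detects the uint8 element kind and prints offsets, hex
		// values and an ASCII column, similar to `hexdump -C`.
		spew.Dump(payload)

		// Sdump returns the same rendering as a string, handy for log lines.
		fmt.Print(spew.Sdump(payload))
	}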

+ 419 - 0
vendor/github.com/davecgh/go-spew/spew/format.go

@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+	value          interface{}
+	fs             fmt.State
+	depth          int
+	pointers       map[uintptr]int
+	ignoreNextType bool
+	cs             *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+	if f.fs.Flag('+') && (len(pointerChain) > 0) {
+		f.fs.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				f.fs.Write(pointerChainBytes)
+			}
+			printHexPtr(f.fs, addr)
+		}
+		f.fs.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	switch {
+	case nilFound == true:
+		f.fs.Write(nilAngleBytes)
+
+	case cycleFound == true:
+		f.fs.Write(circularShortBytes)
+
+	default:
+		f.ignoreNextType = true
+		f.format(ve)
+	}
+}
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function;
+// however, circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		f.fs.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		f.formatPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !f.ignoreNextType && f.fs.Flag('#') {
+		f.fs.Write(openParenBytes)
+		f.fs.Write([]byte(v.Type().String()))
+		f.fs.Write(closeParenBytes)
+	}
+	f.ignoreNextType = false
+
+	// Call Stringer/error interfaces if they exist and the handle methods
+	// flag is enabled.
+	if !f.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(f.cs, f.fs, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(f.fs, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(f.fs, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(f.fs, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(f.fs, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(f.fs, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(f.fs, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(f.fs, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		f.fs.Write(openBracketBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			numEntries := v.Len()
+			for i := 0; i < numEntries; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.Index(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBracketBytes)
+
+	case reflect.String:
+		f.fs.Write([]byte(v.String()))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
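For orientation, here is a minimal usage sketch of the formatter defined above. It assumes only the exported NewFormatter from this file; the type and values are made up, and the printed output is indicative rather than verbatim.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type user struct {
	Name string
	Age  int
}

func main() {
	u := &user{Name: "alice", Age: 30}

	// %v: most compact form, pointers are dereferenced.
	fmt.Printf("%v\n", spew.NewFormatter(u))
	// %+v: additionally shows the pointer address chain.
	fmt.Printf("%+v\n", spew.NewFormatter(u))
	// %#v: additionally shows type information.
	fmt.Printf("%#v\n", spew.NewFormatter(u))
	// %#+v: types and pointer addresses together.
	fmt.Printf("%#+v\n", spew.NewFormatter(u))
}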

+ 148 - 0
vendor/github.com/davecgh/go-spew/spew/spew.go

@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+	return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = NewFormatter(arg)
+	}
+	return formatters
+}
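A short sketch of the wrappers above: each argument is wrapped by NewFormatter via convertArgs, so spew's formatting applies without any explicit wrapping. The config map is illustrative.

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	cfg := map[string]interface{}{"retries": 3, "verbose": true}

	// Behaves like fmt.Printf, but every argument goes through spew's formatter.
	spew.Printf("config: %+v\n", cfg)

	// Sprintf returns the formatted string instead of writing it to stdout.
	dump := spew.Sprintf("%#v", cfg)
	_ = dump
}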

+ 201 - 0
vendor/github.com/gavv/monotime/LICENSE

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 36 - 0
vendor/github.com/gavv/monotime/monotime.go

@@ -0,0 +1,36 @@
+// Copyright (C) 2016  Arista Networks, Inc.
+// Use of this source code is governed by the Apache License 2.0
+// that can be found in the COPYING file.
+
+// Package monotime provides functions to access a monotonic clock source.
+package monotime
+
+import (
+	"time"
+	_ "unsafe" // required to use //go:linkname
+)
+
+//go:noescape
+//go:linkname nanotime runtime.nanotime
+func nanotime() int64
+
+// Now returns the current time in nanoseconds from a monotonic clock.
+//
+// The time returned is based on some arbitrary platform-specific point in the
+// past. The time returned is guaranteed to increase monotonically without
+// notable jumps, unlike time.Now() from the Go standard library, which may
+// jump forward or backward significantly due to system time changes or leap
+// seconds.
+//
+// It's implemented using runtime.nanotime(), which uses CLOCK_MONOTONIC on
+// Linux. Note that unlike CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC is affected
+// by time changes. However, time changes never cause clock jumps; instead,
+// clock frequency is adjusted slowly.
+func Now() time.Duration {
+	return time.Duration(nanotime())
+}
+
+// Since returns the time elapsed since t, obtained previously using Now.
+func Since(t time.Duration) time.Duration {
+	return Now() - t
+}
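A small usage sketch of Now and Since from this file; the sleep duration is arbitrary.

package main

import (
	"fmt"
	"time"

	"github.com/gavv/monotime"
)

func main() {
	// Now returns an offset from an arbitrary monotonic reference point.
	start := monotime.Now()

	time.Sleep(50 * time.Millisecond)

	// Since is unaffected by wall-clock adjustments, unlike time.Since.
	fmt.Println("elapsed:", monotime.Since(start))
}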

+ 6 - 0
vendor/github.com/gavv/monotime/monotime.s

@@ -0,0 +1,6 @@
+// Copyright (C) 2016  Arista Networks, Inc.
+// Use of this source code is governed by the Apache License 2.0
+// that can be found in the COPYING file.
+
+// This file is intentionally empty.
+// It's a workaround for https://github.com/golang/go/issues/15006

+ 27 - 0
vendor/github.com/google/go-querystring/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2013 Google. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 320 - 0
vendor/github.com/google/go-querystring/query/encode.go

@@ -0,0 +1,320 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package query implements encoding of structs into URL query parameters.
+//
+// As a simple example:
+//
+// 	type Options struct {
+// 		Query   string `url:"q"`
+// 		ShowAll bool   `url:"all"`
+// 		Page    int    `url:"page"`
+// 	}
+//
+// 	opt := Options{ "foo", true, 2 }
+// 	v, _ := query.Values(opt)
+// 	fmt.Print(v.Encode()) // will output: "all=true&page=2&q=foo" (Encode sorts keys)
+//
+// The exact mapping between Go values and url.Values is described in the
+// documentation for the Values() function.
+package query
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var timeType = reflect.TypeOf(time.Time{})
+
+var encoderType = reflect.TypeOf(new(Encoder)).Elem()
+
+// Encoder is an interface implemented by any type that wishes to encode
+// itself into URL values in a non-standard way.
+type Encoder interface {
+	EncodeValues(key string, v *url.Values) error
+}
+
+// Values returns the url.Values encoding of v.
+//
+// Values expects to be passed a struct, and traverses it recursively using the
+// following encoding rules.
+//
+// Each exported struct field is encoded as a URL parameter unless
+//
+//	- the field's tag is "-", or
+//	- the field is empty and its tag specifies the "omitempty" option
+//
+// The empty values are false, 0, any nil pointer or interface value, any array,
+// slice, map, or string of length zero, and any time.Time that returns true
+// for IsZero().
+//
+// The URL parameter name defaults to the struct field name but can be
+// specified in the struct field's tag value.  The "url" key in the struct
+// field's tag value is the key name, followed by an optional comma and
+// options.  For example:
+//
+// 	// Field is ignored by this package.
+// 	Field int `url:"-"`
+//
+// 	// Field appears as URL parameter "myName".
+// 	Field int `url:"myName"`
+//
+// 	// Field appears as URL parameter "myName" and the field is omitted if
+// 	// its value is empty
+// 	Field int `url:"myName,omitempty"`
+//
+// 	// Field appears as URL parameter "Field" (the default), but the field
+// 	// is skipped if empty.  Note the leading comma.
+// 	Field int `url:",omitempty"`
+//
+// For encoding individual field values, the following type-dependent rules
+// apply:
+//
+// Boolean values default to encoding as the strings "true" or "false".
+// Including the "int" option signals that the field should be encoded as the
+// strings "1" or "0".
+//
+// time.Time values default to encoding as RFC3339 timestamps.  Including the
+// "unix" option signals that the field should be encoded as a Unix time (see
+// time.Unix())
+//
+// Slice and Array values default to encoding as multiple URL values of the
+// same name.  Including the "comma" option signals that the field should be
+// encoded as a single comma-delimited value.  Including the "space" option
+// similarly encodes the value as a single space-delimited string. Including
+// the "semicolon" option will encode the value as a semicolon-delimited string.
+// Including the "brackets" option signals that the multiple URL values should
+// have "[]" appended to the value name. "numbered" will append a number to
+// the end of each occurrence of the value name, for example:
+// name0=value0&name1=value1, etc.
+//
+// Anonymous struct fields are usually encoded as if their inner exported
+// fields were fields in the outer struct, subject to the standard Go
+// visibility rules.  An anonymous struct field with a name given in its URL
+// tag is treated as having that name, rather than being anonymous.
+//
+// Non-nil pointer values are encoded as the value pointed to.
+//
+// Nested structs are encoded including parent fields in value names for
+// scoping. e.g:
+//
+// 	"user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO"
+//
+// All other values are encoded using their default string representation.
+//
+// Multiple fields that encode to the same URL parameter name will be included
+// as multiple URL values of the same name.
+func Values(v interface{}) (url.Values, error) {
+	values := make(url.Values)
+	val := reflect.ValueOf(v)
+	for val.Kind() == reflect.Ptr {
+		if val.IsNil() {
+			return values, nil
+		}
+		val = val.Elem()
+	}
+
+	if v == nil {
+		return values, nil
+	}
+
+	if val.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind())
+	}
+
+	err := reflectValue(values, val, "")
+	return values, err
+}
+
+// reflectValue populates the values parameter from the struct fields in val.
+// Embedded structs are followed recursively (using the rules defined in the
+// Values function documentation) breadth-first.
+func reflectValue(values url.Values, val reflect.Value, scope string) error {
+	var embedded []reflect.Value
+
+	typ := val.Type()
+	for i := 0; i < typ.NumField(); i++ {
+		sf := typ.Field(i)
+		if sf.PkgPath != "" && !sf.Anonymous { // unexported
+			continue
+		}
+
+		sv := val.Field(i)
+		tag := sf.Tag.Get("url")
+		if tag == "-" {
+			continue
+		}
+		name, opts := parseTag(tag)
+		if name == "" {
+			if sf.Anonymous && sv.Kind() == reflect.Struct {
+				// save embedded struct for later processing
+				embedded = append(embedded, sv)
+				continue
+			}
+
+			name = sf.Name
+		}
+
+		if scope != "" {
+			name = scope + "[" + name + "]"
+		}
+
+		if opts.Contains("omitempty") && isEmptyValue(sv) {
+			continue
+		}
+
+		if sv.Type().Implements(encoderType) {
+			if !reflect.Indirect(sv).IsValid() {
+				sv = reflect.New(sv.Type().Elem())
+			}
+
+			m := sv.Interface().(Encoder)
+			if err := m.EncodeValues(name, &values); err != nil {
+				return err
+			}
+			continue
+		}
+
+		if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array {
+			var del byte
+			if opts.Contains("comma") {
+				del = ','
+			} else if opts.Contains("space") {
+				del = ' '
+			} else if opts.Contains("semicolon") {
+				del = ';'
+			} else if opts.Contains("brackets") {
+				name = name + "[]"
+			}
+
+			if del != 0 {
+				s := new(bytes.Buffer)
+				first := true
+				for i := 0; i < sv.Len(); i++ {
+					if first {
+						first = false
+					} else {
+						s.WriteByte(del)
+					}
+					s.WriteString(valueString(sv.Index(i), opts))
+				}
+				values.Add(name, s.String())
+			} else {
+				for i := 0; i < sv.Len(); i++ {
+					k := name
+					if opts.Contains("numbered") {
+						k = fmt.Sprintf("%s%d", name, i)
+					}
+					values.Add(k, valueString(sv.Index(i), opts))
+				}
+			}
+			continue
+		}
+
+		for sv.Kind() == reflect.Ptr {
+			if sv.IsNil() {
+				break
+			}
+			sv = sv.Elem()
+		}
+
+		if sv.Type() == timeType {
+			values.Add(name, valueString(sv, opts))
+			continue
+		}
+
+		if sv.Kind() == reflect.Struct {
+			reflectValue(values, sv, name)
+			continue
+		}
+
+		values.Add(name, valueString(sv, opts))
+	}
+
+	for _, f := range embedded {
+		if err := reflectValue(values, f, scope); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// valueString returns the string representation of a value.
+func valueString(v reflect.Value, opts tagOptions) string {
+	for v.Kind() == reflect.Ptr {
+		if v.IsNil() {
+			return ""
+		}
+		v = v.Elem()
+	}
+
+	if v.Kind() == reflect.Bool && opts.Contains("int") {
+		if v.Bool() {
+			return "1"
+		}
+		return "0"
+	}
+
+	if v.Type() == timeType {
+		t := v.Interface().(time.Time)
+		if opts.Contains("unix") {
+			return strconv.FormatInt(t.Unix(), 10)
+		}
+		return t.Format(time.RFC3339)
+	}
+
+	return fmt.Sprint(v.Interface())
+}
+
+// isEmptyValue checks if a value should be considered empty for the purposes
+// of omitting fields with the "omitempty" option.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+
+	if v.Type() == timeType {
+		return v.Interface().(time.Time).IsZero()
+	}
+
+	return false
+}
+
+// tagOptions is the string following a comma in a struct field's "url" tag, or
+// the empty string. It does not include the leading comma.
+type tagOptions []string
+
+// parseTag splits a struct field's url tag into its name and comma-separated
+// options.
+func parseTag(tag string) (string, tagOptions) {
+	s := strings.Split(tag, ",")
+	return s[0], s[1:]
+}
+
+// Contains checks whether the tagOptions contains the specified option.
+func (o tagOptions) Contains(option string) bool {
+	for _, s := range o {
+		if s == option {
+			return true
+		}
+	}
+	return false
+}
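As a sketch of how the tag rules documented above combine, the struct, its tags, and the expected output below are illustrative only.

package main

import (
	"fmt"
	"time"

	"github.com/google/go-querystring/query"
)

type SearchOptions struct {
	Query string    `url:"q"`
	Page  int       `url:"page,omitempty"`
	Tags  []string  `url:"tag,comma"`
	Since time.Time `url:"since,unix"`
	Debug bool      `url:"-"` // always skipped
}

func main() {
	opt := SearchOptions{
		Query: "golang",
		Tags:  []string{"a", "b"},
		Since: time.Unix(1500000000, 0),
	}

	v, err := query.Values(opt)
	if err != nil {
		panic(err)
	}

	// url.Values.Encode sorts keys, so this prints:
	//   q=golang&since=1500000000&tag=a%2Cb
	// Page is dropped because it is zero and tagged omitempty.
	fmt.Println(v.Encode())
}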

+ 20 - 0
vendor/github.com/imkira/go-interpol/LICENSE

@@ -0,0 +1,20 @@
+Copyright (c) 2016 Mario Freitas (imkira@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 171 - 0
vendor/github.com/imkira/go-interpol/interpol.go

@@ -0,0 +1,171 @@
+// Package interpol provides utility functions for doing format-string like
+// string interpolation using named parameters.
+// Currently, a template only accepts variable placeholders delimited by brace
+// characters (eg. "Hello {foo} {bar}").
+package interpol
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"strings"
+)
+
+// Errors returned when formatting templates.
+var (
+	ErrUnexpectedClose = errors.New("interpol: unexpected close in template")
+	ErrExpectingClose  = errors.New("interpol: expecting close in template")
+	ErrKeyNotFound     = errors.New("interpol: key not found")
+	ErrReadByteFailed  = errors.New("interpol: read byte failed")
+)
+
+// Func receives the placeholder key and writes to the io.Writer. If an error
+// happens, the function can return an error, in which case the interpolation
+// will be aborted.
+type Func func(key string, w io.Writer) error
+
+// New creates a new interpolator with the given list of options.
+// You can use options such as the ones returned by WithTemplate, WithFormat
+// and WithOutput.
+func New(opts ...Option) *Interpolator {
+	opts2 := &Options{}
+	setOptions(opts, newOptionSetter(opts2))
+	return NewWithOptions(opts2)
+}
+
+// NewWithOptions creates a new interpolator with the given options.
+func NewWithOptions(opts *Options) *Interpolator {
+	return &Interpolator{
+		template: templateReader(opts),
+		output:   outputWriter(opts),
+		format:   opts.Format,
+		rb:       make([]rune, 0, 64),
+		start:    -1,
+		closing:  false,
+	}
+}
+
+// Interpolator interpolates Template to Output, according to Format.
+type Interpolator struct {
+	template io.RuneReader
+	output   runeWriter
+	format   Func
+	rb       []rune
+	start    int
+	closing  bool
+}
+
+// Interpolate reads runes from Template and writes them to Output, with the
+// exception of placeholders which are passed to Format.
+func (i *Interpolator) Interpolate() error {
+	for pos := 0; ; pos++ {
+		r, _, err := i.template.ReadRune()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		if err := i.parse(r, pos); err != nil {
+			return err
+		}
+	}
+	return i.finish()
+}
+
+func (i *Interpolator) parse(r rune, pos int) error {
+	switch r {
+	case '{':
+		return i.open(pos)
+	case '}':
+		return i.close()
+	default:
+		return i.append(r)
+	}
+}
+
+func (i *Interpolator) open(pos int) error {
+	if i.closing {
+		return ErrUnexpectedClose
+	}
+	if i.start >= 0 {
+		if _, err := i.output.WriteRune('{'); err != nil {
+			return err
+		}
+		i.start = -1
+	} else {
+		i.start = pos + 1
+	}
+	return nil
+}
+
+func (i *Interpolator) close() error {
+	if i.start >= 0 {
+		if err := i.format(string(i.rb), i.output); err != nil {
+			return err
+		}
+		i.rb = i.rb[:0]
+		i.start = -1
+	} else if i.closing {
+		i.closing = false
+		if _, err := i.output.WriteRune('}'); err != nil {
+			return err
+		}
+	} else {
+		i.closing = true
+	}
+	return nil
+}
+
+func (i *Interpolator) append(r rune) error {
+	if i.closing {
+		return ErrUnexpectedClose
+	}
+	if i.start < 0 {
+		_, err := i.output.WriteRune(r)
+		return err
+	}
+	i.rb = append(i.rb, r)
+	return nil
+}
+
+func (i *Interpolator) finish() error {
+	if i.start >= 0 {
+		return ErrExpectingClose
+	}
+	if i.closing {
+		return ErrUnexpectedClose
+	}
+	return nil
+}
+
+// WithFunc interpolates the specified template with replacements using the
+// given function.
+func WithFunc(template string, format Func) (string, error) {
+	buffer := bytes.NewBuffer(make([]byte, 0, len(template)))
+	opts := &Options{
+		Template: strings.NewReader(template),
+		Output:   buffer,
+		Format:   format,
+	}
+	i := NewWithOptions(opts)
+	if err := i.Interpolate(); err != nil {
+		return "", err
+	}
+	return buffer.String(), nil
+}
+
+// WithMap interpolates the specified template with replacements using the
+// given map. If a placeholder is used for which a value is not found, an error
+// is returned.
+func WithMap(template string, m map[string]string) (string, error) {
+	format := func(key string, w io.Writer) error {
+		value, ok := m[key]
+		if !ok {
+			return ErrKeyNotFound
+		}
+		_, err := w.Write([]byte(value))
+		return err
+	}
+	return WithFunc(template, format)
+}
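A quick sketch of the two helpers above; the templates, keys, and values are made up for illustration.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/imkira/go-interpol"
)

func main() {
	// Map-based interpolation: every {key} in the template must exist in the map.
	s, err := interpol.WithMap("Hello {name}, you have {count} messages",
		map[string]string{"name": "Ada", "count": "3"})
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // Hello Ada, you have 3 messages

	// Function-based interpolation: the callback decides what each key expands to.
	s, err = interpol.WithFunc("{greeting}!", func(key string, w io.Writer) error {
		_, werr := io.WriteString(w, strings.ToUpper(key))
		return werr
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // GREETING!
}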

+ 52 - 0
vendor/github.com/imkira/go-interpol/io.go

@@ -0,0 +1,52 @@
+package interpol
+
+import (
+	"bufio"
+	"io"
+	"unicode/utf8"
+)
+
+type runeWriter interface {
+	io.Writer
+	WriteRune(r rune) (int, error)
+}
+
+func templateReader(opts *Options) io.RuneReader {
+	if rr, ok := opts.Template.(io.RuneReader); ok {
+		return rr
+	}
+	return bufio.NewReaderSize(opts.Template, utf8.UTFMax)
+}
+
+func outputWriter(opts *Options) runeWriter {
+	if rw, ok := opts.Output.(runeWriter); ok {
+		return rw
+	}
+	return &simpleRuneWriter{w: opts.Output}
+}
+
+type simpleRuneWriter struct {
+	runeEncoder
+	w io.Writer
+}
+
+func (rw *simpleRuneWriter) Write(b []byte) (int, error) {
+	return rw.w.Write(b)
+}
+
+func (rw *simpleRuneWriter) WriteRune(r rune) (int, error) {
+	return rw.w.Write(rw.encode(r))
+}
+
+type runeEncoder struct {
+	b [utf8.UTFMax]byte
+}
+
+func (re *runeEncoder) encode(r rune) []byte {
+	if r < utf8.RuneSelf {
+		re.b[0] = byte(r)
+		return re.b[:1]
+	}
+	n := utf8.EncodeRune(re.b[:], r)
+	return re.b[:n]
+}

+ 68 - 0
vendor/github.com/imkira/go-interpol/options.go

@@ -0,0 +1,68 @@
+package interpol
+
+import "io"
+
+// Options contains all options supported by an Interpolator.
+type Options struct {
+	Template io.Reader
+	Format   Func
+	Output   io.Writer
+}
+
+// Option is an option that can be applied to an Interpolator.
+type Option func(OptionSetter)
+
+// OptionSetter is an interface that contains the setters for all options
+// supported by Interpolator.
+type OptionSetter interface {
+	SetTemplate(template io.Reader)
+	SetFormat(format Func)
+	SetOutput(output io.Writer)
+}
+
+// WithTemplate assigns Template to Options.
+func WithTemplate(template io.Reader) Option {
+	return func(setter OptionSetter) {
+		setter.SetTemplate(template)
+	}
+}
+
+// WithFormat assigns Format to Options.
+func WithFormat(format Func) Option {
+	return func(setter OptionSetter) {
+		setter.SetFormat(format)
+	}
+}
+
+// WithOutput assigns Output to Options.
+func WithOutput(output io.Writer) Option {
+	return func(setter OptionSetter) {
+		setter.SetOutput(output)
+	}
+}
+
+type optionSetter struct {
+	opts *Options
+}
+
+func newOptionSetter(opts *Options) *optionSetter {
+	return &optionSetter{opts: opts}
+}
+
+func (s *optionSetter) SetTemplate(template io.Reader) {
+	s.opts.Template = template
+}
+
+func (s *optionSetter) SetFormat(format Func) {
+	s.opts.Format = format
+}
+
+func (s *optionSetter) SetOutput(output io.Writer) {
+	s.opts.Output = output
+}
+
+func setOptions(opts []Option, setter OptionSetter) {
+	for _, opt := range opts {
+		opt(setter)
+	}
+}
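For completeness, a sketch wiring these options through New from interpol.go above; the template and callback are illustrative.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/imkira/go-interpol"
)

func main() {
	out := &bytes.Buffer{}

	// The format callback receives each placeholder key and writes its expansion.
	format := func(key string, w io.Writer) error {
		_, err := io.WriteString(w, "<"+key+">")
		return err
	}

	i := interpol.New(
		interpol.WithTemplate(strings.NewReader("a {x} b {y}")),
		interpol.WithFormat(format),
		interpol.WithOutput(out),
	)

	if err := i.Interpolate(); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // a <x> b <y>
}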

+ 117 - 0
vendor/github.com/iris-contrib/httpexpect/Gopkg.lock

@@ -0,0 +1,117 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/ajg/form"
+  packages = ["."]
+  revision = "cc2954064ec9ea8d93917f0f87456e11d7b881ad"
+  version = "v1.5"
+
+[[projects]]
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "github.com/fatih/structs"
+  packages = ["."]
+  revision = "a720dfa8df582c51dee1b36feabb906bde1588bd"
+  version = "v1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/gavv/monotime"
+  packages = ["."]
+  revision = "47d58efa69556a936a3c15eb2ed42706d968ab01"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/google/go-querystring"
+  packages = ["query"]
+  revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
+
+[[projects]]
+  name = "github.com/imkira/go-interpol"
+  packages = ["."]
+  revision = "5accad8134979a6ac504d456a6c7f1c53da237ca"
+  version = "v1.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/moul/http2curl"
+  packages = ["."]
+  revision = "4e24498b31dba4683efb9d35c1c8a91e2eda28c8"
+
+[[projects]]
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/sergi/go-diff"
+  packages = ["diffmatchpatch"]
+  revision = "feef008d51ad2b3778f85d387ccf91735543008d"
+
+[[projects]]
+  name = "github.com/stretchr/testify"
+  packages = ["assert","require"]
+  revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
+  version = "v1.1.4"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/xeipuuv/gojsonpointer"
+  packages = ["."]
+  revision = "6fe8760cad3569743d51ddbb243b26f8456742dc"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/xeipuuv/gojsonreference"
+  packages = ["."]
+  revision = "e02fc20de94c78484cd5ffb007f8af96be030a45"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/xeipuuv/gojsonschema"
+  packages = ["."]
+  revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/yalp/jsonpath"
+  packages = ["."]
+  revision = "31a79c7593bb93eb10b163650d4a3e6ca190e4dc"
+
+[[projects]]
+  name = "github.com/yudai/gojsondiff"
+  packages = [".","formatter"]
+  revision = "d53dddaf16b9f5b19737f4722943e7e1f289af13"
+  version = "v1.0.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/yudai/golcs"
+  packages = ["."]
+  revision = "ecda9a501e8220fae3b4b600c3db4b0ba22cfc68"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/net"
+  packages = ["idna","publicsuffix"]
+  revision = "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/text"
+  packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
+  revision = "ac87088df8ef557f1e32cd00ed0b6fbc3f7ddafb"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "85dac6f19e4c9a8e9ea0184ea34409237000c3e26db72ea2dfa98584b8670042"
+  solver-name = "gps-cdcl"
+  solver-version = 1

+ 66 - 0
vendor/github.com/iris-contrib/httpexpect/Gopkg.toml

@@ -0,0 +1,66 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#  name = "github.com/x/y"
+#  version = "2.4.0"
+
+
+[[constraint]]
+  name = "github.com/ajg/form"
+  version = "1.5.0"
+
+[[constraint]]
+  name = "github.com/fatih/structs"
+  version = "1.0.0"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/gavv/monotime"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/google/go-querystring"
+
+[[constraint]]
+  name = "github.com/imkira/go-interpol"
+  version = "1.1.0"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/moul/http2curl"
+
+[[constraint]]
+  name = "github.com/stretchr/testify"
+  version = "1.1.4"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/xeipuuv/gojsonschema"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/yalp/jsonpath"
+
+[[constraint]]
+  name = "github.com/yudai/gojsondiff"
+  version = "1.0.0"
+
+[[constraint]]
+  branch = "master"
+  name = "golang.org/x/net"

+ 21 - 0
vendor/github.com/iris-contrib/httpexpect/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Victor Gaydov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 7 - 0
vendor/github.com/iris-contrib/httpexpect/README.md

@@ -0,0 +1,7 @@
+# HTTPExpect
+
+The underlying test framework that [iris/httptest](https://github.com/kataras/iris) is built on.
+
+[![Build status](https://api.travis-ci.org/iris-contrib/httpexpect.svg?branch=master&style=flat-square)](https://travis-ci.org/iris-contrib/httpexpect)
+
+A clone of [gavv/httpexpect](https://github.com/gavv/httpexpect) with unused features removed to reduce the overall disk size.

+ 299 - 0
vendor/github.com/iris-contrib/httpexpect/array.go

@@ -0,0 +1,299 @@
+package httpexpect
+
+import (
+	"reflect"
+)
+
+// Array provides methods to inspect attached []interface{} object
+// (Go representation of JSON array).
+type Array struct {
+	chain chain
+	value []interface{}
+}
+
+// NewArray returns a new Array given a reporter used to report failures
+// and value to be inspected.
+//
+// Both reporter and value should not be nil. If value is nil, failure is
+// reported.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+func NewArray(reporter Reporter, value []interface{}) *Array {
+	chain := makeChain(reporter)
+	if value == nil {
+		chain.fail("expected non-nil array value")
+	} else {
+		value, _ = canonArray(&chain, value)
+	}
+	return &Array{chain, value}
+}
+
+// Raw returns underlying value attached to Array.
+// This is the value originally passed to NewArray, converted to canonical form.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  assert.Equal(t, []interface{}{"foo", 123.0}, array.Raw())
+func (a *Array) Raw() []interface{} {
+	return a.value
+}
+
+// Path is similar to Value.Path.
+func (a *Array) Path(path string) *Value {
+	return getPath(&a.chain, a.value, path)
+}
+
+// Schema is similar to Value.Schema.
+func (a *Array) Schema(schema interface{}) *Array {
+	checkSchema(&a.chain, a.value, schema)
+	return a
+}
+
+// Length returns a new Number object that may be used to inspect array length.
+//
+// Example:
+//  array := NewArray(t, []interface{}{1, 2, 3})
+//  array.Length().Equal(3)
+func (a *Array) Length() *Number {
+	return &Number{a.chain, float64(len(a.value))}
+}
+
+// Element returns a new Value object that may be used to inspect array element
+// for given index.
+//
+// If index is out of array bounds, Element reports failure and returns empty
+// (but non-nil) value.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.Element(0).String().Equal("foo")
+//  array.Element(1).Number().Equal(123)
+func (a *Array) Element(index int) *Value {
+	if index < 0 || index >= len(a.value) {
+		a.chain.fail(
+			"\narray index out of bounds:\n  index %d\n\n  bounds [%d; %d)",
+			index,
+			0,
+			len(a.value))
+		return &Value{a.chain, nil}
+	}
+	return &Value{a.chain, a.value[index]}
+}
+
+// First returns a new Value object that may be used to inspect first element
+// of given array.
+//
+// If given array is empty, First reports failure and returns empty
+// (but non-nil) value.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.First().String().Equal("foo")
+func (a *Array) First() *Value {
+	if len(a.value) < 1 {
+		a.chain.fail("\narray is empty")
+		return &Value{a.chain, nil}
+	}
+	return &Value{a.chain, a.value[0]}
+}
+
+// Last returns a new Value object that may be used to inspect last element
+// of given array.
+//
+// If given array is empty, Last reports failure and returns empty
+// (but non-nil) value.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.Last().Number().Equal(123)
+func (a *Array) Last() *Value {
+	if len(a.value) < 1 {
+		a.chain.fail("\narray is empty")
+		return &Value{a.chain, nil}
+	}
+	return &Value{a.chain, a.value[len(a.value)-1]}
+}
+
+// Iter returns a new slice of Values attached to array elements.
+//
+// Example:
+//  strings := []interface{}{"foo", "bar"}
+//  array := NewArray(t, strings)
+//
+//  for n, val := range array.Iter() {
+//      val.String().Equal(strings[n])
+//  }
+func (a *Array) Iter() []Value {
+	if a.chain.failed() {
+		return []Value{}
+	}
+	ret := []Value{}
+	for n := range a.value {
+		ret = append(ret, Value{a.chain, a.value[n]})
+	}
+	return ret
+}
+
+// Empty succeeds if array is empty.
+//
+// Example:
+//  array := NewArray(t, []interface{}{})
+//  array.Empty()
+func (a *Array) Empty() *Array {
+	return a.Equal([]interface{}{})
+}
+
+// NotEmpty succeeds if array is non-empty.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.NotEmpty()
+func (a *Array) NotEmpty() *Array {
+	return a.NotEqual([]interface{}{})
+}
+
+// Equal succeeds if array is equal to another array.
+// Before comparison, both arrays are converted to canonical form.
+//
+// value should be slice of any type.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.Equal([]interface{}{"foo", 123})
+//
+//  array := NewArray(t, []interface{}{"foo", "bar"})
+//  array.Equal([]string{"foo", "bar"})
+//
+//  array := NewArray(t, []interface{}{123, 456})
+//  array.Equal([]int{123, 456})
+func (a *Array) Equal(value interface{}) *Array {
+	expected, ok := canonArray(&a.chain, value)
+	if !ok {
+		return a
+	}
+	if !reflect.DeepEqual(expected, a.value) {
+		a.chain.fail("\nexpected array equal to:\n%s\n\nbut got:\n%s\n\ndiff:\n%s",
+			dumpValue(expected),
+			dumpValue(a.value),
+			diffValues(expected, a.value))
+	}
+	return a
+}
+
+// NotEqual succeeds if array is not equal to another array.
+// Before comparison, both arrays are converted to canonical form.
+//
+// value should be slice of any type.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.NotEqual([]interface{}{123, "foo"})
+func (a *Array) NotEqual(value interface{}) *Array {
+	expected, ok := canonArray(&a.chain, value)
+	if !ok {
+		return a
+	}
+	if reflect.DeepEqual(expected, a.value) {
+		a.chain.fail("\nexpected array not equal to:\n%s",
+			dumpValue(expected))
+	}
+	return a
+}
+
+// Elements succeeds if array contains all given elements, in given order, and only them.
+// Before comparison, array and all elements are converted to canonical form.
+//
+// For partial or unordered comparison, see Contains and ContainsOnly.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.Elements("foo", 123)
+//
+// These calls are equivalent:
+//  array.Elements("a", "b")
+//  array.Equal([]interface{}{"a", "b"})
+func (a *Array) Elements(values ...interface{}) *Array {
+	return a.Equal(values)
+}
+
+// Contains succeeds if array contains all given elements (in any order).
+// Before comparison, array and all elements are converted to canonical form.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.Contains(123, "foo")
+func (a *Array) Contains(values ...interface{}) *Array {
+	elements, ok := canonArray(&a.chain, values)
+	if !ok {
+		return a
+	}
+	for _, e := range elements {
+		if !a.containsElement(e) {
+			a.chain.fail("\nexpected array containing element:\n%s\n\nbut got:\n%s",
+				dumpValue(e), dumpValue(a.value))
+		}
+	}
+	return a
+}
+
+// NotContains succeeds if array contains none of given elements.
+// Before comparison, array and all elements are converted to canonical form.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.NotContains("bar")         // success
+//  array.NotContains("bar", "foo")  // failure (array contains "foo")
+func (a *Array) NotContains(values ...interface{}) *Array {
+	elements, ok := canonArray(&a.chain, values)
+	if !ok {
+		return a
+	}
+	for _, e := range elements {
+		if a.containsElement(e) {
+			a.chain.fail("\nexpected array not containing element:\n%s\n\nbut got:\n%s",
+				dumpValue(e), dumpValue(a.value))
+		}
+	}
+	return a
+}
+
+// ContainsOnly succeeds if array contains all given elements, in any order, and only
+// them. Before comparison, array and all elements are converted to canonical form.
+//
+// Example:
+//  array := NewArray(t, []interface{}{"foo", 123})
+//  array.ContainsOnly(123, "foo")
+//
+// These calls are equivalent:
+//  array.ContainsOnly("a", "b")
+//  array.ContainsOnly("b", "a")
+func (a *Array) ContainsOnly(values ...interface{}) *Array {
+	elements, ok := canonArray(&a.chain, values)
+	if !ok {
+		return a
+	}
+	if len(elements) != len(a.value) {
+		a.chain.fail("\nexpected array of length == %d:\n%s\n\n"+
+			"but got array of length %d:\n%s",
+			len(elements), dumpValue(elements),
+			len(a.value), dumpValue(a.value))
+		return a
+	}
+	for _, e := range elements {
+		if !a.containsElement(e) {
+			a.chain.fail("\nexpected array containing element:\n%s\n\nbut got:\n%s",
+				dumpValue(e), dumpValue(a.value))
+		}
+	}
+	return a
+}
+
+func (a *Array) containsElement(expected interface{}) bool {
+	for _, e := range a.value {
+		if reflect.DeepEqual(expected, e) {
+			return true
+		}
+	}
+	return false
+}
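
A compact usage sketch of the Array assertions above, built only from the methods documented in this file and NewAssertReporter from reporter.go further down; the test name and values are illustrative, not taken from this change:

package example_test

import (
	"testing"

	"github.com/iris-contrib/httpexpect"
)

func TestArraySketch(t *testing.T) {
	// Illustrative values only.
	array := httpexpect.NewArray(httpexpect.NewAssertReporter(t), []interface{}{"foo", 123})

	array.NotEmpty()
	array.First().String().Equal("foo") // first element, inspected as a string
	array.Last().Number().Equal(123)    // last element; numbers canonicalize to float64
	array.Contains(123)                 // partial, order-independent
	array.ContainsOnly(123, "foo")      // exact set, order-independent
	array.Elements("foo", 123)          // exact and ordered, same as Equal
}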

+ 99 - 0
vendor/github.com/iris-contrib/httpexpect/binder.go

@@ -0,0 +1,99 @@
+package httpexpect
+
+import (
+	"bytes"
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptest"
+)
+
+// Binder implements networkless http.RoundTripper attached directly to
+// http.Handler.
+//
+// Binder emulates network communication by invoking given http.Handler
+// directly. It passes httptest.ResponseRecorder as http.ResponseWriter
+// to the handler, and then constructs http.Response from recorded data.
+type Binder struct {
+	// HTTP handler invoked for every request.
+	Handler http.Handler
+	// TLS connection state used for https:// requests.
+	TLS *tls.ConnectionState
+}
+
+// NewBinder returns a new Binder given a http.Handler.
+//
+// Example:
+//   client := &http.Client{
+//       Transport: NewBinder(handler),
+//   }
+func NewBinder(handler http.Handler) Binder {
+	return Binder{Handler: handler}
+}
+
+// RoundTrip implements http.RoundTripper.RoundTrip.
+func (binder Binder) RoundTrip(req *http.Request) (*http.Response, error) {
+	if req.Proto == "" {
+		req.Proto = fmt.Sprintf("HTTP/%d.%d", req.ProtoMajor, req.ProtoMinor)
+	}
+
+	if req.Body != nil {
+		if req.ContentLength == -1 {
+			req.TransferEncoding = []string{"chunked"}
+		}
+	} else {
+		req.Body = ioutil.NopCloser(bytes.NewReader(nil))
+	}
+
+	if req.URL != nil && req.URL.Scheme == "https" && binder.TLS != nil {
+		req.TLS = binder.TLS
+	}
+
+	if req.RequestURI == "" {
+		req.RequestURI = req.URL.RequestURI()
+	}
+
+	recorder := httptest.NewRecorder()
+
+	binder.Handler.ServeHTTP(recorder, req)
+
+	resp := http.Response{
+		Request:    req,
+		StatusCode: recorder.Code,
+		Status:     http.StatusText(recorder.Code),
+		Header:     recorder.HeaderMap,
+	}
+
+	if recorder.Flushed {
+		resp.TransferEncoding = []string{"chunked"}
+	}
+
+	if recorder.Body != nil {
+		resp.Body = ioutil.NopCloser(recorder.Body)
+	}
+
+	return &resp, nil
+}
+
+type connNonTLS struct {
+	net.Conn
+}
+
+func (connNonTLS) RemoteAddr() net.Addr {
+	return &net.TCPAddr{IP: net.IPv4zero}
+}
+
+func (connNonTLS) LocalAddr() net.Addr {
+	return &net.TCPAddr{IP: net.IPv4zero}
+}
+
+type connTLS struct {
+	connNonTLS
+	state *tls.ConnectionState
+}
+
+func (c connTLS) ConnectionState() tls.ConnectionState {
+	return *c.state
+}
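
A minimal sketch of the networkless mode Binder enables, wiring it into the Config consumed by expect.go further down; the mux, route, and BaseURL here are assumptions for illustration:

package example_test

import (
	"net/http"
	"testing"

	"github.com/iris-contrib/httpexpect"
)

func TestBinderSketch(t *testing.T) {
	// Any http.Handler works here; this mux is purely illustrative.
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	e := httpexpect.WithConfig(httpexpect.Config{
		BaseURL:  "http://example.com", // never dialed: Binder invokes the handler directly
		Reporter: httpexpect.NewAssertReporter(t),
		Client: &http.Client{
			Transport: httpexpect.NewBinder(mux),
		},
	})

	e.GET("/ping").Expect().Status(http.StatusOK)
}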

+ 82 - 0
vendor/github.com/iris-contrib/httpexpect/boolean.go

@@ -0,0 +1,82 @@
+package httpexpect
+
+// Boolean provides methods to inspect attached bool value
+// (Go representation of JSON boolean).
+type Boolean struct {
+	chain chain
+	value bool
+}
+
+// NewBoolean returns a new Boolean given a reporter used to report
+// failures and value to be inspected.
+//
+// reporter should not be nil.
+//
+// Example:
+//  boolean := NewBoolean(t, true)
+func NewBoolean(reporter Reporter, value bool) *Boolean {
+	return &Boolean{makeChain(reporter), value}
+}
+
+// Raw returns underlying value attached to Boolean.
+// This is the value originally passed to NewBoolean.
+//
+// Example:
+//  boolean := NewBoolean(t, true)
+//  assert.Equal(t, true, boolean.Raw())
+func (b *Boolean) Raw() bool {
+	return b.value
+}
+
+// Path is similar to Value.Path.
+func (b *Boolean) Path(path string) *Value {
+	return getPath(&b.chain, b.value, path)
+}
+
+// Schema is similar to Value.Schema.
+func (b *Boolean) Schema(schema interface{}) *Boolean {
+	checkSchema(&b.chain, b.value, schema)
+	return b
+}
+
+// Equal succeeds if boolean is equal to given value.
+//
+// Example:
+//  boolean := NewBoolean(t, true)
+//  boolean.Equal(true)
+func (b *Boolean) Equal(value bool) *Boolean {
+	if !(b.value == value) {
+		b.chain.fail("expected boolean == %v, but got %v", value, b.value)
+	}
+	return b
+}
+
+// NotEqual succeeds if boolean is not equal to given value.
+//
+// Example:
+//  boolean := NewBoolean(t, true)
+//  boolean.NotEqual(false)
+func (b *Boolean) NotEqual(value bool) *Boolean {
+	if !(b.value != value) {
+		b.chain.fail("expected boolean != %v, but got %v", value, b.value)
+	}
+	return b
+}
+
+// True succeeds if boolean is true.
+//
+// Example:
+//  boolean := NewBoolean(t, true)
+//  boolean.True()
+func (b *Boolean) True() *Boolean {
+	return b.Equal(true)
+}
+
+// False succeeds if boolean is false.
+//
+// Example:
+//  boolean := NewBoolean(t, false)
+//  boolean.False()
+func (b *Boolean) False() *Boolean {
+	return b.Equal(false)
+}

+ 38 - 0
vendor/github.com/iris-contrib/httpexpect/chain.go

@@ -0,0 +1,38 @@
+package httpexpect
+
+type chain struct {
+	reporter Reporter
+	failbit  bool
+}
+
+func makeChain(reporter Reporter) chain {
+	return chain{reporter, false}
+}
+
+func (c *chain) failed() bool {
+	return c.failbit
+}
+
+func (c *chain) fail(message string, args ...interface{}) {
+	if c.failbit {
+		return
+	}
+	c.failbit = true
+	c.reporter.Errorf(message, args...)
+}
+
+func (c *chain) reset() {
+	c.failbit = false
+}
+
+func (c *chain) assertFailed(r Reporter) {
+	if !c.failbit {
+		r.Errorf("expected chain is failed, but it's ok")
+	}
+}
+
+func (c *chain) assertOK(r Reporter) {
+	if c.failbit {
+		r.Errorf("expected chain is ok, but it's failed")
+	}
+}

+ 105 - 0
vendor/github.com/iris-contrib/httpexpect/cookie.go

@@ -0,0 +1,105 @@
+package httpexpect
+
+import (
+	"net/http"
+	"time"
+)
+
+// Cookie provides methods to inspect attached http.Cookie value.
+type Cookie struct {
+	chain chain
+	value *http.Cookie
+}
+
+// NewCookie returns a new Cookie object given a reporter used to report
+// failures and cookie value to be inspected.
+//
+// reporter and value should not be nil.
+//
+// Example:
+//   cookie := NewCookie(reporter, &http.Cookie{...})
+//   cookie.Domain().Equal("example.com")
+//   cookie.Path().Equal("/")
+//   cookie.Expires().InRange(time.Now(), time.Now().Add(time.Hour * 24))
+func NewCookie(reporter Reporter, value *http.Cookie) *Cookie {
+	chain := makeChain(reporter)
+	if value == nil {
+		chain.fail("expected non-nil cookie")
+	}
+	return &Cookie{chain, value}
+}
+
+// Raw returns underlying http.Cookie value attached to Cookie.
+// This is the value originally passed to NewCookie.
+//
+// Example:
+//  cookie := NewCookie(t, c)
+//  assert.Equal(t, c, cookie.Raw())
+func (c *Cookie) Raw() *http.Cookie {
+	return c.value
+}
+
+// Name returns a new String object that may be used to inspect
+// cookie name.
+//
+// Example:
+//  cookie := NewCookie(t, &http.Cookie{...})
+//  cookie.Name().Equal("session")
+func (c *Cookie) Name() *String {
+	if c.chain.failed() {
+		return &String{c.chain, ""}
+	}
+	return &String{c.chain, c.value.Name}
+}
+
+// Value returns a new String object that may be used to inspect
+// cookie value.
+//
+// Example:
+//  cookie := NewCookie(t, &http.Cookie{...})
+//  cookie.Value().Equal("gH6z7Y")
+func (c *Cookie) Value() *String {
+	if c.chain.failed() {
+		return &String{c.chain, ""}
+	}
+	return &String{c.chain, c.value.Value}
+}
+
+// Domain returns a new String object that may be used to inspect
+// cookie domain.
+//
+// Example:
+//  cookie := NewCookie(t, &http.Cookie{...})
+//  cookie.Domain().Equal("example.com")
+func (c *Cookie) Domain() *String {
+	if c.chain.failed() {
+		return &String{c.chain, ""}
+	}
+	return &String{c.chain, c.value.Domain}
+}
+
+// Path returns a new String object that may be used to inspect
+// cookie path.
+//
+// Example:
+//  cookie := NewCookie(t, &http.Cookie{...})
+//  cookie.Path().Equal("/foo")
+func (c *Cookie) Path() *String {
+	if c.chain.failed() {
+		return &String{c.chain, ""}
+	}
+	return &String{c.chain, c.value.Path}
+}
+
+// Expires returns a new DateTime object that may be used to inspect
+// cookie expiration date.
+//
+// Example:
+//  cookie := NewCookie(t, &http.Cookie{...})
+//  cookie.Expires().InRange(time.Now(), time.Now().Add(time.Hour * 24))
+func (c *Cookie) Expires() *DateTime {
+	if c.chain.failed() {
+		return &DateTime{c.chain, time.Unix(0, 0)}
+	}
+	return &DateTime{c.chain, c.value.Expires}
+}

+ 129 - 0
vendor/github.com/iris-contrib/httpexpect/datetime.go

@@ -0,0 +1,129 @@
+package httpexpect
+
+import (
+	"time"
+)
+
+// DateTime provides methods to inspect attached time.Time value.
+type DateTime struct {
+	chain chain
+	value time.Time
+}
+
+// NewDateTime returns a new DateTime object given a reporter used to report
+// failures and time.Time value to be inspected.
+//
+// reporter should not be nil.
+//
+// Example:
+//   dt := NewDateTime(reporter, time.Now())
+//   dt.Le(time.Now())
+//
+//   time.Sleep(time.Second)
+//   dt.Lt(time.Now())
+func NewDateTime(reporter Reporter, value time.Time) *DateTime {
+	return &DateTime{makeChain(reporter), value}
+}
+
+// Raw returns underlying time.Time value attached to DateTime.
+// This is the value originally passed to NewDateTime.
+//
+// Example:
+//  dt := NewDateTime(t, timestamp)
+//  assert.Equal(t, timestamp, dt.Raw())
+func (dt *DateTime) Raw() time.Time {
+	return dt.value
+}
+
+// Equal succeeds if DateTime is equal to given value.
+//
+// Example:
+//  dt := NewDateTime(t, time.Unix(0, 1))
+//  dt.Equal(time.Unix(0, 1))
+func (dt *DateTime) Equal(value time.Time) *DateTime {
+	if !dt.value.Equal(value) {
+		dt.chain.fail("\nexpected datetime equal to:\n %s\n\nbut got:\n %s",
+			value, dt.value)
+	}
+	return dt
+}
+
+// NotEqual succeeds if DateTime is not equal to given value.
+//
+// Example:
+//  dt := NewDateTime(t, time.Unix(0, 1))
+//  dt.NotEqual(time.Unix(0, 2))
+func (dt *DateTime) NotEqual(value time.Time) *DateTime {
+	if dt.value.Equal(value) {
+		dt.chain.fail("\nexpected datetime not equal to:\n %s", value)
+	}
+	return dt
+}
+
+// Gt succeeds if DateTime is greater than given value.
+//
+// Example:
+//  dt := NewDateTime(t, time.Unix(0, 2))
+//  dt.Gt(time.Unix(0, 1))
+func (dt *DateTime) Gt(value time.Time) *DateTime {
+	if !dt.value.After(value) {
+		dt.chain.fail("\nexpected datetime > then:\n %s\n\nbut got:\n %s",
+			value, dt.value)
+	}
+	return dt
+}
+
+// Ge succeeds if DateTime is greater than or equal to given value.
+//
+// Example:
+//  dt := NewDateTime(t, time.Unix(0, 2))
+//  dt.Ge(time.Unix(0, 1))
+func (dt *DateTime) Ge(value time.Time) *DateTime {
+	if !(dt.value.After(value) || dt.value.Equal(value)) {
+		dt.chain.fail("\nexpected datetime >= then:\n %s\n\nbut got:\n %s",
+			value, dt.value)
+	}
+	return dt
+}
+
+// Lt succeeds if DateTime is lesser than given value.
+//
+// Example:
+//  dt := NewDateTime(t, time.Unix(0, 1))
+//  dt.Lt(time.Unix(0, 2))
+func (dt *DateTime) Lt(value time.Time) *DateTime {
+	if !dt.value.Before(value) {
+		dt.chain.fail("\nexpected datetime < then:\n %s\n\nbut got:\n %s",
+			value, dt.value)
+	}
+	return dt
+}
+
+// Le succeeds if DateTime is lesser than or equal to given value.
+//
+// Example:
+//  dt := NewDateTime(t, time.Unix(0, 1))
+//  dt.Le(time.Unix(0, 2))
+func (dt *DateTime) Le(value time.Time) *DateTime {
+	if !(dt.value.Before(value) || dt.value.Equal(value)) {
+		dt.chain.fail("\nexpected datetime <= then:\n %s\n\nbut got:\n %s",
+			value, dt.value)
+	}
+	return dt
+}
+
+// InRange succeeds if DateTime is in given range [min; max].
+//
+// Example:
+//  dt := NewDateTime(t, time.Unix(0, 2))
+//  dt.InRange(time.Unix(0, 1), time.Unix(0, 3))
+//  dt.InRange(time.Unix(0, 2), time.Unix(0, 2))
+func (dt *DateTime) InRange(min, max time.Time) *DateTime {
+	if !((dt.value.After(min) || dt.value.Equal(min)) &&
+		(dt.value.Before(max) || dt.value.Equal(max))) {
+		dt.chain.fail(
+			"\nexpected datetime in range:\n min: %s\n max: %s\n\nbut got: %s",
+			min, max, dt.value)
+	}
+	return dt
+}

+ 375 - 0
vendor/github.com/iris-contrib/httpexpect/expect.go

@@ -0,0 +1,375 @@
+// Package httpexpect helps with end-to-end HTTP and REST API testing.
+//
+// Usage examples
+//
+// See example directory:
+//  - https://godoc.org/github.com/iris-contrib/httpexpect/_examples
+//  - https://github.com/iris-contrib/httpexpect/tree/master/_examples
+//
+// Communication mode
+//
+// There are two common ways to test API with httpexpect:
+//  - start HTTP server and instruct httpexpect to use HTTP client for communication
+//  - don't start server and instruct httpexpect to invoke http handler directly
+//
+// The second approach works only if the server is a Go module and its handler can
+// be imported in tests.
+//
+// Concrete behaviour is determined by Client implementation passed to Config struct.
+// If you're using http.Client, set its Transport field (http.RoundTripper) to one of
+// the following:
+//  1. default (nil) - use HTTP transport from net/http (you should start server)
+//  2. httpexpect.Binder - invoke given http.Handler directly
+//
+// Note that the http handler can usually be obtained from the http framework you're using.
+// E.g., the echo framework provides an http.Handler.
+//
+// You can also provide your own implementation of RequestFactory (creates http.Request),
+// or Client (gets http.Request and returns http.Response).
+//
+// If you're starting server from tests, it's very handy to use net/http/httptest.
+//
+// Value equality
+//
+// Whenever values are checked for equality in httpexpect, they are converted
+// to "canonical form":
+//  - structs are converted to map[string]interface{}
+//  - type aliases are removed
+//  - numeric types are converted to float64
+//  - non-nil interfaces pointing to nil slices and maps are replaced with
+//    nil interfaces
+//
+// This is equivalent to passing the value through json.Marshal() and then
+// json.Unmarshal(), and is currently implemented that way.
+//
+// Failure handling
+//
+// When some check fails, failure is reported. If non-fatal failures are used
+// (see Reporter interface), execution is continued and instance that was checked
+// is marked as failed.
+//
+// If specific instance is marked as failed, all subsequent checks are ignored
+// for this instance and for any child instances retrieved after failure.
+//
+// Example:
+//  array := NewArray(NewAssertReporter(t), []interface{}{"foo", 123})
+//
+//  e0 := array.Element(0)  // success
+//  e1 := array.Element(1)  // success
+//
+//  s0 := e0.String()  // success
+//  s1 := e1.String()  // failure; e1 and s1 are marked as failed, e0 and s0 are not
+//
+//  s0.Equal("foo")    // success
+//  s1.Equal("bar")    // this check is ignored because s1 is marked as failed
+package httpexpect
+
+import (
+	"io"
+	"net/http"
+	"net/http/cookiejar"
+	"time"
+
+	"golang.org/x/net/publicsuffix"
+)
+
+// Expect is a toplevel object that contains user Config and allows
+// to construct Request objects.
+type Expect struct {
+	config   Config
+	builders []func(*Request)
+}
+
+// Config contains various settings.
+type Config struct {
+	// BaseURL is a URL prepended to all requests. May be empty. If
+	// non-empty, a trailing slash is allowed but not required and is
+	// appended automatically.
+	BaseURL string
+
+	// RequestFactory is used to pass in a custom *http.Request generation func.
+	// May be nil.
+	//
+	// You can use DefaultRequestFactory, or provide custom implementation.
+	// Useful for Google App Engine testing for example.
+	RequestFactory RequestFactory
+
+	// Client is used to send http.Request and receive http.Response.
+	// Should not be nil.
+	//
+	// You can use http.DefaultClient or http.Client, or provide
+	// custom implementation.
+	Client Client
+
+	// Reporter is used to report failures.
+	// Should not be nil.
+	//
+	// You can use AssertReporter, RequireReporter (they use testify),
+	// or testing.TB, or provide custom implementation.
+	Reporter Reporter
+
+	// Printers are used to print requests and responses.
+	// May be nil.
+	//
+	// You can use CompactPrinter, DebugPrinter, CurlPrinter, or provide
+	// custom implementation.
+	//
+	// You can also use builtin printers with alternative Logger if
+	// you're happy with their format, but want to send logs somewhere
+	// else instead of testing.TB.
+	Printers []Printer
+}
+
+// RequestFactory is used to create all http.Request objects.
+// aetest.Instance from the Google App Engine implements this interface.
+type RequestFactory interface {
+	NewRequest(method, urlStr string, body io.Reader) (*http.Request, error)
+}
+
+// Client is used to send http.Request and receive http.Response.
+// http.Client, Binder, and FastBinder implement this interface.
+type Client interface {
+	// Do sends request and returns response.
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Printer is used to print requests and responses.
+// CompactPrinter, DebugPrinter, and CurlPrinter implement this interface.
+type Printer interface {
+	// Request is called before request is sent.
+	Request(*http.Request)
+
+	// Response is called after response is received.
+	Response(*http.Response, time.Duration)
+}
+
+// Logger is used as output backend for Printer.
+// testing.TB implements this interface.
+type Logger interface {
+	// Logf writes message to log.
+	Logf(fmt string, args ...interface{})
+}
+
+// Reporter is used to report failures.
+// testing.TB, AssertReporter, and RequireReporter implement this interface.
+type Reporter interface {
+	// Errorf reports failure.
+	// Allowed to return normally or terminate test using t.FailNow().
+	Errorf(message string, args ...interface{})
+}
+
+// LoggerReporter combines Logger and Reporter interfaces.
+type LoggerReporter interface {
+	Logger
+	Reporter
+}
+
+// DefaultRequestFactory is the default RequestFactory implementation which just
+// calls http.NewRequest.
+type DefaultRequestFactory struct{}
+
+// NewRequest implements RequestFactory.NewRequest.
+func (DefaultRequestFactory) NewRequest(
+	method, urlStr string, body io.Reader) (*http.Request, error) {
+	return http.NewRequest(method, urlStr, body)
+}
+
+// New returns a new Expect object.
+//
+// baseURL specifies the URL prepended to all requests. May be empty. If non-empty,
+// a trailing slash is allowed but not required and is appended automatically.
+//
+// New is a shorthand for WithConfig. It uses:
+//  - CompactPrinter as Printer, with testing.TB as Logger
+//  - AssertReporter as Reporter
+//  - DefaultRequestFactory as RequestFactory
+//
+// Client is set to a default client with a non-nil Jar:
+//  &http.Client{
+//      Jar: httpexpect.NewJar(),
+//  }
+//
+// Example:
+//  func TestSomething(t *testing.T) {
+//      e := httpexpect.New(t, "http://example.com/")
+//
+//      e.GET("/path").
+//          Expect().
+//          Status(http.StatusOK)
+//  }
+func New(t LoggerReporter, baseURL string) *Expect {
+	return WithConfig(Config{
+		BaseURL:  baseURL,
+		Reporter: NewAssertReporter(t),
+		Printers: []Printer{
+			NewCompactPrinter(t),
+		},
+	})
+}
+
+// WithConfig returns a new Expect object with given config.
+//
+// Reporter should not be nil.
+//
+// If RequestFactory is nil, it's set to a DefaultRequestFactory instance.
+//
+// If Client is nil, it's set to a default client with a non-nil Jar:
+//  &http.Client{
+//      Jar: httpexpect.NewJar(),
+//  }
+//
+// Example:
+//  func TestSomething(t *testing.T) {
+//      e := httpexpect.WithConfig(httpexpect.Config{
+//          BaseURL:  "http://example.com/",
+//          Client:   &http.Client{
+//              Transport: httpexpect.NewBinder(myHandler()),
+//              Jar:       httpexpect.NewJar(),
+//          },
+//          Reporter: httpexpect.NewAssertReporter(t),
+//          Printers: []httpexpect.Printer{
+//              httpexpect.NewCurlPrinter(t),
+//              httpexpect.NewDebugPrinter(t, true),
+//          },
+//      })
+//
+//      e.GET("/path").
+//          Expect().
+//          Status(http.StatusOK)
+//  }
+func WithConfig(config Config) *Expect {
+	if config.Reporter == nil {
+		panic("config.Reporter is nil")
+	}
+	if config.RequestFactory == nil {
+		config.RequestFactory = DefaultRequestFactory{}
+	}
+	if config.Client == nil {
+		config.Client = &http.Client{
+			Jar: NewJar(),
+		}
+	}
+	return &Expect{
+		config:   config,
+		builders: nil,
+	}
+}
+
+// NewJar returns a new http.CookieJar.
+//
+// Returned jar is implemented in net/http/cookiejar. PublicSuffixList is
+// implemented in golang.org/x/net/publicsuffix.
+//
+// Note that this jar ignores cookies when request url is empty.
+func NewJar() http.CookieJar {
+	jar, err := cookiejar.New(&cookiejar.Options{
+		PublicSuffixList: publicsuffix.List,
+	})
+	if err != nil {
+		panic(err)
+	}
+	return jar
+}
+
+// Builder returns a copy of Expect instance with given builder attached to it.
+// Returned copy contains all previously attached builders plus a new one.
+// Builders are invoked from Request method, after constructing every new request.
+//
+// Example:
+//  e := httpexpect.New(t, "http://example.com")
+//
+//  token := e.POST("/login").WithForm(Login{"ford", "betelgeuse7"}).
+//      Expect().
+//      Status(http.StatusOK).JSON().Object().Value("token").String().Raw()
+//
+//  auth := e.Builder(func (req *httpexpect.Request) {
+//      req.WithHeader("Authorization", "Bearer "+token)
+//  })
+//
+//  auth.GET("/restricted").
+//     Expect().
+//     Status(http.StatusOK)
+func (e *Expect) Builder(builder func(*Request)) *Expect {
+	ret := *e
+	ret.builders = append(e.builders, builder)
+	return &ret
+}
+
+// Request returns a new Request object.
+// Arguments are similar to NewRequest.
+// After creating request, all builders attached to Expect object are invoked.
+// See Builder.
+func (e *Expect) Request(method, path string, pathargs ...interface{}) *Request {
+	req := NewRequest(e.config, method, path, pathargs...)
+
+	for _, builder := range e.builders {
+		builder(req)
+	}
+
+	return req
+}
+
+// OPTIONS is a shorthand for e.Request("OPTIONS", path, pathargs...).
+func (e *Expect) OPTIONS(path string, pathargs ...interface{}) *Request {
+	return e.Request("OPTIONS", path, pathargs...)
+}
+
+// HEAD is a shorthand for e.Request("HEAD", path, pathargs...).
+func (e *Expect) HEAD(path string, pathargs ...interface{}) *Request {
+	return e.Request("HEAD", path, pathargs...)
+}
+
+// GET is a shorthand for e.Request("GET", path, pathargs...).
+func (e *Expect) GET(path string, pathargs ...interface{}) *Request {
+	return e.Request("GET", path, pathargs...)
+}
+
+// POST is a shorthand for e.Request("POST", path, pathargs...).
+func (e *Expect) POST(path string, pathargs ...interface{}) *Request {
+	return e.Request("POST", path, pathargs...)
+}
+
+// PUT is a shorthand for e.Request("PUT", path, pathargs...).
+func (e *Expect) PUT(path string, pathargs ...interface{}) *Request {
+	return e.Request("PUT", path, pathargs...)
+}
+
+// PATCH is a shorthand for e.Request("PATCH", path, pathargs...).
+func (e *Expect) PATCH(path string, pathargs ...interface{}) *Request {
+	return e.Request("PATCH", path, pathargs...)
+}
+
+// DELETE is a shorthand for e.Request("DELETE", path, pathargs...).
+func (e *Expect) DELETE(path string, pathargs ...interface{}) *Request {
+	return e.Request("DELETE", path, pathargs...)
+}
+
+// Value is a shorthand for NewValue(e.config.Reporter, value).
+func (e *Expect) Value(value interface{}) *Value {
+	return NewValue(e.config.Reporter, value)
+}
+
+// Object is a shorthand for NewObject(e.config.Reporter, value).
+func (e *Expect) Object(value map[string]interface{}) *Object {
+	return NewObject(e.config.Reporter, value)
+}
+
+// Array is a shorthand for NewArray(e.config.Reporter, value).
+func (e *Expect) Array(value []interface{}) *Array {
+	return NewArray(e.config.Reporter, value)
+}
+
+// String is a shorthand for NewString(e.config.Reporter, value).
+func (e *Expect) String(value string) *String {
+	return NewString(e.config.Reporter, value)
+}
+
+// Number is a shorthand for NewNumber(e.config.Reporter, value).
+func (e *Expect) Number(value float64) *Number {
+	return NewNumber(e.config.Reporter, value)
+}
+
+// Boolean is a shorthand for NewBoolean(e.config.Reporter, value).
+func (e *Expect) Boolean(value bool) *Boolean {
+	return NewBoolean(e.config.Reporter, value)
+}
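
For the other communication mode described in the package doc (starting a real server from the test), a minimal sketch using net/http/httptest; the handler and route are assumptions, not part of this change:

package example_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/iris-contrib/httpexpect"
)

func TestHTTPServerSketch(t *testing.T) {
	// Start a throwaway server; httpexpect then uses the default HTTP transport.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()

	// *testing.T satisfies LoggerReporter, so New can be used directly.
	e := httpexpect.New(t, srv.URL)
	e.GET("/status").Expect().Status(http.StatusOK)
}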

+ 184 - 0
vendor/github.com/iris-contrib/httpexpect/helpers.go

@@ -0,0 +1,184 @@
+package httpexpect
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"regexp"
+
+	"github.com/xeipuuv/gojsonschema"
+	"github.com/yalp/jsonpath"
+	"github.com/yudai/gojsondiff"
+	"github.com/yudai/gojsondiff/formatter"
+)
+
+func toString(str interface{}) (s string, ok bool) {
+	ok = true
+	defer func() {
+		if err := recover(); err != nil {
+			ok = false
+		}
+	}()
+	s = reflect.ValueOf(str).Convert(reflect.TypeOf("")).String()
+	return
+}
+
+func getPath(chain *chain, value interface{}, path string) *Value {
+	if chain.failed() {
+		return &Value{*chain, nil}
+	}
+
+	result, err := jsonpath.Read(value, path)
+	if err != nil {
+		chain.fail(err.Error())
+		return &Value{*chain, nil}
+	}
+
+	return &Value{*chain, result}
+}
+
+func checkSchema(chain *chain, value, schema interface{}) {
+	if chain.failed() {
+		return
+	}
+
+	valueLoader := gojsonschema.NewGoLoader(value)
+
+	var schemaLoader gojsonschema.JSONLoader
+
+	if str, ok := toString(schema); ok {
+		if ok, _ := regexp.MatchString(`^\w+://`, str); ok {
+			schemaLoader = gojsonschema.NewReferenceLoader(str)
+		} else {
+			schemaLoader = gojsonschema.NewStringLoader(str)
+		}
+	} else {
+		schemaLoader = gojsonschema.NewGoLoader(schema)
+	}
+
+	result, err := gojsonschema.Validate(schemaLoader, valueLoader)
+	if err != nil {
+		chain.fail("\n%s\n\nschema:\n%s\n\nvalue:\n%s",
+			err.Error(),
+			dumpSchema(schema),
+			dumpValue(value))
+		return
+	}
+
+	if !result.Valid() {
+		errors := ""
+		for _, err := range result.Errors() {
+			errors += fmt.Sprintf(" %s\n", err)
+		}
+
+		chain.fail(
+			"\njson schema validation failed, schema:\n%s\n\nvalue:%s\n\nerrors:\n%s",
+			dumpSchema(schema),
+			dumpValue(value),
+			errors)
+
+		return
+	}
+}
+
+func dumpSchema(schema interface{}) string {
+	if s, ok := toString(schema); ok {
+		schema = s
+	}
+	return regexp.MustCompile(`(?m:^)`).
+		ReplaceAllString(fmt.Sprintf("%v", schema), " ")
+}
+
+func canonNumber(chain *chain, number interface{}) (f float64, ok bool) {
+	ok = true
+	defer func() {
+		if err := recover(); err != nil {
+			chain.fail("%v", err)
+			ok = false
+		}
+	}()
+	f = reflect.ValueOf(number).Convert(reflect.TypeOf(float64(0))).Float()
+	return
+}
+
+func canonArray(chain *chain, in interface{}) ([]interface{}, bool) {
+	var out []interface{}
+	data, ok := canonValue(chain, in)
+	if ok {
+		out, ok = data.([]interface{})
+		if !ok {
+			chain.fail("expected array, got %v", out)
+		}
+	}
+	return out, ok
+}
+
+func canonMap(chain *chain, in interface{}) (map[string]interface{}, bool) {
+	var out map[string]interface{}
+	data, ok := canonValue(chain, in)
+	if ok {
+		out, ok = data.(map[string]interface{})
+		if !ok {
+			chain.fail("expected map, got %v", out)
+		}
+	}
+	return out, ok
+}
+
+func canonValue(chain *chain, in interface{}) (interface{}, bool) {
+	b, err := json.Marshal(in)
+	if err != nil {
+		chain.fail(err.Error())
+		return nil, false
+	}
+
+	var out interface{}
+	if err := json.Unmarshal(b, &out); err != nil {
+		chain.fail(err.Error())
+		return nil, false
+	}
+
+	return out, true
+}
+
+func dumpValue(value interface{}) string {
+	b, err := json.MarshalIndent(value, " ", "  ")
+	if err != nil {
+		return " " + fmt.Sprintf("%#v", value)
+	}
+	return " " + string(b)
+}
+
+func diffValues(expected, actual interface{}) string {
+	differ := gojsondiff.New()
+
+	var diff gojsondiff.Diff
+
+	if ve, ok := expected.(map[string]interface{}); ok {
+		if va, ok := actual.(map[string]interface{}); ok {
+			diff = differ.CompareObjects(ve, va)
+		} else {
+			return " (unavailable)"
+		}
+	} else if ve, ok := expected.([]interface{}); ok {
+		if va, ok := actual.([]interface{}); ok {
+			diff = differ.CompareArrays(ve, va)
+		} else {
+			return " (unavailable)"
+		}
+	} else {
+		return " (unavailable)"
+	}
+
+	config := formatter.AsciiFormatterConfig{
+		ShowArrayIndex: true,
+	}
+	formatter := formatter.NewAsciiFormatter(expected, config)
+
+	str, err := formatter.Format(diff)
+	if err != nil {
+		return " (unavailable)"
+	}
+
+	return "--- expected\n+++ actual\n" + str
+}

+ 198 - 0
vendor/github.com/iris-contrib/httpexpect/match.go

@@ -0,0 +1,198 @@
+package httpexpect
+
+import (
+	"reflect"
+)
+
+// Match provides methods to inspect attached regexp match results.
+type Match struct {
+	chain      chain
+	submatches []string
+	names      map[string]int
+}
+
+// NewMatch returns a new Match object given a reporter used to report
+// failures and submatches to be inspected.
+//
+// reporter should not be nil. submatches and names may be nil.
+//
+// Example:
+//   s := "http://example.com/users/john"
+//   r := regexp.MustCompile(`http://(?P<host>.+)/users/(?P<user>.+)`)
+//   m := NewMatch(reporter, r.FindStringSubmatch(s), r.SubexpNames())
+//
+//   m.NotEmpty()
+//   m.Length().Equal(3)
+//
+//   m.Index(0).Equal("http://example.com/users/john")
+//   m.Index(1).Equal("example.com")
+//   m.Index(2).Equal("john")
+//
+//   m.Name("host").Equal("example.com")
+//   m.Name("user").Equal("john")
+func NewMatch(reporter Reporter, submatches []string, names []string) *Match {
+	return makeMatch(makeChain(reporter), submatches, names)
+}
+
+func makeMatch(chain chain, submatches []string, names []string) *Match {
+	if submatches == nil {
+		submatches = []string{}
+	}
+	namemap := map[string]int{}
+	for n, name := range names {
+		if name != "" {
+			namemap[name] = n
+		}
+	}
+	return &Match{chain, submatches, namemap}
+}
+
+// Raw returns underlying submatches attached to Match.
+// This is the value originally passed to NewMatch.
+//
+// Example:
+//  m := NewMatch(t, submatches, names)
+//  assert.Equal(t, submatches, m.Raw())
+func (m *Match) Raw() []string {
+	return m.submatches
+}
+
+// Length returns a new Number object that may be used to inspect
+// number of submatches.
+//
+// Example:
+//  m := NewMatch(t, submatches, names)
+//  m.Length().Equal(len(submatches))
+func (m *Match) Length() *Number {
+	return &Number{m.chain, float64(len(m.submatches))}
+}
+
+// Index returns a new String object that may be used to inspect submatch
+// with given index.
+//
+// Note that submatch with index 0 contains the whole match. If index is out
+// of bounds, Index reports failure and returns empty (but non-nil) value.
+//
+// Example:
+//   s := "http://example.com/users/john"
+//
+//   r := regexp.MustCompile(`http://(.+)/users/(.+)`)
+//   m := NewMatch(t, r.FindStringSubmatch(s), nil)
+//
+//   m.Index(0).Equal("http://example.com/users/john")
+//   m.Index(1).Equal("example.com")
+//   m.Index(2).Equal("john")
+func (m *Match) Index(index int) *String {
+	if index < 0 || index >= len(m.submatches) {
+		m.chain.fail(
+			"\nsubmatch index out of bounds:\n  index %d\n\n  bounds [%d; %d)",
+			index,
+			0,
+			len(m.submatches))
+		return &String{m.chain, ""}
+	}
+	return &String{m.chain, m.submatches[index]}
+}
+
+// Name returns a new String object that may be used to inspect submatch
+// with given name.
+//
+// If there is no submatch with given name, Name reports failure and returns
+// empty (but non-nil) value.
+//
+// Example:
+//   s := "http://example.com/users/john"
+//
+//   r := regexp.MustCompile(`http://(?P<host>.+)/users/(?P<user>.+)`)
+//   m := NewMatch(t, r.FindStringSubmatch(s), r.SubexpNames())
+//
+//   m.Name("host").Equal("example.com")
+//   m.Name("user").Equal("john")
+func (m *Match) Name(name string) *String {
+	index, ok := m.names[name]
+	if !ok {
+		m.chain.fail(
+			"\nsubmatch name not found:\n %q\n\navailable names:\n%s",
+			name,
+			dumpValue(m.names))
+		return &String{m.chain, ""}
+	}
+	return m.Index(index)
+}
+
+// Empty succeeds if submatches array is empty.
+//
+// Example:
+//  m := NewMatch(t, submatches, names)
+//  m.Empty()
+func (m *Match) Empty() *Match {
+	if len(m.submatches) != 0 {
+		m.chain.fail("\nexpected zero submatches, but got:\n  %s",
+			dumpValue(m.submatches))
+	}
+	return m
+}
+
+// NotEmpty succeeds if submatches array is non-empty.
+//
+// Example:
+//  m := NewMatch(t, submatches, names)
+//  m.NotEmpty()
+func (m *Match) NotEmpty() *Match {
+	if len(m.submatches) == 0 {
+		m.chain.fail("expected non-zero submatches")
+	}
+	return m
+}
+
+// Values succeeds if submatches array, starting from index 1, is equal to
+// given array.
+//
+// Note that submatch with index 0 contains the whole match and is not
+// included into this check.
+//
+// Example:
+//   s := "http://example.com/users/john"
+//   r := regexp.MustCompile(`http://(.+)/users/(.+)`)
+//   m := NewMatch(t, r.FindStringSubmatch(s), nil)
+//   m.Values("example.com", "john")
+func (m *Match) Values(values ...string) *Match {
+	if values == nil {
+		values = []string{}
+	}
+	if !reflect.DeepEqual(values, m.getValues()) {
+		m.chain.fail("\nexpected submatches equal to:\n%s\n\nbut got:\n%s",
+			dumpValue(values),
+			dumpValue(m.getValues()))
+	}
+	return m
+}
+
+// NotValues succeeds if submatches array, starting from index 1, is not
+// equal to given array.
+//
+// Note that submatch with index 0 contains the whole match and is not
+// included into this check.
+//
+// Example:
+//   s := "http://example.com/users/john"
+//   r := regexp.MustCompile(`http://(.+)/users/(.+)`)
+//   m := NewMatch(t, r.FindStringSubmatch(s), nil)
+//   m.NotValues("example.com", "bob")
+func (m *Match) NotValues(values ...string) *Match {
+	if values == nil {
+		values = []string{}
+	}
+	if reflect.DeepEqual(values, m.getValues()) {
+		m.chain.fail("\nexpected submatches not equal to:\n%s",
+			dumpValue(values))
+	}
+	return m
+}
+
+func (m *Match) getValues() []string {
+	if len(m.submatches) > 1 {
+		return m.submatches[1:]
+	}
+	return []string{}
+}

+ 244 - 0
vendor/github.com/iris-contrib/httpexpect/number.go

@@ -0,0 +1,244 @@
+package httpexpect
+
+import (
+	"math"
+)
+
+// Number provides methods to inspect attached float64 value
+// (Go representation of JSON number).
+type Number struct {
+	chain chain
+	value float64
+}
+
+// NewNumber returns a new Number given a reporter used to report
+// failures and value to be inspected.
+//
+// reporter should not be nil.
+//
+// Example:
+//  number := NewNumber(t, 123.4)
+func NewNumber(reporter Reporter, value float64) *Number {
+	return &Number{makeChain(reporter), value}
+}
+
+// Raw returns underlying value attached to Number.
+// This is the value originally passed to NewNumber.
+//
+// Example:
+//  number := NewNumber(t, 123.4)
+//  assert.Equal(t, 123.4, number.Raw())
+func (n *Number) Raw() float64 {
+	return n.value
+}
+
+// Path is similar to Value.Path.
+func (n *Number) Path(path string) *Value {
+	return getPath(&n.chain, n.value, path)
+}
+
+// Schema is similar to Value.Schema.
+func (n *Number) Schema(schema interface{}) *Number {
+	checkSchema(&n.chain, n.value, schema)
+	return n
+}
+
+// Equal succeeds if number is equal to given value.
+//
+// value should have numeric type convertible to float64. Before comparison,
+// it is converted to float64.
+//
+// Example:
+//  number := NewNumber(t, 123)
+//  number.Equal(float64(123))
+//  number.Equal(int32(123))
+func (n *Number) Equal(value interface{}) *Number {
+	v, ok := canonNumber(&n.chain, value)
+	if !ok {
+		return n
+	}
+	if !(n.value == v) {
+		n.chain.fail("\nexpected number equal to:\n %v\n\nbut got:\n %v",
+			v, n.value)
+	}
+	return n
+}
+
+// NotEqual succeeds if number is not equal to given value.
+//
+// value should have numeric type convertible to float64. Before comparison,
+// it is converted to float64.
+//
+// Example:
+//  number := NewNumber(t, 123)
+//  number.NotEqual(float64(321))
+//  number.NotEqual(int32(321))
+func (n *Number) NotEqual(value interface{}) *Number {
+	v, ok := canonNumber(&n.chain, value)
+	if !ok {
+		return n
+	}
+	if !(n.value != v) {
+		n.chain.fail("\nexpected number not equal to:\n %v\n\nbut got:\n %v",
+			v, n.value)
+	}
+	return n
+}
+
+// EqualDelta succeeds if two numerals are within delta of each other.
+//
+// Example:
+//  number := NewNumber(t, 123.0)
+//  number.EqualDelta(123.2, 0.3)
+func (n *Number) EqualDelta(value, delta float64) *Number {
+	if math.IsNaN(n.value) || math.IsNaN(value) || math.IsNaN(delta) {
+		n.chain.fail("\nexpected number equal to:\n %v\n\nbut got:\n %v\n\ndelta:\n %v",
+			value, n.value, delta)
+		return n
+	}
+
+	diff := (n.value - value)
+
+	if diff < -delta || diff > delta {
+		n.chain.fail("\nexpected number equal to:\n %v\n\nbut got:\n %v\n\ndelta:\n %v",
+			value, n.value, delta)
+		return n
+	}
+
+	return n
+}
+
+// NotEqualDelta succeeds if two numerals are not within delta of each other.
+//
+// Example:
+//  number := NewNumber(t, 123.0)
+//  number.NotEqualDelta(123.2, 0.1)
+func (n *Number) NotEqualDelta(value, delta float64) *Number {
+	if math.IsNaN(n.value) || math.IsNaN(value) || math.IsNaN(delta) {
+		n.chain.fail(
+			"\nexpected number not equal to:\n %v\n\nbut got:\n %v\n\ndelta:\n %v",
+			value, n.value, delta)
+		return n
+	}
+
+	diff := (n.value - value)
+
+	if !(diff < -delta || diff > delta) {
+		n.chain.fail(
+			"\nexpected number not equal to:\n %v\n\nbut got:\n %v\n\ndelta:\n %v",
+			value, n.value, delta)
+		return n
+	}
+
+	return n
+}
+
+// Gt succeeds if number is greater than given value.
+//
+// value should have numeric type convertible to float64. Before comparison,
+// it is converted to float64.
+//
+// Example:
+//  number := NewNumber(t, 123)
+//  number.Gt(float64(122))
+//  number.Gt(int32(122))
+func (n *Number) Gt(value interface{}) *Number {
+	v, ok := canonNumber(&n.chain, value)
+	if !ok {
+		return n
+	}
+	if !(n.value > v) {
+		n.chain.fail("\nexpected number > then:\n %v\n\nbut got:\n %v",
+			v, n.value)
+	}
+	return n
+}
+
+// Ge succeeds if number is greater than or equal to given value.
+//
+// value should have numeric type convertible to float64. Before comparison,
+// it is converted to float64.
+//
+// Example:
+//  number := NewNumber(t, 123)
+//  number.Ge(float64(122))
+//  number.Ge(int32(122))
+func (n *Number) Ge(value interface{}) *Number {
+	v, ok := canonNumber(&n.chain, value)
+	if !ok {
+		return n
+	}
+	if !(n.value >= v) {
+		n.chain.fail("\nexpected number >= then:\n %v\n\nbut got:\n %v",
+			v, n.value)
+	}
+	return n
+}
+
+// Lt succeeds if number is lesser than given value.
+//
+// value should have numeric type convertible to float64. Before comparison,
+// it is converted to float64.
+//
+// Example:
+//  number := NewNumber(t, 123)
+//  number.Lt(float64(124))
+//  number.Lt(int32(124))
+func (n *Number) Lt(value interface{}) *Number {
+	v, ok := canonNumber(&n.chain, value)
+	if !ok {
+		return n
+	}
+	if !(n.value < v) {
+		n.chain.fail("\nexpected number < then:\n %v\n\nbut got:\n %v",
+			v, n.value)
+	}
+	return n
+}
+
+// Le succeeds if number is lesser than or equal to given value.
+//
+// value should have numeric type convertible to float64. Before comparison,
+// it is converted to float64.
+//
+// Example:
+//  number := NewNumber(t, 123)
+//  number.Le(float64(124))
+//  number.Le(int32(124))
+func (n *Number) Le(value interface{}) *Number {
+	v, ok := canonNumber(&n.chain, value)
+	if !ok {
+		return n
+	}
+	if !(n.value <= v) {
+		n.chain.fail("\nexpected number <= then:\n %v\n\nbut got:\n %v",
+			v, n.value)
+	}
+	return n
+}
+
+// InRange succeeds if number is in given range [min; max].
+//
+// min and max should have numeric type convertible to float64. Before comparison,
+// they are converted to float64.
+//
+// Example:
+//  number := NewNumber(t, 123)
+//  number.InRange(float32(100), int32(200))  // success
+//  number.InRange(100, 200)                  // success
+//  number.InRange(123, 123)                  // success
+func (n *Number) InRange(min, max interface{}) *Number {
+	a, ok := canonNumber(&n.chain, min)
+	if !ok {
+		return n
+	}
+	b, ok := canonNumber(&n.chain, max)
+	if !ok {
+		return n
+	}
+	if !(n.value >= a && n.value <= b) {
+		n.chain.fail("\nexpected number in range:\n [%v; %v]\n\nbut got:\n %v",
+			a, b, n.value)
+	}
+	return n
+}

+ 329 - 0
vendor/github.com/iris-contrib/httpexpect/object.go

@@ -0,0 +1,329 @@
+package httpexpect
+
+import (
+	"reflect"
+)
+
+// Object provides methods to inspect attached map[string]interface{} object
+// (Go representation of JSON object).
+type Object struct {
+	chain chain
+	value map[string]interface{}
+}
+
+// NewObject returns a new Object given a reporter used to report failures
+// and value to be inspected.
+//
+// Both reporter and value should not be nil. If value is nil, failure is
+// reported.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+func NewObject(reporter Reporter, value map[string]interface{}) *Object {
+	chain := makeChain(reporter)
+	if value == nil {
+		chain.fail("expected non-nil map value")
+	} else {
+		value, _ = canonMap(&chain, value)
+	}
+	return &Object{chain, value}
+}
+
+// Raw returns underlying value attached to Object.
+// This is the value originally passed to NewObject, converted to canonical form.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  assert.Equal(t, map[string]interface{}{"foo": 123.0}, object.Raw())
+func (o *Object) Raw() map[string]interface{} {
+	return o.value
+}
+
+// Path is similar to Value.Path.
+func (o *Object) Path(path string) *Value {
+	return getPath(&o.chain, o.value, path)
+}
+
+// Schema is similar to Value.Schema.
+func (o *Object) Schema(schema interface{}) *Object {
+	checkSchema(&o.chain, o.value, schema)
+	return o
+}
+
+// Keys returns a new Array object that may be used to inspect objects keys.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123, "bar": 456})
+//  object.Keys().ContainsOnly("foo", "bar")
+func (o *Object) Keys() *Array {
+	keys := []interface{}{}
+	for k := range o.value {
+		keys = append(keys, k)
+	}
+	return &Array{o.chain, keys}
+}
+
+// Values returns a new Array object that may be used to inspect objects values.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123, "bar": 456})
+//  object.Values().ContainsOnly(123, 456)
+func (o *Object) Values() *Array {
+	values := []interface{}{}
+	for _, v := range o.value {
+		values = append(values, v)
+	}
+	return &Array{o.chain, values}
+}
+
+// Value returns a new Value object that may be used to inspect single value
+// for given key.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  object.Value("foo").Number().Equal(123)
+func (o *Object) Value(key string) *Value {
+	value, ok := o.value[key]
+	if !ok {
+		o.chain.fail("\nexpected object containing key '%s', but got:\n%s",
+			key, dumpValue(o.value))
+		return &Value{o.chain, nil}
+	}
+	return &Value{o.chain, value}
+}
+
+// Empty succeeds if object is empty.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{})
+//  object.Empty()
+func (o *Object) Empty() *Object {
+	return o.Equal(map[string]interface{}{})
+}
+
+// NotEmpty succeeds if object is non-empty.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  object.NotEmpty()
+func (o *Object) NotEmpty() *Object {
+	return o.NotEqual(map[string]interface{}{})
+}
+
+// Equal succeeds if object is equal to another object.
+// Before comparison, both objects are converted to canonical form.
+//
+// value should be map[string]interface{} or struct.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  object.Equal(map[string]interface{}{"foo": 123})
+func (o *Object) Equal(value interface{}) *Object {
+	expected, ok := canonMap(&o.chain, value)
+	if !ok {
+		return o
+	}
+	if !reflect.DeepEqual(expected, o.value) {
+		o.chain.fail("\nexpected object equal to:\n%s\n\nbut got:\n%s\n\ndiff:\n%s",
+			dumpValue(expected),
+			dumpValue(o.value),
+			diffValues(expected, o.value))
+	}
+	return o
+}
+
+// NotEqual succeeds if object is not equal to another object.
+// Before comparison, both objects are converted to canonical form.
+//
+// value should be map[string]interface{} or struct.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  object.Equal(map[string]interface{}{"bar": 123})
+func (o *Object) NotEqual(v interface{}) *Object {
+	expected, ok := canonMap(&o.chain, v)
+	if !ok {
+		return o
+	}
+	if reflect.DeepEqual(expected, o.value) {
+		o.chain.fail("\nexpected object not equal to:\n%s",
+			dumpValue(expected))
+	}
+	return o
+}
+
+// ContainsKey succeeds if object contains given key.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  object.ContainsKey("foo")
+func (o *Object) ContainsKey(key string) *Object {
+	if !o.containsKey(key) {
+		o.chain.fail("\nexpected object containing key '%s', but got:\n%s",
+			key, dumpValue(o.value))
+	}
+	return o
+}
+
+// NotContainsKey succeeds if object doesn't contain given key.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  object.NotContainsKey("bar")
+func (o *Object) NotContainsKey(key string) *Object {
+	if o.containsKey(key) {
+		o.chain.fail(
+			"\nexpected object not containing key '%s', but got:\n%s", key,
+			dumpValue(o.value))
+	}
+	return o
+}
+
+// ContainsMap succeeds if object contains given sub-object.
+// Before comparison, both objects are converted to canonical form.
+//
+// value should be map[string]interface{} or struct.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{
+//      "foo": 123,
+//      "bar": []interface{}{"x", "y"},
+//      "bar": map[string]interface{}{
+//          "a": true,
+//          "b": false,
+//      },
+//  })
+//
+//  object.ContainsMap(map[string]interface{}{  // success
+//      "foo": 123,
+//      "bar": map[string]interface{}{
+//          "a": true,
+//      },
+//  })
+//
+//  object.ContainsMap(map[string]interface{}{  // failure
+//      "foo": 123,
+//      "qux": 456,
+//  })
+//
+//  object.ContainsMap(map[string]interface{}{  // failure, slices should match exactly
+//      "bar": []interface{}{"x"},
+//  })
+func (o *Object) ContainsMap(value interface{}) *Object {
+	if !o.containsMap(value) {
+		o.chain.fail("\nexpected object containing sub-object:\n%s\n\nbut got:\n%s",
+			dumpValue(value), dumpValue(o.value))
+	}
+	return o
+}
+
+// NotContainsMap succeeds if object doesn't contain given sub-object exactly.
+// Before comparison, both objects are converted to canonical form.
+//
+// value should be map[string]interface{} or struct.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123, "bar": 456})
+//  object.NotContainsMap(map[string]interface{}{"foo": 123, "bar": "no-no-no"})
+func (o *Object) NotContainsMap(value interface{}) *Object {
+	if o.containsMap(value) {
+		o.chain.fail("\nexpected object not containing sub-object:\n%s\n\nbut got:\n%s",
+			dumpValue(value), dumpValue(o.value))
+	}
+	return o
+}
+
+// ValueEqual succeeds if object's value for given key is equal to given value.
+// Before comparison, both values are converted to canonical form.
+//
+// value should be map[string]interface{} or struct.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  object.ValueEqual("foo", 123)
+func (o *Object) ValueEqual(key string, value interface{}) *Object {
+	if !o.containsKey(key) {
+		o.chain.fail("\nexpected object containing key '%s', but got:\n%s",
+			key, dumpValue(o.value))
+		return o
+	}
+	expected, ok := canonValue(&o.chain, value)
+	if !ok {
+		return o
+	}
+	if !reflect.DeepEqual(expected, o.value[key]) {
+		o.chain.fail(
+			"\nexpected value for key '%s' equal to:\n%s\n\nbut got:\n%s\n\ndiff:\n%s",
+			key,
+			dumpValue(expected),
+			dumpValue(o.value[key]),
+			diffValues(expected, o.value[key]))
+	}
+	return o
+}
+
+// ValueNotEqual succeeds if object's value for given key is not equal to given value.
+// Before comparison, both values are converted to canonical form.
+//
+// value should be map[string]interface{} or struct.
+//
+// If object doesn't contain any value for given key, failure is reported.
+//
+// Example:
+//  object := NewObject(t, map[string]interface{}{"foo": 123})
+//  object.ValueNotEqual("foo", "bad value")  // success
+//  object.ValueNotEqual("bar", "bad value")  // failure! (key is missing)
+func (o *Object) ValueNotEqual(key string, value interface{}) *Object {
+	if !o.containsKey(key) {
+		o.chain.fail("\nexpected object containing key '%s', but got:\n%s",
+			key, dumpValue(o.value))
+		return o
+	}
+	expected, ok := canonValue(&o.chain, value)
+	if !ok {
+		return o
+	}
+	if reflect.DeepEqual(expected, o.value[key]) {
+		o.chain.fail("\nexpected value for key '%s' not equal to:\n%s",
+			key, dumpValue(expected))
+	}
+	return o
+}
+
+func (o *Object) containsKey(key string) bool {
+	for k := range o.value {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+func (o *Object) containsMap(sm interface{}) bool {
+	submap, ok := canonMap(&o.chain, sm)
+	if !ok {
+		return false
+	}
+	return checkContainsMap(o.value, submap)
+}
+
+func checkContainsMap(outer, inner map[string]interface{}) bool {
+	for k, iv := range inner {
+		ov, ok := outer[k]
+		if !ok {
+			return false
+		}
+		if ovm, ok := ov.(map[string]interface{}); ok {
+			if ivm, ok := iv.(map[string]interface{}); ok {
+				if !checkContainsMap(ovm, ivm) {
+					return false
+				}
+				continue
+			}
+		}
+		if !reflect.DeepEqual(ov, iv) {
+			return false
+		}
+	}
+	return true
+}
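
A short sketch tying the Object assertions together, in particular the canonical-form behaviour noted in Raw and the partial, recursive containment that ContainsMap performs; keys and values are illustrative assumptions:

package example_test

import (
	"testing"

	"github.com/iris-contrib/httpexpect"
)

func TestObjectSketch(t *testing.T) {
	object := httpexpect.NewObject(httpexpect.NewAssertReporter(t), map[string]interface{}{
		"foo": 123,
		"bar": map[string]interface{}{"a": true, "b": false},
	})

	object.ContainsKey("foo")
	object.NotContainsKey("qux")
	object.ValueEqual("foo", 123) // canonical form: int compares equal to the stored float64

	// ContainsMap checks partial containment and recurses into nested maps.
	object.ContainsMap(map[string]interface{}{
		"bar": map[string]interface{}{"a": true},
	})
}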

+ 100 - 0
vendor/github.com/iris-contrib/httpexpect/printer.go

@@ -0,0 +1,100 @@
+package httpexpect
+
+import (
+	"net/http"
+	"net/http/httputil"
+	"strings"
+	"time"
+
+	"github.com/moul/http2curl"
+)
+
+// CompactPrinter implements Printer. It prints requests in compact form.
+type CompactPrinter struct {
+	logger Logger
+}
+
+// NewCompactPrinter returns a new CompactPrinter given a logger.
+func NewCompactPrinter(logger Logger) CompactPrinter {
+	return CompactPrinter{logger}
+}
+
+// Request implements Printer.Request.
+func (p CompactPrinter) Request(req *http.Request) {
+	if req != nil {
+		p.logger.Logf("%s %s", req.Method, req.URL)
+	}
+}
+
+// Response implements Printer.Response.
+func (CompactPrinter) Response(*http.Response, time.Duration) {
+}
+
+// DebugPrinter implements Printer. Uses net/http/httputil to dump
+// both requests and responses.
+type DebugPrinter struct {
+	logger Logger
+	body   bool
+}
+
+// NewDebugPrinter returns a new DebugPrinter given a logger and body
+// flag. If body is true, request and response body is also printed.
+func NewDebugPrinter(logger Logger, body bool) DebugPrinter {
+	return DebugPrinter{logger, body}
+}
+
+// Request implements Printer.Request.
+func (p DebugPrinter) Request(req *http.Request) {
+	if req == nil {
+		return
+	}
+
+	dump, err := httputil.DumpRequest(req, p.body)
+	if err != nil {
+		panic(err)
+	}
+	p.logger.Logf("%s", dump)
+}
+
+// Response implements Printer.Response.
+func (p DebugPrinter) Response(resp *http.Response, duration time.Duration) {
+	if resp == nil {
+		return
+	}
+
+	dump, err := httputil.DumpResponse(resp, p.body)
+	if err != nil {
+		panic(err)
+	}
+
+	text := strings.Replace(string(dump), "\r\n", "\n", -1)
+	lines := strings.SplitN(text, "\n", 2)
+
+	p.logger.Logf("%s %s\n%s", lines[0], duration, lines[1])
+}
+
+// CurlPrinter implements Printer. Uses http2curl to dump requests as
+// curl commands.
+type CurlPrinter struct {
+	logger Logger
+}
+
+// NewCurlPrinter returns a new CurlPrinter given a logger.
+func NewCurlPrinter(logger Logger) CurlPrinter {
+	return CurlPrinter{logger}
+}
+
+// Request implements Printer.Request.
+func (p CurlPrinter) Request(req *http.Request) {
+	if req != nil {
+		cmd, err := http2curl.GetCurlCommand(req)
+		if err != nil {
+			panic(err)
+		}
+		p.logger.Logf("%s", cmd.String())
+	}
+}
+
+// Response implements Printer.Response.
+func (CurlPrinter) Response(*http.Response, time.Duration) {
+}

+ 40 - 0
vendor/github.com/iris-contrib/httpexpect/reporter.go

@@ -0,0 +1,40 @@
+package httpexpect
+
+import (
+	"fmt"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// AssertReporter implements Reporter interface using `testify/assert'
+// package. Failures are non-fatal with this reporter.
+type AssertReporter struct {
+	backend *assert.Assertions
+}
+
+// NewAssertReporter returns a new AssertReporter object.
+func NewAssertReporter(t assert.TestingT) *AssertReporter {
+	return &AssertReporter{assert.New(t)}
+}
+
+// Errorf implements Reporter.Errorf.
+func (r *AssertReporter) Errorf(message string, args ...interface{}) {
+	r.backend.Fail(fmt.Sprintf(message, args...))
+}
+
+// RequireReporter implements Reporter interface using `testify/require'
+// package. Failures fatal with this reporter.
+type RequireReporter struct {
+	backend *require.Assertions
+}
+
+// NewRequireReporter returns a new RequireReporter object.
+func NewRequireReporter(t require.TestingT) *RequireReporter {
+	return &RequireReporter{require.New(t)}
+}
+
+// Errorf implements Reporter.Errorf.
+func (r *RequireReporter) Errorf(message string, args ...interface{}) {
+	r.backend.FailNow(fmt.Sprintf(message, args...))
+}

+ 860 - 0
vendor/github.com/iris-contrib/httpexpect/request.go

@@ -0,0 +1,860 @@
+package httpexpect
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"reflect"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/ajg/form"
+	"github.com/fatih/structs"
+	"github.com/gavv/monotime"
+	"github.com/google/go-querystring/query"
+	"github.com/imkira/go-interpol"
+)
+
+// Request provides methods to incrementally build http.Request object,
+// send it, and receive response.
+type Request struct {
+	config     Config
+	chain      chain
+	http       *http.Request
+	path       string
+	query      url.Values
+	form       url.Values
+	formbuf    *bytes.Buffer
+	multipart  *multipart.Writer
+	forcetype  bool
+	typesetter string
+	bodysetter string
+}
+
+// NewRequest returns a new Request object.
+//
+// method defines the HTTP method (GET, POST, PUT, etc.). path defines url path.
+//
+// Simple interpolation is allowed for {named} parameters in path:
+//  - if pathargs is given, it's used to substitute first len(pathargs) parameters,
+//    regardless of their names
+//  - if WithPath() or WithPathObject() is called, it's used to substitute given
+//    parameters by name
+//
+// For example:
+//  req := NewRequest(config, "POST", "/repos/{user}/{repo}", "gavv", "httpexpect")
+//  // path will be "/repos/gavv/httpexpect"
+//
+// Or:
+//  req := NewRequest(config, "POST", "/repos/{user}/{repo}")
+//  req.WithPath("user", "gavv")
+//  req.WithPath("repo", "httpexpect")
+//  // path will be "/repos/gavv/httpexpect"
+//
+// After interpolation, path is urlencoded and appended to Config.BaseURL,
+// separated by slash. If BaseURL ends with a slash and path (after interpolation)
+// starts with a slash, only single slash is inserted.
+func NewRequest(config Config, method, path string, pathargs ...interface{}) *Request {
+	if config.RequestFactory == nil {
+		panic("config.RequestFactory == nil")
+	}
+
+	if config.Client == nil {
+		panic("config.Client == nil")
+	}
+
+	chain := makeChain(config.Reporter)
+
+	n := 0
+	path, err := interpol.WithFunc(path, func(k string, w io.Writer) error {
+		if n < len(pathargs) {
+			if pathargs[n] == nil {
+				chain.fail(
+					"\nunexpected nil argument for url path format string:\n"+
+						" Request(\"%s\", %v...)", method, pathargs)
+			} else {
+				w.Write([]byte(fmt.Sprint(pathargs[n])))
+			}
+		} else {
+			w.Write([]byte("{"))
+			w.Write([]byte(k))
+			w.Write([]byte("}"))
+		}
+		n++
+		return nil
+	})
+	if err != nil {
+		chain.fail(err.Error())
+	}
+
+	hr, err := config.RequestFactory.NewRequest(method, config.BaseURL, nil)
+	if err != nil {
+		chain.fail(err.Error())
+	}
+
+	return &Request{
+		config: config,
+		chain:  chain,
+		path:   path,
+		http:   hr,
+	}
+}
+
+// WithPath substitutes named parameters in url path.
+//
+// value is converted to string using fmt.Sprint(). If there is no named
+// parameter '{key}' in url path, failure is reported.
+//
+// Named parameters are case-insensitive.
+//
+// Example:
+//  req := NewRequest(config, "POST", "/repos/{user}/{repo}")
+//  req.WithPath("user", "gavv")
+//  req.WithPath("repo", "httpexpect")
+//  // path will be "/repos/gavv/httpexpect"
+func (r *Request) WithPath(key string, value interface{}) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	ok := false
+	path, err := interpol.WithFunc(r.path, func(k string, w io.Writer) error {
+		if strings.EqualFold(k, key) {
+			if value == nil {
+				r.chain.fail(
+					"\nunexpected nil argument for url path format string:\n"+
+						" WithPath(\"%s\", %v)", key, value)
+			} else {
+				w.Write([]byte(fmt.Sprint(value)))
+				ok = true
+			}
+		} else {
+			w.Write([]byte("{"))
+			w.Write([]byte(k))
+			w.Write([]byte("}"))
+		}
+		return nil
+	})
+	if err == nil {
+		r.path = path
+	} else {
+		r.chain.fail(err.Error())
+		return r
+	}
+	if !ok {
+		r.chain.fail("\nunexpected key for url path format string:\n"+
+			" WithPath(\"%s\", %v)\n\npath:\n %q",
+			key, value, r.path)
+		return r
+	}
+	return r
+}
+
+// WithPathObject substitutes multiple named parameters in url path.
+//
+// object should be map or struct. If object is struct, it's converted
+// to map using https://github.com/fatih/structs. Structs may contain
+// "path" struct tag, similar to "json" struct tag for json.Marshal().
+//
+// Each map value is converted to string using fmt.Sprint(). If there
+// is no named parameter for some map '{key}' in url path, failure is
+// reported.
+//
+// Named parameters are case-insensitive.
+//
+// Example:
+//  type MyPath struct {
+//      Login string `path:"user"`
+//      Repo  string
+//  }
+//
+//  req := NewRequest(config, "POST", "/repos/{user}/{repo}")
+//  req.WithPathObject(MyPath{"gavv", "httpexpect"})
+//  // path will be "/repos/gavv/httpexpect"
+//
+//  req := NewRequest(config, "POST", "/repos/{user}/{repo}")
+//  req.WithPathObject(map[string]string{"user": "gavv", "repo": "httpexpect"})
+//  // path will be "/repos/gavv/httpexpect"
+func (r *Request) WithPathObject(object interface{}) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	if object == nil {
+		return r
+	}
+	var (
+		m  map[string]interface{}
+		ok bool
+	)
+	if reflect.Indirect(reflect.ValueOf(object)).Kind() == reflect.Struct {
+		s := structs.New(object)
+		s.TagName = "path"
+		m = s.Map()
+	} else {
+		m, ok = canonMap(&r.chain, object)
+		if !ok {
+			return r
+		}
+	}
+	for k, v := range m {
+		r.WithPath(k, v)
+	}
+	return r
+}
+
+// WithQuery adds query parameter to request URL.
+//
+// value is converted to string using fmt.Sprint() and urlencoded.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithQuery("a", 123)
+//  req.WithQuery("b", "foo")
+//  // URL is now http://example.com/path?a=123&b=foo
+func (r *Request) WithQuery(key string, value interface{}) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	if r.query == nil {
+		r.query = make(url.Values)
+	}
+	r.query.Add(key, fmt.Sprint(value))
+	return r
+}
+
+// WithQueryObject adds multiple query parameters to request URL.
+//
+// object is converted to query string using github.com/google/go-querystring
+// if it's a struct or pointer to struct, or github.com/ajg/form otherwise.
+//
+// Various object types are supported. Structs may contain "url" struct tag,
+// similar to "json" struct tag for json.Marshal().
+//
+// Example:
+//  type MyURL struct {
+//      A int    `url:"a"`
+//      B string `url:"b"`
+//  }
+//
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithQueryObject(MyURL{A: 123, B: "foo"})
+//  // URL is now http://example.com/path?a=123&b=foo
+//
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithQueryObject(map[string]interface{}{"a": 123, "b": "foo"})
+//  // URL is now http://example.com/path?a=123&b=foo
+func (r *Request) WithQueryObject(object interface{}) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	if object == nil {
+		return r
+	}
+	var (
+		q   url.Values
+		err error
+	)
+	if reflect.Indirect(reflect.ValueOf(object)).Kind() == reflect.Struct {
+		q, err = query.Values(object)
+		if err != nil {
+			r.chain.fail(err.Error())
+			return r
+		}
+	} else {
+		q, err = form.EncodeToValues(object)
+		if err != nil {
+			r.chain.fail(err.Error())
+			return r
+		}
+	}
+	if r.query == nil {
+		r.query = make(url.Values)
+	}
+	for k, v := range q {
+		r.query[k] = append(r.query[k], v...)
+	}
+	return r
+}
+
+// WithQueryString parses given query string and adds it to request URL.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithQuery("a", 11)
+//  req.WithQueryString("b=22&c=33")
+//  // URL is now http://example.com/path?a=11&b=22&c=33
+func (r *Request) WithQueryString(query string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	v, err := url.ParseQuery(query)
+	if err != nil {
+		r.chain.fail(err.Error())
+		return r
+	}
+	if r.query == nil {
+		r.query = make(url.Values)
+	}
+	for k, v := range v {
+		r.query[k] = append(r.query[k], v...)
+	}
+	return r
+}
+
+// WithURL sets request URL.
+//
+// This URL overwrites Config.BaseURL. Request path passed to NewRequest()
+// is appended to this URL, separated by slash if necessary.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "/path")
+//  req.WithURL("http://example.com")
+//  // URL is now http://example.com/path
+func (r *Request) WithURL(urlStr string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	if u, err := url.Parse(urlStr); err == nil {
+		r.http.URL = u
+	} else {
+		r.chain.fail(err.Error())
+	}
+	return r
+}
+
+// WithHeaders adds given headers to request.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithHeaders(map[string]string{
+//      "Content-Type": "application/json",
+//  })
+func (r *Request) WithHeaders(headers map[string]string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	for k, v := range headers {
+		r.WithHeader(k, v)
+	}
+	return r
+}
+
+// WithHeader adds given single header to request.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithHeader("Content-Type": "application/json")
+func (r *Request) WithHeader(k, v string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	switch http.CanonicalHeaderKey(k) {
+	case "Host":
+		r.http.Host = v
+	case "Content-Type":
+		if !r.forcetype {
+			delete(r.http.Header, "Content-Type")
+		}
+		r.forcetype = true
+		r.typesetter = "WithHeader"
+		r.http.Header.Add(k, v)
+	default:
+		r.http.Header.Add(k, v)
+	}
+	return r
+}
+
+// WithCookies adds given cookies to request.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithCookies(map[string]string{
+//      "foo": "aa",
+//      "bar": "bb",
+//  })
+func (r *Request) WithCookies(cookies map[string]string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	for k, v := range cookies {
+		r.WithCookie(k, v)
+	}
+	return r
+}
+
+// WithCookie adds given single cookie to request.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithCookie("name", "value")
+func (r *Request) WithCookie(k, v string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	r.http.AddCookie(&http.Cookie{
+		Name:  k,
+		Value: v,
+	})
+	return r
+}
+
+// WithBasicAuth sets the request's Authorization header to use HTTP
+// Basic Authentication with the provided username and password.
+//
+// With HTTP Basic Authentication the provided username and password
+// are not encrypted.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithBasicAuth("john", "secret")
+func (r *Request) WithBasicAuth(username, password string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	r.http.SetBasicAuth(username, password)
+	return r
+}
+
+// WithProto sets HTTP protocol version.
+//
+// proto should have form of "HTTP/{major}.{minor}", e.g. "HTTP/1.1".
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithProto("HTTP/2.0")
+func (r *Request) WithProto(proto string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	major, minor, ok := http.ParseHTTPVersion(proto)
+	if !ok {
+		r.chain.fail(
+			"\nunexpected protocol version %q, expected \"HTTP/{major}.{minor}\"",
+			proto)
+		return r
+	}
+	r.http.ProtoMajor = major
+	r.http.ProtoMinor = minor
+	return r
+}
+
+// WithChunked enables chunked encoding and sets request body reader.
+//
+// Expect() will read all available data from given reader. Content-Length
+// is not set, and "chunked" Transfer-Encoding is used.
+//
+// If protocol version is not at least HTTP/1.1 (required for chunked
+// encoding), failure is reported.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/upload")
+//  fh, _ := os.Open("data")
+//  defer fh.Close()
+//  req.WithHeader("Content-Type": "application/octet-stream")
+//  req.WithChunked(fh)
+func (r *Request) WithChunked(reader io.Reader) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	if !r.http.ProtoAtLeast(1, 1) {
+		r.chain.fail("chunked Transfer-Encoding requires at least \"HTTP/1.1\","+
+			"but \"HTTP/%d.%d\" is enabled", r.http.ProtoMajor, r.http.ProtoMinor)
+		return r
+	}
+	r.setBody("WithChunked", reader, -1, false)
+	return r
+}
+
+// WithBytes sets request body to given slice of bytes.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithHeader("Content-Type": "application/json")
+//  req.WithBytes([]byte(`{"foo": 123}`))
+func (r *Request) WithBytes(b []byte) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	if b == nil {
+		r.setBody("WithBytes", nil, 0, false)
+	} else {
+		r.setBody("WithBytes", bytes.NewReader(b), len(b), false)
+	}
+	return r
+}
+
+// WithText sets Content-Type header to "text/plain; charset=utf-8" and
+// sets body to given string.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithText("hello, world!")
+func (r *Request) WithText(s string) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	r.setType("WithText", "text/plain; charset=utf-8", false)
+	r.setBody("WithText", strings.NewReader(s), len(s), false)
+	return r
+}
+
+// WithJSON sets Content-Type header to "application/json; charset=utf-8"
+// and sets body to object, marshaled using json.Marshal().
+//
+// Example:
+//  type MyJSON struct {
+//      Foo int `json:"foo"`
+//  }
+//
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithJSON(MyJSON{Foo: 123})
+//
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithJSON(map[string]interface{}{"foo": 123})
+func (r *Request) WithJSON(object interface{}) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	b, err := json.Marshal(object)
+	if err != nil {
+		r.chain.fail(err.Error())
+		return r
+	}
+
+	r.setType("WithJSON", "application/json; charset=utf-8", false)
+	r.setBody("WithJSON", bytes.NewReader(b), len(b), false)
+
+	return r
+}
+
+// WithForm sets Content-Type header to "application/x-www-form-urlencoded"
+// or (if WithMultipart() was called) "multipart/form-data", converts given
+// object to url.Values using github.com/ajg/form, and adds it to request body.
+//
+// Various object types are supported, including maps and structs. Structs may
+// contain "form" struct tag, similar to "json" struct tag for json.Marshal().
+// See https://github.com/ajg/form for details.
+//
+// Multiple WithForm(), WithFormField(), and WithFile() calls may be combined.
+// If WithMultipart() is called, it should be called first.
+//
+// Example:
+//  type MyForm struct {
+//      Foo int `form:"foo"`
+//  }
+//
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithForm(MyForm{Foo: 123})
+//
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithForm(map[string]interface{}{"foo": 123})
+func (r *Request) WithForm(object interface{}) *Request {
+	if r.chain.failed() {
+		return r
+	}
+
+	f, err := form.EncodeToValues(object)
+	if err != nil {
+		r.chain.fail(err.Error())
+		return r
+	}
+
+	if r.multipart != nil {
+		r.setType("WithForm", "multipart/form-data", false)
+
+		var keys []string
+		for k := range f {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			if err := r.multipart.WriteField(k, f[k][0]); err != nil {
+				r.chain.fail(err.Error())
+				return r
+			}
+		}
+	} else {
+		r.setType("WithForm", "application/x-www-form-urlencoded", false)
+
+		if r.form == nil {
+			r.form = make(url.Values)
+		}
+		for k, v := range f {
+			r.form[k] = append(r.form[k], v...)
+		}
+	}
+
+	return r
+}
+
+// WithFormField sets Content-Type header to "application/x-www-form-urlencoded"
+// or (if WithMultipart() was called) "multipart/form-data", converts given
+// value to string using fmt.Sprint(), and adds it to request body.
+//
+// Multiple WithForm(), WithFormField(), and WithFile() calls may be combined.
+// If WithMultipart() is called, it should be called first.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithFormField("foo", 123).
+//      WithFormField("bar", 456)
+func (r *Request) WithFormField(key string, value interface{}) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	if r.multipart != nil {
+		r.setType("WithFormField", "multipart/form-data", false)
+
+		err := r.multipart.WriteField(key, fmt.Sprint(value))
+		if err != nil {
+			r.chain.fail(err.Error())
+			return r
+		}
+	} else {
+		r.setType("WithFormField", "application/x-www-form-urlencoded", false)
+
+		if r.form == nil {
+			r.form = make(url.Values)
+		}
+		r.form[key] = append(r.form[key], fmt.Sprint(value))
+	}
+	return r
+}
+
+// WithFile sets Content-Type header to "multipart/form-data", reads given
+// file and adds its contents to request body.
+//
+// If reader is given, it's used to read file contents. Otherwise, os.Open()
+// is used to read a file with given path.
+//
+// Multiple WithForm(), WithFormField(), and WithFile() calls may be combined.
+// WithMultipart() should be called before WithFile(), otherwise WithFile()
+// fails.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithFile("avatar", "./john.png")
+//
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  fh, _ := os.Open("./john.png")
+//  req.WithMultipart().
+//      WithFile("avatar", "john.png", fh)
+//  fh.Close()
+func (r *Request) WithFile(key, path string, reader ...io.Reader) *Request {
+	if r.chain.failed() {
+		return r
+	}
+
+	r.setType("WithFile", "multipart/form-data", false)
+
+	if r.multipart == nil {
+		r.chain.fail("WithFile requires WithMultipart to be called first")
+		return r
+	}
+
+	wr, err := r.multipart.CreateFormFile(key, path)
+	if err != nil {
+		r.chain.fail(err.Error())
+		return r
+	}
+
+	var rd io.Reader
+	if len(reader) != 0 && reader[0] != nil {
+		rd = reader[0]
+	} else {
+		f, err := os.Open(path)
+		if err != nil {
+			r.chain.fail(err.Error())
+			return r
+		}
+		rd = f
+		defer f.Close()
+	}
+
+	if _, err := io.Copy(wr, rd); err != nil {
+		r.chain.fail(err.Error())
+		return r
+	}
+
+	return r
+}
+
+// WithFileBytes is like WithFile, but uses given slice of bytes as the
+// file contents.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  fh, _ := os.Open("./john.png")
+//  b, _ := ioutil.ReadAll(fh)
+//  req.WithMultipart().
+//      WithFileBytes("avatar", "john.png", b)
+//  fh.Close()
+func (r *Request) WithFileBytes(key, path string, data []byte) *Request {
+	if r.chain.failed() {
+		return r
+	}
+	return r.WithFile(key, path, bytes.NewReader(data))
+}
+
+// WithMultipart sets Content-Type header to "multipart/form-data".
+//
+// After this call, WithForm() and WithFormField() switch to multipart
+// form instead of urlencoded form.
+//
+// If WithMultipart() is called, it should be called before WithForm(),
+// WithFormField(), and WithFile().
+//
+// WithFile() always requires WithMultipart() to be called first.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithMultipart().
+//      WithForm(map[string]interface{}{"foo": 123})
+func (r *Request) WithMultipart() *Request {
+	if r.chain.failed() {
+		return r
+	}
+
+	r.setType("WithMultipart", "multipart/form-data", false)
+
+	if r.multipart == nil {
+		r.formbuf = new(bytes.Buffer)
+		r.multipart = multipart.NewWriter(r.formbuf)
+		r.setBody("WithMultipart", r.formbuf, 0, false)
+	}
+
+	return r
+}
+
+// Expect constructs http.Request, sends it, receives http.Response, and
+// returns a new Response object to inspect received response.
+//
+// Request is sent using Config.Client interface.
+//
+// Example:
+//  req := NewRequest(config, "PUT", "http://example.com/path")
+//  req.WithJSON(map[string]interface{}{"foo": 123})
+//  resp := req.Expect()
+//  resp.Status(http.StatusOK)
+func (r *Request) Expect() *Response {
+	r.encodeRequest()
+
+	resp, elapsed := r.sendRequest()
+
+	return makeResponse(r.chain, resp, elapsed)
+}
+
+func (r *Request) encodeRequest() {
+	if r.chain.failed() {
+		return
+	}
+
+	r.http.URL.Path = concatPaths(r.http.URL.Path, r.path)
+
+	if r.query != nil {
+		r.http.URL.RawQuery = r.query.Encode()
+	}
+
+	if r.multipart != nil {
+		if err := r.multipart.Close(); err != nil {
+			r.chain.fail(err.Error())
+			return
+		}
+
+		r.setType("Expect", r.multipart.FormDataContentType(), true)
+		r.setBody("Expect", r.formbuf, r.formbuf.Len(), true)
+	} else if r.form != nil {
+		s := r.form.Encode()
+		r.setBody("WithForm or WithFormField", strings.NewReader(s), len(s), false)
+	}
+}
+
+func (r *Request) sendRequest() (resp *http.Response, elapsed time.Duration) {
+	if r.chain.failed() {
+		return
+	}
+
+	for _, printer := range r.config.Printers {
+		printer.Request(r.http)
+	}
+
+	start := monotime.Now()
+
+	resp, err := r.config.Client.Do(r.http)
+
+	elapsed = monotime.Since(start)
+
+	if err != nil {
+		r.chain.fail(err.Error())
+		return
+	}
+
+	for _, printer := range r.config.Printers {
+		printer.Response(resp, elapsed)
+	}
+
+	return
+}
+
+func (r *Request) setType(newSetter, newType string, overwrite bool) {
+	if r.forcetype {
+		return
+	}
+
+	if !overwrite {
+		previousType := r.http.Header.Get("Content-Type")
+
+		if previousType != "" && previousType != newType {
+			r.chain.fail(
+				"\nambiguous request \"Content-Type\" header values:\n %q (set by %s)\n\n"+
+					"and:\n %q (wanted by %s)",
+				previousType, r.typesetter,
+				newType, newSetter)
+			return
+		}
+	}
+
+	r.typesetter = newSetter
+	r.http.Header["Content-Type"] = []string{newType}
+}
+
+func (r *Request) setBody(setter string, reader io.Reader, len int, overwrite bool) {
+	if !overwrite && r.bodysetter != "" {
+		r.chain.fail(
+			"\nambiguous request body contents:\n  set by %s\n  overwritten by %s",
+			r.bodysetter, setter)
+		return
+	}
+
+	if len > 0 && reader == nil {
+		panic("invalid length")
+	}
+
+	if reader == nil {
+		r.http.Body = nil
+		r.http.ContentLength = 0
+	} else {
+		r.http.Body = ioutil.NopCloser(reader)
+		r.http.ContentLength = int64(len)
+	}
+
+	r.bodysetter = setter
+}
+
+func concatPaths(a, b string) string {
+	if a == "" {
+		return b
+	}
+	if b == "" {
+		return a
+	}
+	a = strings.TrimSuffix(a, "/")
+	b = strings.TrimPrefix(b, "/")
+	return a + "/" + b
+}

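Putting request.go together, here is a hedged sketch of a typical builder chain. The e.POST entry point lives in expect.go (vendored in this commit but not reproduced above); the route, token, and payload are placeholders.

```go
package knowoapi_test // hypothetical

import (
	"net/http"

	"github.com/iris-contrib/httpexpect"
)

// createRepo shows a typical fluent chain; e is created elsewhere,
// e.g. by httpexpect.WithConfig or by the iris httptest helper shown later.
func createRepo(e *httpexpect.Expect) {
	e.POST("/repos/{user}/{repo}", "gavv", "httpexpect"). // positional path args, as in NewRequest
		WithQuery("verbose", true).                   // appends ?verbose=true
		WithHeader("Authorization", "Bearer <token>"). // placeholder credential
		WithJSON(map[string]interface{}{"private": false}).
		Expect().
		Status(http.StatusCreated)
}
```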
+ 515 - 0
vendor/github.com/iris-contrib/httpexpect/response.go

@@ -0,0 +1,515 @@
+package httpexpect
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/ajg/form"
+)
+
+// StatusRange is enum for response status ranges.
+type StatusRange int
+
+const (
+	// Status1xx defines "Informational" status codes.
+	Status1xx StatusRange = 100
+
+	// Status2xx defines "Success" status codes.
+	Status2xx StatusRange = 200
+
+	// Status3xx defines "Redirection" status codes.
+	Status3xx StatusRange = 300
+
+	// Status4xx defines "Client Error" status codes.
+	Status4xx StatusRange = 400
+
+	// Status5xx defines "Server Error" status codes.
+	Status5xx StatusRange = 500
+)
+
+// Response provides methods to inspect attached http.Response object.
+type Response struct {
+	chain chain
+	resp  *http.Response
+	// Content "eaten" on makeResponse, so we can't manually read the `Expect().Raw().Body`
+	// therefore we just export this for now, we have a solution like we do on Iris
+	// to use a noop reader to not "eat" it but we don't need it here.
+	// Usage: `Expect().Content`.
+	Content []byte
+	cookies []*http.Cookie
+	time    time.Duration
+}
+
+// NewResponse returns a new Response given a reporter used to report
+// failures and http.Response to be inspected.
+//
+// Both reporter and response should not be nil. If response is nil,
+// failure is reported.
+//
+// If duration is given, it defines response time to be reported by
+// response.Duration().
+func NewResponse(
+	reporter Reporter, response *http.Response, duration ...time.Duration) *Response {
+	var dr time.Duration
+	if len(duration) > 0 {
+		dr = duration[0]
+	}
+	return makeResponse(makeChain(reporter), response, dr)
+}
+
+func makeResponse(
+	chain chain, response *http.Response, duration time.Duration) *Response {
+	var content []byte
+	var cookies []*http.Cookie
+	if response != nil {
+		content = getContent(&chain, response)
+		cookies = response.Cookies()
+	} else {
+		chain.fail("expected non-nil response")
+	}
+	return &Response{
+		chain:   chain,
+		resp:    response,
+		Content: content,
+		cookies: cookies,
+		time:    duration,
+	}
+}
+
+func getContent(chain *chain, resp *http.Response) []byte {
+	if resp.Body == nil {
+		return []byte{}
+	}
+
+	content, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		chain.fail(err.Error())
+		return nil
+	}
+
+	return content
+}
+
+// Raw returns underlying http.Response object.
+// This is the value originally passed to NewResponse.
+func (r *Response) Raw() *http.Response {
+	return r.resp
+}
+
+// Duration returns a new Number object that may be used to inspect
+// response time, in nanoseconds.
+//
+// Response time is a time interval starting just before request is sent
+// and ending right after response is received, retrieved from monotonic
+// clock source.
+//
+// Example:
+//  resp := NewResponse(t, response, time.Duration(10000000))
+//  resp.Duration().Equal(10 * time.Millisecond)
+func (r *Response) Duration() *Number {
+	return &Number{r.chain, float64(r.time)}
+}
+
+// Status succeeds if response contains given status code.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.Status(http.StatusOK)
+func (r *Response) Status(status int) *Response {
+	if r.chain.failed() {
+		return r
+	}
+	r.checkEqual("status", statusCodeText(status), statusCodeText(r.resp.StatusCode))
+	return r
+}
+
+// StatusRange succeeds if response status belongs to given range.
+//
+// Supported ranges:
+//  - Status1xx - Informational
+//  - Status2xx - Success
+//  - Status3xx - Redirection
+//  - Status4xx - Client Error
+//  - Status5xx - Server Error
+//
+// See https://en.wikipedia.org/wiki/List_of_HTTP_status_codes.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.StatusRange(Status2xx)
+func (r *Response) StatusRange(rn StatusRange) *Response {
+	if r.chain.failed() {
+		return r
+	}
+
+	status := statusCodeText(r.resp.StatusCode)
+
+	actual := statusRangeText(r.resp.StatusCode)
+	expected := statusRangeText(int(rn))
+
+	if actual == "" || actual != expected {
+		if actual == "" {
+			r.chain.fail("\nexpected status from range:\n %q\n\nbut got:\n %q",
+				expected, status)
+		} else {
+			r.chain.fail(
+				"\nexpected status from range:\n %q\n\nbut got:\n %q (%q)",
+				expected, actual, status)
+		}
+	}
+
+	return r
+}
+
+func statusCodeText(code int) string {
+	if s := http.StatusText(code); s != "" {
+		return strconv.Itoa(code) + " " + s
+	}
+	return strconv.Itoa(code)
+}
+
+func statusRangeText(code int) string {
+	switch {
+	case code >= 100 && code < 200:
+		return "1xx Informational"
+	case code >= 200 && code < 300:
+		return "2xx Success"
+	case code >= 300 && code < 400:
+		return "3xx Redirection"
+	case code >= 400 && code < 500:
+		return "4xx Client Error"
+	case code >= 500 && code < 600:
+		return "5xx Server Error"
+	default:
+		return ""
+	}
+}
+
+// Headers returns a new Object that may be used to inspect header map.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.Headers().Value("Content-Type").String().Equal("application/json")
+func (r *Response) Headers() *Object {
+	var value map[string]interface{}
+	if !r.chain.failed() {
+		value, _ = canonMap(&r.chain, r.resp.Header)
+	}
+	return &Object{r.chain, value}
+}
+
+// Header returns a new String object that may be used to inspect given header.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.Header("Content-Type").Equal("application-json")
+//  resp.Header("Date").DateTime().Le(time.Now())
+func (r *Response) Header(header string) *String {
+	value := ""
+	if !r.chain.failed() {
+		value = r.resp.Header.Get(header)
+	}
+	return &String{r.chain, value}
+}
+
+// Cookies returns a new Array object with all cookie names set by this response.
+// Returned Array contains a String value for every cookie name.
+//
+// Note that this returns only cookies set by Set-Cookie headers of this response.
+// It doesn't return session cookies from previous responses, which may be stored
+// in a cookie jar.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.Cookies().Contains("session")
+func (r *Response) Cookies() *Array {
+	if r.chain.failed() {
+		return &Array{r.chain, nil}
+	}
+	names := []interface{}{}
+	for _, c := range r.cookies {
+		names = append(names, c.Name)
+	}
+	return &Array{r.chain, names}
+}
+
+// Cookie returns a new Cookie object that may be used to inspect given cookie
+// set by this response.
+//
+// Note that this returns only cookies set by Set-Cookie headers of this response.
+// It doesn't return session cookies from previous responses, which may be stored
+// in a cookie jar.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.Cookie("session").Domain().Equal("example.com")
+func (r *Response) Cookie(name string) *Cookie {
+	if r.chain.failed() {
+		return &Cookie{r.chain, nil}
+	}
+	names := []string{}
+	for _, c := range r.cookies {
+		if c.Name == name {
+			return &Cookie{r.chain, c}
+		}
+		names = append(names, c.Name)
+	}
+	r.chain.fail("\nexpected response with cookie:\n %q\n\nbut got only cookies:\n%s",
+		name, dumpValue(names))
+	return &Cookie{r.chain, nil}
+}
+
+// Body returns a new String object that may be used to inspect response body.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.Body().NotEmpty()
+//  resp.Body().Length().Equal(100)
+func (r *Response) Body() *String {
+	return &String{r.chain, string(r.Content)}
+}
+
+// NoContent succeeds if response contains empty Content-Type header and
+// empty body.
+func (r *Response) NoContent() *Response {
+	if r.chain.failed() {
+		return r
+	}
+
+	contentType := r.resp.Header.Get("Content-Type")
+
+	r.checkEqual("\"Content-Type\" header", "", contentType)
+	r.checkEqual("body", "", string(r.Content))
+
+	return r
+}
+
+// ContentType succeeds if response contains Content-Type header with given
+// media type and charset.
+//
+// If charset is omitted, and mediaType is non-empty, Content-Type header
+// should contain empty or utf-8 charset.
+//
+// If charset is omitted, and mediaType is also empty, Content-Type header
+// should contain no charset.
+func (r *Response) ContentType(mediaType string, charset ...string) *Response {
+	r.checkContentType(mediaType, charset...)
+	return r
+}
+
+// ContentEncoding succeeds if response has exactly given Content-Encoding list.
+// Common values are empty, "gzip", "compress", "deflate", "identity" and "br".
+func (r *Response) ContentEncoding(encoding ...string) *Response {
+	if r.chain.failed() {
+		return r
+	}
+	r.checkEqual("\"Content-Encoding\" header", encoding, r.resp.Header["Content-Encoding"])
+	return r
+}
+
+// TransferEncoding succeeds if response contains given Transfer-Encoding list.
+// Common values are empty, "chunked" and "identity".
+func (r *Response) TransferEncoding(encoding ...string) *Response {
+	if r.chain.failed() {
+		return r
+	}
+	r.checkEqual("\"Transfer-Encoding\" header", encoding, r.resp.TransferEncoding)
+	return r
+}
+
+// Text returns a new String object that may be used to inspect response body.
+//
+// Text succeeds if response contains "text/plain" Content-Type header
+// with empty or "utf-8" charset.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.Text().Equal("hello, world!")
+func (r *Response) Text() *String {
+	var content string
+
+	if !r.chain.failed() && r.checkContentType("text/plain") {
+		content = string(r.Content)
+	}
+
+	return &String{r.chain, content}
+}
+
+// Form returns a new Object that may be used to inspect form contents
+// of response.
+//
+// Form succeeds if response contains "application/x-www-form-urlencoded"
+// Content-Type header and if form may be decoded from response body.
+// Decoding is performed using https://github.com/ajg/form.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.Form().Value("foo").Equal("bar")
+func (r *Response) Form() *Object {
+	object := r.getForm()
+	return &Object{r.chain, object}
+}
+
+func (r *Response) getForm() map[string]interface{} {
+	if r.chain.failed() {
+		return nil
+	}
+
+	if !r.checkContentType("application/x-www-form-urlencoded", "") {
+		return nil
+	}
+
+	decoder := form.NewDecoder(bytes.NewReader(r.Content))
+
+	var object map[string]interface{}
+	if err := decoder.Decode(&object); err != nil {
+		r.chain.fail(err.Error())
+		return nil
+	}
+
+	return object
+}
+
+// JSON returns a new Value object that may be used to inspect JSON contents
+// of response.
+//
+// JSON succeeds if response contains "application/json" Content-Type header
+// with empty or "utf-8" charset and if JSON may be decoded from response body.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.JSON().Array().Elements("foo", "bar")
+func (r *Response) JSON() *Value {
+	value := r.getJSON()
+	return &Value{r.chain, value}
+}
+
+func (r *Response) getJSON() interface{} {
+	if r.chain.failed() {
+		return nil
+	}
+
+	if !r.checkContentType("application/json") {
+		return nil
+	}
+
+	var value interface{}
+	if err := json.Unmarshal(r.Content, &value); err != nil {
+		r.chain.fail(err.Error())
+		return nil
+	}
+
+	return value
+}
+
+// JSONP returns a new Value object that may be used to inspect JSONP contents
+// of response.
+//
+// JSONP succeeds if response contains "application/javascript" Content-Type
+// header with empty or "utf-8" charset and response body of the following form:
+//  callback(<valid json>);
+// or:
+//  callback(<valid json>)
+//
+// Whitespaces are allowed.
+//
+// Example:
+//  resp := NewResponse(t, response)
+//  resp.JSONP("myCallback").Array().Elements("foo", "bar")
+func (r *Response) JSONP(callback string) *Value {
+	value := r.getJSONP(callback)
+	return &Value{r.chain, value}
+}
+
+var (
+	jsonp = regexp.MustCompile(`^\s*([^\s(]+)\s*\((.*)\)\s*;*\s*$`)
+)
+
+func (r *Response) getJSONP(callback string) interface{} {
+	if r.chain.failed() {
+		return nil
+	}
+
+	if !r.checkContentType("application/javascript") {
+		return nil
+	}
+
+	m := jsonp.FindSubmatch(r.Content)
+	if len(m) != 3 || string(m[1]) != callback {
+		r.chain.fail(
+			"\nexpected JSONP body in form of:\n \"%s(<valid json>)\"\n\nbut got:\n %q\n",
+			callback,
+			string(r.Content))
+		return nil
+	}
+
+	var value interface{}
+	if err := json.Unmarshal(m[2], &value); err != nil {
+		r.chain.fail(err.Error())
+		return nil
+	}
+
+	return value
+}
+
+func (r *Response) checkContentType(expectedType string, expectedCharset ...string) bool {
+	if r.chain.failed() {
+		return false
+	}
+
+	contentType := r.resp.Header.Get("Content-Type")
+
+	if expectedType == "" && len(expectedCharset) == 0 {
+		if contentType == "" {
+			return true
+		}
+	}
+
+	mediaType, params, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		r.chain.fail("\ngot invalid \"Content-Type\" header %q", contentType)
+		return false
+	}
+
+	if mediaType != expectedType {
+		r.chain.fail(
+			"\nexpected \"Content-Type\" header with %q media type,"+
+				"\nbut got %q", expectedType, mediaType)
+		return false
+	}
+
+	charset := params["charset"]
+
+	if len(expectedCharset) == 0 {
+		if charset != "" && !strings.EqualFold(charset, "utf-8") {
+			r.chain.fail(
+				"\nexpected \"Content-Type\" header with \"utf-8\" or empty charset,"+
+					"\nbut got %q", charset)
+			return false
+		}
+	} else {
+		if !strings.EqualFold(charset, expectedCharset[0]) {
+			r.chain.fail(
+				"\nexpected \"Content-Type\" header with %q charset,"+
+					"\nbut got %q", expectedCharset[0], charset)
+			return false
+		}
+	}
+
+	return true
+}
+
+func (r *Response) checkEqual(what string, expected, actual interface{}) {
+	if !reflect.DeepEqual(expected, actual) {
+		r.chain.fail("\nexpected %s equal to:\n%s\n\nbut got:\n%s", what,
+			dumpValue(expected), dumpValue(actual))
+	}
+}

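Because NewResponse accepts any *http.Response, the matchers above can be exercised without a live server. A self-contained sketch follows; the fabricated response is illustrative, and Object.Value and Number.Equal come from object.go and number.go, which this commit also vendors.

```go
package knowoapi_test // hypothetical

import (
	"io/ioutil"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/iris-contrib/httpexpect"
)

func TestInspectResponse(t *testing.T) {
	// Hand-built response standing in for the result of Request.Expect().
	raw := &http.Response{
		StatusCode: http.StatusOK,
		Header:     http.Header{"Content-Type": {"application/json; charset=utf-8"}},
		Body:       ioutil.NopCloser(strings.NewReader(`{"foo": 123}`)),
	}
	resp := httpexpect.NewResponse(httpexpect.NewAssertReporter(t), raw, 10*time.Millisecond)

	resp.Status(http.StatusOK).
		ContentType("application/json", "utf-8")
	resp.JSON().Object().Value("foo").Number().Equal(123)
	resp.Duration().Equal(10 * time.Millisecond) // mirrors the Duration() doc comment
}
```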
+ 320 - 0
vendor/github.com/iris-contrib/httpexpect/string.go

@@ -0,0 +1,320 @@
+package httpexpect
+
+import (
+	"net/http"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// String provides methods to inspect attached string value
+// (Go representation of JSON string).
+type String struct {
+	chain chain
+	value string
+}
+
+// NewString returns a new String given a reporter used to report failures
+// and value to be inspected.
+//
+// reporter should not be nil.
+//
+// Example:
+//  str := NewString(t, "Hello")
+func NewString(reporter Reporter, value string) *String {
+	return &String{makeChain(reporter), value}
+}
+
+// Raw returns underlying value attached to String.
+// This is the value originally passed to NewString.
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  assert.Equal(t, "Hello", str.Raw())
+func (s *String) Raw() string {
+	return s.value
+}
+
+// Path is similar to Value.Path.
+func (s *String) Path(path string) *Value {
+	return getPath(&s.chain, s.value, path)
+}
+
+// Schema is similar to Value.Schema.
+func (s *String) Schema(schema interface{}) *String {
+	checkSchema(&s.chain, s.value, schema)
+	return s
+}
+
+// Length returns a new Number object that may be used to inspect string length.
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.Length().Equal(5)
+func (s *String) Length() *Number {
+	return &Number{s.chain, float64(len(s.value))}
+}
+
+// DateTime parses date/time from the string and returns a new DateTime object.
+//
+// If layout is given, DateTime() uses time.Parse() with the given layout.
+// Otherwise, it uses http.ParseTime(). If a parsing error occurs,
+// DateTime reports failure and returns an empty (but non-nil) object.
+//
+// Example:
+//   str := NewString(t, "Tue, 15 Nov 1994 08:12:31 GMT")
+//   str.DateTime().Lt(time.Now())
+//
+//   str := NewString(t, "15 Nov 94 08:12 GMT")
+//   str.DateTime(time.RFC822).Lt(time.Now())
+func (s *String) DateTime(layout ...string) *DateTime {
+	if s.chain.failed() {
+		return &DateTime{s.chain, time.Unix(0, 0)}
+	}
+	var (
+		t   time.Time
+		err error
+	)
+	if len(layout) != 0 {
+		t, err = time.Parse(layout[0], s.value)
+	} else {
+		t, err = http.ParseTime(s.value)
+	}
+	if err != nil {
+		s.chain.fail(err.Error())
+		return &DateTime{s.chain, time.Unix(0, 0)}
+	}
+	return &DateTime{s.chain, t}
+}
+
+// Empty succeeds if string is empty.
+//
+// Example:
+//  str := NewString(t, "")
+//  str.Empty()
+func (s *String) Empty() *String {
+	return s.Equal("")
+}
+
+// NotEmpty succeeds if string is non-empty.
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.NotEmpty()
+func (s *String) NotEmpty() *String {
+	return s.NotEqual("")
+}
+
+// Equal succeeds if string is equal to another string.
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.Equal("Hello")
+func (s *String) Equal(value string) *String {
+	if !(s.value == value) {
+		s.chain.fail("\nexpected string equal to:\n %q\n\nbut got:\n %q",
+			value, s.value)
+	}
+	return s
+}
+
+// NotEqual succeeds if string is not equal to another string.
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.NotEqual("Goodbye")
+func (s *String) NotEqual(value string) *String {
+	if !(s.value != value) {
+		s.chain.fail("\nexpected string not equal to:\n %q", value)
+	}
+	return s
+}
+
+// EqualFold succeeds if string is equal to another string under Unicode case-folding
+// (case-insensitive match).
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.EqualFold("hELLo")
+func (s *String) EqualFold(value string) *String {
+	if !strings.EqualFold(s.value, value) {
+		s.chain.fail(
+			"\nexpected string equal to (case-insensitive):\n %q\n\nbut got:\n %q",
+			value, s.value)
+	}
+	return s
+}
+
+// NotEqualFold succeeds if string is not equal to another string under Unicode
+// case-folding (case-insensitive match).
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.NotEqualFold("gOODBYe")
+func (s *String) NotEqualFold(value string) *String {
+	if strings.EqualFold(s.value, value) {
+		s.chain.fail(
+			"\nexpected string not equal to (case-insensitive):\n %q\n\nbut got:\n %q",
+			value, s.value)
+	}
+	return s
+}
+
+// Contains succeeds if string contains given substr.
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.Contains("ell")
+func (s *String) Contains(value string) *String {
+	if !strings.Contains(s.value, value) {
+		s.chain.fail(
+			"\nexpected string containing substring:\n %q\n\nbut got:\n %q",
+			value, s.value)
+	}
+	return s
+}
+
+// NotContains succeeds if string doesn't contain given substr.
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.NotContains("bye")
+func (s *String) NotContains(value string) *String {
+	if strings.Contains(s.value, value) {
+		s.chain.fail(
+			"\nexpected string not containing substring:\n %q\n\nbut got:\n %q",
+			value, s.value)
+	}
+	return s
+}
+
+// ContainsFold succeeds if string contains given substring under Unicode case-folding
+// (case-insensitive match).
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.ContainsFold("ELL")
+func (s *String) ContainsFold(value string) *String {
+	if !strings.Contains(strings.ToLower(s.value), strings.ToLower(value)) {
+		s.chain.fail(
+			"\nexpected string containing substring (case-insensitive):\n %q"+
+				"\n\nbut got:\n %q", value, s.value)
+	}
+	return s
+}
+
+// NotContainsFold succeeds if string doesn't contain given substring under Unicode
+// case-folding (case-insensitive match).
+//
+// Example:
+//  str := NewString(t, "Hello")
+//  str.NotContainsFold("BYE")
+func (s *String) NotContainsFold(value string) *String {
+	if strings.Contains(strings.ToLower(s.value), strings.ToLower(value)) {
+		s.chain.fail(
+			"\nexpected string not containing substring (case-insensitive):\n %q"+
+				"\n\nbut got:\n %q", value, s.value)
+	}
+	return s
+}
+
+// Match matches the string with given regexp and returns a new Match object
+// with found submatches.
+//
+// If regexp is invalid or string doesn't match regexp, Match fails and returns
+// empty (but non-nil) object. regexp.Compile is used to construct regexp, and
+// Regexp.FindStringSubmatch is used to construct matches.
+//
+// Example:
+//   s := NewString(t, "http://example.com/users/john")
+//   m := s.Match(`http://(?P<host>.+)/users/(?P<user>.+)`)
+//
+//   m.NotEmpty()
+//   m.Length().Equal(3)
+//
+//   m.Index(0).Equal("http://example.com/users/john")
+//   m.Index(1).Equal("example.com")
+//   m.Index(2).Equal("john")
+//
+//   m.Name("host").Equal("example.com")
+//   m.Name("user").Equal("john")
+func (s *String) Match(re string) *Match {
+	r, err := regexp.Compile(re)
+	if err != nil {
+		s.chain.fail(err.Error())
+		return makeMatch(s.chain, nil, nil)
+	}
+
+	m := r.FindStringSubmatch(s.value)
+	if m == nil {
+		s.chain.fail("\nexpected string matching regexp:\n `%s`\n\nbut got:\n %q",
+			re, s.value)
+		return makeMatch(s.chain, nil, nil)
+	}
+
+	return makeMatch(s.chain, m, r.SubexpNames())
+}
+
+// MatchAll finds all matches in the string for the given regexp and returns
+// a list of found matches.
+//
+// If regexp is invalid or string doesn't match regexp, MatchAll fails and
+// returns empty (but non-nil) slice. regexp.Compile is used to construct
+// regexp, and Regexp.FindAllStringSubmatch is used to find matches.
+//
+// Example:
+//   s := NewString(t,
+//      "http://example.com/users/john http://example.com/users/bob")
+//
+//   m := s.MatchAll(`http://(?P<host>\S+)/users/(?P<user>\S+)`)
+//
+//   m[0].Name("user").Equal("john")
+//   m[1].Name("user").Equal("bob")
+func (s *String) MatchAll(re string) []Match {
+	r, err := regexp.Compile(re)
+	if err != nil {
+		s.chain.fail(err.Error())
+		return []Match{}
+	}
+
+	matches := r.FindAllStringSubmatch(s.value, -1)
+	if matches == nil {
+		s.chain.fail("\nexpected string matching regexp:\n `%s`\n\nbut got:\n %q",
+			re, s.value)
+		return []Match{}
+	}
+
+	ret := []Match{}
+	for _, m := range matches {
+		ret = append(ret, *makeMatch(
+			s.chain,
+			m,
+			r.SubexpNames()))
+	}
+
+	return ret
+}
+
+// NotMatch succeeds if the string doesn't match the given regexp.
+//
+// regexp.Compile is used to construct regexp, and Regexp.MatchString
+// is used to perform match.
+//
+// Example:
+//   s := NewString(t, "a")
+//   s.NotMatch(`[^a]`)
+func (s *String) NotMatch(re string) *String {
+	r, err := regexp.Compile(re)
+	if err != nil {
+		s.chain.fail(err.Error())
+		return s
+	}
+
+	if r.MatchString(s.value) {
+		s.chain.fail("\nexpected string not matching regexp:\n `%s`\n\nbut got:\n %q",
+			re, s.value)
+		return s
+	}
+
+	return s
+}

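A short sketch of the String matchers, including the named-submatch flow described in the Match doc comment. Match.Name comes from match.go, also vendored in this commit; the URL under test is a placeholder.

```go
package knowoapi_test // hypothetical

import (
	"testing"

	"github.com/iris-contrib/httpexpect"
)

func TestStringMatchers(t *testing.T) {
	// *testing.T satisfies the Reporter interface via t.Errorf.
	s := httpexpect.NewString(t, "http://example.com/users/john")

	s.NotEmpty().
		Contains("/users/").
		ContainsFold("USERS").
		NotContains("admin")

	// Named submatches, exactly as documented for Match.
	m := s.Match(`http://(?P<host>.+)/users/(?P<user>.+)`)
	m.Name("host").Equal("example.com")
	m.Name("user").Equal("john")
}
```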
+ 286 - 0
vendor/github.com/iris-contrib/httpexpect/value.go

@@ -0,0 +1,286 @@
+package httpexpect
+
+import (
+	"reflect"
+)
+
+// Value provides methods to inspect attached interface{} object
+// (Go representation of arbitrary JSON value) and cast it to
+// concrete type.
+type Value struct {
+	chain chain
+	value interface{}
+}
+
+// NewValue returns a new Value given a reporter used to report failures
+// and value to be inspected.
+//
+// reporter should not be nil, but value may be nil.
+//
+// Example:
+//  value := NewValue(t, map[string]interface{}{"foo": 123})
+//  value.Object()
+//
+//  value := NewValue(t, []interface{}{"foo", 123})
+//  value.Array()
+//
+//  value := NewValue(t, "foo")
+//  value.String()
+//
+//  value := NewValue(t, 123)
+//  value.Number()
+//
+//  value := NewValue(t, true)
+//  value.Boolean()
+//
+//  value := NewValue(t, nil)
+//  value.Null()
+func NewValue(reporter Reporter, value interface{}) *Value {
+	chain := makeChain(reporter)
+	if value != nil {
+		value, _ = canonValue(&chain, value)
+	}
+	return &Value{chain, value}
+}
+
+// Raw returns underlying value attached to Value.
+// This is the value originally passed to NewValue, converted to canonical form.
+//
+// Example:
+//  value := NewValue(t, "foo")
+//  assert.Equal(t, "foo", number.Raw().(string))
+func (v *Value) Raw() interface{} {
+	return v.value
+}
+
+// Path returns a new Value object for child object(s) matching given
+// JSONPath expression.
+//
+// JSONPath is a simple XPath-like query language.
+// See http://goessner.net/articles/JsonPath/.
+//
+// We currently use https://github.com/yalp/jsonpath, which implements
+// only a subset of JSONPath, yet useful for simple queries. It doesn't
+// support filters and requires double quotes for strings.
+//
+// Example 1:
+//  json := `{"users": [{"name": "john"}, {"name": "bob"}]}`
+//  value := NewValue(t, json)
+//
+//  value.Path("$.users[0].name").String().Equal("john")
+//  value.Path("$.users[1].name").String().Equal("bob")
+//
+// Example 2:
+//  json := `{"yfGH2a": {"user": "john"}, "f7GsDd": {"user": "john"}}`
+//  value := NewValue(t, json)
+//
+//  for _, user := range value.Path("$..user").Array().Iter() {
+//      user.String().Equal("john")
+//  }
+func (v *Value) Path(path string) *Value {
+	return getPath(&v.chain, v.value, path)
+}
+
+// Schema succeeds if value matches given JSON Schema.
+//
+// JSON Schema specifies a JSON-based format to define the structure of
+// JSON data. See http://json-schema.org/.
+// We use https://github.com/xeipuuv/gojsonschema implementation.
+//
+// schema should be one of the following:
+//  - go value that can be json.Marshal-ed to a valid schema
+//  - type convertible to string containing valid schema
+//  - type convertible to string containing valid http:// or file:// URI,
+//    pointing to reachable and valid schema
+//
+// Example 1:
+//   schema := `{
+//     "type": "object",
+//     "properties": {
+//        "foo": {
+//            "type": "string"
+//        },
+//        "bar": {
+//            "type": "integer"
+//        }
+//    },
+//    "require": ["foo", "bar"]
+//  }`
+//
+//  value := NewValue(t, map[string]interface{}{
+//      "foo": "a",
+//      "bar": 1,
+//  })
+//
+//  value.Schema(schema)
+//
+// Example 2:
+//  value := NewValue(t, data)
+//  value.Schema("http://example.com/schema.json")
+func (v *Value) Schema(schema interface{}) *Value {
+	checkSchema(&v.chain, v.value, schema)
+	return v
+}
+
+// Object returns a new Object attached to underlying value.
+//
+// If underlying value is not an object (map[string]interface{}), failure is reported
+// and empty (but non-nil) value is returned.
+//
+// Example:
+//  value := NewValue(t, map[string]interface{}{"foo": 123})
+//  value.Object().ContainsKey("foo")
+func (v *Value) Object() *Object {
+	data, ok := v.value.(map[string]interface{})
+	if !ok {
+		v.chain.fail("\nexpected object value (map or struct), but got:\n%s",
+			dumpValue(v.value))
+	}
+	return &Object{v.chain, data}
+}
+
+// Array returns a new Array attached to underlying value.
+//
+// If underlying value is not an array ([]interface{}), failure is reported and empty
+// (but non-nil) value is returned.
+//
+// Example:
+//  value := NewValue(t, []interface{}{"foo", 123})
+//  value.Array().Elements("foo", 123)
+func (v *Value) Array() *Array {
+	data, ok := v.value.([]interface{})
+	if !ok {
+		v.chain.fail("\nexpected array value, but got:\n%s",
+			dumpValue(v.value))
+	}
+	return &Array{v.chain, data}
+}
+
+// String returns a new String attached to underlying value.
+//
+// If underlying value is not string, failure is reported and empty (but non-nil)
+// value is returned.
+//
+// Example:
+//  value := NewValue(t, "foo")
+//  value.String().EqualFold("FOO")
+func (v *Value) String() *String {
+	data, ok := v.value.(string)
+	if !ok {
+		v.chain.fail("\nexpected string value, but got:\n%s",
+			dumpValue(v.value))
+	}
+	return &String{v.chain, data}
+}
+
+// Number returns a new Number attached to underlying value.
+//
+// If underlying value is not a number (numeric type convertible to float64), failure
+// is reported and empty (but non-nil) value is returned.
+//
+// Example:
+//  value := NewValue(t, 123)
+//  value.Number().InRange(100, 200)
+func (v *Value) Number() *Number {
+	data, ok := v.value.(float64)
+	if !ok {
+		v.chain.fail("\nexpected numeric value, but got:\n%s",
+			dumpValue(v.value))
+	}
+	return &Number{v.chain, data}
+}
+
+// Boolean returns a new Boolean attached to underlying value.
+//
+// If underlying value is not a bool, failure is reported and empty (but non-nil)
+// value is returned.
+//
+// Example:
+//  value := NewValue(t, true)
+//  value.Boolean().True()
+func (v *Value) Boolean() *Boolean {
+	data, ok := v.value.(bool)
+	if !ok {
+		v.chain.fail("\nexpected boolean value, but got:\n%s",
+			dumpValue(v.value))
+	}
+	return &Boolean{v.chain, data}
+}
+
+// Null succeeds if value is nil.
+//
+// Note that non-nil interface{} that points to nil value (e.g. nil slice or map)
+// is also treated as null value. Empty (non-nil) slice or map, empty string, and
+// zero number are not treated as null value.
+//
+// Example:
+//  value := NewValue(t, nil)
+//  value.Null()
+//
+//  value := NewValue(t, []interface{}(nil))
+//  value.Null()
+func (v *Value) Null() *Value {
+	if v.value != nil {
+		v.chain.fail("\nexpected nil value, but got:\n%s",
+			dumpValue(v.value))
+	}
+	return v
+}
+
+// NotNull succeeds if value is not nil.
+//
+// Note that non-nil interface{} that points to nil value (e.g. nil slice or map)
+// is also treated as null value. Empty (non-nil) slice or map, empty string, and
+// zero number are not treated as null value.
+//
+// Example:
+//  value := NewValue(t, "")
+//  value.NotNull()
+//
+//  value := NewValue(t, make([]interface{}, 0))
+//  value.NotNull()
+func (v *Value) NotNull() *Value {
+	if v.value == nil {
+		v.chain.fail("\nexpected non-nil value, but got:\n%s",
+			dumpValue(v.value))
+	}
+	return v
+}
+
+// Equal succeeds if value is equal to another value.
+// Before comparison, both values are converted to canonical form.
+//
+// Example:
+//  value := NewValue(t, "foo")
+//  value.Equal("foo")
+func (v *Value) Equal(value interface{}) *Value {
+	expected, ok := canonValue(&v.chain, value)
+	if !ok {
+		return v
+	}
+	if !reflect.DeepEqual(expected, v.value) {
+		v.chain.fail("\nexpected value equal to:\n%s\n\nbut got:\n%s\n\ndiff:\n%s",
+			dumpValue(expected),
+			dumpValue(v.value),
+			diffValues(expected, v.value))
+	}
+	return v
+}
+
+// NotEqual succeeds if value is not equal to another value.
+// Before comparison, both values are converted to canonical form.
+//
+// Example:
+//  value := NewValue(t, "foo")
+//  value.NorEqual("bar")
+func (v *Value) NotEqual(value interface{}) *Value {
+	expected, ok := canonValue(&v.chain, value)
+	if !ok {
+		return v
+	}
+	if reflect.DeepEqual(expected, v.value) {
+		v.chain.fail("\nexpected value not equal to:\n%s",
+			dumpValue(expected))
+	}
+	return v
+}

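A compact sketch of casting a Value to the concrete matchers. NewValue canonicalizes numbers to float64, so integer literals compare fine; Object.Value, Array.Length, and Number.Equal live in the other vendored files (object.go, array.go, number.go), and the data is made up.

```go
package knowoapi_test // hypothetical

import (
	"testing"

	"github.com/iris-contrib/httpexpect"
)

func TestValueCasts(t *testing.T) {
	v := httpexpect.NewValue(t, map[string]interface{}{
		"name":  "device-01",
		"count": 3,
		"tags":  []interface{}{"a", "b"},
	})

	obj := v.Object()
	obj.Value("name").String().Equal("device-01")
	obj.Value("count").Number().Equal(3) // 3 was canonicalized to float64(3)
	obj.Value("tags").Array().Length().Equal(2)

	// nil is the only value accepted by Null().
	httpexpect.NewValue(t, nil).Null()
}
```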
+ 143 - 0
vendor/github.com/kataras/iris/httptest/httptest.go

@@ -0,0 +1,143 @@
+package httptest
+
+import (
+	"crypto/tls"
+	"net/http"
+	"testing"
+
+	"github.com/iris-contrib/httpexpect"
+	"github.com/kataras/iris"
+)
+
+type (
+	// OptionSetter sets a configuration field to the configuration
+	OptionSetter interface {
+		// Set receives a pointer to the Configuration type and does the job of filling it
+		Set(c *Configuration)
+	}
+	// OptionSet implements the OptionSetter
+	OptionSet func(c *Configuration)
+)
+
+// Set is the func which makes the OptionSet an OptionSetter; this is the form used most often.
+func (o OptionSet) Set(c *Configuration) {
+	o(c)
+}
+
+// Configuration httptest configuration
+type Configuration struct {
+	// URL the base url.
+	// Defaults to empty string "".
+	URL string
+	// Debug, if true, shows debug messages from httpexpect while a test runs.
+	// Defaults to false.
+	Debug bool
+	// LogLevel sets the application's log level.
+	// Defaults to "disable" when testing.
+	LogLevel string
+}
+
+// Set implements the OptionSetter for the Configuration itself
+func (c Configuration) Set(main *Configuration) {
+	main.URL = c.URL
+	main.Debug = c.Debug
+	if c.LogLevel != "" {
+		main.LogLevel = c.LogLevel
+	}
+}
+
+var (
+	// URL, if set, sets the httptest's BaseURL.
+	// Defaults to empty string "".
+	URL = func(schemeAndHost string) OptionSet {
+		return func(c *Configuration) {
+			c.URL = schemeAndHost
+		}
+	}
+	// Debug, if true, shows debug messages from httpexpect while a test runs.
+	// Defaults to false.
+	Debug = func(val bool) OptionSet {
+		return func(c *Configuration) {
+			c.Debug = val
+		}
+	}
+
+	// LogLevel sets the application's log level "val".
+	// Defaults to disabled when testing.
+	LogLevel = func(val string) OptionSet {
+		return func(c *Configuration) {
+			c.LogLevel = val
+		}
+	}
+)
+
+// DefaultConfiguration returns the default configuration for the httptest.
+func DefaultConfiguration() *Configuration {
+	return &Configuration{URL: "", Debug: false, LogLevel: "disable"}
+}
+
+// New prepares and returns a new test framework based on the given "app".
+// You can find an example at https://github.com/kataras/iris/tree/master/_examples/testing/httptest
+func New(t *testing.T, app *iris.Application, setters ...OptionSetter) *httpexpect.Expect {
+	conf := DefaultConfiguration()
+	for _, setter := range setters {
+		setter.Set(conf)
+	}
+
+	// set the logger or disable it (default) and disable the updater (for any case).
+	app.Configure(iris.WithoutVersionChecker)
+	app.Logger().SetLevel(conf.LogLevel)
+
+	if err := app.Build(); err != nil {
+		if conf.Debug && (conf.LogLevel == "disable" || conf.LogLevel == "disabled") {
+			app.Logger().Println(err.Error())
+			return nil
+		}
+	}
+
+	testConfiguration := httpexpect.Config{
+		BaseURL: conf.URL,
+		Client: &http.Client{
+			Transport: httpexpect.NewBinder(app),
+			Jar:       httpexpect.NewJar(),
+		},
+		Reporter: httpexpect.NewAssertReporter(t),
+	}
+
+	if conf.Debug {
+		testConfiguration.Printers = []httpexpect.Printer{
+			httpexpect.NewDebugPrinter(t, true),
+		}
+	}
+
+	return httpexpect.WithConfig(testConfiguration)
+}
+
+// NewInsecure is the same as New, but instead of wrapping an *iris.Application it
+// targets a running host (set via the URL option) and skips TLS certificate verification.
+// Useful for testing running TLS servers.
+func NewInsecure(t *testing.T, setters ...OptionSetter) *httpexpect.Expect {
+	conf := DefaultConfiguration()
+	for _, setter := range setters {
+		setter.Set(conf)
+	}
+	transport := &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+	}
+
+	testConfiguration := httpexpect.Config{
+		BaseURL: conf.URL,
+		Client: &http.Client{
+			Transport: transport,
+			Jar:       httpexpect.NewJar(),
+		},
+		Reporter: httpexpect.NewAssertReporter(t),
+	}
+
+	if conf.Debug {
+		testConfiguration.Printers = []httpexpect.Printer{
+			httpexpect.NewDebugPrinter(t, true),
+		}
+	}
+
+	return httpexpect.WithConfig(testConfiguration)
+}

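This helper is the natural entry point for the new knowoapi tests; a hedged sketch of the pattern with a hypothetical app and route. The e.GET chain relies on the vendored httpexpect files above.

```go
package knowoapi_test // hypothetical; the app and route are placeholders

import (
	"net/http"
	"testing"

	"github.com/kataras/iris"
	"github.com/kataras/iris/httptest"
)

func newApp() *iris.Application {
	app := iris.New()
	app.Get("/ping", func(ctx iris.Context) {
		ctx.JSON(iris.Map{"message": "pong"})
	})
	return app
}

func TestPing(t *testing.T) {
	// Pass httptest.Debug(true) as an extra option to dump every request/response.
	e := httptest.New(t, newApp())

	e.GET("/ping").
		Expect().
		Status(http.StatusOK).
		JSON().Object().Value("message").Equal("pong")
}
```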
+ 97 - 0
vendor/github.com/kataras/iris/httptest/netutils.go

@@ -0,0 +1,97 @@
+package httptest
+
+import (
+	"crypto/tls"
+	"net"
+)
+
+// copied from net/http/httptest/internal
+
+// LocalhostCert is a PEM-encoded TLS cert with SAN IPs
+// "127.0.0.1" and "[::1]", expiring at Jan 29 16:00:00 2084 GMT.
+// generated from src/crypto/tls:
+// go run generate_cert.go  --rsa-bits 1024 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
+// note: these are not the net/http/httptest/internal contents, but that doesn't matter.
+var LocalhostCert = []byte(`-----BEGIN CERTIFICATE-----
+MIIDAzCCAeugAwIBAgIJAP0pWSuIYyQCMA0GCSqGSIb3DQEBBQUAMBgxFjAUBgNV
+BAMMDWxvY2FsaG9zdDozMzEwHhcNMTYxMjI1MDk1OTI3WhcNMjYxMjIzMDk1OTI3
+WjAYMRYwFAYDVQQDDA1sb2NhbGhvc3Q6MzMxMIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEA5vETjLa+8W856rWXO1xMF/CLss9vn5xZhPXKhgz+D7ogSAXm
+mWP53eeBUGC2r26J++CYfVqwOmfJEu9kkGUVi8cGMY9dHeIFPfxD31MYX175jJQe
+tu0WeUII7ciNsSUDyBMqsl7yi1IgN7iLONM++1+QfbbmNiEbghRV6icEH6M+bWlz
+3YSAMEdpK3mg2gsugfLKMwJkaBKEehUNMySRlIhyLITqt1exYGaggRd1zjqUpqpD
+sL2sRVHJ3qHGkSh8nVy8MvG8BXiFdYQJP3mCQDZzruCyMWj5/19KAyu7Cto3Bcvu
+PgujnwRtU+itt8WhZUVtU1n7Ivf6lMJTBcc4OQIDAQABo1AwTjAdBgNVHQ4EFgQU
+MXrBvbILQmiwjUj19aecF2N+6IkwHwYDVR0jBBgwFoAUMXrBvbILQmiwjUj19aec
+F2N+6IkwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA4zbFml1t9KXJ
+OijAV8gALePR8v04DQwJP+jsRxXw5zzhc8Wqzdd2hjUd07mfRWAvmyywrmhCV6zq
+OHznR+aqIqHtm0vV8OpKxLoIQXavfBd6axEXt3859RDM4xJNwIlxs3+LWGPgINud
+wjJqjyzSlhJpQpx4YZ5Da+VMiqAp8N1UeaZ5lBvmSDvoGh6HLODSqtPlWMrldRW9
+AfsXVxenq81MIMeKW2fSOoPnWZ4Vjf1+dSlbJE/DD4zzcfbyfgY6Ep/RrUltJ3ag
+FQbuNTQlgKabe21dSL9zJ2PengVKXl4Trl+4t/Kina9N9Jw535IRCSwinD6a/2Ca
+m7DnVXFiVA==
+-----END CERTIFICATE-----
+`)
+
+// LocalhostKey is the private key for localhostCert.
+var LocalhostKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA5vETjLa+8W856rWXO1xMF/CLss9vn5xZhPXKhgz+D7ogSAXm
+mWP53eeBUGC2r26J++CYfVqwOmfJEu9kkGUVi8cGMY9dHeIFPfxD31MYX175jJQe
+tu0WeUII7ciNsSUDyBMqsl7yi1IgN7iLONM++1+QfbbmNiEbghRV6icEH6M+bWlz
+3YSAMEdpK3mg2gsugfLKMwJkaBKEehUNMySRlIhyLITqt1exYGaggRd1zjqUpqpD
+sL2sRVHJ3qHGkSh8nVy8MvG8BXiFdYQJP3mCQDZzruCyMWj5/19KAyu7Cto3Bcvu
+PgujnwRtU+itt8WhZUVtU1n7Ivf6lMJTBcc4OQIDAQABAoIBAQCTLE0eHpPevtg0
++FaRUMd5diVA5asoF3aBIjZXaU47bY0G+SO02x6wSMmDFK83a4Vpy/7B3Bp0jhF5
+DLCUyKaLdmE/EjLwSUq37ty+JHFizd7QtNBCGSN6URfpmSabHpCjX3uVQqblHIhF
+mki3BQCdJ5CoXPemxUCHjDgYSZb6JVNIPJExjekc0+4A2MYWMXV6Wr86C7AY3659
+KmveZpC3gOkLA/g/IqDQL/QgTq7/3eloHaO+uPBihdF56do4eaOO0jgFYpl8V7ek
+PZhHfhuPZV3oq15+8Vt77ngtjUWVI6qX0E3ilh+V5cof+03q0FzHPVe3zBUNXcm0
+OGz19u/FAoGBAPSm4Aa4xs/ybyjQakMNix9rak66ehzGkmlfeK5yuQ/fHmTg8Ac+
+ahGs6A3lFWQiyU6hqm6Qp0iKuxuDh35DJGCWAw5OUS/7WLJtu8fNFch6iIG29rFs
+s+Uz2YLxJPebpBsKymZUp7NyDRgEElkiqsREmbYjLrc8uNKkDy+k14YnAoGBAPGn
+ZlN0Mo5iNgQStulYEP5pI7WOOax9KOYVnBNguqgY9c7fXVXBxChoxt5ebQJWG45y
+KPG0hB0bkA4YPu4bTRf5acIMpjFwcxNlmwdc4oCkT4xqAFs9B/AKYZgkf4IfKHqW
+P9PD7TbUpkaxv25bPYwUSEB7lPa+hBtRyN9Wo6qfAoGAPBkeISiU1hJE0i7YW55h
+FZfKZoqSYq043B+ywo+1/Dsf+UH0VKM1ZSAnZPpoVc/hyaoW9tAb98r0iZ620wJl
+VkCjgYklknbY5APmw/8SIcxP6iVq1kzQqDYjcXIRVa3rEyWEcLzM8VzL8KFXbIQC
+lPIRHFfqKuMEt+HLRTXmJ7MCgYAHGvv4QjdmVl7uObqlG9DMGj1RjlAF0VxNf58q
+NrLmVG2N2qV86wigg4wtZ6te4TdINfUcPkmQLYpLz8yx5Z2bsdq5OPP+CidoD5nC
+WqnSTIKGR2uhQycjmLqL5a7WHaJsEFTqHh2wego1k+5kCUzC/KmvM7MKmkl6ICp+
+3qZLUwKBgQCDOhKDwYo1hdiXoOOQqg/LZmpWOqjO3b4p99B9iJqhmXN0GKXIPSBh
+5nqqmGsG8asSQhchs7EPMh8B80KbrDTeidWskZuUoQV27Al1UEmL6Zcl83qXD6sf
+k9X9TwWyZtp5IL1CAEd/Il9ZTXFzr3lNaN8LCFnU+EIsz1YgUW8LTg==
+-----END RSA PRIVATE KEY-----
+`)
+
+// NewLocalListener returns a new TCP listener on ipv4 "127.0.0.1:0",
+// falling back to tcp6 "[::1]:0" if ipv4 is unavailable.
+func NewLocalListener() net.Listener {
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
+			panic(err)
+		}
+	}
+	return l
+}
+
+// NewLocalTLSListener returns a new TLS listener
+// based on the "tcpListener"; if "tcpListener" is nil
+// it makes use of `NewLocalListener`.
+// Cert and Key are `LocalhostCert` and `LocalhostKey` respectively.
+func NewLocalTLSListener(tcpListener net.Listener) net.Listener {
+	if tcpListener == nil {
+		tcpListener = NewLocalListener()
+	}
+
+	cert, err := tls.X509KeyPair(LocalhostCert, LocalhostKey)
+	if err != nil {
+		panic(err)
+	}
+
+	cfg := new(tls.Config)
+	cfg.NextProtos = []string{"http/1.1"}
+	cfg.Certificates = []tls.Certificate{cert}
+	cfg.InsecureSkipVerify = true
+	return tls.NewListener(tcpListener, cfg)
+}
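A hedged sketch combining `NewLocalTLSListener` with the `NewInsecure` helper above. It assumes a `URL` option setter exists earlier in this file alongside `Debug` and `LogLevel` (not visible in this hunk); the app, route and sleep-based readiness wait are illustrative only:

```go
package main

import (
	"testing"
	"time"

	"github.com/kataras/iris"
	"github.com/kataras/iris/httptest"
)

func TestOverTLS(t *testing.T) {
	app := iris.New()
	app.Get("/secure", func(ctx iris.Context) {
		ctx.WriteString("ok")
	})

	// Serve the app over the self-signed localhost certificate above.
	ln := httptest.NewLocalTLSListener(nil)
	go app.Run(iris.Listener(ln))
	time.Sleep(200 * time.Millisecond) // a real test would wait for readiness instead

	// NewInsecure skips certificate verification, which is exactly what
	// the self-signed LocalhostCert needs.
	e := httptest.NewInsecure(t, httptest.URL("https://"+ln.Addr().String()))
	e.GET("/secure").Expect().
		Status(httptest.StatusOK).
		Body().Equal("ok")
}
```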

+ 73 - 0
vendor/github.com/kataras/iris/httptest/status.go

@@ -0,0 +1,73 @@
+package httptest
+
+// HTTP status codes as registered with IANA.
+// See: http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
+// Raw copy from the net/http std package so that users don't have to import "net/http" just for the status codes.
+//
+// These may or may not stay.
+const (
+	StatusContinue           = 100 // RFC 7231, 6.2.1
+	StatusSwitchingProtocols = 101 // RFC 7231, 6.2.2
+	StatusProcessing         = 102 // RFC 2518, 10.1
+
+	StatusOK                   = 200 // RFC 7231, 6.3.1
+	StatusCreated              = 201 // RFC 7231, 6.3.2
+	StatusAccepted             = 202 // RFC 7231, 6.3.3
+	StatusNonAuthoritativeInfo = 203 // RFC 7231, 6.3.4
+	StatusNoContent            = 204 // RFC 7231, 6.3.5
+	StatusResetContent         = 205 // RFC 7231, 6.3.6
+	StatusPartialContent       = 206 // RFC 7233, 4.1
+	StatusMultiStatus          = 207 // RFC 4918, 11.1
+	StatusAlreadyReported      = 208 // RFC 5842, 7.1
+	StatusIMUsed               = 226 // RFC 3229, 10.4.1
+
+	StatusMultipleChoices   = 300 // RFC 7231, 6.4.1
+	StatusMovedPermanently  = 301 // RFC 7231, 6.4.2
+	StatusFound             = 302 // RFC 7231, 6.4.3
+	StatusSeeOther          = 303 // RFC 7231, 6.4.4
+	StatusNotModified       = 304 // RFC 7232, 4.1
+	StatusUseProxy          = 305 // RFC 7231, 6.4.5
+	_                       = 306 // RFC 7231, 6.4.6 (Unused)
+	StatusTemporaryRedirect = 307 // RFC 7231, 6.4.7
+	StatusPermanentRedirect = 308 // RFC 7538, 3
+
+	StatusBadRequest                   = 400 // RFC 7231, 6.5.1
+	StatusUnauthorized                 = 401 // RFC 7235, 3.1
+	StatusPaymentRequired              = 402 // RFC 7231, 6.5.2
+	StatusForbidden                    = 403 // RFC 7231, 6.5.3
+	StatusNotFound                     = 404 // RFC 7231, 6.5.4
+	StatusMethodNotAllowed             = 405 // RFC 7231, 6.5.5
+	StatusNotAcceptable                = 406 // RFC 7231, 6.5.6
+	StatusProxyAuthRequired            = 407 // RFC 7235, 3.2
+	StatusRequestTimeout               = 408 // RFC 7231, 6.5.7
+	StatusConflict                     = 409 // RFC 7231, 6.5.8
+	StatusGone                         = 410 // RFC 7231, 6.5.9
+	StatusLengthRequired               = 411 // RFC 7231, 6.5.10
+	StatusPreconditionFailed           = 412 // RFC 7232, 4.2
+	StatusRequestEntityTooLarge        = 413 // RFC 7231, 6.5.11
+	StatusRequestURITooLong            = 414 // RFC 7231, 6.5.12
+	StatusUnsupportedMediaType         = 415 // RFC 7231, 6.5.13
+	StatusRequestedRangeNotSatisfiable = 416 // RFC 7233, 4.4
+	StatusExpectationFailed            = 417 // RFC 7231, 6.5.14
+	StatusTeapot                       = 418 // RFC 7168, 2.3.3
+	StatusUnprocessableEntity          = 422 // RFC 4918, 11.2
+	StatusLocked                       = 423 // RFC 4918, 11.3
+	StatusFailedDependency             = 424 // RFC 4918, 11.4
+	StatusUpgradeRequired              = 426 // RFC 7231, 6.5.15
+	StatusPreconditionRequired         = 428 // RFC 6585, 3
+	StatusTooManyRequests              = 429 // RFC 6585, 4
+	StatusRequestHeaderFieldsTooLarge  = 431 // RFC 6585, 5
+	StatusUnavailableForLegalReasons   = 451 // RFC 7725, 3
+
+	StatusInternalServerError           = 500 // RFC 7231, 6.6.1
+	StatusNotImplemented                = 501 // RFC 7231, 6.6.2
+	StatusBadGateway                    = 502 // RFC 7231, 6.6.3
+	StatusServiceUnavailable            = 503 // RFC 7231, 6.6.4
+	StatusGatewayTimeout                = 504 // RFC 7231, 6.6.5
+	StatusHTTPVersionNotSupported       = 505 // RFC 7231, 6.6.6
+	StatusVariantAlsoNegotiates         = 506 // RFC 2295, 8.1
+	StatusInsufficientStorage           = 507 // RFC 4918, 11.5
+	StatusLoopDetected                  = 508 // RFC 5842, 7.2
+	StatusNotExtended                   = 510 // RFC 2774, 7
+	StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6
+)

+ 22 - 0
vendor/github.com/moul/http2curl/LICENSE

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Manfred Touron
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 50 - 0
vendor/github.com/moul/http2curl/Makefile

@@ -0,0 +1,50 @@
+# Project-specific variables
+CONVEY_PORT ?=	9042
+
+
+# Common variables
+SOURCES :=	$(shell find . -type f -name "*.go")
+COMMANDS :=	$(shell go list ./... | grep -v /vendor/ | grep /cmd/)
+PACKAGES :=	$(shell go list ./... | grep -v /vendor/ | grep -v /cmd/)
+GOENV ?=	GO15VENDOREXPERIMENT=1
+GO ?=		$(GOENV) go
+GODEP ?=	$(GOENV) godep
+USER ?=		$(shell whoami)
+
+
+all:	build
+
+
+.PHONY: build
+build:
+	echo "nothing to do"
+
+
+.PHONY: test
+test:
+	$(GO) get -t .
+	$(GO) test -v .
+
+
+.PHONY: godep-save
+godep-save:
+	$(GODEP) save $(PACKAGES) $(COMMANDS)
+
+
+.PHONY: re
+re:	clean all
+
+
+.PHONY: convey
+convey:
+	$(GO) get github.com/smartystreets/goconvey
+	goconvey -cover -port=$(CONVEY_PORT) -workDir="$(realpath .)" -depth=1
+
+
+.PHONY:	cover
+cover:	profile.out
+
+
+profile.out:	$(SOURCES)
+	rm -f $@
+	$(GO) test -covermode=count -coverpkg=. -coverprofile=$@ .

+ 71 - 0
vendor/github.com/moul/http2curl/http2curl.go

@@ -0,0 +1,71 @@
+package http2curl
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"sort"
+	"strings"
+)
+
+// CurlCommand contains an exec.Command-compatible slice plus helpers
+type CurlCommand struct {
+	slice []string
+}
+
+// append appends a string to the CurlCommand
+func (c *CurlCommand) append(newSlice ...string) {
+	c.slice = append(c.slice, newSlice...)
+}
+
+// String returns a ready to copy/paste command
+func (c *CurlCommand) String() string {
+	return strings.Join(c.slice, " ")
+}
+
+// nopCloser is used to create a new io.ReadCloser for req.Body
+type nopCloser struct {
+	io.Reader
+}
+
+func bashEscape(str string) string {
+	return `'` + strings.Replace(str, `'`, `'\''`, -1) + `'`
+}
+
+func (nopCloser) Close() error { return nil }
+
+// GetCurlCommand returns a CurlCommand corresponding to an http.Request
+func GetCurlCommand(req *http.Request) (*CurlCommand, error) {
+	command := CurlCommand{}
+
+	command.append("curl")
+
+	command.append("-X", bashEscape(req.Method))
+
+	if req.Body != nil {
+		body, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			return nil, err
+		}
+		req.Body = nopCloser{bytes.NewBuffer(body)}
+		bodyEscaped := bashEscape(string(body))
+		command.append("-d", bodyEscaped)
+	}
+
+	var keys []string
+
+	for k := range req.Header {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		command.append("-H", bashEscape(fmt.Sprintf("%s: %s", k, strings.Join(req.Header[k], " "))))
+	}
+
+	command.append(bashEscape(req.URL.String()))
+
+	return &command, nil
+}
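A small usage sketch for `GetCurlCommand`; the request, header and URL are made up for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/moul/http2curl"
)

func main() {
	body := bytes.NewBufferString(`{"name":"widget"}`)
	req, _ := http.NewRequest("PUT", "http://example.com/widgets/1", body)
	req.Header.Set("Content-Type", "application/json")

	// The request body is read and restored via the nopCloser above,
	// so req can still be sent afterwards.
	cmd, _ := http2curl.GetCurlCommand(req)
	fmt.Println(cmd)
	// curl -X 'PUT' -d '{"name":"widget"}' -H 'Content-Type: application/json' 'http://example.com/widgets/1'
}
```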

+ 27 - 0
vendor/github.com/pmezard/go-difflib/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+    The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 772 - 0
vendor/github.com/pmezard/go-difflib/difflib/difflib.go

@@ -0,0 +1,772 @@
+// Package difflib is a partial port of the Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable to output text differences in a human friendly way, there
+// are no guarantees generated diffs are consumable by patch(1).
+package difflib
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching".  The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk).  The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence.  This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence.  That's what
+// catches peoples' eyes.  The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff.  This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing:  Basic R-O is cubic time worst case and quadratic time expected
+// case.  SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+	a              []string
+	b              []string
+	b2j            map[string][]int
+	IsJunk         func(string) bool
+	autoJunk       bool
+	bJunk          map[string]struct{}
+	matchingBlocks []Match
+	fullBCount     map[string]int
+	bPopular       map[string]struct{}
+	opCodes        []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+	m := SequenceMatcher{autoJunk: true}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+	isJunk func(string) bool) *SequenceMatcher {
+
+	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+	m.SetSeq1(a)
+	m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+	if &a == &m.a {
+		return
+	}
+	m.a = a
+	m.matchingBlocks = nil
+	m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+	if &b == &m.b {
+		return
+	}
+	m.b = b
+	m.matchingBlocks = nil
+	m.opCodes = nil
+	m.fullBCount = nil
+	m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+	// Populate line -> index mapping
+	b2j := map[string][]int{}
+	for i, s := range m.b {
+		indices := b2j[s]
+		indices = append(indices, i)
+		b2j[s] = indices
+	}
+
+	// Purge junk elements
+	m.bJunk = map[string]struct{}{}
+	if m.IsJunk != nil {
+		junk := m.bJunk
+		for s, _ := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s, _ := range junk {
+			delete(b2j, s)
+		}
+	}
+
+	// Purge remaining popular elements
+	popular := map[string]struct{}{}
+	n := len(m.b)
+	if m.autoJunk && n >= 200 {
+		ntest := n/100 + 1
+		for s, indices := range b2j {
+			if len(indices) > ntest {
+				popular[s] = struct{}{}
+			}
+		}
+		for s, _ := range popular {
+			delete(b2j, s)
+		}
+	}
+	m.bPopular = popular
+	m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+	_, ok := m.bJunk[s]
+	return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//     alo <= i <= i+k <= ahi
+//     blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+//     k >= k'
+//     i <= i'
+//     and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block.  Then that block is extended as
+// far as possible by matching (only) junk elements on both sides.  So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+	// CAUTION:  stripping common prefix or suffix would be incorrect.
+	// E.g.,
+	//    ab
+	//    acab
+	// Longest matching block is "ab", but if common prefix is
+	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
+	// strip, so ends up claiming that ab is changed to acab by
+	// inserting "ca" in the middle.  That's minimal but unintuitive:
+	// "it's obvious" that someone inserted "ac" at the front.
+	// Windiff ends up at the same place as diff, but by pairing up
+	// the unique 'b's and then matching the first two 'a's.
+	besti, bestj, bestsize := alo, blo, 0
+
+	// find longest junk-free match
+	// during an iteration of the loop, j2len[j] = length of longest
+	// junk-free match ending with a[i-1] and b[j]
+	j2len := map[int]int{}
+	for i := alo; i != ahi; i++ {
+		// look at all instances of a[i] in b; note that because
+		// b2j has no junk keys, the loop is skipped if a[i] is junk
+		newj2len := map[int]int{}
+		for _, j := range m.b2j[m.a[i]] {
+			// a[i] matches b[j]
+			if j < blo {
+				continue
+			}
+			if j >= bhi {
+				break
+			}
+			k := j2len[j-1] + 1
+			newj2len[j] = k
+			if k > bestsize {
+				besti, bestj, bestsize = i-k+1, j-k+1, k
+			}
+		}
+		j2len = newj2len
+	}
+
+	// Extend the best by non-junk elements on each end.  In particular,
+	// "popular" non-junk elements aren't in b2j, which greatly speeds
+	// the inner loop above, but also means "the best" match so far
+	// doesn't contain any junk *or* popular non-junk elements.
+	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		!m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize += 1
+	}
+
+	// Now that we have a wholly interesting match (albeit possibly
+	// empty!), we may as well suck up the matching junk on each
+	// side of it too.  Can't think of a good reason not to, and it
+	// saves post-processing the (possibly considerable) expense of
+	// figuring out what to do with it.  In the case of an empty
+	// interesting match, this is clearly the right thing to do,
+	// because no other kind of match is possible in the regions.
+	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize += 1
+	}
+
+	return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+	if m.matchingBlocks != nil {
+		return m.matchingBlocks
+	}
+
+	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+		match := m.findLongestMatch(alo, ahi, blo, bhi)
+		i, j, k := match.A, match.B, match.Size
+		if match.Size > 0 {
+			if alo < i && blo < j {
+				matched = matchBlocks(alo, i, blo, j, matched)
+			}
+			matched = append(matched, match)
+			if i+k < ahi && j+k < bhi {
+				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+			}
+		}
+		return matched
+	}
+	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+	// It's possible that we have adjacent equal blocks in the
+	// matching_blocks list now.
+	nonAdjacent := []Match{}
+	i1, j1, k1 := 0, 0, 0
+	for _, b := range matched {
+		// Is this block adjacent to i1, j1, k1?
+		i2, j2, k2 := b.A, b.B, b.Size
+		if i1+k1 == i2 && j1+k1 == j2 {
+			// Yes, so collapse them -- this just increases the length of
+			// the first block by the length of the second, and the first
+			// block so lengthened remains the block to compare against.
+			k1 += k2
+		} else {
+			// Not adjacent.  Remember the first block (k1==0 means it's
+			// the dummy we started with), and make the second block the
+			// new block to compare against.
+			if k1 > 0 {
+				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+			}
+			i1, j1, k1 = i2, j2, k2
+		}
+	}
+	if k1 > 0 {
+		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+	}
+
+	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+	m.matchingBlocks = nonAdjacent
+	return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+	if m.opCodes != nil {
+		return m.opCodes
+	}
+	i, j := 0, 0
+	matching := m.GetMatchingBlocks()
+	opCodes := make([]OpCode, 0, len(matching))
+	for _, m := range matching {
+		//  invariant:  we've pumped out correct diffs to change
+		//  a[:i] into b[:j], and the next matching block is
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
+		//  the matching block, and move (i,j) beyond the match
+		ai, bj, size := m.A, m.B, m.Size
+		tag := byte(0)
+		if i < ai && j < bj {
+			tag = 'r'
+		} else if i < ai {
+			tag = 'd'
+		} else if j < bj {
+			tag = 'i'
+		}
+		if tag > 0 {
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+		}
+		i, j = ai+size, bj+size
+		// the list of matching blocks is terminated by a
+		// sentinel with size 0
+		if size > 0 {
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+		}
+	}
+	m.opCodes = opCodes
+	return m.opCodes
+}
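To make the opcode semantics above concrete, a minimal sketch comparing two small line slices (the inputs are chosen only for illustration):

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := []string{"one\n", "two\n", "three\n"}
	b := []string{"one\n", "too\n", "three\n"}

	m := difflib.NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
	// Expected:
	// e a[0:1] b[0:1]
	// r a[1:2] b[1:2]
	// e a[2:3] b[2:3]
}
```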
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+	if n < 0 {
+		n = 3
+	}
+	codes := m.GetOpCodes()
+	if len(codes) == 0 {
+		codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
+	}
+	// Fixup leading and trailing groups if they show no changes.
+	if codes[0].Tag == 'e' {
+		c := codes[0]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+	}
+	if codes[len(codes)-1].Tag == 'e' {
+		c := codes[len(codes)-1]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+	}
+	nn := n + n
+	groups := [][]OpCode{}
+	group := []OpCode{}
+	for _, c := range codes {
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		// End the current group and start a new one whenever
+		// there is a large range with no changes.
+		if c.Tag == 'e' && i2-i1 > nn {
+			group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+				j1, min(j2, j1+n)})
+			groups = append(groups, group)
+			group = []OpCode{}
+			i1, j1 = max(i1, i2-n), max(j1, j2-n)
+		}
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+	}
+	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+		groups = append(groups, group)
+	}
+	return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+	matches := 0
+	for _, m := range m.GetMatchingBlocks() {
+		matches += m.Size
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+	// viewing a and b as multisets, set matches to the cardinality
+	// of their intersection; this counts the number of matches
+	// without regard to order, so is clearly an upper bound
+	if m.fullBCount == nil {
+		m.fullBCount = map[string]int{}
+		for _, s := range m.b {
+			m.fullBCount[s] = m.fullBCount[s] + 1
+		}
+	}
+
+	// avail[x] is the number of times x appears in 'b' less the
+	// number of times we've seen it in 'a' so far ... kinda
+	avail := map[string]int{}
+	matches := 0
+	for _, s := range m.a {
+		n, ok := avail[s]
+		if !ok {
+			n = m.fullBCount[s]
+		}
+		avail[s] = n - 1
+		if n > 0 {
+			matches += 1
+		}
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+	la, lb := len(m.a), len(m.b)
+	return calculateRatio(min(la, lb), la+lb)
+}
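The three ratio methods form a chain of progressively cheaper upper bounds. A hedged sketch of the usual pattern, mirroring Python's difflib; the helper name and cutoff are illustrative, not part of the library:

```go
package diffutil

import "github.com/pmezard/go-difflib/difflib"

// similarEnough reports whether two line sequences reach the given similarity
// cutoff, checking the cheap upper bounds first so the expensive Ratio call
// only runs when it can still succeed.
func similarEnough(a, b []string, cutoff float64) bool {
	m := difflib.NewMatcher(a, b)
	return m.RealQuickRatio() >= cutoff &&
		m.QuickRatio() >= cutoff &&
		m.Ratio() >= cutoff
}
```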
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	if length == 0 {
+		beginning -= 1 // empty ranges begin at line just before the range
+	}
+	return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+	A        []string // First sequence lines
+	FromFile string   // First file name
+	FromDate string   // First file time
+	B        []string // Second sequence lines
+	ToFile   string   // Second file name
+	ToDate   string   // Second file time
+	Eol      string   // Headers end of line, defaults to LF
+	Context  int      // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context.  The number of context lines is set by diff.Context.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline.  This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times.  Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return string(w.Bytes()), err
+}
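A minimal sketch of producing a unified diff with the types above; the file names and contents are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\ntoo\nthree\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}

	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(text) // prints a patch-style "---/+++/@@" diff of the two inputs
}
```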
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 0 {
+		beginning -= 1 // empty ranges begin at line just before the range
+	}
+	if length <= 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times.  Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	var diffErr error
+	wf := func(format string, args ...interface{}) {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+	ws := func(s string) {
+		_, err := buf.WriteString(s)
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	prefix := map[byte]string{
+		'i': "+ ",
+		'd': "- ",
+		'r': "! ",
+		'e': "  ",
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+			}
+		}
+
+		first, last := g[0], g[len(g)-1]
+		ws("***************" + diff.Eol)
+
+		range1 := formatRangeContext(first.I1, last.I2)
+		wf("*** %s ****%s", range1, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, cc := range g {
+					if cc.Tag == 'i' {
+						continue
+					}
+					for _, line := range diff.A[cc.I1:cc.I2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+
+		range2 := formatRangeContext(first.J1, last.J2)
+		wf("--- %s ----%s", range2, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, cc := range g {
+					if cc.Tag == 'd' {
+						continue
+					}
+					for _, line := range diff.B[cc.J1:cc.J2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+	}
+	return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteContextDiff(w, diff)
+	return string(w.Bytes()), err
+}
+
+// Split a string on "\n", preserving the newlines. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}

+ 20 - 0
vendor/github.com/sergi/go-diff/LICENSE

@@ -0,0 +1,20 @@
+Copyright (c) 2012-2016 The go-diff Authors. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+

+ 1344 - 0
vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go

@@ -0,0 +1,1344 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"html"
+	"math"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// Operation defines the operation of a diff item.
+type Operation int8
+
+const (
+	// DiffDelete item represents a delete diff.
+	DiffDelete Operation = -1
+	// DiffInsert item represents an insert diff.
+	DiffInsert Operation = 1
+	// DiffEqual item represents an equal diff.
+	DiffEqual Operation = 0
+)
+
+// Diff represents one diff operation
+type Diff struct {
+	Type Operation
+	Text string
+}
+
+func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff {
+	return append(slice[:index], append(elements, slice[index+amount:]...)...)
+}
+
+// DiffMain finds the differences between two texts.
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
+func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff {
+	return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines)
+}
+
+// DiffMainRunes finds the differences between two rune sequences.
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
+func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff {
+	var deadline time.Time
+	if dmp.DiffTimeout > 0 {
+		deadline = time.Now().Add(dmp.DiffTimeout)
+	}
+	return dmp.diffMainRunes(text1, text2, checklines, deadline)
+}
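A minimal usage sketch for `DiffMain`; note that `New()` and the `DiffMatchPatch` struct are defined in other files of this vendored package, not in this hunk:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	diffs := dmp.DiffMain("the quick brown fox", "the quick red fox", false)
	for _, d := range diffs {
		// Type is -1 (delete), 0 (equal) or 1 (insert), per the constants above.
		fmt.Printf("%d %q\n", d.Type, d.Text)
	}
}
```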
+
+func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff {
+	if runesEqual(text1, text2) {
+		var diffs []Diff
+		if len(text1) > 0 {
+			diffs = append(diffs, Diff{DiffEqual, string(text1)})
+		}
+		return diffs
+	}
+	// Trim off common prefix (speedup).
+	commonlength := commonPrefixLength(text1, text2)
+	commonprefix := text1[:commonlength]
+	text1 = text1[commonlength:]
+	text2 = text2[commonlength:]
+
+	// Trim off common suffix (speedup).
+	commonlength = commonSuffixLength(text1, text2)
+	commonsuffix := text1[len(text1)-commonlength:]
+	text1 = text1[:len(text1)-commonlength]
+	text2 = text2[:len(text2)-commonlength]
+
+	// Compute the diff on the middle block.
+	diffs := dmp.diffCompute(text1, text2, checklines, deadline)
+
+	// Restore the prefix and suffix.
+	if len(commonprefix) != 0 {
+		diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...)
+	}
+	if len(commonsuffix) != 0 {
+		diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)})
+	}
+
+	return dmp.DiffCleanupMerge(diffs)
+}
+
+// diffCompute finds the differences between two rune slices.  Assumes that the texts do not have any common prefix or suffix.
+func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff {
+	diffs := []Diff{}
+	if len(text1) == 0 {
+		// Just add some text (speedup).
+		return append(diffs, Diff{DiffInsert, string(text2)})
+	} else if len(text2) == 0 {
+		// Just delete some text (speedup).
+		return append(diffs, Diff{DiffDelete, string(text1)})
+	}
+
+	var longtext, shorttext []rune
+	if len(text1) > len(text2) {
+		longtext = text1
+		shorttext = text2
+	} else {
+		longtext = text2
+		shorttext = text1
+	}
+
+	if i := runesIndex(longtext, shorttext); i != -1 {
+		op := DiffInsert
+		// Swap insertions for deletions if diff is reversed.
+		if len(text1) > len(text2) {
+			op = DiffDelete
+		}
+		// Shorter text is inside the longer text (speedup).
+		return []Diff{
+			Diff{op, string(longtext[:i])},
+			Diff{DiffEqual, string(shorttext)},
+			Diff{op, string(longtext[i+len(shorttext):])},
+		}
+	} else if len(shorttext) == 1 {
+		// Single character string.
+		// After the previous speedup, the character can't be an equality.
+		return []Diff{
+			Diff{DiffDelete, string(text1)},
+			Diff{DiffInsert, string(text2)},
+		}
+		// Check to see if the problem can be split in two.
+	} else if hm := dmp.diffHalfMatch(text1, text2); hm != nil {
+		// A half-match was found, sort out the return data.
+		text1A := hm[0]
+		text1B := hm[1]
+		text2A := hm[2]
+		text2B := hm[3]
+		midCommon := hm[4]
+		// Send both pairs off for separate processing.
+		diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline)
+		diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline)
+		// Merge the results.
+		return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...)
+	} else if checklines && len(text1) > 100 && len(text2) > 100 {
+		return dmp.diffLineMode(text1, text2, deadline)
+	}
+	return dmp.diffBisect(text1, text2, deadline)
+}
+
+// diffLineMode does a quick line-level diff on both []runes, then rediffs the parts for greater accuracy. This speedup can produce non-minimal diffs.
+func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff {
+	// Scan the text on a line-by-line basis first.
+	text1, text2, linearray := dmp.diffLinesToRunes(text1, text2)
+
+	diffs := dmp.diffMainRunes(text1, text2, false, deadline)
+
+	// Convert the diff back to original text.
+	diffs = dmp.DiffCharsToLines(diffs, linearray)
+	// Eliminate freak matches (e.g. blank lines)
+	diffs = dmp.DiffCleanupSemantic(diffs)
+
+	// Rediff any replacement blocks, this time character-by-character.
+	// Add a dummy entry at the end.
+	diffs = append(diffs, Diff{DiffEqual, ""})
+
+	pointer := 0
+	countDelete := 0
+	countInsert := 0
+
+	// NOTE: Rune slices are slower than using strings in this case.
+	textDelete := ""
+	textInsert := ""
+
+	for pointer < len(diffs) {
+		switch diffs[pointer].Type {
+		case DiffInsert:
+			countInsert++
+			textInsert += diffs[pointer].Text
+		case DiffDelete:
+			countDelete++
+			textDelete += diffs[pointer].Text
+		case DiffEqual:
+			// Upon reaching an equality, check for prior redundancies.
+			if countDelete >= 1 && countInsert >= 1 {
+				// Delete the offending records and add the merged ones.
+				diffs = splice(diffs, pointer-countDelete-countInsert,
+					countDelete+countInsert)
+
+				pointer = pointer - countDelete - countInsert
+				a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline)
+				for j := len(a) - 1; j >= 0; j-- {
+					diffs = splice(diffs, pointer, 0, a[j])
+				}
+				pointer = pointer + len(a)
+			}
+
+			countInsert = 0
+			countDelete = 0
+			textDelete = ""
+			textInsert = ""
+		}
+		pointer++
+	}
+
+	return diffs[:len(diffs)-1] // Remove the dummy entry at the end.
+}
+
+// DiffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff.
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
+// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
+func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff {
+	// Unused in this code, but retained for interface compatibility.
+	return dmp.diffBisect([]rune(text1), []rune(text2), deadline)
+}
+
+// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff.
+// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations.
+func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff {
+	// Cache the text lengths to prevent multiple calls.
+	runes1Len, runes2Len := len(runes1), len(runes2)
+
+	maxD := (runes1Len + runes2Len + 1) / 2
+	vOffset := maxD
+	vLength := 2 * maxD
+
+	v1 := make([]int, vLength)
+	v2 := make([]int, vLength)
+	for i := range v1 {
+		v1[i] = -1
+		v2[i] = -1
+	}
+	v1[vOffset+1] = 0
+	v2[vOffset+1] = 0
+
+	delta := runes1Len - runes2Len
+	// If the total number of characters is odd, then the front path will collide with the reverse path.
+	front := (delta%2 != 0)
+	// Offsets for start and end of k loop. Prevents mapping of space beyond the grid.
+	k1start := 0
+	k1end := 0
+	k2start := 0
+	k2end := 0
+	for d := 0; d < maxD; d++ {
+		// Bail out if deadline is reached.
+		if !deadline.IsZero() && time.Now().After(deadline) {
+			break
+		}
+
+		// Walk the front path one step.
+		for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 {
+			k1Offset := vOffset + k1
+			var x1 int
+
+			if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) {
+				x1 = v1[k1Offset+1]
+			} else {
+				x1 = v1[k1Offset-1] + 1
+			}
+
+			y1 := x1 - k1
+			for x1 < runes1Len && y1 < runes2Len {
+				if runes1[x1] != runes2[y1] {
+					break
+				}
+				x1++
+				y1++
+			}
+			v1[k1Offset] = x1
+			if x1 > runes1Len {
+				// Ran off the right of the graph.
+				k1end += 2
+			} else if y1 > runes2Len {
+				// Ran off the bottom of the graph.
+				k1start += 2
+			} else if front {
+				k2Offset := vOffset + delta - k1
+				if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 {
+					// Mirror x2 onto top-left coordinate system.
+					x2 := runes1Len - v2[k2Offset]
+					if x1 >= x2 {
+						// Overlap detected.
+						return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline)
+					}
+				}
+			}
+		}
+		// Walk the reverse path one step.
+		for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 {
+			k2Offset := vOffset + k2
+			var x2 int
+			if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) {
+				x2 = v2[k2Offset+1]
+			} else {
+				x2 = v2[k2Offset-1] + 1
+			}
+			var y2 = x2 - k2
+			for x2 < runes1Len && y2 < runes2Len {
+				if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] {
+					break
+				}
+				x2++
+				y2++
+			}
+			v2[k2Offset] = x2
+			if x2 > runes1Len {
+				// Ran off the left of the graph.
+				k2end += 2
+			} else if y2 > runes2Len {
+				// Ran off the top of the graph.
+				k2start += 2
+			} else if !front {
+				k1Offset := vOffset + delta - k2
+				if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 {
+					x1 := v1[k1Offset]
+					y1 := vOffset + x1 - k1Offset
+					// Mirror x2 onto top-left coordinate system.
+					x2 = runes1Len - x2
+					if x1 >= x2 {
+						// Overlap detected.
+						return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline)
+					}
+				}
+			}
+		}
+	}
+	// Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all.
+	return []Diff{
+		Diff{DiffDelete, string(runes1)},
+		Diff{DiffInsert, string(runes2)},
+	}
+}
+
+func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int,
+	deadline time.Time) []Diff {
+	runes1a := runes1[:x]
+	runes2a := runes2[:y]
+	runes1b := runes1[x:]
+	runes2b := runes2[y:]
+
+	// Compute both diffs serially.
+	diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline)
+	diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline)
+
+	return append(diffs, diffsb...)
+}
+
+// DiffLinesToChars splits two texts into a list of strings, and reduces the texts to a string of hashes where each Unicode character represents one line.
+// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes.
+func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) {
+	chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2)
+	return string(chars1), string(chars2), lineArray
+}
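In practice the line-mode recipe described above looks like the sketch below (the helper name is illustrative; `DiffCharsToLines` is defined further down in this file). The rune-based variant, `DiffLinesToRunes` followed by `DiffMainRunes`, has the same shape and skips one string conversion:

```go
package diffutil

import "github.com/sergi/go-diff/diffmatchpatch"

// lineDiff maps each line to a rune, diffs the compact rune strings,
// then rehydrates the runes back into full lines of text.
func lineDiff(text1, text2 string) []diffmatchpatch.Diff {
	dmp := diffmatchpatch.New()
	chars1, chars2, lineArray := dmp.DiffLinesToChars(text1, text2)
	diffs := dmp.DiffMain(chars1, chars2, false)
	return dmp.DiffCharsToLines(diffs, lineArray)
}
```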
+
+// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line.
+func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) {
+	// '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character.
+	lineArray := []string{""}    // e.g. lineArray[4] == 'Hello\n'
+	lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4
+
+	chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash)
+	chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash)
+
+	return chars1, chars2, lineArray
+}
+
+func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) {
+	return dmp.DiffLinesToRunes(string(text1), string(text2))
+}
+
+// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line.
+// We use strings instead of []runes as input mainly because you can't use []rune as a map key.
+func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune {
+	// Walk the text, pulling out a substring for each line. text.split('\n') would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect.
+	lineStart := 0
+	lineEnd := -1
+	runes := []rune{}
+
+	for lineEnd < len(text)-1 {
+		lineEnd = indexOf(text, "\n", lineStart)
+
+		if lineEnd == -1 {
+			lineEnd = len(text) - 1
+		}
+
+		line := text[lineStart : lineEnd+1]
+		lineStart = lineEnd + 1
+		lineValue, ok := lineHash[line]
+
+		if ok {
+			runes = append(runes, rune(lineValue))
+		} else {
+			*lineArray = append(*lineArray, line)
+			lineHash[line] = len(*lineArray) - 1
+			runes = append(runes, rune(len(*lineArray)-1))
+		}
+	}
+
+	return runes
+}
+
+// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text.
+func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff {
+	hydrated := make([]Diff, 0, len(diffs))
+	for _, aDiff := range diffs {
+		chars := aDiff.Text
+		text := make([]string, len(chars))
+
+		for i, r := range chars {
+			text[i] = lineArray[r]
+		}
+
+		aDiff.Text = strings.Join(text, "")
+		hydrated = append(hydrated, aDiff)
+	}
+	return hydrated
+}
+
+// DiffCommonPrefix determines the common prefix length of two strings.
+func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int {
+	// Unused in this code, but retained for interface compatibility.
+	return commonPrefixLength([]rune(text1), []rune(text2))
+}
+
+// DiffCommonSuffix determines the common suffix length of two strings.
+func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int {
+	// Unused in this code, but retained for interface compatibility.
+	return commonSuffixLength([]rune(text1), []rune(text2))
+}
+
+// commonPrefixLength returns the length of the common prefix of two rune slices.
+func commonPrefixLength(text1, text2 []rune) int {
+	short, long := text1, text2
+	if len(short) > len(long) {
+		short, long = long, short
+	}
+	for i, r := range short {
+		if r != long[i] {
+			return i
+		}
+	}
+	return len(short)
+}
+
+// commonSuffixLength returns the length of the common suffix of two rune slices.
+func commonSuffixLength(text1, text2 []rune) int {
+	n := min(len(text1), len(text2))
+	for i := 0; i < n; i++ {
+		if text1[len(text1)-i-1] != text2[len(text2)-i-1] {
+			return i
+		}
+	}
+	return n
+
+	// TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54
+	// Binary search.
+	// Performance analysis: http://neil.fraser.name/news/2007/10/09/
+	/*
+	   pointermin := 0
+	   pointermax := math.Min(len(text1), len(text2))
+	   pointermid := pointermax
+	   pointerend := 0
+	   for pointermin < pointermid {
+	       if text1[len(text1)-pointermid:len(text1)-pointerend] ==
+	           text2[len(text2)-pointermid:len(text2)-pointerend] {
+	           pointermin = pointermid
+	           pointerend = pointermin
+	       } else {
+	           pointermax = pointermid
+	       }
+	       pointermid = math.Floor((pointermax-pointermin)/2 + pointermin)
+	   }
+	   return pointermid
+	*/
+}
+
+// DiffCommonOverlap determines if the suffix of one string is the prefix of another.
+func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int {
+	// Cache the text lengths to prevent multiple calls.
+	text1Length := len(text1)
+	text2Length := len(text2)
+	// Eliminate the null case.
+	if text1Length == 0 || text2Length == 0 {
+		return 0
+	}
+	// Truncate the longer string.
+	if text1Length > text2Length {
+		text1 = text1[text1Length-text2Length:]
+	} else if text1Length < text2Length {
+		text2 = text2[0:text1Length]
+	}
+	textLength := int(math.Min(float64(text1Length), float64(text2Length)))
+	// Quick check for the worst case.
+	if text1 == text2 {
+		return textLength
+	}
+
+	// Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/
+	best := 0
+	length := 1
+	for {
+		pattern := text1[textLength-length:]
+		found := strings.Index(text2, pattern)
+		if found == -1 {
+			break
+		}
+		length += found
+		if found == 0 || text1[textLength-length:] == text2[0:length] {
+			best = length
+			length++
+		}
+	}
+
+	return best
+}
+
+// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs.
+func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string {
+	// Unused in this code, but retained for interface compatibility.
+	runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2))
+	if runeSlices == nil {
+		return nil
+	}
+
+	result := make([]string, len(runeSlices))
+	for i, r := range runeSlices {
+		result[i] = string(r)
+	}
+	return result
+}
+
+func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune {
+	if dmp.DiffTimeout <= 0 {
+		// Don't risk returning a non-optimal diff if we have unlimited time.
+		return nil
+	}
+
+	var longtext, shorttext []rune
+	if len(text1) > len(text2) {
+		longtext = text1
+		shorttext = text2
+	} else {
+		longtext = text2
+		shorttext = text1
+	}
+
+	if len(longtext) < 4 || len(shorttext)*2 < len(longtext) {
+		return nil // Pointless.
+	}
+
+	// First check if the second quarter is the seed for a half-match.
+	hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4))
+
+	// Check again based on the third quarter.
+	hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2))
+
+	hm := [][]rune{}
+	if hm1 == nil && hm2 == nil {
+		return nil
+	} else if hm2 == nil {
+		hm = hm1
+	} else if hm1 == nil {
+		hm = hm2
+	} else {
+		// Both matched.  Select the longest.
+		if len(hm1[4]) > len(hm2[4]) {
+			hm = hm1
+		} else {
+			hm = hm2
+		}
+	}
+
+	// A half-match was found, sort out the return data.
+	if len(text1) > len(text2) {
+		return hm
+	}
+
+	return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]}
+}
+
+// diffHalfMatchI checks whether a substring of shorttext exists within longtext such that the substring is at least half the length of longtext.
+// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or nil if there was no match.
+func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune {
+	var bestCommonA []rune
+	var bestCommonB []rune
+	var bestCommonLen int
+	var bestLongtextA []rune
+	var bestLongtextB []rune
+	var bestShorttextA []rune
+	var bestShorttextB []rune
+
+	// Start with a 1/4 length substring at position i as a seed.
+	seed := l[i : i+len(l)/4]
+
+	for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) {
+		prefixLength := commonPrefixLength(l[i:], s[j:])
+		suffixLength := commonSuffixLength(l[:i], s[:j])
+
+		if bestCommonLen < suffixLength+prefixLength {
+			bestCommonA = s[j-suffixLength : j]
+			bestCommonB = s[j : j+prefixLength]
+			bestCommonLen = len(bestCommonA) + len(bestCommonB)
+			bestLongtextA = l[:i-suffixLength]
+			bestLongtextB = l[i+prefixLength:]
+			bestShorttextA = s[:j-suffixLength]
+			bestShorttextB = s[j+prefixLength:]
+		}
+	}
+
+	if bestCommonLen*2 < len(l) {
+		return nil
+	}
+
+	return [][]rune{
+		bestLongtextA,
+		bestLongtextB,
+		bestShorttextA,
+		bestShorttextB,
+		append(bestCommonA, bestCommonB...),
+	}
+}
+
+// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities.
+func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff {
+	changes := false
+	// Stack of indices where equalities are found.
+	type equality struct {
+		data int
+		next *equality
+	}
+	var equalities *equality
+
+	var lastequality string
+	// Always equal to the text of the last equality pushed onto the equalities stack.
+	var pointer int // Index of current position.
+	// Number of characters that changed prior to the equality.
+	var lengthInsertions1, lengthDeletions1 int
+	// Number of characters that changed after the equality.
+	var lengthInsertions2, lengthDeletions2 int
+
+	for pointer < len(diffs) {
+		if diffs[pointer].Type == DiffEqual {
+			// Equality found.
+
+			equalities = &equality{
+				data: pointer,
+				next: equalities,
+			}
+			lengthInsertions1 = lengthInsertions2
+			lengthDeletions1 = lengthDeletions2
+			lengthInsertions2 = 0
+			lengthDeletions2 = 0
+			lastequality = diffs[pointer].Text
+		} else {
+			// An insertion or deletion.
+
+			if diffs[pointer].Type == DiffInsert {
+				lengthInsertions2 += len(diffs[pointer].Text)
+			} else {
+				lengthDeletions2 += len(diffs[pointer].Text)
+			}
+			// Eliminate an equality that is smaller or equal to the edits on both sides of it.
+			difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1)))
+			difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2)))
+			if len(lastequality) > 0 &&
+				(len(lastequality) <= difference1) &&
+				(len(lastequality) <= difference2) {
+				// Duplicate record.
+				insPoint := equalities.data
+				diffs = append(
+					diffs[:insPoint],
+					append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...)
+
+				// Change second copy to insert.
+				diffs[insPoint+1].Type = DiffInsert
+				// Throw away the equality we just deleted.
+				equalities = equalities.next
+
+				if equalities != nil {
+					equalities = equalities.next
+				}
+				if equalities != nil {
+					pointer = equalities.data
+				} else {
+					pointer = -1
+				}
+
+				lengthInsertions1 = 0 // Reset the counters.
+				lengthDeletions1 = 0
+				lengthInsertions2 = 0
+				lengthDeletions2 = 0
+				lastequality = ""
+				changes = true
+			}
+		}
+		pointer++
+	}
+
+	// Normalize the diff.
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+	diffs = dmp.DiffCleanupSemanticLossless(diffs)
+	// Find any overlaps between deletions and insertions.
+	// e.g: <del>abcxxx</del><ins>xxxdef</ins>
+	//   -> <del>abc</del>xxx<ins>def</ins>
+	// e.g: <del>xxxabc</del><ins>defxxx</ins>
+	//   -> <ins>def</ins>xxx<del>abc</del>
+	// Only extract an overlap if it is as big as the edit ahead or behind it.
+	pointer = 1
+	for pointer < len(diffs) {
+		if diffs[pointer-1].Type == DiffDelete &&
+			diffs[pointer].Type == DiffInsert {
+			deletion := diffs[pointer-1].Text
+			insertion := diffs[pointer].Text
+			overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion)
+			overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion)
+			if overlapLength1 >= overlapLength2 {
+				if float64(overlapLength1) >= float64(len(deletion))/2 ||
+					float64(overlapLength1) >= float64(len(insertion))/2 {
+
+					// Overlap found. Insert an equality and trim the surrounding edits.
+					diffs = append(
+						diffs[:pointer],
+						append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...)
+
+					diffs[pointer-1].Text =
+						deletion[0 : len(deletion)-overlapLength1]
+					diffs[pointer+1].Text = insertion[overlapLength1:]
+					pointer++
+				}
+			} else {
+				if float64(overlapLength2) >= float64(len(deletion))/2 ||
+					float64(overlapLength2) >= float64(len(insertion))/2 {
+					// Reverse overlap found. Insert an equality and swap and trim the surrounding edits.
+					overlap := Diff{DiffEqual, deletion[:overlapLength2]}
+					diffs = append(
+						diffs[:pointer],
+						append([]Diff{overlap}, diffs[pointer:]...)...)
+
+					diffs[pointer-1].Type = DiffInsert
+					diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2]
+					diffs[pointer+1].Type = DiffDelete
+					diffs[pointer+1].Text = deletion[overlapLength2:]
+					pointer++
+				}
+			}
+			pointer++
+		}
+		pointer++
+	}
+
+	return diffs
+}
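
The overlap extraction described in the comments above can be seen with hand-built diffs; a small sketch with deterministic input (not derived from DiffMain):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "abcxxx"},
		{Type: diffmatchpatch.DiffInsert, Text: "xxxdef"},
	}
	// The shared "xxx" is pulled out into an equality:
	// <del>abc</del>xxx<ins>def</ins>
	for _, d := range dmp.DiffCleanupSemantic(diffs) {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}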
+
+// Define some regex patterns for matching boundaries.
+var (
+	nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`)
+	whitespaceRegex      = regexp.MustCompile(`\s`)
+	linebreakRegex       = regexp.MustCompile(`[\r\n]`)
+	blanklineEndRegex    = regexp.MustCompile(`\n\r?\n$`)
+	blanklineStartRegex  = regexp.MustCompile(`^\r?\n\r?\n`)
+)
+
+// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries.
+// Scores range from 6 (best) to 0 (worst). It is a standalone helper and does not reference any external state.
+func diffCleanupSemanticScore(one, two string) int {
+	if len(one) == 0 || len(two) == 0 {
+		// Edges are the best.
+		return 6
+	}
+
+	// Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'.  Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity.
+	rune1, _ := utf8.DecodeLastRuneInString(one)
+	rune2, _ := utf8.DecodeRuneInString(two)
+	char1 := string(rune1)
+	char2 := string(rune2)
+
+	nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1)
+	nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2)
+	whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1)
+	whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2)
+	lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1)
+	lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2)
+	blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one)
+	blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two)
+
+	if blankLine1 || blankLine2 {
+		// Five points for blank lines.
+		return 5
+	} else if lineBreak1 || lineBreak2 {
+		// Four points for line breaks.
+		return 4
+	} else if nonAlphaNumeric1 && !whitespace1 && whitespace2 {
+		// Three points for end of sentences.
+		return 3
+	} else if whitespace1 || whitespace2 {
+		// Two points for whitespace.
+		return 2
+	} else if nonAlphaNumeric1 || nonAlphaNumeric2 {
+		// One point for non-alphanumeric.
+		return 1
+	}
+	return 0
+}
+
+// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary.
+// E.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
+func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff {
+	pointer := 1
+
+	// Intentionally ignore the first and last element (don't need checking).
+	for pointer < len(diffs)-1 {
+		if diffs[pointer-1].Type == DiffEqual &&
+			diffs[pointer+1].Type == DiffEqual {
+
+			// This is a single edit surrounded by equalities.
+			equality1 := diffs[pointer-1].Text
+			edit := diffs[pointer].Text
+			equality2 := diffs[pointer+1].Text
+
+			// First, shift the edit as far left as possible.
+			commonOffset := dmp.DiffCommonSuffix(equality1, edit)
+			if commonOffset > 0 {
+				commonString := edit[len(edit)-commonOffset:]
+				equality1 = equality1[0 : len(equality1)-commonOffset]
+				edit = commonString + edit[:len(edit)-commonOffset]
+				equality2 = commonString + equality2
+			}
+
+			// Second, step character by character right, looking for the best fit.
+			bestEquality1 := equality1
+			bestEdit := edit
+			bestEquality2 := equality2
+			bestScore := diffCleanupSemanticScore(equality1, edit) +
+				diffCleanupSemanticScore(edit, equality2)
+
+			for len(edit) != 0 && len(equality2) != 0 {
+				_, sz := utf8.DecodeRuneInString(edit)
+				if len(equality2) < sz || edit[:sz] != equality2[:sz] {
+					break
+				}
+				equality1 += edit[:sz]
+				edit = edit[sz:] + equality2[:sz]
+				equality2 = equality2[sz:]
+				score := diffCleanupSemanticScore(equality1, edit) +
+					diffCleanupSemanticScore(edit, equality2)
+				// The >= encourages trailing rather than leading whitespace on edits.
+				if score >= bestScore {
+					bestScore = score
+					bestEquality1 = equality1
+					bestEdit = edit
+					bestEquality2 = equality2
+				}
+			}
+
+			if diffs[pointer-1].Text != bestEquality1 {
+				// We have an improvement, save it back to the diff.
+				if len(bestEquality1) != 0 {
+					diffs[pointer-1].Text = bestEquality1
+				} else {
+					diffs = splice(diffs, pointer-1, 1)
+					pointer--
+				}
+
+				diffs[pointer].Text = bestEdit
+				if len(bestEquality2) != 0 {
+					diffs[pointer+1].Text = bestEquality2
+				} else {
+					diffs = append(diffs[:pointer+1], diffs[pointer+2:]...)
+					pointer--
+				}
+			}
+		}
+		pointer++
+	}
+
+	return diffs
+}
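
The doc comment's example, reproduced as a runnable sketch with hand-built diffs (again assuming the vendored import path):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffEqual, Text: "The c"},
		{Type: diffmatchpatch.DiffInsert, Text: "at c"},
		{Type: diffmatchpatch.DiffEqual, Text: "ame."},
	}
	// The insertion is shifted onto the word boundary:
	// "The " / +"cat " / "came."
	for _, d := range dmp.DiffCleanupSemanticLossless(diffs) {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}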
+
+// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities.
+func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff {
+	changes := false
+	// Stack of indices where equalities are found.
+	type equality struct {
+		data int
+		next *equality
+	}
+	var equalities *equality
+	// Always equal to the text of the last equality pushed onto the equalities stack.
+	lastequality := ""
+	pointer := 0 // Index of current position.
+	// Is there an insertion operation before the last equality.
+	preIns := false
+	// Is there a deletion operation before the last equality.
+	preDel := false
+	// Is there an insertion operation after the last equality.
+	postIns := false
+	// Is there a deletion operation after the last equality.
+	postDel := false
+	for pointer < len(diffs) {
+		if diffs[pointer].Type == DiffEqual { // Equality found.
+			if len(diffs[pointer].Text) < dmp.DiffEditCost &&
+				(postIns || postDel) {
+				// Candidate found.
+				equalities = &equality{
+					data: pointer,
+					next: equalities,
+				}
+				preIns = postIns
+				preDel = postDel
+				lastequality = diffs[pointer].Text
+			} else {
+				// Not a candidate, and can never become one.
+				equalities = nil
+				lastequality = ""
+			}
+			postIns = false
+			postDel = false
+		} else { // An insertion or deletion.
+			if diffs[pointer].Type == DiffDelete {
+				postDel = true
+			} else {
+				postIns = true
+			}
+
+			// Five types to be split:
+			// <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
+			// <ins>A</ins>X<ins>C</ins><del>D</del>
+			// <ins>A</ins><del>B</del>X<ins>C</ins>
+			// <del>A</del>X<ins>C</ins><del>D</del>
+			// <ins>A</ins><del>B</del>X<del>C</del>
+			var sumPres int
+			if preIns {
+				sumPres++
+			}
+			if preDel {
+				sumPres++
+			}
+			if postIns {
+				sumPres++
+			}
+			if postDel {
+				sumPres++
+			}
+			if len(lastequality) > 0 &&
+				((preIns && preDel && postIns && postDel) ||
+					((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) {
+
+				insPoint := equalities.data
+
+				// Duplicate record.
+				diffs = append(diffs[:insPoint],
+					append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...)
+
+				// Change second copy to insert.
+				diffs[insPoint+1].Type = DiffInsert
+				// Throw away the equality we just deleted.
+				equalities = equalities.next
+				lastequality = ""
+
+				if preIns && preDel {
+					// No changes made which could affect previous entry, keep going.
+					postIns = true
+					postDel = true
+					equalities = nil
+				} else {
+					if equalities != nil {
+						equalities = equalities.next
+					}
+					if equalities != nil {
+						pointer = equalities.data
+					} else {
+						pointer = -1
+					}
+					postIns = false
+					postDel = false
+				}
+				changes = true
+			}
+		}
+		pointer++
+	}
+
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+
+	return diffs
+}
+
+// DiffCleanupMerge reorders and merges like edit sections. Merge equalities.
+// Any edit section can move as long as it doesn't cross an equality.
+func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff {
+	// Add a dummy entry at the end.
+	diffs = append(diffs, Diff{DiffEqual, ""})
+	pointer := 0
+	countDelete := 0
+	countInsert := 0
+	commonlength := 0
+	textDelete := []rune(nil)
+	textInsert := []rune(nil)
+
+	for pointer < len(diffs) {
+		switch diffs[pointer].Type {
+		case DiffInsert:
+			countInsert++
+			textInsert = append(textInsert, []rune(diffs[pointer].Text)...)
+			pointer++
+			break
+		case DiffDelete:
+			countDelete++
+			textDelete = append(textDelete, []rune(diffs[pointer].Text)...)
+			pointer++
+			break
+		case DiffEqual:
+			// Upon reaching an equality, check for prior redundancies.
+			if countDelete+countInsert > 1 {
+				if countDelete != 0 && countInsert != 0 {
+					// Factor out any common prefixes.
+					commonlength = commonPrefixLength(textInsert, textDelete)
+					if commonlength != 0 {
+						x := pointer - countDelete - countInsert
+						if x > 0 && diffs[x-1].Type == DiffEqual {
+							diffs[x-1].Text += string(textInsert[:commonlength])
+						} else {
+							diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...)
+							pointer++
+						}
+						textInsert = textInsert[commonlength:]
+						textDelete = textDelete[commonlength:]
+					}
+					// Factor out any common suffixes.
+					commonlength = commonSuffixLength(textInsert, textDelete)
+					if commonlength != 0 {
+						insertIndex := len(textInsert) - commonlength
+						deleteIndex := len(textDelete) - commonlength
+						diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text
+						textInsert = textInsert[:insertIndex]
+						textDelete = textDelete[:deleteIndex]
+					}
+				}
+				// Delete the offending records and add the merged ones.
+				if countDelete == 0 {
+					diffs = splice(diffs, pointer-countInsert,
+						countDelete+countInsert,
+						Diff{DiffInsert, string(textInsert)})
+				} else if countInsert == 0 {
+					diffs = splice(diffs, pointer-countDelete,
+						countDelete+countInsert,
+						Diff{DiffDelete, string(textDelete)})
+				} else {
+					diffs = splice(diffs, pointer-countDelete-countInsert,
+						countDelete+countInsert,
+						Diff{DiffDelete, string(textDelete)},
+						Diff{DiffInsert, string(textInsert)})
+				}
+
+				pointer = pointer - countDelete - countInsert + 1
+				if countDelete != 0 {
+					pointer++
+				}
+				if countInsert != 0 {
+					pointer++
+				}
+			} else if pointer != 0 && diffs[pointer-1].Type == DiffEqual {
+				// Merge this equality with the previous one.
+				diffs[pointer-1].Text += diffs[pointer].Text
+				diffs = append(diffs[:pointer], diffs[pointer+1:]...)
+			} else {
+				pointer++
+			}
+			countInsert = 0
+			countDelete = 0
+			textDelete = nil
+			textInsert = nil
+			break
+		}
+	}
+
+	if len(diffs[len(diffs)-1].Text) == 0 {
+		diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end.
+	}
+
+	// Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
+	changes := false
+	pointer = 1
+	// Intentionally ignore the first and last element (don't need checking).
+	for pointer < (len(diffs) - 1) {
+		if diffs[pointer-1].Type == DiffEqual &&
+			diffs[pointer+1].Type == DiffEqual {
+			// This is a single edit surrounded by equalities.
+			if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) {
+				// Shift the edit over the previous equality.
+				diffs[pointer].Text = diffs[pointer-1].Text +
+					diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)]
+				diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text
+				diffs = splice(diffs, pointer-1, 1)
+				changes = true
+			} else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) {
+				// Shift the edit over the next equality.
+				diffs[pointer-1].Text += diffs[pointer+1].Text
+				diffs[pointer].Text =
+					diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text
+				diffs = splice(diffs, pointer+1, 1)
+				changes = true
+			}
+		}
+		pointer++
+	}
+
+	// If shifts were made, the diff needs reordering and another shift sweep.
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+
+	return diffs
+}
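
A sketch of how interleaved insertions and deletions collapse under DiffCleanupMerge, with hand-built input so the expected shape is easy to follow:

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffEqual, Text: "a"},
		{Type: diffmatchpatch.DiffDelete, Text: "b"},
		{Type: diffmatchpatch.DiffInsert, Text: "c"},
		{Type: diffmatchpatch.DiffDelete, Text: "d"},
		{Type: diffmatchpatch.DiffInsert, Text: "e"},
		{Type: diffmatchpatch.DiffEqual, Text: "f"},
	}
	// Adjacent deletions and insertions are merged:
	// "a" / -"bd" / +"ce" / "f"
	for _, d := range dmp.DiffCleanupMerge(diffs) {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}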
+
+// DiffXIndex returns the equivalent location in s2.
+func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int {
+	chars1 := 0
+	chars2 := 0
+	lastChars1 := 0
+	lastChars2 := 0
+	lastDiff := Diff{}
+	for i := 0; i < len(diffs); i++ {
+		aDiff := diffs[i]
+		if aDiff.Type != DiffInsert {
+			// Equality or deletion.
+			chars1 += len(aDiff.Text)
+		}
+		if aDiff.Type != DiffDelete {
+			// Equality or insertion.
+			chars2 += len(aDiff.Text)
+		}
+		if chars1 > loc {
+			// Overshot the location.
+			lastDiff = aDiff
+			break
+		}
+		lastChars1 = chars1
+		lastChars2 = chars2
+	}
+	if lastDiff.Type == DiffDelete {
+		// The location was deleted.
+		return lastChars2
+	}
+	// Add the remaining character length.
+	return lastChars2 + (loc - lastChars1)
+}
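
A short sketch of DiffXIndex mapping a position in the source text to the destination text ("The cat" -> "The big cat"):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffEqual, Text: "The "},
		{Type: diffmatchpatch.DiffInsert, Text: "big "},
		{Type: diffmatchpatch.DiffEqual, Text: "cat"},
	}
	// Index 4 ("c" in "The cat") maps to index 8 in "The big cat".
	fmt.Println(dmp.DiffXIndex(diffs, 4)) // 8
}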
+
+// DiffPrettyHtml converts a []Diff into a pretty HTML report.
+// It is intended as an example from which to write one's own display functions.
+func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string {
+	var buff bytes.Buffer
+	for _, diff := range diffs {
+		text := strings.Replace(html.EscapeString(diff.Text), "\n", "&para;<br>", -1)
+		switch diff.Type {
+		case DiffInsert:
+			_, _ = buff.WriteString("<ins style=\"background:#e6ffe6;\">")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</ins>")
+		case DiffDelete:
+			_, _ = buff.WriteString("<del style=\"background:#ffe6e6;\">")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</del>")
+		case DiffEqual:
+			_, _ = buff.WriteString("<span>")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</span>")
+		}
+	}
+	return buff.String()
+}
+
+// DiffPrettyText converts a []Diff into a colored text report.
+func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string {
+	var buff bytes.Buffer
+	for _, diff := range diffs {
+		text := diff.Text
+
+		switch diff.Type {
+		case DiffInsert:
+			_, _ = buff.WriteString("\x1b[32m")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("\x1b[0m")
+		case DiffDelete:
+			_, _ = buff.WriteString("\x1b[31m")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("\x1b[0m")
+		case DiffEqual:
+			_, _ = buff.WriteString(text)
+		}
+	}
+
+	return buff.String()
+}
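
Both pretty-printers are display helpers; a minimal sketch driving them from DiffMain (defined earlier in this vendored package):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := dmp.DiffMain("Hello World.", "Goodbye World.", false)
	fmt.Println(dmp.DiffPrettyText(diffs)) // ANSI-colored terminal output
	fmt.Println(dmp.DiffPrettyHtml(diffs)) // <ins>/<del>/<span> markup
}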
+
+// DiffText1 computes and returns the source text (all equalities and deletions).
+func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string {
+	var text bytes.Buffer
+
+	for _, aDiff := range diffs {
+		if aDiff.Type != DiffInsert {
+			_, _ = text.WriteString(aDiff.Text)
+		}
+	}
+	return text.String()
+}
+
+// DiffText2 computes and returns the destination text (all equalities and insertions).
+func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string {
+	var text bytes.Buffer
+
+	for _, aDiff := range diffs {
+		if aDiff.Type != DiffDelete {
+			_, _ = text.WriteString(aDiff.Text)
+		}
+	}
+	return text.String()
+}
+
+// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters.
+func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int {
+	levenshtein := 0
+	insertions := 0
+	deletions := 0
+
+	for _, aDiff := range diffs {
+		switch aDiff.Type {
+		case DiffInsert:
+			insertions += len(aDiff.Text)
+		case DiffDelete:
+			deletions += len(aDiff.Text)
+		case DiffEqual:
+			// A deletion and an insertion is one substitution.
+			levenshtein += max(insertions, deletions)
+			insertions = 0
+			deletions = 0
+		}
+	}
+
+	levenshtein += max(insertions, deletions)
+	return levenshtein
+}
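
A sketch of the distance computation: a deletion and an insertion around the same equality count as one substitution of the longer text.

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "Hello"},
		{Type: diffmatchpatch.DiffInsert, Text: "Goodbye"},
		{Type: diffmatchpatch.DiffEqual, Text: " World."},
	}
	// max(len("Hello"), len("Goodbye")) = 7
	fmt.Println(dmp.DiffLevenshtein(diffs)) // 7
}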
+
+// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2.
+// E.g. =3\t-2\t+ing  -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated.  Inserted text is escaped using %xx notation.
+func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string {
+	var text bytes.Buffer
+	for _, aDiff := range diffs {
+		switch aDiff.Type {
+		case DiffInsert:
+			_, _ = text.WriteString("+")
+			_, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
+			_, _ = text.WriteString("\t")
+			break
+		case DiffDelete:
+			_, _ = text.WriteString("-")
+			_, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
+			_, _ = text.WriteString("\t")
+			break
+		case DiffEqual:
+			_, _ = text.WriteString("=")
+			_, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
+			_, _ = text.WriteString("\t")
+			break
+		}
+	}
+	delta := text.String()
+	if len(delta) != 0 {
+		// Strip off trailing tab character.
+		delta = delta[0 : utf8.RuneCountInString(delta)-1]
+		delta = unescaper.Replace(delta)
+	}
+	return delta
+}
+
+// DiffFromDelta, given the original text1 and an encoded string which describes the operations required to transform text1 into text2, computes the full diff.
+func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) {
+	i := 0
+	runes := []rune(text1)
+
+	for _, token := range strings.Split(delta, "\t") {
+		if len(token) == 0 {
+			// Blank tokens are ok (from a trailing \t).
+			continue
+		}
+
+		// Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality).
+		param := token[1:]
+
+		switch op := token[0]; op {
+		case '+':
+			// QueryUnescape would decode "+" to " ", so protect literal plus signs first.
+			param = strings.Replace(param, "+", "%2b", -1)
+			param, err = url.QueryUnescape(param)
+			if err != nil {
+				return nil, err
+			}
+			if !utf8.ValidString(param) {
+				return nil, fmt.Errorf("invalid UTF-8 token: %q", param)
+			}
+
+			diffs = append(diffs, Diff{DiffInsert, param})
+		case '=', '-':
+			n, err := strconv.ParseInt(param, 10, 0)
+			if err != nil {
+				return nil, err
+			} else if n < 0 {
+				return nil, errors.New("Negative number in DiffFromDelta: " + param)
+			}
+
+			i += int(n)
+			// Break out if we are out of bounds, go1.6 can't handle this very well
+			if i > len(runes) {
+				break
+			}
+			// Remember that string slicing is by byte - we want by rune here.
+			text := string(runes[i-int(n) : i])
+
+			if op == '=' {
+				diffs = append(diffs, Diff{DiffEqual, text})
+			} else {
+				diffs = append(diffs, Diff{DiffDelete, text})
+			}
+		default:
+			// Anything else is an error.
+			return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0]))
+		}
+	}
+
+	if i != len(runes) {
+		return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1))
+	}
+
+	return diffs, nil
+}
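
A round-trip sketch of the delta encoding; the exact delta string depends on how DiffMain splits the edit, so it is only printed here rather than asserted:

package main

import (
	"fmt"
	"log"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	text1 := "The quick brown fox"
	text2 := "The slow brown fox"

	diffs := dmp.DiffMain(text1, text2, false)
	delta := dmp.DiffToDelta(diffs)
	fmt.Println(delta) // tab-separated operations of the form =N, -N, +text

	restored, err := dmp.DiffFromDelta(text1, delta)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dmp.DiffText2(restored) == text2) // true
}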

+ 46 - 0
vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go

@@ -0,0 +1,46 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text.
+package diffmatchpatch
+
+import (
+	"time"
+)
+
+// DiffMatchPatch holds the configuration for diff-match-patch operations.
+type DiffMatchPatch struct {
+	// How long to compute a diff before giving up (0 for infinity).
+	DiffTimeout time.Duration
+	// Cost of an empty edit operation in terms of edit characters.
+	DiffEditCost int
+	// How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match).
+	MatchDistance int
+	// When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose).  Note that MatchThreshold controls how closely the end points of a delete need to match.
+	PatchDeleteThreshold float64
+	// Chunk size for context length.
+	PatchMargin int
+	// The number of bits in an int.
+	MatchMaxBits int
+	// At what point is no match declared (0.0 = perfection, 1.0 = very loose).
+	MatchThreshold float64
+}
+
+// New creates a new DiffMatchPatch object with default parameters.
+func New() *DiffMatchPatch {
+	// Defaults.
+	return &DiffMatchPatch{
+		DiffTimeout:          time.Second,
+		DiffEditCost:         4,
+		MatchThreshold:       0.5,
+		MatchDistance:        1000,
+		PatchDeleteThreshold: 0.5,
+		PatchMargin:          4,
+		MatchMaxBits:         32,
+	}
+}
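
A sketch of constructing and tuning DiffMatchPatch before running a diff (DiffMain lives in diff.go of this same vendored package):

package main

import (
	"fmt"
	"time"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New() // sensible defaults, as listed above

	// Adjust behaviour per use case.
	dmp.DiffTimeout = 200 * time.Millisecond // give up on huge diffs sooner
	dmp.MatchThreshold = 0.4                 // be stricter about fuzzy matches

	diffs := dmp.DiffMain("good dog", "bad dog", false)
	fmt.Println(len(diffs))
}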

+ 160 - 0
vendor/github.com/sergi/go-diff/diffmatchpatch/match.go

@@ -0,0 +1,160 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"math"
+)
+
+// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'.
+// Returns -1 if no match found.
+func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int {
+	// A check for nil inputs is not needed: strings cannot be nil in Go.
+
+	loc = int(math.Max(0, math.Min(float64(loc), float64(len(text)))))
+	if text == pattern {
+		// Shortcut (potentially not guaranteed by the algorithm)
+		return 0
+	} else if len(text) == 0 {
+		// Nothing to match.
+		return -1
+	} else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern {
+		// Perfect match at the perfect spot!  (Includes case of null pattern)
+		return loc
+	}
+	// Do a fuzzy compare.
+	return dmp.MatchBitap(text, pattern, loc)
+}
+
+// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm.
+// Returns -1 if no match was found.
+func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int {
+	// Initialise the alphabet.
+	s := dmp.MatchAlphabet(pattern)
+
+	// Highest score beyond which we give up.
+	scoreThreshold := dmp.MatchThreshold
+	// Is there a nearby exact match? (speedup)
+	bestLoc := indexOf(text, pattern, loc)
+	if bestLoc != -1 {
+		scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
+			pattern), scoreThreshold)
+		// What about in the other direction? (speedup)
+		bestLoc = lastIndexOf(text, pattern, loc+len(pattern))
+		if bestLoc != -1 {
+			scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
+				pattern), scoreThreshold)
+		}
+	}
+
+	// Initialise the bit arrays.
+	matchmask := 1 << uint((len(pattern) - 1))
+	bestLoc = -1
+
+	var binMin, binMid int
+	binMax := len(pattern) + len(text)
+	lastRd := []int{}
+	for d := 0; d < len(pattern); d++ {
+		// Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level.
+		binMin = 0
+		binMid = binMax
+		for binMin < binMid {
+			if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold {
+				binMin = binMid
+			} else {
+				binMax = binMid
+			}
+			binMid = (binMax-binMin)/2 + binMin
+		}
+		// Use the result from this iteration as the maximum for the next.
+		binMax = binMid
+		start := int(math.Max(1, float64(loc-binMid+1)))
+		finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern)))
+
+		rd := make([]int, finish+2)
+		rd[finish+1] = (1 << uint(d)) - 1
+
+		for j := finish; j >= start; j-- {
+			var charMatch int
+			if len(text) <= j-1 {
+				// Out of range.
+				charMatch = 0
+			} else if _, ok := s[text[j-1]]; !ok {
+				charMatch = 0
+			} else {
+				charMatch = s[text[j-1]]
+			}
+
+			if d == 0 {
+				// First pass: exact match.
+				rd[j] = ((rd[j+1] << 1) | 1) & charMatch
+			} else {
+				// Subsequent passes: fuzzy match.
+				rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
+			}
+			if (rd[j] & matchmask) != 0 {
+				score := dmp.matchBitapScore(d, j-1, loc, pattern)
+				// This match will almost certainly be better than any existing match.  But check anyway.
+				if score <= scoreThreshold {
+					// Told you so.
+					scoreThreshold = score
+					bestLoc = j - 1
+					if bestLoc > loc {
+						// When passing loc, don't exceed our current distance from loc.
+						start = int(math.Max(1, float64(2*loc-bestLoc)))
+					} else {
+						// Already passed loc, downhill from here on in.
+						break
+					}
+				}
+			}
+		}
+		if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
+			// No hope for a (better) match at greater error levels.
+			break
+		}
+		lastRd = rd
+	}
+	return bestLoc
+}
+
+// matchBitapScore computes and returns the score for a match with e errors and x location.
+func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
+	accuracy := float64(e) / float64(len(pattern))
+	proximity := math.Abs(float64(loc - x))
+	if dmp.MatchDistance == 0 {
+		// Dodge divide by zero error.
+		if proximity == 0 {
+			return accuracy
+		}
+
+		return 1.0
+	}
+	return accuracy + (proximity / float64(dmp.MatchDistance))
+}
+
+// MatchAlphabet initialises the alphabet for the Bitap algorithm.
+func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
+	s := map[byte]int{}
+	charPattern := []byte(pattern)
+	for _, c := range charPattern {
+		_, ok := s[c]
+		if !ok {
+			s[c] = 0
+		}
+	}
+	i := 0
+
+	for _, c := range charPattern {
+		value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
+		s[c] = value
+		i++
+	}
+	return s
+}
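
A sketch of fuzzy matching: an exact substring is located directly, while a misspelled pattern falls through to the Bitap scan and returns the closest location scoring under MatchThreshold, or -1:

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	text := "The quick brown fox jumps over the lazy dog"

	// Exact substring near loc.
	fmt.Println(dmp.MatchMain(text, "brown", 0)) // 10 (start of "brown")

	// Misspelled pattern: Bitap finds the closest plausible location,
	// or -1 if nothing scores under MatchThreshold.
	fmt.Println(dmp.MatchMain(text, "brwon", 10))
}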

+ 23 - 0
vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go

@@ -0,0 +1,23 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+func max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}

+ 556 - 0
vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go

@@ -0,0 +1,556 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"bytes"
+	"errors"
+	"math"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Patch represents one patch operation.
+type Patch struct {
+	diffs   []Diff
+	start1  int
+	start2  int
+	length1 int
+	length2 int
+}
+
+// String emulates GNU diff's format.
+// Header: @@ -382,8 +481,9 @@
+// Indices are printed as 1-based, not 0-based.
+func (p *Patch) String() string {
+	var coords1, coords2 string
+
+	if p.length1 == 0 {
+		coords1 = strconv.Itoa(p.start1) + ",0"
+	} else if p.length1 == 1 {
+		coords1 = strconv.Itoa(p.start1 + 1)
+	} else {
+		coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1)
+	}
+
+	if p.length2 == 0 {
+		coords2 = strconv.Itoa(p.start2) + ",0"
+	} else if p.length2 == 1 {
+		coords2 = strconv.Itoa(p.start2 + 1)
+	} else {
+		coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2)
+	}
+
+	var text bytes.Buffer
+	_, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
+
+	// Escape the body of the patch with %xx notation.
+	for _, aDiff := range p.diffs {
+		switch aDiff.Type {
+		case DiffInsert:
+			_, _ = text.WriteString("+")
+		case DiffDelete:
+			_, _ = text.WriteString("-")
+		case DiffEqual:
+			_, _ = text.WriteString(" ")
+		}
+
+		_, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
+		_, _ = text.WriteString("\n")
+	}
+
+	return unescaper.Replace(text.String())
+}
+
+// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits.
+func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch {
+	if len(text) == 0 {
+		return patch
+	}
+
+	pattern := text[patch.start2 : patch.start2+patch.length1]
+	padding := 0
+
+	// Look for the first and last matches of pattern in text.  If two different matches are found, increase the pattern length.
+	for strings.Index(text, pattern) != strings.LastIndex(text, pattern) &&
+		len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin {
+		padding += dmp.PatchMargin
+		maxStart := max(0, patch.start2-padding)
+		minEnd := min(len(text), patch.start2+patch.length1+padding)
+		pattern = text[maxStart:minEnd]
+	}
+	// Add one chunk for good luck.
+	padding += dmp.PatchMargin
+
+	// Add the prefix.
+	prefix := text[max(0, patch.start2-padding):patch.start2]
+	if len(prefix) != 0 {
+		patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...)
+	}
+	// Add the suffix.
+	suffix := text[patch.start2+patch.length1 : min(len(text), patch.start2+patch.length1+padding)]
+	if len(suffix) != 0 {
+		patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix})
+	}
+
+	// Roll back the start points.
+	patch.start1 -= len(prefix)
+	patch.start2 -= len(prefix)
+	// Extend the lengths.
+	patch.length1 += len(prefix) + len(suffix)
+	patch.length2 += len(prefix) + len(suffix)
+
+	return patch
+}
+
+// PatchMake computes a list of patches.
+func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch {
+	if len(opt) == 1 {
+		diffs, _ := opt[0].([]Diff)
+		text1 := dmp.DiffText1(diffs)
+		return dmp.PatchMake(text1, diffs)
+	} else if len(opt) == 2 {
+		text1 := opt[0].(string)
+		switch t := opt[1].(type) {
+		case string:
+			diffs := dmp.DiffMain(text1, t, true)
+			if len(diffs) > 2 {
+				diffs = dmp.DiffCleanupSemantic(diffs)
+				diffs = dmp.DiffCleanupEfficiency(diffs)
+			}
+			return dmp.PatchMake(text1, diffs)
+		case []Diff:
+			return dmp.patchMake2(text1, t)
+		}
+	} else if len(opt) == 3 {
+		return dmp.PatchMake(opt[0], opt[2])
+	}
+	return []Patch{}
+}
+
+// patchMake2 computes a list of patches to turn text1 into text2.
+// text2 is not provided; diffs are the delta between text1 and text2.
+func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch {
+	// A check for nil inputs is not needed: strings cannot be nil in Go.
+	patches := []Patch{}
+	if len(diffs) == 0 {
+		return patches // Get rid of the null case.
+	}
+
+	patch := Patch{}
+	charCount1 := 0 // Number of characters into the text1 string.
+	charCount2 := 0 // Number of characters into the text2 string.
+	// Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info.
+	prepatchText := text1
+	postpatchText := text1
+
+	for i, aDiff := range diffs {
+		if len(patch.diffs) == 0 && aDiff.Type != DiffEqual {
+			// A new patch starts here.
+			patch.start1 = charCount1
+			patch.start2 = charCount2
+		}
+
+		switch aDiff.Type {
+		case DiffInsert:
+			patch.diffs = append(patch.diffs, aDiff)
+			patch.length2 += len(aDiff.Text)
+			postpatchText = postpatchText[:charCount2] +
+				aDiff.Text + postpatchText[charCount2:]
+		case DiffDelete:
+			patch.length1 += len(aDiff.Text)
+			patch.diffs = append(patch.diffs, aDiff)
+			postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):]
+		case DiffEqual:
+			if len(aDiff.Text) <= 2*dmp.PatchMargin &&
+				len(patch.diffs) != 0 && i != len(diffs)-1 {
+				// Small equality inside a patch.
+				patch.diffs = append(patch.diffs, aDiff)
+				patch.length1 += len(aDiff.Text)
+				patch.length2 += len(aDiff.Text)
+			}
+			if len(aDiff.Text) >= 2*dmp.PatchMargin {
+				// Time for a new patch.
+				if len(patch.diffs) != 0 {
+					patch = dmp.PatchAddContext(patch, prepatchText)
+					patches = append(patches, patch)
+					patch = Patch{}
+					// Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch.
+					prepatchText = postpatchText
+					charCount1 = charCount2
+				}
+			}
+		}
+
+		// Update the current character count.
+		if aDiff.Type != DiffInsert {
+			charCount1 += len(aDiff.Text)
+		}
+		if aDiff.Type != DiffDelete {
+			charCount2 += len(aDiff.Text)
+		}
+	}
+
+	// Pick up the leftover patch if not empty.
+	if len(patch.diffs) != 0 {
+		patch = dmp.PatchAddContext(patch, prepatchText)
+		patches = append(patches, patch)
+	}
+
+	return patches
+}
+
+// PatchDeepCopy returns an array that is identical to a given array of patches.
+func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch {
+	patchesCopy := []Patch{}
+	for _, aPatch := range patches {
+		patchCopy := Patch{}
+		for _, aDiff := range aPatch.diffs {
+			patchCopy.diffs = append(patchCopy.diffs, Diff{
+				aDiff.Type,
+				aDiff.Text,
+			})
+		}
+		patchCopy.start1 = aPatch.start1
+		patchCopy.start2 = aPatch.start2
+		patchCopy.length1 = aPatch.length1
+		patchCopy.length2 = aPatch.length2
+		patchesCopy = append(patchesCopy, patchCopy)
+	}
+	return patchesCopy
+}
+
+// PatchApply merges a set of patches onto the text.  Returns a patched text, as well as an array of true/false values indicating which patches were applied.
+func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) {
+	if len(patches) == 0 {
+		return text, []bool{}
+	}
+
+	// Deep copy the patches so that no changes are made to originals.
+	patches = dmp.PatchDeepCopy(patches)
+
+	nullPadding := dmp.PatchAddPadding(patches)
+	text = nullPadding + text + nullPadding
+	patches = dmp.PatchSplitMax(patches)
+
+	x := 0
+	// delta keeps track of the offset between the expected and actual location of the previous patch.  If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22.
+	delta := 0
+	results := make([]bool, len(patches))
+	for _, aPatch := range patches {
+		expectedLoc := aPatch.start2 + delta
+		text1 := dmp.DiffText1(aPatch.diffs)
+		var startLoc int
+		endLoc := -1
+		if len(text1) > dmp.MatchMaxBits {
+			// PatchSplitMax will only provide an oversized pattern in the case of a monster delete.
+			startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc)
+			if startLoc != -1 {
+				endLoc = dmp.MatchMain(text,
+					text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits)
+				if endLoc == -1 || startLoc >= endLoc {
+					// Can't find valid trailing context.  Drop this patch.
+					startLoc = -1
+				}
+			}
+		} else {
+			startLoc = dmp.MatchMain(text, text1, expectedLoc)
+		}
+		if startLoc == -1 {
+			// No match found.  :(
+			results[x] = false
+			// Subtract the delta for this failed patch from subsequent patches.
+			delta -= aPatch.length2 - aPatch.length1
+		} else {
+			// Found a match.  :)
+			results[x] = true
+			delta = startLoc - expectedLoc
+			var text2 string
+			if endLoc == -1 {
+				text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))]
+			} else {
+				text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))]
+			}
+			if text1 == text2 {
+				// Perfect match, just shove the Replacement text in.
+				text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):]
+			} else {
+				// Imperfect match.  Run a diff to get a framework of equivalent indices.
+				diffs := dmp.DiffMain(text1, text2, false)
+				if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold {
+					// The end points match, but the content is unacceptably bad.
+					results[x] = false
+				} else {
+					diffs = dmp.DiffCleanupSemanticLossless(diffs)
+					index1 := 0
+					for _, aDiff := range aPatch.diffs {
+						if aDiff.Type != DiffEqual {
+							index2 := dmp.DiffXIndex(diffs, index1)
+							if aDiff.Type == DiffInsert {
+								// Insertion
+								text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:]
+							} else if aDiff.Type == DiffDelete {
+								// Deletion
+								startIndex := startLoc + index2
+								text = text[:startIndex] +
+									text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:]
+							}
+						}
+						if aDiff.Type != DiffDelete {
+							index1 += len(aDiff.Text)
+						}
+					}
+				}
+			}
+		}
+		x++
+	}
+	// Strip the padding off.
+	text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))]
+	return text, results
+}
+
+// PatchAddPadding adds some padding on text start and end so that edges can match something.
+// Intended to be called only from within patchApply.
+func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string {
+	paddingLength := dmp.PatchMargin
+	nullPadding := ""
+	for x := 1; x <= paddingLength; x++ {
+		nullPadding += string(x)
+	}
+
+	// Bump all the patches forward.
+	for i := range patches {
+		patches[i].start1 += paddingLength
+		patches[i].start2 += paddingLength
+	}
+
+	// Add some padding on start of first diff.
+	if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual {
+		// Add nullPadding equality.
+		patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...)
+		patches[0].start1 -= paddingLength // Should be 0.
+		patches[0].start2 -= paddingLength // Should be 0.
+		patches[0].length1 += paddingLength
+		patches[0].length2 += paddingLength
+	} else if paddingLength > len(patches[0].diffs[0].Text) {
+		// Grow first equality.
+		extraLength := paddingLength - len(patches[0].diffs[0].Text)
+		patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text
+		patches[0].start1 -= extraLength
+		patches[0].start2 -= extraLength
+		patches[0].length1 += extraLength
+		patches[0].length2 += extraLength
+	}
+
+	// Add some padding on end of last diff.
+	last := len(patches) - 1
+	if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual {
+		// Add nullPadding equality.
+		patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding})
+		patches[last].length1 += paddingLength
+		patches[last].length2 += paddingLength
+	} else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) {
+		// Grow last equality.
+		lastDiff := patches[last].diffs[len(patches[last].diffs)-1]
+		extraLength := paddingLength - len(lastDiff.Text)
+		patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength]
+		patches[last].length1 += extraLength
+		patches[last].length2 += extraLength
+	}
+
+	return nullPadding
+}
+
+// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm.
+// Intended to be called only from within patchApply.
+func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch {
+	patchSize := dmp.MatchMaxBits
+	for x := 0; x < len(patches); x++ {
+		if patches[x].length1 <= patchSize {
+			continue
+		}
+		bigpatch := patches[x]
+		// Remove the big old patch.
+		patches = append(patches[:x], patches[x+1:]...)
+		x--
+
+		start1 := bigpatch.start1
+		start2 := bigpatch.start2
+		precontext := ""
+		for len(bigpatch.diffs) != 0 {
+			// Create one of several smaller patches.
+			patch := Patch{}
+			empty := true
+			patch.start1 = start1 - len(precontext)
+			patch.start2 = start2 - len(precontext)
+			if len(precontext) != 0 {
+				patch.length1 = len(precontext)
+				patch.length2 = len(precontext)
+				patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext})
+			}
+			for len(bigpatch.diffs) != 0 && patch.length1 < patchSize-dmp.PatchMargin {
+				diffType := bigpatch.diffs[0].Type
+				diffText := bigpatch.diffs[0].Text
+				if diffType == DiffInsert {
+					// Insertions are harmless.
+					patch.length2 += len(diffText)
+					start2 += len(diffText)
+					patch.diffs = append(patch.diffs, bigpatch.diffs[0])
+					bigpatch.diffs = bigpatch.diffs[1:]
+					empty = false
+				} else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize {
+					// This is a large deletion.  Let it pass in one chunk.
+					patch.length1 += len(diffText)
+					start1 += len(diffText)
+					empty = false
+					patch.diffs = append(patch.diffs, Diff{diffType, diffText})
+					bigpatch.diffs = bigpatch.diffs[1:]
+				} else {
+					// Deletion or equality.  Only take as much as we can stomach.
+					diffText = diffText[:min(len(diffText), patchSize-patch.length1-dmp.PatchMargin)]
+
+					patch.length1 += len(diffText)
+					start1 += len(diffText)
+					if diffType == DiffEqual {
+						patch.length2 += len(diffText)
+						start2 += len(diffText)
+					} else {
+						empty = false
+					}
+					patch.diffs = append(patch.diffs, Diff{diffType, diffText})
+					if diffText == bigpatch.diffs[0].Text {
+						bigpatch.diffs = bigpatch.diffs[1:]
+					} else {
+						bigpatch.diffs[0].Text =
+							bigpatch.diffs[0].Text[len(diffText):]
+					}
+				}
+			}
+			// Compute the head context for the next patch.
+			precontext = dmp.DiffText2(patch.diffs)
+			precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):]
+
+			postcontext := ""
+			// Append the end context for this patch.
+			if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin {
+				postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin]
+			} else {
+				postcontext = dmp.DiffText1(bigpatch.diffs)
+			}
+
+			if len(postcontext) != 0 {
+				patch.length1 += len(postcontext)
+				patch.length2 += len(postcontext)
+				if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual {
+					patch.diffs[len(patch.diffs)-1].Text += postcontext
+				} else {
+					patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext})
+				}
+			}
+			if !empty {
+				x++
+				patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...)
+			}
+		}
+	}
+	return patches
+}
+
+// PatchToText takes a list of patches and returns a textual representation.
+func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string {
+	var text bytes.Buffer
+	for _, aPatch := range patches {
+		_, _ = text.WriteString(aPatch.String())
+	}
+	return text.String()
+}
+
+// PatchFromText parses a textual representation of patches and returns a List of Patch objects.
+func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) {
+	patches := []Patch{}
+	if len(textline) == 0 {
+		return patches, nil
+	}
+	text := strings.Split(textline, "\n")
+	textPointer := 0
+	patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$")
+
+	var patch Patch
+	var sign uint8
+	var line string
+	for textPointer < len(text) {
+
+		if !patchHeader.MatchString(text[textPointer]) {
+			return patches, errors.New("Invalid patch string: " + text[textPointer])
+		}
+
+		patch = Patch{}
+		m := patchHeader.FindStringSubmatch(text[textPointer])
+
+		patch.start1, _ = strconv.Atoi(m[1])
+		if len(m[2]) == 0 {
+			patch.start1--
+			patch.length1 = 1
+		} else if m[2] == "0" {
+			patch.length1 = 0
+		} else {
+			patch.start1--
+			patch.length1, _ = strconv.Atoi(m[2])
+		}
+
+		patch.start2, _ = strconv.Atoi(m[3])
+
+		if len(m[4]) == 0 {
+			patch.start2--
+			patch.length2 = 1
+		} else if m[4] == "0" {
+			patch.length2 = 0
+		} else {
+			patch.start2--
+			patch.length2, _ = strconv.Atoi(m[4])
+		}
+		textPointer++
+
+		for textPointer < len(text) {
+			if len(text[textPointer]) > 0 {
+				sign = text[textPointer][0]
+			} else {
+				textPointer++
+				continue
+			}
+
+			line = text[textPointer][1:]
+			line = strings.Replace(line, "+", "%2b", -1)
+			line, _ = url.QueryUnescape(line)
+			if sign == '-' {
+				// Deletion.
+				patch.diffs = append(patch.diffs, Diff{DiffDelete, line})
+			} else if sign == '+' {
+				// Insertion.
+				patch.diffs = append(patch.diffs, Diff{DiffInsert, line})
+			} else if sign == ' ' {
+				// Minor equality.
+				patch.diffs = append(patch.diffs, Diff{DiffEqual, line})
+			} else if sign == '@' {
+				// Start of next patch.
+				break
+			} else {
+				// WTF?
+				return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line))
+			}
+			textPointer++
+		}
+
+		patches = append(patches, patch)
+	}
+	return patches, nil
+}
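
A sketch of the full patch round trip built from the functions above (make, serialize, parse, apply):

package main

import (
	"fmt"
	"log"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	text1 := "The quick brown fox jumps over the lazy dog."
	text2 := "That quick brown fox jumped over a lazy dog."

	patches := dmp.PatchMake(text1, text2)
	serialized := dmp.PatchToText(patches)
	fmt.Println(serialized) // GNU-diff-style "@@ -l,s +l,s @@" hunks

	restored, err := dmp.PatchFromText(serialized)
	if err != nil {
		log.Fatal(err)
	}

	result, applied := dmp.PatchApply(restored, text1)
	fmt.Println(result)  // equals text2 when every hunk applies cleanly
	fmt.Println(applied) // one bool per patch
}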

+ 88 - 0
vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go

@@ -0,0 +1,88 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"strings"
+	"unicode/utf8"
+)
+
+// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI.
+// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive.  Thus "%3F" would not be unescaped.  But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc.
+var unescaper = strings.NewReplacer(
+	"%21", "!", "%7E", "~", "%27", "'",
+	"%28", "(", "%29", ")", "%3B", ";",
+	"%2F", "/", "%3F", "?", "%3A", ":",
+	"%40", "@", "%26", "&", "%3D", "=",
+	"%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*")
+
+// indexOf returns the first index of pattern in str, starting at str[i].
+func indexOf(str string, pattern string, i int) int {
+	if i > len(str)-1 {
+		return -1
+	}
+	if i <= 0 {
+		return strings.Index(str, pattern)
+	}
+	ind := strings.Index(str[i:], pattern)
+	if ind == -1 {
+		return -1
+	}
+	return ind + i
+}
+
+// lastIndexOf returns the last index of pattern in str, starting at str[i].
+func lastIndexOf(str string, pattern string, i int) int {
+	if i < 0 {
+		return -1
+	}
+	if i >= len(str) {
+		return strings.LastIndex(str, pattern)
+	}
+	_, size := utf8.DecodeRuneInString(str[i:])
+	return strings.LastIndex(str[:i+size], pattern)
+}
+
+// runesIndexOf returns the index of pattern in target, starting at target[i].
+func runesIndexOf(target, pattern []rune, i int) int {
+	if i > len(target)-1 {
+		return -1
+	}
+	if i <= 0 {
+		return runesIndex(target, pattern)
+	}
+	ind := runesIndex(target[i:], pattern)
+	if ind == -1 {
+		return -1
+	}
+	return ind + i
+}
+
+func runesEqual(r1, r2 []rune) bool {
+	if len(r1) != len(r2) {
+		return false
+	}
+	for i, c := range r1 {
+		if c != r2[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// runesIndex is the equivalent of strings.Index for rune slices.
+func runesIndex(r1, r2 []rune) int {
+	last := len(r1) - len(r2)
+	for i := 0; i <= last; i++ {
+		if runesEqual(r1[i:i+len(r2)], r2) {
+			return i
+		}
+	}
+	return -1
+}

+ 22 - 0
vendor/github.com/stretchr/testify/LICENSE

@@ -0,0 +1,22 @@
+Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
+
+Please consider promoting this project if you find it useful.
+
+Permission is hereby granted, free of charge, to any person 
+obtaining a copy of this software and associated documentation 
+files (the "Software"), to deal in the Software without restriction, 
+including without limitation the rights to use, copy, modify, merge, 
+publish, distribute, sublicense, and/or sell copies of the Software, 
+and to permit persons to whom the Software is furnished to do so, 
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 
+OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 387 - 0
vendor/github.com/stretchr/testify/assert/assertion_forward.go

@@ -0,0 +1,387 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+*/
+
+package assert
+
+import (
+
+	http "net/http"
+	url "net/url"
+	time "time"
+)
+
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+	return Condition(a.t, comp, msgAndArgs...)
+}
+
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+// 
+//    a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
+//    a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
+//    a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+	return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+
+// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+// 
+//  a.Empty(obj)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+	return Empty(a.t, object, msgAndArgs...)
+}
+
+
+// Equal asserts that two objects are equal.
+// 
+//    a.Equal(123, 123, "123 and 123 should be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+// 
+//   actualObj, err := SomeFunction()
+//   if assert.Error(t, err, "An error was expected") {
+// 	   assert.Equal(t, err, expectedError)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+	return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+// 
+//    a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+// 
+//   actualObj, err := SomeFunction()
+//   if a.Error(err, "An error was expected") {
+// 	   assert.Equal(t, err, expectedError)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+	return Error(a.t, err, msgAndArgs...)
+}
+
+
+// Exactly asserts that two objects are equal in value and type.
+// 
+//    a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// Fail reports a failure through
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+	return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+
+// FailNow fails the test
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+	return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+
+// False asserts that the specified value is false.
+// 
+//    a.False(myBool, "myBool should be false")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+	return False(a.t, value, msgAndArgs...)
+}
+
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+// 
+//  a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
+	return HTTPBodyContains(a.t, handler, method, url, values, str)
+}
+
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+// 
+//  a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
+	return HTTPBodyNotContains(a.t, handler, method, url, values, str)
+}
+
+
+// HTTPError asserts that a specified handler returns an error status code.
+// 
+//  a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool {
+	return HTTPError(a.t, handler, method, url, values)
+}
+
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+// 
+//  a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool {
+	return HTTPRedirect(a.t, handler, method, url, values)
+}
+
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+// 
+//  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool {
+	return HTTPSuccess(a.t, handler, method, url, values)
+}
+
+
+// Implements asserts that an object implements the specified interface.
+// 
+//    a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+
+// InDelta asserts that the two numerals are within delta of each other.
+// 
+// 	 a.InDelta(math.Pi, (22 / 7.0), 0.01)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+
+// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+
+// JSONEq asserts that two JSON strings are equivalent.
+// 
+//  a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+	return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+// 
+//    a.Len(mySlice, 3, "The size of slice is not 3")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+	return Len(a.t, object, length, msgAndArgs...)
+}
+
+
+// Nil asserts that the specified object is nil.
+// 
+//    a.Nil(err, "err should be nothing")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+	return Nil(a.t, object, msgAndArgs...)
+}
+
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+// 
+//   actualObj, err := SomeFunction()
+//   if a.NoError(err) {
+// 	   assert.Equal(t, actualObj, expectedObj)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
+	return NoError(a.t, err, msgAndArgs...)
+}
+
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+// 
+//    a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+//    a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
+//    a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+	return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+
+// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+// 
+//  if a.NotEmpty(obj) {
+//    assert.Equal(t, "two", obj[1])
+//  }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+	return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+
+// NotEqual asserts that the specified values are NOT equal.
+// 
+//    a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// NotNil asserts that the specified object is not nil.
+// 
+//    a.NotNil(err, "err should be something")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+	return NotNil(a.t, object, msgAndArgs...)
+}
+
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+// 
+//   a.NotPanics(func(){
+//     RemainCalm()
+//   }, "Calling RemainCalm() should NOT panic")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	return NotPanics(a.t, f, msgAndArgs...)
+}
+
+
+// NotRegexp asserts that a specified regexp does not match a string.
+// 
+//  a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+//  a.NotRegexp("^start", "it's not starting")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+
+// NotZero asserts that i is not the zero value for its type and returns the truth.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+	return NotZero(a.t, i, msgAndArgs...)
+}
+
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+// 
+//   a.Panics(func(){
+//     GoCrazy()
+//   }, "Calling GoCrazy() should panic")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	return Panics(a.t, f, msgAndArgs...)
+}
+
+
+// Regexp asserts that a specified regexp matches a string.
+// 
+//  a.Regexp(regexp.MustCompile("start"), "it's starting")
+//  a.Regexp("start...$", "it's not starting")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+
+// True asserts that the specified value is true.
+// 
+//    a.True(myBool, "myBool should be true")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+	return True(a.t, value, msgAndArgs...)
+}
+
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+// 
+//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+	return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+
+// Zero asserts that i is the zero value for its type and returns the truth.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
+	return Zero(a.t, i, msgAndArgs...)
+}
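A minimal sketch of how these forwarded methods are typically used: bind the *testing.T once with assert.New and drop the explicit t argument on every call. The test name and values below are illustrative, not taken from this commit.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwardedAssertions(t *testing.T) {
	a := assert.New(t) // bind *testing.T once

	greeting := "Hello World"
	a.Equal(123, 123, "123 and 123 should be equal")
	a.Contains(greeting, "World")
	a.NotEmpty(greeting)
}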

+ 4 - 0
vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl

@@ -0,0 +1,4 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
+	return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}
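Roughly, the _codegen tool runs each exported assertion through this template. For the Condition assertion, the template fields presumably expand along the lines sketched below, yielding the forwarder shown near the top of assertion_forward.go above; the field values are an assumption for illustration, not output captured from the generator.

// {{.DocInfo.Name}}         -> Condition
// {{.Params}}               -> comp Comparison, msgAndArgs ...interface{}
// {{.ForwardedParams}}      -> comp, msgAndArgs...
// {{.CommentWithoutT "a"}}  -> the doc comment rewritten to reference the receiver "a"

func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
	return Condition(a.t, comp, msgAndArgs...)
}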

+ 1052 - 0
vendor/github.com/stretchr/testify/assert/assertions.go

@@ -0,0 +1,1052 @@
+package assert
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"math"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/pmezard/go-difflib/difflib"
+)
+
+func init() {
+	spew.Config.SortKeys = true
+}
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+	Errorf(format string, args ...interface{})
+}
+
+// Comparison a custom function that returns true on success and false on failure
+type Comparison func() (success bool)
+
+/*
+	Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+
+	if expected == nil || actual == nil {
+		return expected == actual
+	}
+
+	return reflect.DeepEqual(expected, actual)
+
+}
+
+// ObjectsAreEqualValues gets whether two objects are equal, or if their
+// values are equal.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+	if ObjectsAreEqual(expected, actual) {
+		return true
+	}
+
+	actualType := reflect.TypeOf(actual)
+	if actualType == nil {
+		return false
+	}
+	expectedValue := reflect.ValueOf(expected)
+	if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+		// Attempt comparison after type conversion
+		return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+	}
+
+	return false
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+	pc := uintptr(0)
+	file := ""
+	line := 0
+	ok := false
+	name := ""
+
+	callers := []string{}
+	for i := 0; ; i++ {
+		pc, file, line, ok = runtime.Caller(i)
+		if !ok {
+			// The breaks below failed to terminate the loop, and we ran off the
+			// end of the call stack.
+			break
+		}
+
+		// This is a huge edge case, but it will panic if this is the case, see #180
+		if file == "<autogenerated>" {
+			break
+		}
+
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			break
+		}
+		name = f.Name()
+
+		// testing.tRunner is the standard library function that calls
+		// tests. Subtests are called directly by tRunner, without going through
+		// the Test/Benchmark/Example function that contains the t.Run calls, so
+		// with subtests we should break when we hit tRunner, without adding it
+		// to the list of callers.
+		if name == "testing.tRunner" {
+			break
+		}
+
+		parts := strings.Split(file, "/")
+		dir := parts[len(parts)-2]
+		file = parts[len(parts)-1]
+		if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+			callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+		}
+
+		// Drop the package
+		segments := strings.Split(name, ".")
+		name = segments[len(segments)-1]
+		if isTest(name, "Test") ||
+			isTest(name, "Benchmark") ||
+			isTest(name, "Example") {
+			break
+		}
+	}
+
+	return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+	if !strings.HasPrefix(name, prefix) {
+		return false
+	}
+	if len(name) == len(prefix) { // "Test" is ok
+		return true
+	}
+	rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+	return !unicode.IsLower(rune)
+}
+
+// getWhitespaceString returns a string that is long enough to overwrite the default
+// output from the go testing framework.
+func getWhitespaceString() string {
+
+	_, file, line, ok := runtime.Caller(1)
+	if !ok {
+		return ""
+	}
+	parts := strings.Split(file, "/")
+	file = parts[len(parts)-1]
+
+	return strings.Repeat(" ", len(fmt.Sprintf("%s:%d:      ", file, line)))
+
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+	if len(msgAndArgs) == 0 || msgAndArgs == nil {
+		return ""
+	}
+	if len(msgAndArgs) == 1 {
+		return msgAndArgs[0].(string)
+	}
+	if len(msgAndArgs) > 1 {
+		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+	}
+	return ""
+}
+
+// Indents all lines of the message by prepending a number of tabs to each line, in an output format compatible with Go's
+// test printing (see inner comment for specifics)
+func indentMessageLines(message string, tabs int) string {
+	outBuf := new(bytes.Buffer)
+
+	for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+		if i != 0 {
+			outBuf.WriteRune('\n')
+		}
+		for ii := 0; ii < tabs; ii++ {
+			outBuf.WriteRune('\t')
+			// Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter
+			// by 1 prematurely.
+			if ii == 0 && i > 0 {
+				ii++
+			}
+		}
+		outBuf.WriteString(scanner.Text())
+	}
+
+	return outBuf.String()
+}
+
+type failNower interface {
+	FailNow()
+}
+
+// FailNow fails the test
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+	Fail(t, failureMessage, msgAndArgs...)
+
+	// We cannot extend TestingT with FailNow() and
+	// maintain backwards compatibility, so we fallback
+	// to panicking when FailNow is not available in
+	// TestingT.
+	// See issue #263
+
+	if t, ok := t.(failNower); ok {
+		t.FailNow()
+	} else {
+		panic("test failed and t is missing `FailNow()`")
+	}
+	return false
+}
+
+// Fail reports a failure through
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+
+	message := messageFromMsgAndArgs(msgAndArgs...)
+
+	errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t")
+	if len(message) > 0 {
+		t.Errorf("\r%s\r\tError Trace:\t%s\n"+
+			"\r\tError:%s\n"+
+			"\r\tMessages:\t%s\n\r",
+			getWhitespaceString(),
+			errorTrace,
+			indentMessageLines(failureMessage, 2),
+			message)
+	} else {
+		t.Errorf("\r%s\r\tError Trace:\t%s\n"+
+			"\r\tError:%s\n\r",
+			getWhitespaceString(),
+			errorTrace,
+			indentMessageLines(failureMessage, 2))
+	}
+
+	return false
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+//    assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+	if !reflect.TypeOf(object).Implements(interfaceType) {
+		return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Equal asserts that two objects are equal.
+//
+//    assert.Equal(t, 123, 123, "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	if !ObjectsAreEqual(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal: %s (expected)\n"+
+			"        != %s (actual)%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parenthesis similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+	aType := reflect.TypeOf(expected)
+	bType := reflect.TypeOf(actual)
+
+	if aType != bType && isNumericType(aType) && isNumericType(bType) {
+		return fmt.Sprintf("%v(%#v)", aType, expected),
+			fmt.Sprintf("%v(%#v)", bType, actual)
+	}
+
+	return fmt.Sprintf("%#v", expected),
+		fmt.Sprintf("%#v", actual)
+}
+
+func isNumericType(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	}
+
+	return false
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	if !ObjectsAreEqualValues(expected, actual) {
+		return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
+			"        != %#v (actual)", expected, actual), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+//    assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	aType := reflect.TypeOf(expected)
+	bType := reflect.TypeOf(actual)
+
+	if aType != bType {
+		return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...)
+	}
+
+	return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+//    assert.NotNil(t, err, "err should be something")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if !isNil(object) {
+		return true
+	}
+	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+	if object == nil {
+		return true
+	}
+
+	value := reflect.ValueOf(object)
+	kind := value.Kind()
+	if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+		return true
+	}
+
+	return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+//    assert.Nil(t, err, "err should be nothing")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if isNil(object) {
+		return true
+	}
+	return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+var numericZeros = []interface{}{
+	int(0),
+	int8(0),
+	int16(0),
+	int32(0),
+	int64(0),
+	uint(0),
+	uint8(0),
+	uint16(0),
+	uint32(0),
+	uint64(0),
+	float32(0),
+	float64(0),
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+	if object == nil {
+		return true
+	} else if object == "" {
+		return true
+	} else if object == false {
+		return true
+	}
+
+	for _, v := range numericZeros {
+		if object == v {
+			return true
+		}
+	}
+
+	objValue := reflect.ValueOf(object)
+
+	switch objValue.Kind() {
+	case reflect.Map:
+		fallthrough
+	case reflect.Slice, reflect.Chan:
+		{
+			return (objValue.Len() == 0)
+		}
+	case reflect.Struct:
+		switch object.(type) {
+		case time.Time:
+			return object.(time.Time).IsZero()
+		}
+	case reflect.Ptr:
+		{
+			if objValue.IsNil() {
+				return true
+			}
+			switch object.(type) {
+			case *time.Time:
+				return object.(*time.Time).IsZero()
+			default:
+				return false
+			}
+		}
+	}
+	return false
+}
+
+// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  assert.Empty(t, obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+	pass := isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  if assert.NotEmpty(t, obj) {
+//    assert.Equal(t, "two", obj[1])
+//  }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+	pass := !isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// getLen tries to get the length of the object.
+// It returns (false, 0) if that is not possible.
+func getLen(x interface{}) (ok bool, length int) {
+	v := reflect.ValueOf(x)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+	return true, v.Len()
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+//    assert.Len(t, mySlice, 3, "The size of slice is not 3")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+	ok, l := getLen(object)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+	}
+
+	if l != length {
+		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+	}
+	return true
+}
+
+// True asserts that the specified value is true.
+//
+//    assert.True(t, myBool, "myBool should be true")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+	if value != true {
+		return Fail(t, "Should be true", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// False asserts that the specified value is false.
+//
+//    assert.False(t, myBool, "myBool should be false")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+	if value != false {
+		return Fail(t, "Should be false", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+//    assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	if ObjectsAreEqual(expected, actual) {
+		return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// includeElement loops over the list and checks whether it includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+	listValue := reflect.ValueOf(list)
+	elementValue := reflect.ValueOf(element)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+			found = false
+		}
+	}()
+
+	if reflect.TypeOf(list).Kind() == reflect.String {
+		return true, strings.Contains(listValue.String(), elementValue.String())
+	}
+
+	if reflect.TypeOf(list).Kind() == reflect.Map {
+		mapKeys := listValue.MapKeys()
+		for i := 0; i < len(mapKeys); i++ {
+			if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+				return true, true
+			}
+		}
+		return true, false
+	}
+
+	for i := 0; i < listValue.Len(); i++ {
+		if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+			return true, true
+		}
+	}
+	return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
+//    assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
+//    assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if !found {
+		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+//    assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
+//    assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if found {
+		return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+	result := comp()
+	if !result {
+		Fail(t, "Condition failed!", msgAndArgs...)
+	}
+	return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f PanicTestFunc) (bool, interface{}) {
+
+	didPanic := false
+	var message interface{}
+	func() {
+
+		defer func() {
+			if message = recover(); message != nil {
+				didPanic = true
+			}
+		}()
+
+		// call the target function
+		f()
+
+	}()
+
+	return didPanic, message
+
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+//   assert.Panics(t, func(){
+//     GoCrazy()
+//   }, "Calling GoCrazy() should panic")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+	if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+	}
+
+	return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   assert.NotPanics(t, func(){
+//     RemainCalm()
+//   }, "Calling RemainCalm() should NOT panic")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+	if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+	}
+
+	return true
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+
+	dt := expected.Sub(actual)
+	if dt < -delta || dt > delta {
+		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+	}
+
+	return true
+}
+
+func toFloat(x interface{}) (float64, bool) {
+	var xf float64
+	xok := true
+
+	switch xn := x.(type) {
+	case uint8:
+		xf = float64(xn)
+	case uint16:
+		xf = float64(xn)
+	case uint32:
+		xf = float64(xn)
+	case uint64:
+		xf = float64(xn)
+	case int:
+		xf = float64(xn)
+	case int8:
+		xf = float64(xn)
+	case int16:
+		xf = float64(xn)
+	case int32:
+		xf = float64(xn)
+	case int64:
+		xf = float64(xn)
+	case float32:
+		xf = float64(xn)
+	case float64:
+		xf = float64(xn)
+	default:
+		xok = false
+	}
+
+	return xf, xok
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// 	 assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+
+	af, aok := toFloat(expected)
+	bf, bok := toFloat(actual)
+
+	if !aok || !bok {
+		return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+	}
+
+	if math.IsNaN(af) {
+		return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...)
+	}
+
+	if math.IsNaN(bf) {
+		return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+	}
+
+	dt := af - bf
+	if dt < -delta || dt > delta {
+		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+	}
+
+	return true
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if expected == nil || actual == nil ||
+		reflect.TypeOf(actual).Kind() != reflect.Slice ||
+		reflect.TypeOf(expected).Kind() != reflect.Slice {
+		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+	}
+
+	actualSlice := reflect.ValueOf(actual)
+	expectedSlice := reflect.ValueOf(expected)
+
+	for i := 0; i < actualSlice.Len(); i++ {
+		result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta)
+		if !result {
+			return result
+		}
+	}
+
+	return true
+}
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+	af, aok := toFloat(expected)
+	if !aok {
+		return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+	}
+	if af == 0 {
+		return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+	}
+	bf, bok := toFloat(actual)
+	if !bok {
+		return 0, fmt.Errorf("expected value %q cannot be converted to float", actual)
+	}
+
+	return math.Abs(af-bf) / math.Abs(af), nil
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	actualEpsilon, err := calcRelativeError(expected, actual)
+	if err != nil {
+		return Fail(t, err.Error(), msgAndArgs...)
+	}
+	if actualEpsilon > epsilon {
+		return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+			"        < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...)
+	}
+
+	return true
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if expected == nil || actual == nil ||
+		reflect.TypeOf(actual).Kind() != reflect.Slice ||
+		reflect.TypeOf(expected).Kind() != reflect.Slice {
+		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+	}
+
+	actualSlice := reflect.ValueOf(actual)
+	expectedSlice := reflect.ValueOf(expected)
+
+	for i := 0; i < actualSlice.Len(); i++ {
+		result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
+		if !result {
+			return result
+		}
+	}
+
+	return true
+}
+
+/*
+	Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.NoError(t, err) {
+//	   assert.Equal(t, actualObj, expectedObj)
+//   }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+	if err != nil {
+		return Fail(t, fmt.Sprintf("Received unexpected error %+v", err), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.Error(t, err, "An error was expected") {
+//	   assert.Equal(t, err, expectedError)
+//   }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+
+	if err == nil {
+		return Fail(t, "An error is expected but got nil.", msgAndArgs...)
+	}
+
+	return true
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   assert.EqualError(t, err,  expectedErrorString, "An error was expected")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+
+	message := messageFromMsgAndArgs(msgAndArgs...)
+	if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
+		return false
+	}
+	s := "An error with value \"%s\" is expected but got \"%s\". %s"
+	return Equal(t, errString, theError.Error(),
+		s, errString, theError.Error(), message)
+}
+
+// matchRegexp return true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+	var r *regexp.Regexp
+	if rr, ok := rx.(*regexp.Regexp); ok {
+		r = rr
+	} else {
+		r = regexp.MustCompile(fmt.Sprint(rx))
+	}
+
+	return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+//  assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+//  assert.Regexp(t, "start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+
+	match := matchRegexp(rx, str)
+
+	if !match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+//  assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+//  assert.NotRegexp(t, "^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	match := matchRegexp(rx, str)
+
+	if match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return !match
+
+}
+
+// Zero asserts that i is the zero value for its type and returns the truth.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+	if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+		return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+	}
+	return true
+}
+
+// NotZero asserts that i is not the zero value for its type and returns the truth.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+	if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+		return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+	}
+	return true
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+	var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+	if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+	}
+
+	if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
+	}
+
+	return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+	t := reflect.TypeOf(v)
+	k := t.Kind()
+
+	if k == reflect.Ptr {
+		t = t.Elem()
+		k = t.Kind()
+	}
+	return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice or array. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+	if expected == nil || actual == nil {
+		return ""
+	}
+
+	et, ek := typeAndKind(expected)
+	at, _ := typeAndKind(actual)
+
+	if et != at {
+		return ""
+	}
+
+	if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array {
+		return ""
+	}
+
+	e := spew.Sdump(expected)
+	a := spew.Sdump(actual)
+
+	diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+		A:        difflib.SplitLines(e),
+		B:        difflib.SplitLines(a),
+		FromFile: "Expected",
+		FromDate: "",
+		ToFile:   "Actual",
+		ToDate:   "",
+		Context:  1,
+	})
+
+	return "\n\nDiff:\n" + diff
+}
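A small sketch of how the three equality flavours above differ: Equal relies on reflect.DeepEqual, EqualValues additionally tries a type conversion, and Exactly also requires identical types. The values are illustrative.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEqualityFlavours(t *testing.T) {
	assert.Equal(t, int32(123), int32(123))        // same type and value: passes
	assert.EqualValues(t, uint32(123), int32(123)) // different but convertible types with equal values: passes
	assert.Exactly(t, int32(123), int32(123))      // identical type and value: passes

	// assert.Equal(t, uint32(123), int32(123))  // would fail: DeepEqual distinguishes the types
	// assert.Exactly(t, int32(123), int64(123)) // would fail: the types differ
}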

+ 45 - 0
vendor/github.com/stretchr/testify/assert/doc.go

@@ -0,0 +1,45 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/assert"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      assert.Equal(t, a, b, "The two words should be the same.")
+//
+//    }
+//
+// if you assert many times, use the format below:
+//
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/assert"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//      assert := assert.New(t)
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      assert.Equal(a, b, "The two words should be the same.")
+//    }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package assert

+ 10 - 0
vendor/github.com/stretchr/testify/assert/errors.go

@@ -0,0 +1,10 @@
+package assert
+
+import (
+	"errors"
+)
+
+// AnError is an error instance useful for testing.  If the code does not care
+// about error specifics, and only needs to return the error for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
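A short sketch of the intended use of AnError: have a stub return it when the test only cares that some error occurred. loadConfig below is an illustrative stand-in, not part of the package.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// loadConfig is an illustrative stub that always fails.
func loadConfig() error {
	return assert.AnError
}

func TestLoadConfigFails(t *testing.T) {
	err := loadConfig()
	assert.Error(t, err)                 // only the fact that an error occurred matters
	assert.Equal(t, assert.AnError, err) // the shared sentinel keeps the comparison trivial
}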

+ 16 - 0
vendor/github.com/stretchr/testify/assert/forward_assertions.go

@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+	t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+	return &Assertions{
+		t: t,
+	}
+}
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl

+ 106 - 0
vendor/github.com/stretchr/testify/assert/http_assertions.go

@@ -0,0 +1,106 @@
+package assert
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strings"
+)
+
+// httpCode is a helper that returns the HTTP status code of the response. It returns -1
+// if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int {
+	w := httptest.NewRecorder()
+	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+	if err != nil {
+		return -1
+	}
+	handler(w, req)
+	return w.Code
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+//  assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
+	code := httpCode(handler, method, url, values)
+	if code == -1 {
+		return false
+	}
+	return code >= http.StatusOK && code <= http.StatusPartialContent
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+//  assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
+	code := httpCode(handler, method, url, values)
+	if code == -1 {
+		return false
+	}
+	return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+//  assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
+	code := httpCode(handler, method, url, values)
+	if code == -1 {
+		return false
+	}
+	return code >= http.StatusBadRequest
+}
+
+// HTTPBody is a helper that returns the HTTP body of the response. It returns
+// an empty string if building a new request fails.
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+	w := httptest.NewRecorder()
+	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+	if err != nil {
+		return ""
+	}
+	handler(w, req)
+	return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+//  assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
+	body := HTTPBody(handler, method, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if !contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
+	body := HTTPBody(handler, method, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return !contains
+}
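A minimal sketch of these HTTP helpers against a hand-rolled handler; helloHandler, the route, and the query values are illustrative assumptions, not part of the package.

package example

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// helloHandler is an illustrative handler that echoes a query parameter.
func helloHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "hello %s", r.URL.Query().Get("name"))
}

func TestHelloHandler(t *testing.T) {
	values := url.Values{"name": []string{"world"}}

	assert.HTTPSuccess(t, helloHandler, "GET", "/hello", values)
	assert.HTTPBodyContains(t, helloHandler, "GET", "/hello", values, "hello world")
	assert.HTTPBodyNotContains(t, helloHandler, "GET", "/hello", values, "goodbye")
}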

+ 28 - 0
vendor/github.com/stretchr/testify/require/doc.go

@@ -0,0 +1,28 @@
+// Package require implements the same assertions as the `assert` package but
+// stops test execution when a test fails.
+//
+// Example Usage
+//
+// The following is a complete example using require in a standard test function:
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/require"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      require.Equal(t, a, b, "The two words should be the same.")
+//
+//    }
+//
+// Assertions
+//
+// The `require` package has the same global functions as the `assert` package,
+// but instead of returning a boolean result they call `t.FailNow()`.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package require

+ 16 - 0
vendor/github.com/stretchr/testify/require/forward_requirements.go

@@ -0,0 +1,16 @@
+package require
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+	t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+	return &Assertions{
+		t: t,
+	}
+}
+
+//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl

+ 464 - 0
vendor/github.com/stretchr/testify/require/require.go

@@ -0,0 +1,464 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+*/
+
+package require
+
+import (
+
+	assert "github.com/stretchr/testify/assert"
+	http "net/http"
+	url "net/url"
+	time "time"
+)
+
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
+  if !assert.Condition(t, comp, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+// 
+//    assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
+//    assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
+//    assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+  if !assert.Contains(t, s, contains, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+// 
+//  assert.Empty(t, obj)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+  if !assert.Empty(t, object, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Equal asserts that two objects are equal.
+// 
+//    assert.Equal(t, 123, 123, "123 and 123 should be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+  if !assert.Equal(t, expected, actual, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+// 
+//   actualObj, err := SomeFunction()
+//   if assert.Error(t, err, "An error was expected") {
+// 	   assert.Equal(t, err, expectedError)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
+  if !assert.EqualError(t, theError, errString, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+// 
+//    assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+  if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+// 
+//   actualObj, err := SomeFunction()
+//   if assert.Error(t, err, "An error was expected") {
+// 	   assert.Equal(t, err, expectedError)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Error(t TestingT, err error, msgAndArgs ...interface{}) {
+  if !assert.Error(t, err, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Exactly asserts that two objects are equal in value and type.
+// 
+//    assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+  if !assert.Exactly(t, expected, actual, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Fail reports a failure through
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
+  if !assert.Fail(t, failureMessage, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// FailNow fails the test
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
+  if !assert.FailNow(t, failureMessage, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// False asserts that the specified value is false.
+// 
+//    assert.False(t, myBool, "myBool should be false")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func False(t TestingT, value bool, msgAndArgs ...interface{}) {
+  if !assert.False(t, value, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+// 
+//  assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
+  if !assert.HTTPBodyContains(t, handler, method, url, values, str) {
+    t.FailNow()
+  }
+}
+
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+// 
+//  assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
+  if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) {
+    t.FailNow()
+  }
+}
+
+
+// HTTPError asserts that a specified handler returns an error status code.
+// 
+//  assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
+  if !assert.HTTPError(t, handler, method, url, values) {
+    t.FailNow()
+  }
+}
+
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+// 
+//  assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
+  if !assert.HTTPRedirect(t, handler, method, url, values) {
+    t.FailNow()
+  }
+}
+
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+// 
+//  assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
+  if !assert.HTTPSuccess(t, handler, method, url, values) {
+    t.FailNow()
+  }
+}
+
+
+// Implements asserts that an object implements the specified interface.
+// 
+//    assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+  if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// InDelta asserts that the two numerals are within delta of each other.
+// 
+// 	 assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+  if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+  if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+  if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
+func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+  if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
+  if !assert.IsType(t, expectedType, object, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// JSONEq asserts that two JSON strings are equivalent.
+// 
+//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
+  if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+// 
+//    assert.Len(t, mySlice, 3, "The size of slice is not 3")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
+  if !assert.Len(t, object, length, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Nil asserts that the specified object is nil.
+// 
+//    assert.Nil(t, err, "err should be nothing")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+  if !assert.Nil(t, object, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+// 
+//   actualObj, err := SomeFunction()
+//   if assert.NoError(t, err) {
+// 	   assert.Equal(t, actualObj, expectedObj)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
+  if !assert.NoError(t, err, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+// 
+//    assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+//    assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
+//    assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+  if !assert.NotContains(t, s, contains, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+// 
+//  if assert.NotEmpty(t, obj) {
+//    assert.Equal(t, "two", obj[1])
+//  }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+  if !assert.NotEmpty(t, object, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// NotEqual asserts that the specified values are NOT equal.
+// 
+//    assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+  if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// NotNil asserts that the specified object is not nil.
+// 
+//    assert.NotNil(t, err, "err should be something")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+  if !assert.NotNil(t, object, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+// 
+//   assert.NotPanics(t, func(){
+//     RemainCalm()
+//   }, "Calling RemainCalm() should NOT panic")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+  if !assert.NotPanics(t, f, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// NotRegexp asserts that a specified regexp does not match a string.
+// 
+//  assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+//  assert.NotRegexp(t, "^start", "it's not starting")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+  if !assert.NotRegexp(t, rx, str, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// NotZero asserts that i is not the zero value for its type and returns the truth.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
+  if !assert.NotZero(t, i, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+// 
+//   assert.Panics(t, func(){
+//     GoCrazy()
+//   }, "Calling GoCrazy() should panic")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+  if !assert.Panics(t, f, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Regexp asserts that a specified regexp matches a string.
+// 
+//  assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+//  assert.Regexp(t, "start...$", "it's not starting")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+  if !assert.Regexp(t, rx, str, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// True asserts that the specified value is true.
+// 
+//    assert.True(t, myBool, "myBool should be true")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func True(t TestingT, value bool, msgAndArgs ...interface{}) {
+  if !assert.True(t, value, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+// 
+//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
+  if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
+    t.FailNow()
+  }
+}
+
+
+// Zero asserts that i is the zero value for its type and returns the truth.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
+  if !assert.Zero(t, i, msgAndArgs...) {
+    t.FailNow()
+  }
+}

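For orientation (not part of this commit): every helper in require.go above wraps the matching assert function and calls t.FailNow() when the assertion fails, so a test aborts at the first unmet requirement instead of accumulating errors. A minimal usage sketch under a standard `go test` setup; the struct and values are illustrative only:

package example_test

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseDevice(t *testing.T) {
	var dev struct {
		Name string `json:"name"`
	}
	err := json.Unmarshal([]byte(`{"name":"sensor-01"}`), &dev)
	// require.NoError calls t.FailNow() if err != nil, stopping the test here.
	require.NoError(t, err)
	require.Equal(t, "sensor-01", dev.Name)
}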
+ 6 - 0
vendor/github.com/stretchr/testify/require/require.go.tmpl

@@ -0,0 +1,6 @@
+{{.Comment}}
+func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
+  if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) {
+    t.FailNow()
+  }
+}

+ 388 - 0
vendor/github.com/stretchr/testify/require/require_forward.go

@@ -0,0 +1,388 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+*/
+
+package require
+
+import (
+
+	assert "github.com/stretchr/testify/assert"
+	http "net/http"
+	url "net/url"
+	time "time"
+)
+
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
+	Condition(a.t, comp, msgAndArgs...)
+}
+
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+// 
+//    a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
+//    a.Contains(["Hello", "World"], "World", "But ['Hello', 'World'] does contain 'World'")
+//    a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+	Contains(a.t, s, contains, msgAndArgs...)
+}
+
+
+// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+// 
+//  a.Empty(obj)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
+	Empty(a.t, object, msgAndArgs...)
+}
+
+
+// Equal asserts that two objects are equal.
+// 
+//    a.Equal(123, 123, "123 and 123 should be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+// 
+//   actualObj, err := SomeFunction()
+//   if assert.Error(t, err, "An error was expected") {
+// 	   assert.Equal(t, err, expectedError)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
+	EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+// 
+//    a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+// 
+//   actualObj, err := SomeFunction()
+//   if a.Error(err, "An error was expected") {
+// 	   assert.Equal(t, err, expectedError)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
+	Error(a.t, err, msgAndArgs...)
+}
+
+
+// Exactly asserts that two objects are equal in value and type.
+// 
+//    a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// Fail reports a failure through the TestingT instance.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
+	Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+
+// FailNow fails the test immediately.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
+	FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+
+// False asserts that the specified value is false.
+// 
+//    a.False(myBool, "myBool should be false")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
+	False(a.t, value, msgAndArgs...)
+}
+
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+// 
+//  a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
+	HTTPBodyContains(a.t, handler, method, url, values, str)
+}
+
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+// 
+//  a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
+	HTTPBodyNotContains(a.t, handler, method, url, values, str)
+}
+
+
+// HTTPError asserts that a specified handler returns an error status code.
+// 
+//  a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) {
+	HTTPError(a.t, handler, method, url, values)
+}
+
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+// 
+//  a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) {
+	HTTPRedirect(a.t, handler, method, url, values)
+}
+
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+// 
+//  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) {
+	HTTPSuccess(a.t, handler, method, url, values)
+}
+
+
+// Implements asserts that an object implements the specified interface.
+// 
+//    a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+	Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+
+// InDelta asserts that the two numerals are within delta of each other.
+// 
+// 	 a.InDelta(math.Pi, (22 / 7.0), 0.01)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+	InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+
+// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
+	IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+
+// JSONEq asserts that two JSON strings are equivalent.
+// 
+//  a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
+	JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+// 
+//    a.Len(mySlice, 3, "The size of slice is not 3")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
+	Len(a.t, object, length, msgAndArgs...)
+}
+
+
+// Nil asserts that the specified object is nil.
+// 
+//    a.Nil(err, "err should be nothing")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
+	Nil(a.t, object, msgAndArgs...)
+}
+
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+// 
+//   actualObj, err := SomeFunction()
+//   if a.NoError(err) {
+// 	   assert.Equal(t, actualObj, expectedObj)
+//   }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
+	NoError(a.t, err, msgAndArgs...)
+}
+
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+// 
+//    a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+//    a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
+//    a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+	NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+
+// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+// 
+//  if a.NotEmpty(obj) {
+//    assert.Equal(t, "two", obj[1])
+//  }
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
+	NotEmpty(a.t, object, msgAndArgs...)
+}
+
+
+// NotEqual asserts that the specified values are NOT equal.
+// 
+//    a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+
+// NotNil asserts that the specified object is not nil.
+// 
+//    a.NotNil(err, "err should be something")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
+	NotNil(a.t, object, msgAndArgs...)
+}
+
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+// 
+//   a.NotPanics(func(){
+//     RemainCalm()
+//   }, "Calling RemainCalm() should NOT panic")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+	NotPanics(a.t, f, msgAndArgs...)
+}
+
+
+// NotRegexp asserts that a specified regexp does not match a string.
+// 
+//  a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+//  a.NotRegexp("^start", "it's not starting")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+	NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+
+// NotZero asserts that i is not the zero value for its type and returns the truth.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
+	NotZero(a.t, i, msgAndArgs...)
+}
+
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+// 
+//   a.Panics(func(){
+//     GoCrazy()
+//   }, "Calling GoCrazy() should panic")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+	Panics(a.t, f, msgAndArgs...)
+}
+
+
+// Regexp asserts that a specified regexp matches a string.
+// 
+//  a.Regexp(regexp.MustCompile("start"), "it's starting")
+//  a.Regexp("start...$", "it's not starting")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+	Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+
+// True asserts that the specified value is true.
+// 
+//    a.True(myBool, "myBool should be true")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
+	True(a.t, value, msgAndArgs...)
+}
+
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+// 
+//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+// 
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
+	WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+
+// Zero asserts that i is the zero value for its type and returns the truth.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
+	Zero(a.t, i, msgAndArgs...)
+}

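The forwarder methods above only make sense once an Assertions value is bound to a TestingT; upstream testify provides that constructor as require.New in forward_requirements.go, which is not part of this hunk. A short sketch, assuming that constructor:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestWithAssertions(t *testing.T) {
	// require.New stores t once; each method then forwards to the
	// package-level function with a.t, exactly as the generated methods above do.
	a := require.New(t)
	a.True(len("knowoapi") > 0)
	a.Contains([]string{"device", "user"}, "device")
	a.NotZero(42)
}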
+ 4 - 0
vendor/github.com/stretchr/testify/require/require_forward.go.tmpl

@@ -0,0 +1,4 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
+	{{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}

+ 9 - 0
vendor/github.com/stretchr/testify/require/requirements.go

@@ -0,0 +1,9 @@
+package require
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+	Errorf(format string, args ...interface{})
+	FailNow()
+}
+
+//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl

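Because TestingT only requires Errorf and FailNow, anything that satisfies those two methods can drive the require helpers, not just *testing.T. A hypothetical sketch of an alternative implementation that records failures instead of aborting; the type and its fields are illustrative and not part of testify:

package example

import "fmt"

// recordingT satisfies require.TestingT but records failures instead of
// stopping a goroutine, e.g. for exercising assertions outside `go test`.
type recordingT struct {
	failed   bool
	messages []string
}

func (r *recordingT) Errorf(format string, args ...interface{}) {
	r.messages = append(r.messages, fmt.Sprintf(format, args...))
}

// FailNow here only flags the failure; unlike *testing.T it does not call
// runtime.Goexit, so callers must check the failed flag themselves.
func (r *recordingT) FailNow() {
	r.failed = true
}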
+ 202 - 0
vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2015 xeipuuv
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 190 - 0
vendor/github.com/xeipuuv/gojsonpointer/pointer.go

@@ -0,0 +1,190 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author  			xeipuuv
+// author-github 	https://github.com/xeipuuv
+// author-mail		xeipuuv@gmail.com
+//
+// repository-name	gojsonpointer
+// repository-desc	An implementation of JSON Pointer - Go language
+//
+// description		Main and unique file.
+//
+// created      	25-02-2013
+
+package gojsonpointer
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+const (
+	const_empty_pointer     = ``
+	const_pointer_separator = `/`
+
+	const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
+)
+
+type implStruct struct {
+	mode string // "SET" or "GET"
+
+	inDocument interface{}
+
+	setInValue interface{}
+
+	getOutNode interface{}
+	getOutKind reflect.Kind
+	outError   error
+}
+
+type JsonPointer struct {
+	referenceTokens []string
+}
+
+// NewJsonPointer parses the given string JSON pointer and returns an object
+func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) {
+
+	// Pointer to the root of the document
+	if len(jsonPointerString) == 0 {
+		// Keep referenceTokens nil
+		return
+	}
+	if jsonPointerString[0] != '/' {
+		return p, errors.New(const_invalid_start)
+	}
+
+	p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator)
+	return
+}
+
+// Get uses the pointer to retrieve a value from a JSON document.
+func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+
+	is := &implStruct{mode: "GET", inDocument: document}
+	p.implementation(is)
+	return is.getOutNode, is.getOutKind, is.outError
+
+}
+
+// Set uses the pointer to update a value in a JSON document.
+func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {
+
+	is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
+	p.implementation(is)
+	return document, is.outError
+
+}
+
+// Both Get and Set functions use the same implementation to avoid code duplication
+func (p *JsonPointer) implementation(i *implStruct) {
+
+	kind := reflect.Invalid
+
+	// Full document when empty
+	if len(p.referenceTokens) == 0 {
+		i.getOutNode = i.inDocument
+		i.outError = nil
+		i.getOutKind = kind
+		i.outError = nil
+		return
+	}
+
+	node := i.inDocument
+
+	for ti, token := range p.referenceTokens {
+
+		isLastToken := ti == len(p.referenceTokens)-1
+
+		switch v := node.(type) {
+
+		case map[string]interface{}:
+			decodedToken := decodeReferenceToken(token)
+			if _, ok := v[decodedToken]; ok {
+				node = v[decodedToken]
+				if isLastToken && i.mode == "SET" {
+					v[decodedToken] = i.setInValue
+				}
+			} else {
+				i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
+				i.getOutKind = reflect.Map
+				i.getOutNode = nil
+				return
+			}
+
+		case []interface{}:
+			tokenIndex, err := strconv.Atoi(token)
+			if err != nil {
+				i.outError = fmt.Errorf("Invalid array index '%s'", token)
+				i.getOutKind = reflect.Slice
+				i.getOutNode = nil
+				return
+			}
+			if tokenIndex < 0 || tokenIndex >= len(v) {
+				i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex)
+				i.getOutKind = reflect.Slice
+				i.getOutNode = nil
+				return
+			}
+
+			node = v[tokenIndex]
+			if isLastToken && i.mode == "SET" {
+				v[tokenIndex] = i.setInValue
+			}
+
+		default:
+			i.outError = fmt.Errorf("Invalid token reference '%s'", token)
+			i.getOutKind = reflect.ValueOf(node).Kind()
+			i.getOutNode = nil
+			return
+		}
+
+	}
+
+	i.getOutNode = node
+	i.getOutKind = reflect.ValueOf(node).Kind()
+	i.outError = nil
+}
+
+// String returns the string representation of the pointer.
+func (p *JsonPointer) String() string {
+
+	if len(p.referenceTokens) == 0 {
+		return const_empty_pointer
+	}
+
+	pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)
+
+	return pointerString
+}
+
+// Specific JSON pointer encoding here
+// ~0 => ~
+// ~1 => /
+// ... and vice versa
+
+func decodeReferenceToken(token string) string {
+	step1 := strings.Replace(token, `~1`, `/`, -1)
+	step2 := strings.Replace(step1, `~0`, `~`, -1)
+	return step2
+}
+
+func encodeReferenceToken(token string) string {
+	step1 := strings.Replace(token, `~`, `~0`, -1)
+	step2 := strings.Replace(step1, `/`, `~1`, -1)
+	return step2
+}

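A usage sketch for the pointer implementation above (not part of this commit; the document and pointer are illustrative): Get walks the document token by token, Set replaces the value addressed by the last token in place, and the ~0/~1 escapes are handled by decodeReferenceToken.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/xeipuuv/gojsonpointer"
)

func main() {
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(`{"devices":[{"name":"sensor-01"}]}`), &doc); err != nil {
		panic(err)
	}

	p, err := gojsonpointer.NewJsonPointer("/devices/0/name")
	if err != nil {
		panic(err)
	}

	value, kind, err := p.Get(doc)
	fmt.Println(value, kind, err) // sensor-01 string <nil>

	// Set mutates the decoded document and returns it.
	if _, err := p.Set(doc, "sensor-02"); err != nil {
		panic(err)
	}
}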
+ 202 - 0
vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2015 xeipuuv
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 141 - 0
vendor/github.com/xeipuuv/gojsonreference/reference.go

@@ -0,0 +1,141 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author  			xeipuuv
+// author-github 	https://github.com/xeipuuv
+// author-mail		xeipuuv@gmail.com
+//
+// repository-name	gojsonreference
+// repository-desc	An implementation of JSON Reference - Go language
+//
+// description		Main and unique file.
+//
+// created      	26-02-2013
+
+package gojsonreference
+
+import (
+	"errors"
+	"github.com/xeipuuv/gojsonpointer"
+	"net/url"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+const (
+	const_fragment_char = `#`
+)
+
+func NewJsonReference(jsonReferenceString string) (JsonReference, error) {
+
+	var r JsonReference
+	err := r.parse(jsonReferenceString)
+	return r, err
+
+}
+
+type JsonReference struct {
+	referenceUrl     *url.URL
+	referencePointer gojsonpointer.JsonPointer
+
+	HasFullUrl      bool
+	HasUrlPathOnly  bool
+	HasFragmentOnly bool
+	HasFileScheme   bool
+	HasFullFilePath bool
+}
+
+func (r *JsonReference) GetUrl() *url.URL {
+	return r.referenceUrl
+}
+
+func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
+	return &r.referencePointer
+}
+
+func (r *JsonReference) String() string {
+
+	if r.referenceUrl != nil {
+		return r.referenceUrl.String()
+	}
+
+	if r.HasFragmentOnly {
+		return const_fragment_char + r.referencePointer.String()
+	}
+
+	return r.referencePointer.String()
+}
+
+func (r *JsonReference) IsCanonical() bool {
+	return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
+}
+
+// parse parses the given JSON reference string (the "constructor" logic used by NewJsonReference).
+func (r *JsonReference) parse(jsonReferenceString string) (err error) {
+
+	r.referenceUrl, err = url.Parse(jsonReferenceString)
+	if err != nil {
+		return
+	}
+	refUrl := r.referenceUrl
+
+	if refUrl.Scheme != "" && refUrl.Host != "" {
+		r.HasFullUrl = true
+	} else {
+		if refUrl.Path != "" {
+			r.HasUrlPathOnly = true
+		} else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
+			r.HasFragmentOnly = true
+		}
+	}
+
+	r.HasFileScheme = refUrl.Scheme == "file"
+	if runtime.GOOS == "windows" {
+		// on Windows, a file URL may have an extra leading slash, and if it
+		// doesn't then its first component will be treated as the host by the
+		// Go runtime
+		if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
+			r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
+		} else {
+			r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
+		}
+	} else {
+		r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
+	}
+
+	// invalid json-pointer error means url has no json-pointer fragment. simply ignore error
+	r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)
+
+	return
+}
+
+// Inherits creates a new reference from a parent and a child.
+// If the child cannot inherit from the parent, an error is returned.
+func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
+	childUrl := child.GetUrl()
+	parentUrl := r.GetUrl()
+	if childUrl == nil {
+		return nil, errors.New("childUrl is nil!")
+	}
+	if parentUrl == nil {
+		return nil, errors.New("parentUrl is nil!")
+	}
+
+	ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String())
+	if err != nil {
+		return nil, err
+	}
+	return &ref, err
+}

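And a corresponding sketch for the reference type above (illustrative URLs, not part of this commit): NewJsonReference classifies the reference (full URL, path only, fragment only), and Inherits resolves a child reference against a parent, the kind of resolution JSON Schema tooling such as gojsonschema relies on for $ref handling.

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonreference"
)

func main() {
	parent, err := gojsonreference.NewJsonReference("http://example.com/schemas/device.json")
	if err != nil {
		panic(err)
	}
	child, _ := gojsonreference.NewJsonReference("#/definitions/sensor")
	fmt.Println(child.HasFragmentOnly) // true

	// Inherits resolves the child against the parent's URL.
	resolved, err := parent.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String())   // http://example.com/schemas/device.json#/definitions/sensor
	fmt.Println(resolved.HasFullUrl) // true
}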
+ 202 - 0
vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2015 xeipuuv
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 283 - 0
vendor/github.com/xeipuuv/gojsonschema/errors.go

@@ -0,0 +1,283 @@
+package gojsonschema
+
+import (
+	"bytes"
+	"sync"
+	"text/template"
+)
+
+var errorTemplates errorTemplate = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
+
+// template.Template is not thread-safe for writing, so some locking is needed.
+// A sync.RWMutex keeps lookups cheap and only takes the write lock when a new
+// template is created.
+type errorTemplate struct {
+	*template.Template
+	sync.RWMutex
+}
+
+type (
+	// RequiredError. ErrorDetails: property string
+	RequiredError struct {
+		ResultErrorFields
+	}
+
+	// InvalidTypeError. ErrorDetails: expected, given
+	InvalidTypeError struct {
+		ResultErrorFields
+	}
+
+	// NumberAnyOfError. ErrorDetails: -
+	NumberAnyOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberOneOfError. ErrorDetails: -
+	NumberOneOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberAllOfError. ErrorDetails: -
+	NumberAllOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberNotError. ErrorDetails: -
+	NumberNotError struct {
+		ResultErrorFields
+	}
+
+	// MissingDependencyError. ErrorDetails: dependency
+	MissingDependencyError struct {
+		ResultErrorFields
+	}
+
+	// InternalError. ErrorDetails: error
+	InternalError struct {
+		ResultErrorFields
+	}
+
+	// EnumError. ErrorDetails: allowed
+	EnumError struct {
+		ResultErrorFields
+	}
+
+	// ArrayNoAdditionalItemsError. ErrorDetails: -
+	ArrayNoAdditionalItemsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMinItemsError. ErrorDetails: min
+	ArrayMinItemsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMaxItemsError. ErrorDetails: max
+	ArrayMaxItemsError struct {
+		ResultErrorFields
+	}
+
+	// ItemsMustBeUniqueError. ErrorDetails: type
+	ItemsMustBeUniqueError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMinPropertiesError. ErrorDetails: min
+	ArrayMinPropertiesError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMaxPropertiesError. ErrorDetails: max
+	ArrayMaxPropertiesError struct {
+		ResultErrorFields
+	}
+
+	// AdditionalPropertyNotAllowedError. ErrorDetails: property
+	AdditionalPropertyNotAllowedError struct {
+		ResultErrorFields
+	}
+
+	// InvalidPropertyPatternError. ErrorDetails: property, pattern
+	InvalidPropertyPatternError struct {
+		ResultErrorFields
+	}
+
+	// StringLengthGTEError. ErrorDetails: min
+	StringLengthGTEError struct {
+		ResultErrorFields
+	}
+
+	// StringLengthLTEError. ErrorDetails: max
+	StringLengthLTEError struct {
+		ResultErrorFields
+	}
+
+	// DoesNotMatchPatternError. ErrorDetails: pattern
+	DoesNotMatchPatternError struct {
+		ResultErrorFields
+	}
+
+	// DoesNotMatchFormatError. ErrorDetails: format
+	DoesNotMatchFormatError struct {
+		ResultErrorFields
+	}
+
+	// MultipleOfError. ErrorDetails: multiple
+	MultipleOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberGTEError. ErrorDetails: min
+	NumberGTEError struct {
+		ResultErrorFields
+	}
+
+	// NumberGTError. ErrorDetails: min
+	NumberGTError struct {
+		ResultErrorFields
+	}
+
+	// NumberLTEError. ErrorDetails: max
+	NumberLTEError struct {
+		ResultErrorFields
+	}
+
+	// NumberLTError. ErrorDetails: max
+	NumberLTError struct {
+		ResultErrorFields
+	}
+)
+
+// newError takes a ResultError type and sets the type, context, description, details, value, and field
+func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) {
+	var t string
+	var d string
+	switch err.(type) {
+	case *RequiredError:
+		t = "required"
+		d = locale.Required()
+	case *InvalidTypeError:
+		t = "invalid_type"
+		d = locale.InvalidType()
+	case *NumberAnyOfError:
+		t = "number_any_of"
+		d = locale.NumberAnyOf()
+	case *NumberOneOfError:
+		t = "number_one_of"
+		d = locale.NumberOneOf()
+	case *NumberAllOfError:
+		t = "number_all_of"
+		d = locale.NumberAllOf()
+	case *NumberNotError:
+		t = "number_not"
+		d = locale.NumberNot()
+	case *MissingDependencyError:
+		t = "missing_dependency"
+		d = locale.MissingDependency()
+	case *InternalError:
+		t = "internal"
+		d = locale.Internal()
+	case *EnumError:
+		t = "enum"
+		d = locale.Enum()
+	case *ArrayNoAdditionalItemsError:
+		t = "array_no_additional_items"
+		d = locale.ArrayNoAdditionalItems()
+	case *ArrayMinItemsError:
+		t = "array_min_items"
+		d = locale.ArrayMinItems()
+	case *ArrayMaxItemsError:
+		t = "array_max_items"
+		d = locale.ArrayMaxItems()
+	case *ItemsMustBeUniqueError:
+		t = "unique"
+		d = locale.Unique()
+	case *ArrayMinPropertiesError:
+		t = "array_min_properties"
+		d = locale.ArrayMinProperties()
+	case *ArrayMaxPropertiesError:
+		t = "array_max_properties"
+		d = locale.ArrayMaxProperties()
+	case *AdditionalPropertyNotAllowedError:
+		t = "additional_property_not_allowed"
+		d = locale.AdditionalPropertyNotAllowed()
+	case *InvalidPropertyPatternError:
+		t = "invalid_property_pattern"
+		d = locale.InvalidPropertyPattern()
+	case *StringLengthGTEError:
+		t = "string_gte"
+		d = locale.StringGTE()
+	case *StringLengthLTEError:
+		t = "string_lte"
+		d = locale.StringLTE()
+	case *DoesNotMatchPatternError:
+		t = "pattern"
+		d = locale.DoesNotMatchPattern()
+	case *DoesNotMatchFormatError:
+		t = "format"
+		d = locale.DoesNotMatchFormat()
+	case *MultipleOfError:
+		t = "multiple_of"
+		d = locale.MultipleOf()
+	case *NumberGTEError:
+		t = "number_gte"
+		d = locale.NumberGTE()
+	case *NumberGTError:
+		t = "number_gt"
+		d = locale.NumberGT()
+	case *NumberLTEError:
+		t = "number_lte"
+		d = locale.NumberLTE()
+	case *NumberLTError:
+		t = "number_lt"
+		d = locale.NumberLT()
+	}
+
+	err.SetType(t)
+	err.SetContext(context)
+	err.SetValue(value)
+	err.SetDetails(details)
+	details["field"] = err.Field()
+
+	if _, exists := details["context"]; !exists && context != nil {
+		details["context"] = context.String()
+	}
+
+	err.SetDescription(formatErrorDescription(d, details))
+}
+
+// formatErrorDescription takes a string in the default text/template
+// format and converts it to a string with replacements. The fields come
+// from the ErrorDetails map and vary for each type of error.
+func formatErrorDescription(s string, details ErrorDetails) string {
+
+	var tpl *template.Template
+	var descrAsBuffer bytes.Buffer
+	var err error
+
+	errorTemplates.RLock()
+	tpl = errorTemplates.Lookup(s)
+	errorTemplates.RUnlock()
+
+	if tpl == nil {
+		errorTemplates.Lock()
+		tpl = errorTemplates.New(s)
+
+		if ErrorTemplateFuncs != nil {
+			tpl.Funcs(ErrorTemplateFuncs)
+		}
+
+		tpl, err = tpl.Parse(s)
+		errorTemplates.Unlock()
+
+		if err != nil {
+			return err.Error()
+		}
+	}
+
+	err = tpl.Execute(&descrAsBuffer, details)
+	if err != nil {
+		return err.Error()
+	}
+
+	return descrAsBuffer.String()
+}
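
A minimal sketch of what formatErrorDescription does with a locale message and ErrorDetails, using only the exported pieces (the template name "err" and the sample values are made up):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// A locale message such as InvalidType() is an ordinary text/template string.
	msg := gojsonschema.DefaultLocale{}.InvalidType() // "Invalid type. Expected: {{.expected}}, given: {{.given}}"
	tpl := template.Must(template.New("err").Parse(msg))

	// ErrorDetails is the map the library passes as template data.
	var buf bytes.Buffer
	_ = tpl.Execute(&buf, gojsonschema.ErrorDetails{"expected": "string", "given": "integer"})
	fmt.Println(buf.String()) // Invalid type. Expected: string, given: integer
}
```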

+ 203 - 0
vendor/github.com/xeipuuv/gojsonschema/format_checkers.go

@@ -0,0 +1,203 @@
+package gojsonschema
+
+import (
+	"net"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+type (
+	// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+	FormatChecker interface {
+		IsFormat(input string) bool
+	}
+
+	// FormatCheckerChain holds the formatters
+	FormatCheckerChain struct {
+		formatters map[string]FormatChecker
+	}
+
+	// EmailFormatChecker verifies email address formats
+	EmailFormatChecker struct{}
+
+	// IPV4FormatChecker verifies IP addresses in the ipv4 format
+	IPV4FormatChecker struct{}
+
+	// IPV6FormatChecker verifies IP addresses in the ipv6 format
+	IPV6FormatChecker struct{}
+
+	// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+	//
+	// Valid formats:
+	// 		Partial Time: HH:MM:SS
+	//		Full Date: YYYY-MM-DD
+	// 		Full Time: HH:MM:SSZ-07:00
+	//		Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
+	//
+	// 	Where
+	//		YYYY = 4DIGIT year
+	//		MM = 2DIGIT month ; 01-12
+	//		DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	//		HH = 2DIGIT hour ; 00-23
+	//		MM = 2DIGIT ; 00-59
+	//		SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+	//		T = Literal
+	//		Z = Literal
+	//
+	//	Note: Nanoseconds are also supported in all formats
+	//
+	// http://tools.ietf.org/html/rfc3339#section-5.6
+	DateTimeFormatChecker struct{}
+
+	// URIFormatChecker validates a URI with a valid Scheme per RFC3986
+	URIFormatChecker struct{}
+
+	// URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
+	URIReferenceFormatChecker struct{}
+
+	// HostnameFormatChecker validates a hostname is in the correct format
+	HostnameFormatChecker struct{}
+
+	// UUIDFormatChecker validates a UUID is in the correct format
+	UUIDFormatChecker struct{}
+
+	// RegexFormatChecker validates a regex is in the correct format
+	RegexFormatChecker struct{}
+)
+
+var (
+	// FormatCheckers holds the registered format checkers and is a public
+	// variable so library users can add custom formatters
+	FormatCheckers = FormatCheckerChain{
+		formatters: map[string]FormatChecker{
+			"date-time":     DateTimeFormatChecker{},
+			"hostname":      HostnameFormatChecker{},
+			"email":         EmailFormatChecker{},
+			"ipv4":          IPV4FormatChecker{},
+			"ipv6":          IPV6FormatChecker{},
+			"uri":           URIFormatChecker{},
+			"uri-reference": URIReferenceFormatChecker{},
+			"uuid":          UUIDFormatChecker{},
+			"regex":         RegexFormatChecker{},
+		},
+	}
+
+	// Regex credit: https://github.com/asaskevich/govalidator
+	rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$")
+
+	// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
+	rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
+
+	rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
+)
+
+// Add adds a FormatChecker to the FormatCheckerChain
+// The name used will be the value used for the format key in your json schema
+func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
+	c.formatters[name] = f
+
+	return c
+}
+
+// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
+func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
+	delete(c.formatters, name)
+
+	return c
+}
+
+// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
+func (c *FormatCheckerChain) Has(name string) bool {
+	_, ok := c.formatters[name]
+
+	return ok
+}
+
+// IsFormat will check an input against a FormatChecker with the given name
+// to see if it is the correct format
+func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
+	f, ok := c.formatters[name]
+
+	if !ok {
+		return false
+	}
+
+	if !isKind(input, reflect.String) {
+		return false
+	}
+
+	inputString := input.(string)
+
+	return f.IsFormat(inputString)
+}
+
+func (f EmailFormatChecker) IsFormat(input string) bool {
+	return rxEmail.MatchString(input)
+}
+
+// Credit: https://github.com/asaskevich/govalidator
+func (f IPV4FormatChecker) IsFormat(input string) bool {
+	ip := net.ParseIP(input)
+	return ip != nil && strings.Contains(input, ".")
+}
+
+// Credit: https://github.com/asaskevich/govalidator
+func (f IPV6FormatChecker) IsFormat(input string) bool {
+	ip := net.ParseIP(input)
+	return ip != nil && strings.Contains(input, ":")
+}
+
+func (f DateTimeFormatChecker) IsFormat(input string) bool {
+	formats := []string{
+		"15:04:05",
+		"15:04:05Z07:00",
+		"2006-01-02",
+		time.RFC3339,
+		time.RFC3339Nano,
+	}
+
+	for _, format := range formats {
+		if _, err := time.Parse(format, input); err == nil {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (f URIFormatChecker) IsFormat(input string) bool {
+	u, err := url.Parse(input)
+	if err != nil || u.Scheme == "" {
+		return false
+	}
+
+	return true
+}
+
+func (f URIReferenceFormatChecker) IsFormat(input string) bool {
+	_, err := url.Parse(input)
+	return err == nil
+}
+
+func (f HostnameFormatChecker) IsFormat(input string) bool {
+	return rxHostname.MatchString(input) && len(input) < 256
+}
+
+func (f UUIDFormatChecker) IsFormat(input string) bool {
+	return rxUUID.MatchString(input)
+}
+
+// IsFormat implements FormatChecker interface.
+func (f RegexFormatChecker) IsFormat(input string) bool {
+	if input == "" {
+		return true
+	}
+	_, err := regexp.Compile(input)
+	if err != nil {
+		return false
+	}
+	return true
+}
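
A usage sketch for the checker chain above; the `upper-case` format name and the upperCaseChecker type are made up for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/xeipuuv/gojsonschema"
)

// upperCaseChecker is a hypothetical custom checker that only accepts
// strings already in upper case. It satisfies the FormatChecker interface.
type upperCaseChecker struct{}

func (upperCaseChecker) IsFormat(input string) bool {
	return input == strings.ToUpper(input)
}

func main() {
	// Register the checker under the name a schema would use in "format".
	gojsonschema.FormatCheckers.Add("upper-case", upperCaseChecker{})

	fmt.Println(gojsonschema.FormatCheckers.IsFormat("upper-case", "HELLO"))               // true
	fmt.Println(gojsonschema.FormatCheckers.IsFormat("date-time", "2015-01-01T10:00:00Z")) // true
}
```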

+ 37 - 0
vendor/github.com/xeipuuv/gojsonschema/internalLog.go

@@ -0,0 +1,37 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Very simple log wrapper.
+//					Used for debugging/testing purposes.
+//
+// created          01-01-2015
+
+package gojsonschema
+
+import (
+	"log"
+)
+
+const internalLogEnabled = false
+
+func internalLog(format string, v ...interface{}) {
+	log.Printf(format, v...)
+}

+ 72 - 0
vendor/github.com/xeipuuv/gojsonschema/jsonContext.go

@@ -0,0 +1,72 @@
+// Copyright 2013 MongoDB, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           tolsen
+// author-github    https://github.com/tolsen
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context
+//
+// created          04-09-2013
+
+package gojsonschema
+
+import "bytes"
+
+// jsonContext implements a persistent linked-list of strings
+type jsonContext struct {
+	head string
+	tail *jsonContext
+}
+
+func newJsonContext(head string, tail *jsonContext) *jsonContext {
+	return &jsonContext{head, tail}
+}
+
+// String displays the context in reverse order, i.e. starting from the root.
+// This plays well with the data structure's persistent, Cons-based nature
+// and with a json document's tree structure.
+func (c *jsonContext) String(del ...string) string {
+	byteArr := make([]byte, 0, c.stringLen())
+	buf := bytes.NewBuffer(byteArr)
+	c.writeStringToBuffer(buf, del)
+
+	return buf.String()
+}
+
+func (c *jsonContext) stringLen() int {
+	length := 0
+	if c.tail != nil {
+		length = c.tail.stringLen() + 1 // add 1 for "."
+	}
+
+	length += len(c.head)
+	return length
+}
+
+func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
+	if c.tail != nil {
+		c.tail.writeStringToBuffer(buf, del)
+
+		if len(del) > 0 {
+			buf.WriteString(del[0])
+		} else {
+			buf.WriteString(".")
+		}
+	}
+
+	buf.WriteString(c.head)
+}
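
jsonContext is unexported, so the following is a standalone sketch of the same persistent-list idea with hypothetical names, not code from this package:

```go
package main

import "fmt"

// ctx mirrors jsonContext above: a persistent, singly-linked list where
// extending a context never mutates or copies the shared tail.
type ctx struct {
	head string
	tail *ctx
}

func (c *ctx) String() string {
	if c.tail == nil {
		return c.head
	}
	return c.tail.String() + "." + c.head
}

func main() {
	root := &ctx{head: "(root)"}
	person := &ctx{head: "person", tail: root}

	// Both children share the same tail; nothing is copied.
	first := &ctx{head: "firstName", tail: person}
	last := &ctx{head: "lastName", tail: person}

	fmt.Println(first) // (root).person.firstName
	fmt.Println(last)  // (root).person.lastName
}
```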

+ 341 - 0
vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go

@@ -0,0 +1,341 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description		Different strategies to load JSON files.
+// 					Includes References (file and HTTP), JSON strings and Go types.
+//
+// created          01-02-2015
+
+package gojsonschema
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/xeipuuv/gojsonreference"
+)
+
+var osFS = osFileSystem(os.Open)
+
+// JSON loader interface
+
+type JSONLoader interface {
+	JsonSource() interface{}
+	LoadJSON() (interface{}, error)
+	JsonReference() (gojsonreference.JsonReference, error)
+	LoaderFactory() JSONLoaderFactory
+}
+
+type JSONLoaderFactory interface {
+	New(source string) JSONLoader
+}
+
+type DefaultJSONLoaderFactory struct {
+}
+
+type FileSystemJSONLoaderFactory struct {
+	fs http.FileSystem
+}
+
+func (d DefaultJSONLoaderFactory) New(source string) JSONLoader {
+	return &jsonReferenceLoader{
+		fs:     osFS,
+		source: source,
+	}
+}
+
+func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader {
+	return &jsonReferenceLoader{
+		fs:     f.fs,
+		source: source,
+	}
+}
+
+// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem.
+type osFileSystem func(string) (*os.File, error)
+
+func (o osFileSystem) Open(name string) (http.File, error) {
+	return o(name)
+}
+
+// JSON Reference loader
+// references are used to load JSONs from files and HTTP
+
+type jsonReferenceLoader struct {
+	fs     http.FileSystem
+	source string
+}
+
+func (l *jsonReferenceLoader) JsonSource() interface{} {
+	return l.source
+}
+
+func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) {
+	return gojsonreference.NewJsonReference(l.JsonSource().(string))
+}
+
+func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory {
+	return &FileSystemJSONLoaderFactory{
+		fs: l.fs,
+	}
+}
+
+// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system.
+func NewReferenceLoader(source string) *jsonReferenceLoader {
+	return &jsonReferenceLoader{
+		fs:     osFS,
+		source: source,
+	}
+}
+
+// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system.
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader {
+	return &jsonReferenceLoader{
+		fs:     fs,
+		source: source,
+	}
+}
+
+func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
+
+	var err error
+
+	reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string))
+	if err != nil {
+		return nil, err
+	}
+
+	refToUrl := reference
+	refToUrl.GetUrl().Fragment = ""
+
+	var document interface{}
+
+	if reference.HasFileScheme {
+
+		filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1)
+		if runtime.GOOS == "windows" {
+			// on Windows, a file URL may have an extra leading slash, use slashes
+			// instead of backslashes, and have spaces escaped
+			if strings.HasPrefix(filename, "/") {
+				filename = filename[1:]
+			}
+			filename = filepath.FromSlash(filename)
+		}
+
+		document, err = l.loadFromFile(filename)
+		if err != nil {
+			return nil, err
+		}
+
+	} else {
+
+		document, err = l.loadFromHTTP(refToUrl.String())
+		if err != nil {
+			return nil, err
+		}
+
+	}
+
+	return document, nil
+
+}
+
+func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
+
+	resp, err := http.Get(address)
+	if err != nil {
+		return nil, err
+	}
+
+	// must return HTTP Status 200 OK
+	if resp.StatusCode != http.StatusOK {
+		return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status}))
+	}
+
+	bodyBuff, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
+
+}
+
+func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
+	f, err := l.fs.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	bodyBuff, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, err
+	}
+
+	return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
+
+}
+
+// JSON string loader
+
+type jsonStringLoader struct {
+	source string
+}
+
+func (l *jsonStringLoader) JsonSource() interface{} {
+	return l.source
+}
+
+func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) {
+	return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory {
+	return &DefaultJSONLoaderFactory{}
+}
+
+func NewStringLoader(source string) *jsonStringLoader {
+	return &jsonStringLoader{source: source}
+}
+
+func (l *jsonStringLoader) LoadJSON() (interface{}, error) {
+
+	return decodeJsonUsingNumber(strings.NewReader(l.JsonSource().(string)))
+
+}
+
+// JSON bytes loader
+
+type jsonBytesLoader struct {
+	source []byte
+}
+
+func (l *jsonBytesLoader) JsonSource() interface{} {
+	return l.source
+}
+
+func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) {
+	return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory {
+	return &DefaultJSONLoaderFactory{}
+}
+
+func NewBytesLoader(source []byte) *jsonBytesLoader {
+	return &jsonBytesLoader{source: source}
+}
+
+func (l *jsonBytesLoader) LoadJSON() (interface{}, error) {
+	return decodeJsonUsingNumber(bytes.NewReader(l.JsonSource().([]byte)))
+}
+
+// JSON Go (types) loader
+// used to load JSONs from the code as maps, interface{}, structs ...
+
+type jsonGoLoader struct {
+	source interface{}
+}
+
+func (l *jsonGoLoader) JsonSource() interface{} {
+	return l.source
+}
+
+func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) {
+	return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
+	return &DefaultJSONLoaderFactory{}
+}
+
+func NewGoLoader(source interface{}) *jsonGoLoader {
+	return &jsonGoLoader{source: source}
+}
+
+func (l *jsonGoLoader) LoadJSON() (interface{}, error) {
+
+	// convert it to compliant JSON first to avoid type mismatches
+
+	jsonBytes, err := json.Marshal(l.JsonSource())
+	if err != nil {
+		return nil, err
+	}
+
+	return decodeJsonUsingNumber(bytes.NewReader(jsonBytes))
+
+}
+
+type jsonIOLoader struct {
+	buf *bytes.Buffer
+}
+
+func NewReaderLoader(source io.Reader) (*jsonIOLoader, io.Reader) {
+	buf := &bytes.Buffer{}
+	return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf)
+}
+
+func NewWriterLoader(source io.Writer) (*jsonIOLoader, io.Writer) {
+	buf := &bytes.Buffer{}
+	return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
+}
+
+func (l *jsonIOLoader) JsonSource() interface{} {
+	return l.buf.String()
+}
+
+func (l *jsonIOLoader) LoadJSON() (interface{}, error) {
+	return decodeJsonUsingNumber(l.buf)
+}
+
+func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) {
+	return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
+	return &DefaultJSONLoaderFactory{}
+}
+
+func decodeJsonUsingNumber(r io.Reader) (interface{}, error) {
+
+	var document interface{}
+
+	decoder := json.NewDecoder(r)
+	decoder.UseNumber()
+
+	err := decoder.Decode(&document)
+	if err != nil {
+		return nil, err
+	}
+
+	return document, nil
+
+}
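
A rough end-to-end sketch of the loaders above; the schema text and document are illustrative, and Schema.Validate is defined in a gojsonschema file outside this hunk:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// A schema loaded from a JSON string; file or HTTP sources would use
	// NewReferenceLoader / NewReferenceLoaderFileSystem instead.
	schemaLoader := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)

	schema, err := gojsonschema.NewSchema(schemaLoader)
	if err != nil {
		panic(err)
	}

	// Go values go through NewGoLoader; they are marshalled to JSON first.
	docLoader := gojsonschema.NewGoLoader(map[string]interface{}{"name": "knowoapi"})

	result, err := schema.Validate(docLoader)
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", result.Valid()) // valid: true
}
```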

+ 286 - 0
vendor/github.com/xeipuuv/gojsonschema/locales.go

@@ -0,0 +1,286 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Contains const string and messages.
+//
+// created          01-01-2015
+
+package gojsonschema
+
+type (
+	// locale is an interface for defining custom error strings
+	locale interface {
+		Required() string
+		InvalidType() string
+		NumberAnyOf() string
+		NumberOneOf() string
+		NumberAllOf() string
+		NumberNot() string
+		MissingDependency() string
+		Internal() string
+		Enum() string
+		ArrayNotEnoughItems() string
+		ArrayNoAdditionalItems() string
+		ArrayMinItems() string
+		ArrayMaxItems() string
+		Unique() string
+		ArrayMinProperties() string
+		ArrayMaxProperties() string
+		AdditionalPropertyNotAllowed() string
+		InvalidPropertyPattern() string
+		StringGTE() string
+		StringLTE() string
+		DoesNotMatchPattern() string
+		DoesNotMatchFormat() string
+		MultipleOf() string
+		NumberGTE() string
+		NumberGT() string
+		NumberLTE() string
+		NumberLT() string
+
+		// Schema validations
+		RegexPattern() string
+		GreaterThanZero() string
+		MustBeOfA() string
+		MustBeOfAn() string
+		CannotBeUsedWithout() string
+		CannotBeGT() string
+		MustBeOfType() string
+		MustBeValidRegex() string
+		MustBeValidFormat() string
+		MustBeGTEZero() string
+		KeyCannotBeGreaterThan() string
+		KeyItemsMustBeOfType() string
+		KeyItemsMustBeUnique() string
+		ReferenceMustBeCanonical() string
+		NotAValidType() string
+		Duplicated() string
+		HttpBadStatus() string
+		ParseError() string
+
+		// ErrorFormat
+		ErrorFormat() string
+	}
+
+	// DefaultLocale is the default locale for this package
+	DefaultLocale struct{}
+)
+
+func (l DefaultLocale) Required() string {
+	return `{{.property}} is required`
+}
+
+func (l DefaultLocale) InvalidType() string {
+	return `Invalid type. Expected: {{.expected}}, given: {{.given}}`
+}
+
+func (l DefaultLocale) NumberAnyOf() string {
+	return `Must validate at least one schema (anyOf)`
+}
+
+func (l DefaultLocale) NumberOneOf() string {
+	return `Must validate one and only one schema (oneOf)`
+}
+
+func (l DefaultLocale) NumberAllOf() string {
+	return `Must validate all the schemas (allOf)`
+}
+
+func (l DefaultLocale) NumberNot() string {
+	return `Must not validate the schema (not)`
+}
+
+func (l DefaultLocale) MissingDependency() string {
+	return `Has a dependency on {{.dependency}}`
+}
+
+func (l DefaultLocale) Internal() string {
+	return `Internal Error {{.error}}`
+}
+
+func (l DefaultLocale) Enum() string {
+	return `{{.field}} must be one of the following: {{.allowed}}`
+}
+
+func (l DefaultLocale) ArrayNoAdditionalItems() string {
+	return `No additional items allowed on array`
+}
+
+func (l DefaultLocale) ArrayNotEnoughItems() string {
+	return `Not enough items on array to match positional list of schema`
+}
+
+func (l DefaultLocale) ArrayMinItems() string {
+	return `Array must have at least {{.min}} items`
+}
+
+func (l DefaultLocale) ArrayMaxItems() string {
+	return `Array must have at most {{.max}} items`
+}
+
+func (l DefaultLocale) Unique() string {
+	return `{{.type}} items must be unique`
+}
+
+func (l DefaultLocale) ArrayMinProperties() string {
+	return `Must have at least {{.min}} properties`
+}
+
+func (l DefaultLocale) ArrayMaxProperties() string {
+	return `Must have at most {{.max}} properties`
+}
+
+func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
+	return `Additional property {{.property}} is not allowed`
+}
+
+func (l DefaultLocale) InvalidPropertyPattern() string {
+	return `Property "{{.property}}" does not match pattern {{.pattern}}`
+}
+
+func (l DefaultLocale) StringGTE() string {
+	return `String length must be greater than or equal to {{.min}}`
+}
+
+func (l DefaultLocale) StringLTE() string {
+	return `String length must be less than or equal to {{.max}}`
+}
+
+func (l DefaultLocale) DoesNotMatchPattern() string {
+	return `Does not match pattern '{{.pattern}}'`
+}
+
+func (l DefaultLocale) DoesNotMatchFormat() string {
+	return `Does not match format '{{.format}}'`
+}
+
+func (l DefaultLocale) MultipleOf() string {
+	return `Must be a multiple of {{.multiple}}`
+}
+
+func (l DefaultLocale) NumberGTE() string {
+	return `Must be greater than or equal to {{.min}}`
+}
+
+func (l DefaultLocale) NumberGT() string {
+	return `Must be greater than {{.min}}`
+}
+
+func (l DefaultLocale) NumberLTE() string {
+	return `Must be less than or equal to {{.max}}`
+}
+
+func (l DefaultLocale) NumberLT() string {
+	return `Must be less than {{.max}}`
+}
+
+// Schema validators
+func (l DefaultLocale) RegexPattern() string {
+	return `Invalid regex pattern '{{.pattern}}'`
+}
+
+func (l DefaultLocale) GreaterThanZero() string {
+	return `{{.number}} must be strictly greater than 0`
+}
+
+func (l DefaultLocale) MustBeOfA() string {
+	return `{{.x}} must be of a {{.y}}`
+}
+
+func (l DefaultLocale) MustBeOfAn() string {
+	return `{{.x}} must be of an {{.y}}`
+}
+
+func (l DefaultLocale) CannotBeUsedWithout() string {
+	return `{{.x}} cannot be used without {{.y}}`
+}
+
+func (l DefaultLocale) CannotBeGT() string {
+	return `{{.x}} cannot be greater than {{.y}}`
+}
+
+func (l DefaultLocale) MustBeOfType() string {
+	return `{{.key}} must be of type {{.type}}`
+}
+
+func (l DefaultLocale) MustBeValidRegex() string {
+	return `{{.key}} must be a valid regex`
+}
+
+func (l DefaultLocale) MustBeValidFormat() string {
+	return `{{.key}} must be a valid format {{.given}}`
+}
+
+func (l DefaultLocale) MustBeGTEZero() string {
+	return `{{.key}} must be greater than or equal to 0`
+}
+
+func (l DefaultLocale) KeyCannotBeGreaterThan() string {
+	return `{{.key}} cannot be greater than {{.y}}`
+}
+
+func (l DefaultLocale) KeyItemsMustBeOfType() string {
+	return `{{.key}} items must be {{.type}}`
+}
+
+func (l DefaultLocale) KeyItemsMustBeUnique() string {
+	return `{{.key}} items must be unique`
+}
+
+func (l DefaultLocale) ReferenceMustBeCanonical() string {
+	return `Reference {{.reference}} must be canonical`
+}
+
+func (l DefaultLocale) NotAValidType() string {
+	return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}`
+}
+
+func (l DefaultLocale) Duplicated() string {
+	return `{{.type}} type is duplicated`
+}
+
+func (l DefaultLocale) HttpBadStatus() string {
+	return `Could not read schema from HTTP, response status is {{.status}}`
+}
+
+// Replacement options: field, description, context, value
+func (l DefaultLocale) ErrorFormat() string {
+	return `{{.field}}: {{.description}}`
+}
+
+// Parse error
+func (l DefaultLocale) ParseError() string {
+	return `Expected: %expected%, given: Invalid JSON`
+}
+
+const (
+	STRING_NUMBER                     = "number"
+	STRING_ARRAY_OF_STRINGS           = "array of strings"
+	STRING_ARRAY_OF_SCHEMAS           = "array of schemas"
+	STRING_SCHEMA                     = "schema"
+	STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
+	STRING_PROPERTIES                 = "properties"
+	STRING_DEPENDENCY                 = "dependency"
+	STRING_PROPERTY                   = "property"
+	STRING_UNDEFINED                  = "undefined"
+	STRING_CONTEXT_ROOT               = "(root)"
+	STRING_ROOT_SCHEMA_PROPERTY       = "(root)"
+)
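
Replacing messages presumably means swapping the package-level Locale variable; a sketch with a made-up terseLocale type that embeds DefaultLocale so the promoted methods keep the unexported locale interface satisfied:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

// terseLocale is a hypothetical locale: it embeds DefaultLocale and only
// overrides the messages it cares about; the promoted methods cover the
// remainder of the locale interface.
type terseLocale struct {
	gojsonschema.DefaultLocale
}

func (terseLocale) Required() string {
	return `missing required property {{.property}}`
}

func main() {
	// Locale is a plain package-level variable, so set it before validating.
	gojsonschema.Locale = terseLocale{}
	fmt.Println("custom locale installed")
}
```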

+ 172 - 0
vendor/github.com/xeipuuv/gojsonschema/result.go

@@ -0,0 +1,172 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Result and ResultError implementations.
+//
+// created          01-01-2015
+
+package gojsonschema
+
+import (
+	"fmt"
+	"strings"
+)
+
+type (
+	// ErrorDetails is a map of details specific to each error.
+	// While the values will vary, every error will contain a "field" value
+	ErrorDetails map[string]interface{}
+
+	// ResultError is the interface that library errors must implement
+	ResultError interface {
+		Field() string
+		SetType(string)
+		Type() string
+		SetContext(*jsonContext)
+		Context() *jsonContext
+		SetDescription(string)
+		Description() string
+		SetValue(interface{})
+		Value() interface{}
+		SetDetails(ErrorDetails)
+		Details() ErrorDetails
+		String() string
+	}
+
+	// ResultErrorFields holds the fields for each ResultError implementation.
+	// ResultErrorFields implements the ResultError interface, so custom errors
+	// can be defined by just embedding this type
+	ResultErrorFields struct {
+		errorType   string       // A string with the type of error (i.e. invalid_type)
+		context     *jsonContext // Tree-like notation of the part that failed the validation, e.g. (root).a.b ...
+		description string       // A human readable error message
+		value       interface{}  // Value given by the JSON file that is the source of the error
+		details     ErrorDetails
+	}
+
+	Result struct {
+		errors []ResultError
+		// Scores how well the validation matched. Useful in generating
+		// better error messages for anyOf and oneOf.
+		score int
+	}
+)
+
+// Field outputs the field name without the root context
+// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
+func (v *ResultErrorFields) Field() string {
+	if p, ok := v.Details()["property"]; ok {
+		if str, isString := p.(string); isString {
+			return str
+		}
+	}
+
+	return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
+}
+
+func (v *ResultErrorFields) SetType(errorType string) {
+	v.errorType = errorType
+}
+
+func (v *ResultErrorFields) Type() string {
+	return v.errorType
+}
+
+func (v *ResultErrorFields) SetContext(context *jsonContext) {
+	v.context = context
+}
+
+func (v *ResultErrorFields) Context() *jsonContext {
+	return v.context
+}
+
+func (v *ResultErrorFields) SetDescription(description string) {
+	v.description = description
+}
+
+func (v *ResultErrorFields) Description() string {
+	return v.description
+}
+
+func (v *ResultErrorFields) SetValue(value interface{}) {
+	v.value = value
+}
+
+func (v *ResultErrorFields) Value() interface{} {
+	return v.value
+}
+
+func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
+	v.details = details
+}
+
+func (v *ResultErrorFields) Details() ErrorDetails {
+	return v.details
+}
+
+func (v ResultErrorFields) String() string {
+	// as a fallback, the value is displayed go style
+	valueString := fmt.Sprintf("%v", v.value)
+
+	// marshal the go value to json
+	if v.value == nil {
+		valueString = TYPE_NULL
+	} else {
+		if vs, err := marshalToJsonString(v.value); err == nil {
+			if vs == nil {
+				valueString = TYPE_NULL
+			} else {
+				valueString = *vs
+			}
+		}
+	}
+
+	return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
+		"context":     v.context.String(),
+		"description": v.description,
+		"value":       valueString,
+		"field":       v.Field(),
+	})
+}
+
+func (v *Result) Valid() bool {
+	return len(v.errors) == 0
+}
+
+func (v *Result) Errors() []ResultError {
+	return v.errors
+}
+
+func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) {
+	newError(err, context, value, Locale, details)
+	v.errors = append(v.errors, err)
+	v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
+}
+
+// Used to copy errors from a sub-schema to the main one
+func (v *Result) mergeErrors(otherResult *Result) {
+	v.errors = append(v.errors, otherResult.Errors()...)
+	v.score += otherResult.score
+}
+
+func (v *Result) incrementScore() {
+	v.score++
+}
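
A sketch of how a Result is typically consumed; the package-level Validate helper used here is defined in a gojsonschema file outside this hunk:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schemaLoader := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
	docLoader := gojsonschema.NewStringLoader(`{}`)

	// Validate builds the schema and validates the document in one call.
	result, err := gojsonschema.Validate(schemaLoader, docLoader)
	if err != nil {
		panic(err)
	}

	if !result.Valid() {
		for _, e := range result.Errors() {
			// Field(), Type() and Description() come from the ResultError interface above.
			fmt.Printf("- %s (%s): %s\n", e.Field(), e.Type(), e.Description())
		}
	}
}
```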

+ 933 - 0
vendor/github.com/xeipuuv/gojsonschema/schema.go

@@ -0,0 +1,933 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Defines Schema, the main entry to every subSchema.
+//                  Contains the parsing logic and error checking.
+//
+// created          26-02-2013
+
+package gojsonschema
+
+import (
+	//	"encoding/json"
+	"errors"
+	"reflect"
+	"regexp"
+	"text/template"
+
+	"github.com/xeipuuv/gojsonreference"
+)
+
+var (
+	// Locale is the default locale to use
+	// Library users can overwrite with their own implementation
+	Locale locale = DefaultLocale{}
+
+	// ErrorTemplateFuncs allows you to define custom template funcs for use in localization.
+	ErrorTemplateFuncs template.FuncMap
+)
+
+func NewSchema(l JSONLoader) (*Schema, error) {
+	ref, err := l.JsonReference()
+	if err != nil {
+		return nil, err
+	}
+
+	d := Schema{}
+	d.pool = newSchemaPool(l.LoaderFactory())
+	d.documentReference = ref
+	d.referencePool = newSchemaReferencePool()
+
+	var doc interface{}
+	if ref.String() != "" {
+		// Get document from schema pool
+		spd, err := d.pool.GetDocument(d.documentReference)
+		if err != nil {
+			return nil, err
+		}
+		doc = spd.Document
+	} else {
+		// Load JSON directly
+		doc, err = l.LoadJSON()
+		if err != nil {
+			return nil, err
+		}
+		d.pool.SetStandaloneDocument(doc)
+	}
+
+	err = d.parse(doc)
+	if err != nil {
+		return nil, err
+	}
+
+	return &d, nil
+}
+
+type Schema struct {
+	documentReference gojsonreference.JsonReference
+	rootSchema        *subSchema
+	pool              *schemaPool
+	referencePool     *schemaReferencePool
+}
+
+func (d *Schema) parse(document interface{}) error {
+	d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY}
+	return d.parseSchema(document, d.rootSchema)
+}
+
+func (d *Schema) SetRootSchemaName(name string) {
+	d.rootSchema.property = name
+}
+
+// Parses a subSchema
+//
+// Pretty long function (sorry :) )... but pretty straightforward, repetitive and boring
+// Not much magic involved here, most of the job is to validate the key names and their values,
+// then the values are copied into subSchema struct
+//
+func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.ParseError(),
+			ErrorDetails{
+				"expected": STRING_SCHEMA,
+			},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+
+	if currentSchema == d.rootSchema {
+		currentSchema.ref = &d.documentReference
+	}
+
+	// $subSchema
+	if existsMapKey(m, KEY_SCHEMA) {
+		if !isKind(m[KEY_SCHEMA], reflect.String) {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_STRING,
+					"given":    KEY_SCHEMA,
+				},
+			))
+		}
+		schemaRef := m[KEY_SCHEMA].(string)
+		schemaReference, err := gojsonreference.NewJsonReference(schemaRef)
+		currentSchema.subSchema = &schemaReference
+		if err != nil {
+			return err
+		}
+	}
+
+	// $ref
+	if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given":    KEY_REF,
+			},
+		))
+	}
+	if k, ok := m[KEY_REF].(string); ok {
+
+		jsonReference, err := gojsonreference.NewJsonReference(k)
+		if err != nil {
+			return err
+		}
+
+		if jsonReference.HasFullUrl {
+			currentSchema.ref = &jsonReference
+		} else {
+			inheritedReference, err := currentSchema.ref.Inherits(jsonReference)
+			if err != nil {
+				return err
+			}
+
+			currentSchema.ref = inheritedReference
+		}
+
+		if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok {
+			currentSchema.refSchema = sch
+
+		} else {
+			err := d.parseReference(documentNode, currentSchema, k)
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+	}
+
+	// definitions
+	if existsMapKey(m, KEY_DEFINITIONS) {
+		if isKind(m[KEY_DEFINITIONS], reflect.Map) {
+			currentSchema.definitions = make(map[string]*subSchema)
+			for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
+				if isKind(dv, reflect.Map) {
+					newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref}
+					currentSchema.definitions[dk] = newSchema
+					err := d.parseSchema(dv, newSchema)
+					if err != nil {
+						return errors.New(err.Error())
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.InvalidType(),
+						ErrorDetails{
+							"expected": STRING_ARRAY_OF_SCHEMAS,
+							"given":    KEY_DEFINITIONS,
+						},
+					))
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_ARRAY_OF_SCHEMAS,
+					"given":    KEY_DEFINITIONS,
+				},
+			))
+		}
+
+	}
+
+	// id
+	if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given":    KEY_ID,
+			},
+		))
+	}
+	if k, ok := m[KEY_ID].(string); ok {
+		currentSchema.id = &k
+	}
+
+	// title
+	if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given":    KEY_TITLE,
+			},
+		))
+	}
+	if k, ok := m[KEY_TITLE].(string); ok {
+		currentSchema.title = &k
+	}
+
+	// description
+	if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given":    KEY_DESCRIPTION,
+			},
+		))
+	}
+	if k, ok := m[KEY_DESCRIPTION].(string); ok {
+		currentSchema.description = &k
+	}
+
+	// type
+	if existsMapKey(m, KEY_TYPE) {
+		if isKind(m[KEY_TYPE], reflect.String) {
+			if k, ok := m[KEY_TYPE].(string); ok {
+				err := currentSchema.types.Add(k)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			if isKind(m[KEY_TYPE], reflect.Slice) {
+				arrayOfTypes := m[KEY_TYPE].([]interface{})
+				for _, typeInArray := range arrayOfTypes {
+					if reflect.ValueOf(typeInArray).Kind() != reflect.String {
+						return errors.New(formatErrorDescription(
+							Locale.InvalidType(),
+							ErrorDetails{
+								"expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+								"given":    KEY_TYPE,
+							},
+						))
+					} else {
+						currentSchema.types.Add(typeInArray.(string))
+					}
+				}
+
+			} else {
+				return errors.New(formatErrorDescription(
+					Locale.InvalidType(),
+					ErrorDetails{
+						"expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+						"given":    KEY_TYPE,
+					},
+				))
+			}
+		}
+	}
+
+	// properties
+	if existsMapKey(m, KEY_PROPERTIES) {
+		err := d.parseProperties(m[KEY_PROPERTIES], currentSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	// additionalProperties
+	if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) {
+		if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) {
+			currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool)
+		} else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) {
+			newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.additionalProperties = newSchema
+			err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema)
+			if err != nil {
+				return errors.New(err.Error())
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+					"given":    KEY_ADDITIONAL_PROPERTIES,
+				},
+			))
+		}
+	}
+
+	// patternProperties
+	if existsMapKey(m, KEY_PATTERN_PROPERTIES) {
+		if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) {
+			patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{})
+			if len(patternPropertiesMap) > 0 {
+				currentSchema.patternProperties = make(map[string]*subSchema)
+				for k, v := range patternPropertiesMap {
+					_, err := regexp.MatchString(k, "")
+					if err != nil {
+						return errors.New(formatErrorDescription(
+							Locale.RegexPattern(),
+							ErrorDetails{"pattern": k},
+						))
+					}
+					newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+					err = d.parseSchema(v, newSchema)
+					if err != nil {
+						return errors.New(err.Error())
+					}
+					currentSchema.patternProperties[k] = newSchema
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_SCHEMA,
+					"given":    KEY_PATTERN_PROPERTIES,
+				},
+			))
+		}
+	}
+
+	// dependencies
+	if existsMapKey(m, KEY_DEPENDENCIES) {
+		err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	// items
+	if existsMapKey(m, KEY_ITEMS) {
+		if isKind(m[KEY_ITEMS], reflect.Slice) {
+			for _, itemElement := range m[KEY_ITEMS].([]interface{}) {
+				if isKind(itemElement, reflect.Map) {
+					newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+					newSchema.ref = currentSchema.ref
+					currentSchema.AddItemsChild(newSchema)
+					err := d.parseSchema(itemElement, newSchema)
+					if err != nil {
+						return err
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.InvalidType(),
+						ErrorDetails{
+							"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+							"given":    KEY_ITEMS,
+						},
+					))
+				}
+				currentSchema.itemsChildrenIsSingleSchema = false
+			}
+		} else if isKind(m[KEY_ITEMS], reflect.Map) {
+			newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+			newSchema.ref = currentSchema.ref
+			currentSchema.AddItemsChild(newSchema)
+			err := d.parseSchema(m[KEY_ITEMS], newSchema)
+			if err != nil {
+				return err
+			}
+			currentSchema.itemsChildrenIsSingleSchema = true
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+					"given":    KEY_ITEMS,
+				},
+			))
+		}
+	}
+
+	// additionalItems
+	if existsMapKey(m, KEY_ADDITIONAL_ITEMS) {
+		if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) {
+			currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool)
+		} else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) {
+			newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.additionalItems = newSchema
+			err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema)
+			if err != nil {
+				return errors.New(err.Error())
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+					"given":    KEY_ADDITIONAL_ITEMS,
+				},
+			))
+		}
+	}
+
+	// validation : number / integer
+
+	if existsMapKey(m, KEY_MULTIPLE_OF) {
+		multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF])
+		if multipleOfValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_NUMBER,
+					"given":    KEY_MULTIPLE_OF,
+				},
+			))
+		}
+		if *multipleOfValue <= 0 {
+			return errors.New(formatErrorDescription(
+				Locale.GreaterThanZero(),
+				ErrorDetails{"number": KEY_MULTIPLE_OF},
+			))
+		}
+		currentSchema.multipleOf = multipleOfValue
+	}
+
+	if existsMapKey(m, KEY_MINIMUM) {
+		minimumValue := mustBeNumber(m[KEY_MINIMUM])
+		if minimumValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER},
+			))
+		}
+		currentSchema.minimum = minimumValue
+	}
+
+	if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) {
+		if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
+			if currentSchema.minimum == nil {
+				return errors.New(formatErrorDescription(
+					Locale.CannotBeUsedWithout(),
+					ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
+				))
+			}
+			exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool)
+			currentSchema.exclusiveMinimum = exclusiveMinimumValue
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_MAXIMUM) {
+		maximumValue := mustBeNumber(m[KEY_MAXIMUM])
+		if maximumValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER},
+			))
+		}
+		currentSchema.maximum = maximumValue
+	}
+
+	if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) {
+		if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
+			if currentSchema.maximum == nil {
+				return errors.New(formatErrorDescription(
+					Locale.CannotBeUsedWithout(),
+					ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
+				))
+			}
+			exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool)
+			currentSchema.exclusiveMaximum = exclusiveMaximumValue
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": TYPE_BOOLEAN},
+			))
+		}
+	}
+
+	if currentSchema.minimum != nil && currentSchema.maximum != nil {
+		if *currentSchema.minimum > *currentSchema.maximum {
+			return errors.New(formatErrorDescription(
+				Locale.CannotBeGT(),
+				ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM},
+			))
+		}
+	}
+
+	// validation : string
+
+	if existsMapKey(m, KEY_MIN_LENGTH) {
+		minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH])
+		if minLengthIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER},
+			))
+		}
+		if *minLengthIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MIN_LENGTH},
+			))
+		}
+		currentSchema.minLength = minLengthIntegerValue
+	}
+
+	if existsMapKey(m, KEY_MAX_LENGTH) {
+		maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH])
+		if maxLengthIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER},
+			))
+		}
+		if *maxLengthIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MAX_LENGTH},
+			))
+		}
+		currentSchema.maxLength = maxLengthIntegerValue
+	}
+
+	if currentSchema.minLength != nil && currentSchema.maxLength != nil {
+		if *currentSchema.minLength > *currentSchema.maxLength {
+			return errors.New(formatErrorDescription(
+				Locale.CannotBeGT(),
+				ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_PATTERN) {
+		if isKind(m[KEY_PATTERN], reflect.String) {
+			regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string))
+			if err != nil {
+				return errors.New(formatErrorDescription(
+					Locale.MustBeValidRegex(),
+					ErrorDetails{"key": KEY_PATTERN},
+				))
+			}
+			currentSchema.pattern = regexpObject
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_FORMAT) {
+		formatString, ok := m[KEY_FORMAT].(string)
+		if ok && FormatCheckers.Has(formatString) {
+			currentSchema.format = formatString
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeValidFormat(),
+				ErrorDetails{"key": KEY_FORMAT, "given": m[KEY_FORMAT]},
+			))
+		}
+	}
+
+	// validation : object
+
+	if existsMapKey(m, KEY_MIN_PROPERTIES) {
+		minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES])
+		if minPropertiesIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER},
+			))
+		}
+		if *minPropertiesIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MIN_PROPERTIES},
+			))
+		}
+		currentSchema.minProperties = minPropertiesIntegerValue
+	}
+
+	if existsMapKey(m, KEY_MAX_PROPERTIES) {
+		maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES])
+		if maxPropertiesIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER},
+			))
+		}
+		if *maxPropertiesIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MAX_PROPERTIES},
+			))
+		}
+		currentSchema.maxProperties = maxPropertiesIntegerValue
+	}
+
+	if currentSchema.minProperties != nil && currentSchema.maxProperties != nil {
+		if *currentSchema.minProperties > *currentSchema.maxProperties {
+			return errors.New(formatErrorDescription(
+				Locale.KeyCannotBeGreaterThan(),
+				ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_REQUIRED) {
+		if isKind(m[KEY_REQUIRED], reflect.Slice) {
+			requiredValues := m[KEY_REQUIRED].([]interface{})
+			for _, requiredValue := range requiredValues {
+				if isKind(requiredValue, reflect.String) {
+					err := currentSchema.AddRequired(requiredValue.(string))
+					if err != nil {
+						return err
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.KeyItemsMustBeOfType(),
+						ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING},
+					))
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	// validation : array
+
+	if existsMapKey(m, KEY_MIN_ITEMS) {
+		minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS])
+		if minItemsIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER},
+			))
+		}
+		if *minItemsIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MIN_ITEMS},
+			))
+		}
+		currentSchema.minItems = minItemsIntegerValue
+	}
+
+	if existsMapKey(m, KEY_MAX_ITEMS) {
+		maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS])
+		if maxItemsIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER},
+			))
+		}
+		if *maxItemsIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MAX_ITEMS},
+			))
+		}
+		currentSchema.maxItems = maxItemsIntegerValue
+	}
+
+	if existsMapKey(m, KEY_UNIQUE_ITEMS) {
+		if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) {
+			currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool)
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN},
+			))
+		}
+	}
+
+	// validation : all
+
+	if existsMapKey(m, KEY_ENUM) {
+		if isKind(m[KEY_ENUM], reflect.Slice) {
+			for _, v := range m[KEY_ENUM].([]interface{}) {
+				err := currentSchema.AddEnum(v)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	// validation : subSchema
+
+	if existsMapKey(m, KEY_ONE_OF) {
+		if isKind(m[KEY_ONE_OF], reflect.Slice) {
+			for _, v := range m[KEY_ONE_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddOneOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_ANY_OF) {
+		if isKind(m[KEY_ANY_OF], reflect.Slice) {
+			for _, v := range m[KEY_ANY_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddAnyOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_ALL_OF) {
+		if isKind(m[KEY_ALL_OF], reflect.Slice) {
+			for _, v := range m[KEY_ALL_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddAllOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ALL_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_NOT) {
+		if isKind(m[KEY_NOT], reflect.Map) {
+			newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.SetNot(newSchema)
+			err := d.parseSchema(m[KEY_NOT], newSchema)
+			if err != nil {
+				return err
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT},
+			))
+		}
+	}
+
+	return nil
+}
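The parseSchema routine above compiles every draft-04 keyword it recognizes (type, properties, required, the numeric and string bounds, items, enum, oneOf/anyOf/allOf, not) into a subSchema tree and rejects values of the wrong kind with a formatErrorDescription message. As a minimal sketch, assuming this vendored code is xeipuuv/gojsonschema consumed through its public NewStringLoader/Validate API rather than these internal functions (the import path and the sample schemas below are illustrative assumptions), a schema exercising a few of those keywords would be compiled and applied like this:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Draft-04 schema built from keywords handled by parseSchema above:
	// type, properties, required, minimum, maxLength and pattern.
	schemaLoader := gojsonschema.NewStringLoader(`{
		"type": "object",
		"required": ["name", "age"],
		"properties": {
			"name": {"type": "string", "maxLength": 64, "pattern": "^[A-Za-z ]+$"},
			"age":  {"type": "integer", "minimum": 0}
		}
	}`)

	documentLoader := gojsonschema.NewStringLoader(`{"name": "Ada", "age": -1}`)

	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		// A schema rejected by parseSchema (for example a boolean "pattern")
		// surfaces here as a compile-time error.
		panic(err)
	}
	for _, e := range result.Errors() {
		fmt.Println(e) // e.g. "age: Must be greater than or equal to 0"
	}
}

Under that assumption, a malformed schema never reaches validation: the error built by formatErrorDescription is returned from the compile step itself.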
+
+func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) error {
+	var refdDocumentNode interface{}
+	jsonPointer := currentSchema.ref.GetPointer()
+	standaloneDocument := d.pool.GetStandaloneDocument()
+
+	if standaloneDocument != nil {
+
+		var err error
+		refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument)
+		if err != nil {
+			return err
+		}
+
+	} else {
+		dsp, err := d.pool.GetDocument(*currentSchema.ref)
+		if err != nil {
+			return err
+		}
+
+		refdDocumentNode, _, err = jsonPointer.Get(dsp.Document)
+		if err != nil {
+			return err
+		}
+
+	}
+
+	if !isKind(refdDocumentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT},
+		))
+	}
+
+	// returns the loaded referenced subSchema for the caller to update its current subSchema
+	newSchemaDocument := refdDocumentNode.(map[string]interface{})
+	newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
+	d.referencePool.Add(currentSchema.ref.String()+reference, newSchema)
+
+	err := d.parseSchema(newSchemaDocument, newSchema)
+	if err != nil {
+		return err
+	}
+
+	currentSchema.refSchema = newSchema
+
+	return nil
+
+}
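parseReference resolves the pointer part of a $ref against either the standalone document or a document fetched from the pool, compiles the target as a fresh subSchema, and caches it in the reference pool before attaching it as refSchema. A sketch of a schema that takes this path, under the same assumption about the public xeipuuv/gojsonschema API (schema contents are illustrative only):

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// An internal "#/definitions/..." reference: parseReference resolves the
	// JSON pointer against the pooled document, compiles the target once and
	// stores it in the reference pool.
	schema := gojsonschema.NewStringLoader(`{
		"definitions": {
			"positiveInt": {"type": "integer", "minimum": 0, "exclusiveMinimum": true}
		},
		"type": "object",
		"properties": {
			"count": {"$ref": "#/definitions/positiveInt"}
		}
	}`)

	doc := gojsonschema.NewStringLoader(`{"count": 0}`)

	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		panic(err) // an unresolvable $ref is reported at compile time
	}
	fmt.Println(result.Valid()) // false: 0 is excluded by exclusiveMinimum
}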
+
+func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+	for k := range m {
+		schemaProperty := k
+		newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref}
+		currentSchema.AddPropertiesChild(newSchema)
+		err := d.parseSchema(m[k], newSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+	currentSchema.dependencies = make(map[string]interface{})
+
+	for k := range m {
+		switch reflect.ValueOf(m[k]).Kind() {
+
+		case reflect.Slice:
+			values := m[k].([]interface{})
+			var valuesToRegister []string
+
+			for _, value := range values {
+				if !isKind(value, reflect.String) {
+					return errors.New(formatErrorDescription(
+						Locale.MustBeOfType(),
+						ErrorDetails{
+							"key":  STRING_DEPENDENCY,
+							"type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+						},
+					))
+				}
+				valuesToRegister = append(valuesToRegister, value.(string))
+			}
+			currentSchema.dependencies[k] = valuesToRegister
+
+		case reflect.Map:
+			depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+			err := d.parseSchema(m[k], depSchema)
+			if err != nil {
+				return err
+			}
+			currentSchema.dependencies[k] = depSchema
+
+		default:
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfType(),
+				ErrorDetails{
+					"key":  STRING_DEPENDENCY,
+					"type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+				},
+			))
+		}
+
+	}
+
+	return nil
+}
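parseDependencies accepts the two draft-04 forms: an array of property names (property dependency) and a nested schema (schema dependency); anything else is rejected with the STRING_SCHEMA_OR_ARRAY_OF_STRINGS message. A minimal sketch of the array form, again assuming the public xeipuuv/gojsonschema API and an illustrative schema:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Property dependency (the reflect.Slice case above): if "credit_card"
	// is present in the document, "billing_address" must be present too.
	schema := gojsonschema.NewStringLoader(`{
		"type": "object",
		"dependencies": {
			"credit_card": ["billing_address"]
		}
	}`)

	doc := gojsonschema.NewStringLoader(`{"credit_card": "4111111111111111"}`)

	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(result.Valid()) // false: billing_address is missing
}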

Some files were not shown because too many files changed in this diff.