瀏覽代碼

更新api

lijian 6 年之前
父節點
當前提交
cf619cc6db
共有 100 個文件被更改,包括 21234 次插入、304 次刪除
  1. 11 1
      pkg/models/product.go
  2. 35 9
      pkg/models/user.go
  3. 10 0
      pkg/models/vendor.go
  4. 15 2
      pkg/rpcs/registry.go
  5. 14 0
      pkg/utils/util.go
  6. 9 0
      publish.sh
  7. 2 1
      run.sh
  8. 8 0
      services/apiprovider/actions.go
  9. 1 1
      services/apiprovider/flags.go
  10. 1 0
      services/apiprovider/jwt.go
  11. 16 5
      services/apiprovider/main.go
  12. 57 0
      services/apiprovider/product.go
  13. 8 1
      services/apiprovider/router.go
  14. 25 12
      services/apiprovider/user.go
  15. 2 2
      services/controller/flags.go
  16. 45 0
      services/knowoapi/controllers/errorresponse.go
  17. 16 0
      services/knowoapi/controllers/parsebody.go
  18. 96 0
      services/knowoapi/controllers/produdct.go
  19. 57 0
      services/knowoapi/controllers/token.go
  20. 89 0
      services/knowoapi/controllers/user.go
  21. 50 0
      services/knowoapi/flags.go
  22. 二進制
      services/knowoapi/knowoapi
  23. 47 0
      services/knowoapi/main.go
  24. 18 0
      services/knowoapi/model/all.go
  25. 24 0
      services/knowoapi/model/cache.go
  26. 45 0
      services/knowoapi/model/product.go
  27. 127 0
      services/knowoapi/model/user.go
  28. 33 0
      services/knowoapi/model/vendor.go
  29. 60 0
      services/knowoapi/router.go
  30. 45 0
      services/knowoapi/services/product.go
  31. 76 0
      services/knowoapi/services/user.go
  32. 0 3
      services/registry/db.go
  33. 152 0
      services/registry/product.go
  34. 4 110
      services/registry/registry.go
  35. 2 40
      services/registry/registry_test.go
  36. 0 106
      services/registry/user.go
  37. 0 11
      services/registry/utils.go
  38. 3 0
      vendor/github.com/BurntSushi/toml/COMPATIBLE
  39. 14 0
      vendor/github.com/BurntSushi/toml/COPYING
  40. 19 0
      vendor/github.com/BurntSushi/toml/Makefile
  41. 509 0
      vendor/github.com/BurntSushi/toml/decode.go
  42. 121 0
      vendor/github.com/BurntSushi/toml/decode_meta.go
  43. 27 0
      vendor/github.com/BurntSushi/toml/doc.go
  44. 568 0
      vendor/github.com/BurntSushi/toml/encode.go
  45. 19 0
      vendor/github.com/BurntSushi/toml/encoding_types.go
  46. 18 0
      vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
  47. 953 0
      vendor/github.com/BurntSushi/toml/lex.go
  48. 592 0
      vendor/github.com/BurntSushi/toml/parse.go
  49. 1 0
      vendor/github.com/BurntSushi/toml/session.vim
  50. 91 0
      vendor/github.com/BurntSushi/toml/type_check.go
  51. 242 0
      vendor/github.com/BurntSushi/toml/type_fields.go
  52. 27 0
      vendor/github.com/Joker/jade/LICENSE.md
  53. 193 0
      vendor/github.com/Joker/jade/config.go
  54. 27 0
      vendor/github.com/Joker/jade/config_string.go
  55. 661 0
      vendor/github.com/Joker/jade/jade_lex.go
  56. 556 0
      vendor/github.com/Joker/jade/jade_node.go
  57. 450 0
      vendor/github.com/Joker/jade/jade_parse.go
  58. 220 0
      vendor/github.com/Joker/jade/lex.go
  59. 86 0
      vendor/github.com/Joker/jade/node.go
  60. 146 0
      vendor/github.com/Joker/jade/parse.go
  61. 58 0
      vendor/github.com/Joker/jade/template.go
  62. 20 0
      vendor/github.com/Shopify/goreferrer/LICENSE
  63. 4147 0
      vendor/github.com/Shopify/goreferrer/default_rules.go
  64. 76 0
      vendor/github.com/Shopify/goreferrer/referrer.go
  65. 53 0
      vendor/github.com/Shopify/goreferrer/rich_url.go
  66. 206 0
      vendor/github.com/Shopify/goreferrer/rules.go
  67. 22 0
      vendor/github.com/aymerick/raymond/LICENSE
  68. 785 0
      vendor/github.com/aymerick/raymond/ast/node.go
  69. 279 0
      vendor/github.com/aymerick/raymond/ast/print.go
  70. 95 0
      vendor/github.com/aymerick/raymond/data_frame.go
  71. 65 0
      vendor/github.com/aymerick/raymond/escape.go
  72. 1005 0
      vendor/github.com/aymerick/raymond/eval.go
  73. 382 0
      vendor/github.com/aymerick/raymond/helper.go
  74. 639 0
      vendor/github.com/aymerick/raymond/lexer/lexer.go
  75. 183 0
      vendor/github.com/aymerick/raymond/lexer/token.go
  76. 846 0
      vendor/github.com/aymerick/raymond/parser/parser.go
  77. 360 0
      vendor/github.com/aymerick/raymond/parser/whitespace.go
  78. 85 0
      vendor/github.com/aymerick/raymond/partial.go
  79. 28 0
      vendor/github.com/aymerick/raymond/raymond.go
  80. 84 0
      vendor/github.com/aymerick/raymond/string.go
  81. 248 0
      vendor/github.com/aymerick/raymond/template.go
  82. 85 0
      vendor/github.com/aymerick/raymond/utils.go
  83. 9 0
      vendor/github.com/eknkc/amber/LICENSE
  84. 817 0
      vendor/github.com/eknkc/amber/compiler.go
  85. 257 0
      vendor/github.com/eknkc/amber/doc.go
  86. 285 0
      vendor/github.com/eknkc/amber/parser/nodes.go
  87. 456 0
      vendor/github.com/eknkc/amber/parser/parser.go
  88. 501 0
      vendor/github.com/eknkc/amber/parser/scanner.go
  89. 287 0
      vendor/github.com/eknkc/amber/runtime.go
  90. 21 0
      vendor/github.com/fatih/structs/LICENSE
  91. 141 0
      vendor/github.com/fatih/structs/field.go
  92. 586 0
      vendor/github.com/fatih/structs/structs.go
  93. 32 0
      vendor/github.com/fatih/structs/tags.go
  94. 10 0
      vendor/github.com/flosch/pongo2/AUTHORS
  95. 20 0
      vendor/github.com/flosch/pongo2/LICENSE
  96. 130 0
      vendor/github.com/flosch/pongo2/context.go
  97. 31 0
      vendor/github.com/flosch/pongo2/doc.go
  98. 91 0
      vendor/github.com/flosch/pongo2/error.go
  99. 139 0
      vendor/github.com/flosch/pongo2/filters.go
  100. 927 0
      vendor/github.com/flosch/pongo2/filters_builtin.go

+ 11 - 1
pkg/models/product.go

@@ -2,6 +2,8 @@
 package models
 
 import (
+	"errors"
+
 	"github.com/jinzhu/gorm"
 )
 
@@ -11,7 +13,7 @@ type Product struct {
 	// which vendor
 	VendorID int32
 	// name
-	ProductName string `sql:"type:varchar(200);not null;"`
+	ProductName string `sql:"type:varchar(200);not null;" binding:"required"`
 	// desc
 	ProductDescription string `sql:"type:text;not null;"`
 	// product key to auth a product
@@ -21,3 +23,11 @@ type Product struct {
 
 	Devices []Device
 }
+
+// Validate 验证
+func (a *Product) Validate() error {
+	if a.ProductName == "" || a.VendorID == 0 {
+		return errors.New("非法参数:[ProductName, VendorID]")
+	}
+	return nil
+}

+ 35 - 9
pkg/models/user.go

@@ -1,6 +1,8 @@
 package models
 
 import (
+	"errors"
+
 	"github.com/jinzhu/gorm"
 )
 
@@ -15,21 +17,45 @@ type User struct {
 	Email      string `sql:"type:varchar(200);not null;"`
 	UserType   int    `sql:"default:1;not null;"`
 	VendorID   uint
-	Status     int `sql:"default:1;not null;"`
-	Vendor     Vendor
+	Status     int    `sql:"default:1;not null;"`
+	Vendor     Vendor `gorm:"foreignkey:VendorID"`
+}
+
+// Validate 验证
+func (a *User) Validate() error {
+	if a.UserName == "" || a.UserPass == "" || a.Vendor.VendorName == "" {
+		return errors.New("参数不能为空")
+	}
+	return nil
 }
 
 // LoginRequest 登录请求
 type LoginRequest struct {
-	UserName string `json:"login_name"`
-	Password string `json:"login_pass"`
+	UserName string `json:"login_name" `
+	Password string `json:"login_pass" `
+}
+
+// Validate 验证
+func (a *LoginRequest) Validate() error {
+	if a.UserName == "" || a.Password == "" {
+		return errors.New("参数不能为空")
+	}
+	return nil
 }
 
 // Reqrequest 注册请求
 type Reqrequest struct {
-	UserName   string `json:"username" binding:"required"`
-	PassWord   string `json:"password" binding:"required"`
-	Phone      string `json:"phone" binding:"required"`
-	Email      string `json:"email" binding:"required"`
-	VendorName string `json:"company" binding:"required"`
+	UserName   string `json:"username"`
+	PassWord   string `json:"password" `
+	Phone      string `json:"phone" `
+	Email      string `json:"email" `
+	VendorName string `json:"company"`
+}
+
+// Validate 验证
+func (a *Reqrequest) Validate() error {
+	if a.UserName == "" || a.PassWord == "" || a.Phone == "" || a.Email == "" || a.VendorName == "" {
+		return errors.New("参数不能为空")
+	}
+	return nil
 }

+ 10 - 0
pkg/models/vendor.go

@@ -1,6 +1,8 @@
 package models
 
 import (
+	"errors"
+
 	"github.com/jinzhu/gorm"
 )
 
@@ -16,3 +18,11 @@ type Vendor struct {
 	VendorDescription string `sql:"type:text;not null;"`
 	Products          []Product
 }
+
+// Validate Validate
+func (a *Vendor) Validate() error {
+	if a.VendorName == "" {
+		return errors.New("参数不能为空")
+	}
+	return nil
+}

+ 15 - 2
pkg/rpcs/registry.go

@@ -1,15 +1,28 @@
 package rpcs
 
-// device register args
+// ArgsDeviceRegister device register args
 type ArgsDeviceRegister struct {
 	ProductKey    string
 	DeviceCode    string
 	DeviceVersion string
 }
 
-// device update args
+// ArgsDeviceUpdate device update args
 type ArgsDeviceUpdate struct {
 	DeviceIdentifier  string
 	DeviceName        string
 	DeviceDescription string
 }
+
+// ArgsProductList get products list args
+type ArgsProductList struct {
+	ArgsPage
+	VendorID    uint
+	ProductName string
+}
+
+// ArgsPage page params
+type ArgsPage struct {
+	Pi int
+	Ps int
+}

+ 14 - 0
pkg/utils/util.go

@@ -0,0 +1,14 @@
+package utils
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"fmt"
+)
+
+func Md5(s string) string {
+	h := md5.New()
+	h.Write([]byte(s))
+	cipherStr := h.Sum(nil)
+	return fmt.Sprintf("%s", hex.EncodeToString(cipherStr))
+}

+ 9 - 0
publish.sh

@@ -0,0 +1,9 @@
+git add .
+git commit -m '提交测试'
+git push
+
+ssh root@192.168.175.60 -p kaiwu123
+
+git pull
+
+./install.sh

+ 2 - 1
run.sh

@@ -1,6 +1,6 @@
 export GOPATH=/Users/terrence/go
 
-sudo killall -9 httpaccess registry apiprovider devicemanager controller mqttaccess
+sudo killall -9 httpaccess registry apiprovider devicemanager controller mqttaccess knowoapi
 
 # start services
 #$GOPATH/bin/httpaccess -etcd http://localhost:2379 -httphost internal:443 -loglevel debug -usehttps -keyfile $GOPATH/src/github.com/PandoCloud/pando-cloud/pkg/server/testdata/key.pem -cafile $GOPATH/src/github.com/PandoCloud/pando-cloud/pkg/server/testdata/cert.pem &
@@ -11,6 +11,7 @@ $GOPATH/bin/devicemanager -etcd http://192.168.175.60:2379 -loglevel debug  -rpc
 $GOPATH/bin/controller -etcd http://192.168.175.60:2379 -loglevel debug  -rpchost localhost:20032 &
 #$GOPATH/bin/mqttaccess -etcd http://localhost:2379 -loglevel debug  -rpchost localhost:20030 -tcphost internal:1883 -usetls -keyfile $GOPATH/src/github.com/PandoCloud/pando-cloud/pkg/server/testdata/key.pem -cafile $GOPATH/src/github.com/PandoCloud/pando-cloud/pkg/server/testdata/cert.pem &
 $GOPATH/bin/mqttaccess -etcd http://192.168.175.60:2379 -loglevel debug  -rpchost localhost:20030 -tcphost 0.0.0.0:1883  &
+$GOPATH/bin/knowoapi -etcd http://192.168.175.60:2379 -loglevel debug  -httphost localhost:8888 -dbhost 192.168.175.60 -dbname SparrowCloud -dbport 3306 -dbuser SparrowCloud -dbpass 123456 -aeskey ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP &
 
 exit 0
 

+ 8 - 0
services/apiprovider/actions.go

@@ -2,6 +2,7 @@ package main
 
 import (
 	"encoding/json"
+	"errors"
 	"sparrow/pkg/productconfig"
 	"sparrow/pkg/rpcs"
 
@@ -24,6 +25,13 @@ const (
 	ErrWrongProductConfig = 10006
 	ErrWrongQueryFormat   = 10007
 	ErrAccessDenied       = 10008
+	ErrIllegalityAction   = 10009 //非法操作
+)
+
+var (
+	// ErrBadRequestString 参数不全错误
+	errBadRequestString = errors.New("请求参数不全")
+	errIllegalityString = errors.New("非法操作")
 )
 
 const (

+ 1 - 1
services/apiprovider/flags.go

@@ -6,7 +6,7 @@ import (
 
 const (
 	flagRabbitHost    = "rabbithost"
-	defaultRabbitHost = "amqp://pandocloud:123@192.168.175.60:5672/"
+	defaultRabbitHost = "amqp://knowocloud:123456@192.168.175.60:5672/"
 
 	flagDBHost = "dbhost"
 	flagDBPort = "dbport"

+ 1 - 0
services/apiprovider/jwt.go

@@ -209,6 +209,7 @@ func parseToken(token *jwt.Token) *UserToken {
 		UserName: claims["UserName"].(string),
 		UserType: claims["UserType"].(string),
 		RoleCode: int(claims["RoleCode"].(float64)),
+		VendorID: uint(claims["VendorID"].(float64)),
 	}
 	return ut
 }

+ 16 - 5
services/apiprovider/main.go

@@ -2,11 +2,12 @@ package main
 
 import (
 	"net/http"
+	"sparrow/pkg/mysql"
 	"sparrow/pkg/server"
 
 	"github.com/go-martini/martini"
+	"github.com/jinzhu/gorm"
 	"github.com/martini-contrib/render"
-	"github.com/martini-contrib/sessions"
 	"github.com/xyproto/permissionsql"
 )
 
@@ -19,10 +20,6 @@ func main() {
 	}
 	DSN := *confDBUser + ":" + *confDBPass + "@tcp(" + *confDBHost + ":" + *confDBPort + ")/" + *confDBName + "?charset=utf8&parseTime=True"
 	// martini setup
-	store := sessions.NewCookieStore([]byte("secret123"))
-	store.Options(sessions.Options{
-		MaxAge: 0,
-	})
 
 	martini.Env = martini.Prod
 	handler := martini.Classic()
@@ -65,3 +62,17 @@ func main() {
 		server.Log.Fatal(err)
 	}
 }
+
+func getDB() (*gorm.DB, error) {
+	db, err := mysql.GetClient(*confDBHost, *confDBPort, *confDBName, *confDBUser, *confDBPass)
+	if err != nil {
+		return nil, err
+	}
+	gormdb, err := gorm.Open("mysql", db)
+	if err != nil {
+		return nil, err
+	}
+	gormdb.SingularTable(true)
+	gormdb.LogMode(true)
+	return gormdb, nil
+}

+ 57 - 0
services/apiprovider/product.go

@@ -0,0 +1,57 @@
+package main
+
+import (
+	"net/http"
+	"sparrow/pkg/models"
+	"sparrow/pkg/rpcs"
+	"sparrow/pkg/server"
+	"strconv"
+
+	"github.com/martini-contrib/render"
+)
+
+// SaveProduct 添加或修改产品
+func SaveProduct(user *UserToken, product models.Product, r render.Render) {
+	product.VendorID = int32(user.VendorID)
+	reply := models.Product{}
+	err := server.RPCCallByName("registry", "Registry.SaveProduct", &product, &reply)
+	if err != nil {
+		r.JSON(http.StatusOK, renderError(ErrSystemFault, err))
+		return
+	}
+	r.JSON(http.StatusOK, done(reply))
+	return
+}
+
+// DeleteProduct 删除产品
+func DeleteProduct(user *UserToken, product models.Product, r render.Render) {
+	if int32(user.VendorID) != product.VendorID {
+		r.JSON(http.StatusOK, renderError(ErrIllegalityAction, errIllegalityString))
+		return
+	}
+	r.JSON(http.StatusOK, done("删除成功"))
+	return
+}
+
+// GetProducts 分页
+func GetProducts(user *UserToken, req *http.Request, r render.Render) {
+	pi, _ := strconv.Atoi(req.URL.Query().Get("pi"))
+	ps, _ := strconv.Atoi(req.URL.Query().Get("ps"))
+	name := req.URL.Query().Get("name")
+
+	args := rpcs.ArgsProductList{
+		ProductName: name,
+	}
+	args.Pi = pi
+	args.Ps = ps
+	args.VendorID = user.VendorID
+	var reply map[string]interface{}
+	err := server.RPCCallByName("registry", "Registry.GetProducts", &args, &reply)
+	if err != nil {
+		r.JSON(http.StatusOK, renderError(ErrSystemFault, err))
+		return
+	}
+	r.JSON(http.StatusOK, done(reply))
+	return
+
+}

+ 8 - 1
services/apiprovider/router.go

@@ -12,6 +12,7 @@ import (
 // martini router
 func route(m *martini.ClassicMartini, perm *permissionsql.Permissions) {
 
+	// jwt handler
 	handler := New(Config{
 		ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
 			return []byte(SignedString), nil
@@ -59,8 +60,14 @@ func route(m *martini.ClassicMartini, perm *permissionsql.Permissions) {
 	// user api group
 	// jwt check and pass UserToken
 	m.Group("/api/v1/user", func(r martini.Router) {
-		// 修改密码
+		// user modify password api
 		r.Post("/modifypass", ModifyPassword)
+		// user add a product
+		r.Post("/product", binding.Bind(models.Product{}), SaveProduct)
+		// delete a product
+		r.Delete("/product", binding.Bind(models.Product{}), DeleteProduct)
+		// get products
+		r.Get("/product", GetProducts)
 	}, handler.Serve)
 
 }

+ 25 - 12
services/apiprovider/user.go

@@ -1,7 +1,6 @@
 package main
 
 import (
-	"errors"
 	"net/http"
 	"sparrow/pkg/models"
 	"sparrow/pkg/rpcs"
@@ -15,33 +14,47 @@ import (
 // SignedString 签名
 const SignedString = "www.yehaoji.com"
 
-var (
-	// ErrBadRequestString 参数不全错误
-	ErrBadRequestString = errors.New("请求参数不全")
-)
-
 // UserToken 用户token结构体
 type UserToken struct {
 	UserID   uint
 	UserName string
 	UserType string
 	RoleCode int
+	VendorID uint
 	jwt.StandardClaims
 }
 
 // UserLogin 用户登陆
 func UserLogin(loginRequest models.LoginRequest, r render.Render) {
 
+	reply := models.User{}
+	err := server.RPCCallByName("registry", "Registry.Login", &loginRequest, &reply)
+	if err != nil {
+		r.JSON(http.StatusOK, renderError(ErrSystemFault, err))
+		return
+	}
+
+	usertype := "member"
+	if reply.UserType == 1 {
+		usertype = "admin"
+	}
 	claims := UserToken{
-		UserID:   1,
-		UserName: "test",
-		UserType: "member",
-		RoleCode: 100,
+		UserID:   reply.ID,
+		UserName: reply.UserName,
+		UserType: usertype,
+		RoleCode: reply.UserRoleID,
+		VendorID: reply.VendorID,
 	}
 	claims.ExpiresAt = time.Now().Add(time.Hour * 24).Unix()
 	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
 	ser, _ := token.SignedString([]byte(SignedString))
-	r.JSON(http.StatusOK, ser)
+	result := map[string]interface{}{
+		"username":     reply.UserName,
+		"userkey":      reply.UserKey,
+		"company":      reply.Vendor.VendorName,
+		"access_token": ser,
+	}
+	r.JSON(http.StatusOK, result)
 	return
 }
 
@@ -84,7 +97,7 @@ func ModifyPassword(req *http.Request, r render.Render, user *UserToken) {
 	newpass := req.URL.Query().Get("newpass")
 	oldpass := req.URL.Query().Get("oldpass")
 	if len(newpass) == 0 || len(oldpass) == 0 {
-		r.JSON(http.StatusOK, renderError(ErrWrongRequestFormat, ErrBadRequestString))
+		r.JSON(http.StatusOK, renderError(ErrWrongRequestFormat, errBadRequestString))
 		return
 	}
 	args := rpcs.ArgsUserModifyPass{

+ 2 - 2
services/controller/flags.go

@@ -6,10 +6,10 @@ import (
 
 const (
 	flagMongoHost    = "mongohost"
-	defaultMongoHost = "192.168.175.110:27017"
+	defaultMongoHost = "192.168.175.60:27017"
 
 	flagRabbitHost    = "rabbithost"
-	defaultRabbitHost = "amqp://pandocloud:123@192.168.175.110:5672/"
+	defaultRabbitHost = "amqp://knowocloud:123456@192.168.175.60:5672/"
 )
 
 var (

+ 45 - 0
services/knowoapi/controllers/errorresponse.go

@@ -0,0 +1,45 @@
+package controllers
+
+import (
+	"fmt"
+
+	"github.com/kataras/iris"
+)
+
+const (
+	ErrOk       = 0
+	ErrNormal   = -1
+	ErrDatabase = 10000
+)
+
+// ErrorResponse 失败的httpResponse
+type ErrorResponse struct {
+	Code    int         `json:"code"`    //错误码
+	Message string      `json:"message"` //错误信息
+	Result  interface{} `json:"result"`  //返回的结果
+}
+
+func responseError(ctx iris.Context, code int, message string) {
+	ctx.JSON(ErrorResponse{
+		Code:    code,
+		Message: message,
+	})
+}
+func fail(ctx iris.Context, code int, format string, a ...interface{}) {
+	ctx.StatusCode(code)
+	if format != "" {
+		ctx.Values().Set("reason", fmt.Sprintf(format, a))
+	}
+	ctx.StopExecution()
+}
+func done(ctx iris.Context, result interface{}) {
+	ctx.JSON(ErrorResponse{
+		Code:    0,
+		Message: "success",
+		Result:  result,
+	})
+}
+
+func badRequest(ctx iris.Context, err error) {
+	fail(ctx, iris.StatusBadRequest, "错误:%s", err.Error())
+}

+ 16 - 0
services/knowoapi/controllers/parsebody.go

@@ -0,0 +1,16 @@
+package controllers
+
+import (
+	"knowoapi/schema"
+
+	"github.com/kataras/iris"
+)
+
+// parseBody 转化请求body
+func parseBody(ctx iris.Context, body schema.BodyTemplate) error {
+	err := ctx.ReadJSON(body)
+	if err != nil {
+		return err
+	}
+	return body.Validate()
+}

+ 96 - 0
services/knowoapi/controllers/produdct.go

@@ -0,0 +1,96 @@
+package controllers
+
+import (
+	"sparrow/pkg/models"
+	"sparrow/services/knowoapi/services"
+
+	"github.com/kataras/iris"
+)
+
+// ProductController 产品API
+type ProductController struct {
+	Ctx     iris.Context
+	Service services.ProductService
+	Token   Token
+}
+
+// Post /product 添加产品
+func (a *ProductController) Post() {
+	product := new(models.Product)
+	if err := parseBody(a.Ctx, product); err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	product.VendorID = int32(a.Token.getVendorID(a.Ctx))
+	err := a.Service.Create(product)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	done(a.Ctx, product)
+}
+
+// Delete /product 删除
+func (a *ProductController) Delete() {
+	product := new(models.Product)
+	if err := parseBody(a.Ctx, product); err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	if product.VendorID != int32(a.Token.getVendorID(a.Ctx)) {
+		responseError(a.Ctx, ErrNormal, "非法操作")
+		return
+	}
+	err := a.Service.Delete(product)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	done(a.Ctx, "删除成功")
+}
+
+// Put /produdct 更新产品信息
+func (a *ProductController) Put() {
+	product := new(models.Product)
+	if err := parseBody(a.Ctx, product); err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	if product.VendorID != int32(a.Token.getVendorID(a.Ctx)) {
+		responseError(a.Ctx, ErrNormal, "非法操作")
+		return
+	}
+	pro, err := a.Service.Update(product)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	done(a.Ctx, pro)
+}
+
+// Get /product 查询我的产品
+func (a *ProductController) Get() {
+	pi, err := a.Ctx.URLParamInt("pi")
+	if err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	ps, err := a.Ctx.URLParamInt("ps")
+	if err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	name := a.Ctx.URLParam("companyname")
+	ds, total, err := a.Service.GetVendorProducts(a.Token.getVendorID(a.Ctx),
+		pi,
+		ps,
+		name)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	done(a.Ctx, map[string]interface{}{
+		"list":  ds,
+		"total": total,
+	})
+}

+ 57 - 0
services/knowoapi/controllers/token.go

@@ -0,0 +1,57 @@
+package controllers
+
+import (
+	"sparrow/pkg/models"
+	"sparrow/services/knowoapi/model"
+	"time"
+
+	"github.com/kataras/iris"
+
+	jwt "github.com/dgrijalva/jwt-go"
+)
+
+// Token helper
+type Token struct {
+}
+
+// UserClaims user claim
+type UserClaims struct {
+	UserID   uint
+	UserName string
+	UserType string
+	RoleCode int
+	VendorID uint
+	jwt.StandardClaims
+}
+
+// tokenMaker 生成token
+func (t *Token) tokenMaker(user *models.User) string {
+	claims := UserClaims{
+		UserName: user.UserName,
+		UserID:   user.ID,
+		RoleCode: user.UserRoleID,
+		VendorID: user.VendorID,
+	}
+	claims.ExpiresAt = time.Now().Add(time.Hour * 24).Unix()
+	claims.Issuer = "lutai"
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+	ser, _ := token.SignedString([]byte(model.SignedString))
+	return ser
+}
+
+func (t *Token) getUserClaims(ctx iris.Context) jwt.MapClaims {
+	tk := ctx.Values().Get("jwt").(*jwt.Token)
+	return tk.Claims.(jwt.MapClaims)
+}
+
+// 获取当前用户recordid
+func (t *Token) getRecorID(ctx iris.Context) uint {
+	claims := t.getUserClaims(ctx)
+	return uint(claims["UserID"].(float64))
+}
+
+// 获取当前用户vendorid
+func (t *Token) getVendorID(ctx iris.Context) uint {
+	claims := t.getUserClaims(ctx)
+	return uint(claims["VendorID"].(float64))
+}

+ 89 - 0
services/knowoapi/controllers/user.go

@@ -0,0 +1,89 @@
+package controllers
+
+import (
+	"sparrow/pkg/models"
+	"sparrow/services/knowoapi/model"
+	"sparrow/services/knowoapi/services"
+
+	"github.com/kataras/iris"
+)
+
+// UserController api
+type UserController struct {
+	Ctx     iris.Context
+	Service services.UserService
+	Token   Token
+}
+
+// PostLogin /login
+func (a *UserController) PostLogin() {
+	lr := models.LoginRequest{}
+	if err := parseBody(a.Ctx, &lr); err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	b, user, err := a.Service.Login(&lr)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, "用户名或密码错误")
+		return
+	}
+	if !b {
+		responseError(a.Ctx, model.ErrUserNameOrPassCode, "用户名或密码错误")
+		return
+	}
+	result := map[string]interface{}{
+		"user_id":      user.ID,
+		"company":      user.Vendor.VendorName,
+		"access_token": a.Token.tokenMaker(user),
+		"user_role":    user.UserRoleID,
+	}
+	done(a.Ctx, result)
+}
+
+// PostRegistry 注册
+//POST /registry
+func (a *UserController) PostRegistry() {
+	req := &models.Reqrequest{}
+	if err := parseBody(a.Ctx, req); err != nil {
+		badRequest(a.Ctx, err)
+		return
+	}
+	b, err := a.Service.CheckEmail(req.Email)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	if b {
+		responseError(a.Ctx, -1, "Email已经被注册")
+		return
+	}
+	b, err = a.Service.CheckPhone(req.Phone)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	if b {
+		responseError(a.Ctx, -1, "手机号被注册")
+		return
+	}
+	b, err = a.Service.CheckUserName(req.UserName)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	if b {
+		responseError(a.Ctx, -1, "该用户名已经被使用")
+		return
+	}
+	user, err := a.Service.Register(req)
+	if err != nil {
+		responseError(a.Ctx, ErrDatabase, err.Error())
+		return
+	}
+	done(a.Ctx, map[string]interface{}{
+		"user_name": user.UserName,
+		"user_key":  user.UserKey,
+		"vendor_id": user.VendorID,
+		"user_id":   user.ID,
+	})
+}

+ 50 - 0
services/knowoapi/flags.go

@@ -0,0 +1,50 @@
+package main
+
+import (
+	"flag"
+	"sparrow/pkg/mysql"
+
+	"github.com/jinzhu/gorm"
+)
+
+const (
+	flagRabbitHost    = "rabbithost"
+	defaultRabbitHost = "amqp://knowocloud:123456@192.168.175.60:5672/"
+
+	flagDBHost = "dbhost"
+	flagDBPort = "dbport"
+	flagDBName = "dbname"
+	flagDBUser = "dbuser"
+	flagDBPass = "dbpass"
+
+	flagAESKey = "aeskey"
+
+	defaultDBHost = "192.168.175.60"
+	defaultDBPort = "3306"
+	defaultDBName = "SparrowCloud"
+	defaultDBUser = "SparrowCloud"
+)
+
+var (
+	confDBHost     = flag.String(flagDBHost, defaultDBHost, "database host address.")
+	confDBPort     = flag.String(flagDBPort, defaultDBPort, "database host port.")
+	confDBName     = flag.String(flagDBName, defaultDBName, "database name.")
+	confDBUser     = flag.String(flagDBUser, defaultDBUser, "database user.")
+	confDBPass     = flag.String(flagDBPass, "", "databse password.")
+	confRabbitHost = flag.String(flagRabbitHost, defaultRabbitHost, "rabbitmq host address, amqp://user:password@ip:port/")
+	confAESKey     = flag.String(flagAESKey, "", "use your own aes encryting key.")
+)
+
+func getDB() (*gorm.DB, error) {
+	db, err := mysql.GetClient(*confDBHost, *confDBPort, *confDBName, *confDBUser, *confDBPass)
+	if err != nil {
+		return nil, err
+	}
+	gormdb, err := gorm.Open("mysql", db)
+	if err != nil {
+		return nil, err
+	}
+	gormdb.SingularTable(true)
+	gormdb.LogMode(true)
+	return gormdb, nil
+}

二進制
services/knowoapi/knowoapi


+ 47 - 0
services/knowoapi/main.go

@@ -0,0 +1,47 @@
+package main
+
+import (
+	"sparrow/pkg/generator"
+	"sparrow/pkg/server"
+	"sparrow/services/knowoapi/model"
+
+	"github.com/kataras/iris"
+)
+
+func main() {
+
+	err := server.Init("knowoapi")
+	if err != nil {
+		server.Log.Fatal(err)
+		return
+	}
+
+	//iris init
+
+	app := iris.New()
+	db, err := getDB()
+	if err != nil {
+		server.Log.Fatal(err)
+	}
+	models := new(model.All).Init(db)
+
+	gen, err := generator.NewKeyGenerator(*confAESKey)
+	if err != nil {
+		server.Log.Fatal(err)
+	}
+	registerErrors(app)
+	//注册路由
+	registerRouters(app, models, gen)
+	app.Build()
+	// register a http handler
+	err = server.RegisterHTTPHandler(app)
+	if err != nil {
+		server.Log.Errorf("RegisterHTTPHandler Error: %s", err)
+		return
+	}
+	// go
+	err = server.Run()
+	if err != nil {
+		server.Log.Fatal(err)
+	}
+}

+ 18 - 0
services/knowoapi/model/all.go

@@ -0,0 +1,18 @@
+package model
+
+import "github.com/jinzhu/gorm"
+
+// All 导出
+type All struct {
+	Product *Product
+	Vendor  *Vendor
+	User    *User
+}
+
+// Init 初始化所有model
+func (a *All) Init(db *gorm.DB) *All {
+	a.Product = new(Product).Init(db)
+	a.Vendor = new(Vendor).Init(db)
+	a.User = new(User).Init(db)
+	return a
+}

+ 24 - 0
services/knowoapi/model/cache.go

@@ -0,0 +1,24 @@
+package model
+
+import (
+	"flag"
+	"sparrow/pkg/cache"
+)
+
+const (
+	flagCacheSize    = "cacheSize"
+	defaultCacheSize = 102400
+)
+
+var (
+	confCacheSize = flag.Int(flagCacheSize, defaultCacheSize, "maximum size of cache")
+)
+
+var MemCache cache.Cache
+
+func getCache() cache.Cache {
+	if MemCache == nil {
+		MemCache = cache.NewMemCache(*confCacheSize)
+	}
+	return MemCache
+}

+ 45 - 0
services/knowoapi/model/product.go

@@ -0,0 +1,45 @@
+package model
+
+import (
+	"sparrow/pkg/models"
+
+	"github.com/jinzhu/gorm"
+)
+
+// Product ``
+type Product struct {
+	db *gorm.DB
+}
+
+// Init 初始化
+func (a *Product) Init(_db *gorm.DB) *Product {
+	a.db = _db
+	return a
+}
+
+//Create 添加
+func (a *Product) Create(product *models.Product) error {
+	return a.db.Save(product).Error
+}
+
+// Delete 删除
+func (a *Product) Delete(product *models.Product) error {
+	return a.db.Delete(product).Error
+}
+
+// Update 更新
+func (a *Product) Update(product *models.Product) (pro models.Product, err error) {
+	err = a.db.Model(&pro).Update(product).Error
+	return
+}
+
+// GetVendorProducts 获取厂商的产品列表
+func (a *Product) GetVendorProducts(vendorid uint, pi, ps int, name string) (datas []models.Product, total int, err error) {
+	tx := a.db.Where("vendor_id = ? and 1=1", vendorid)
+	if name != "" {
+		tx = tx.Where("product_name like ?", "%"+name+"%")
+	}
+	err = tx.Limit(ps).Offset((pi - 1) * ps).Find(&datas).Error
+	tx.Model(&models.Product{}).Count(&total)
+	return
+}

+ 127 - 0
services/knowoapi/model/user.go

@@ -0,0 +1,127 @@
+package model
+
+import (
+	"fmt"
+	"sparrow/pkg/models"
+
+	"github.com/jinzhu/gorm"
+)
+
+// SignedString 签名
+const SignedString = "www.yehaoji.com"
+
+const (
+	ErrUserNameOrPassCode = 20001
+	ErrUserNameExsitCode  = 20002
+	ErrPhoneExistCode     = 20003
+	ErrEmailExistCode     = 20004
+)
+
+type User struct {
+	db *gorm.DB
+}
+
+// Init 初始化
+func (a *User) Init(_db *gorm.DB) *User {
+	a.db = _db
+	return a
+}
+
+// Create create
+func (a *User) Create(user *models.User) error {
+	return a.db.Create(user).Error
+}
+
+// LoginCheck 登陆检测
+func (a *User) LoginCheck(loginname, loginpass string) (bool, *models.User, error) {
+	user := &models.User{}
+	err := a.db.
+		Where("(user_name = ? or phone = ? or email = ?) AND user_pass = ?",
+			loginname,
+			loginname,
+			loginname,
+			loginpass).First(user).
+		Error
+	if err != nil {
+		return false, nil, err
+	}
+	if user.ID > 0 {
+		vendor := models.Vendor{}
+		// check cache
+		cache := getCache()
+		key := fmt.Sprintf("Vendor:%d", user.VendorID)
+		if v, ok := cache.Get(key); ok {
+			user.Vendor = v.(models.Vendor)
+		} else {
+			a.db.Model(user).Related(&vendor)
+			cache.Set(key, vendor)
+			user.Vendor = vendor
+		}
+
+		return true, user, nil
+	}
+
+	return false, nil, nil
+}
+
+// UpdatePassword 更新密码
+func (a *User) UpdatePassword(userid int, oldpass, newpass string) (bool, error) {
+	var count int
+	user := &models.User{}
+	err := a.db.Model(user).Where(map[string]interface{}{
+		"id":        userid,
+		"user_pass": oldpass,
+	}).Update("user_pass", newpass).Count(&count).Error
+	if err != nil {
+		return false, err
+	}
+	if count > 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+// CheckUserName 检测用户名
+func (a *User) CheckUserName(name string) (bool, error) {
+	user := models.User{}
+	var count int
+	err := a.db.First(&user).
+		Where("user_name = ?", name).Count(&count).Error
+	if err != nil {
+		return false, err
+	}
+	if count > 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+// CheckPhone 检测手机号
+func (a *User) CheckPhone(name string) (bool, error) {
+	user := models.User{}
+	var count int
+	err := a.db.First(&user).
+		Where("user_name = ?", name).Count(&count).Error
+	if err != nil {
+		return false, err
+	}
+	if count > 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+// CheckEmail 检测Email
+func (a *User) CheckEmail(name string) (bool, error) {
+	user := models.User{}
+	var count int
+	err := a.db.First(&user).
+		Where("user_name = ?", name).Count(&count).Error
+	if err != nil {
+		return false, err
+	}
+	if count > 0 {
+		return true, nil
+	}
+	return false, nil
+}

+ 33 - 0
services/knowoapi/model/vendor.go

@@ -0,0 +1,33 @@
+package model
+
+import (
+	"sparrow/pkg/models"
+
+	"github.com/jinzhu/gorm"
+)
+
+// Vendor 供应商
+type Vendor struct {
+	db *gorm.DB
+}
+
+// Init 1
+func (a *Vendor) Init(db *gorm.DB) *Vendor {
+	a.db = db
+	return a
+}
+
+// Create 增加
+func (a *Vendor) Create(vendor *models.Vendor) error {
+	return a.db.Create(vendor).Error
+}
+
+// Delete delete
+func (a *Vendor) Delete(vendor *models.Vendor) error {
+	return a.db.Delete(vendor).Error
+}
+
+// Update update
+func (a *Vendor) Update(vendor *models.Vendor) error {
+	return a.db.Update(vendor).Error
+}

+ 60 - 0
services/knowoapi/router.go

@@ -0,0 +1,60 @@
+package main
+
+import (
+	"sparrow/pkg/generator"
+	"sparrow/services/knowoapi/controllers"
+	"sparrow/services/knowoapi/model"
+	"sparrow/services/knowoapi/services"
+
+	jwt "github.com/dgrijalva/jwt-go"
+	jwtmiddleware "github.com/iris-contrib/middleware/jwt"
+	"github.com/kataras/iris"
+	"github.com/kataras/iris/mvc"
+)
+
+var router iris.Party
+
+func registerErrors(srv *iris.Application) {
+	srv.OnAnyErrorCode(handleErrors)
+}
+func handleErrors(ctx iris.Context) {
+	//logger.Default().Serve(ctx)
+	err := controllers.ErrorResponse{
+		Code:    ctx.GetStatusCode(),
+		Message: ctx.Values().GetStringDefault("reason", "未知错误"),
+	}
+	ctx.JSON(err)
+}
+
+// jwt 中间件配置
+func newJWThandle() func(ctx iris.Context) {
+	jwtHandler := jwtmiddleware.New(jwtmiddleware.Config{
+		ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
+			return []byte(model.SignedString), nil
+		},
+		ErrorHandler: func(ctx iris.Context, message string) {
+			ctx.StatusCode(iris.StatusUnauthorized)
+			ctx.Values().Set("reason", message)
+		},
+		SigningMethod: jwt.SigningMethodHS256,
+	})
+
+	return jwtHandler.Serve
+}
+
+// 请求路由
+func registerRouters(srv *iris.Application, models *model.All, gen *generator.KeyGenerator) {
+	pService := services.NewProductService(models, gen)
+	userService := services.NewUserService(models, gen)
+	router = srv.Party("/api/v1")
+
+	// 登陆,注册
+	loginAPI := mvc.New(router.Party("/"))
+	loginAPI.Register(userService).Handle(new(controllers.UserController))
+
+	// 用户接口组
+	userRouter := router.Party("/user")
+
+	productAPI := mvc.New(userRouter.Party("/product", newJWThandle()))
+	productAPI.Register(pService).Handle(new(controllers.ProductController))
+}

+ 45 - 0
services/knowoapi/services/product.go

@@ -0,0 +1,45 @@
+package services
+
+import (
+	"sparrow/pkg/generator"
+	"sparrow/pkg/models"
+	"sparrow/services/knowoapi/model"
+)
+
+//ProductService 业务接口
+type ProductService interface {
+	Create(*models.Product) error
+	Delete(*models.Product) error
+	Update(*models.Product) (models.Product, error)
+	// 查询厂商所有产品列表
+	GetVendorProducts(vendorid uint, pi, ps int, name string) ([]models.Product, int, error)
+}
+
+type productService struct {
+	model  *model.All
+	keyGen *generator.KeyGenerator
+}
+
+// NewProductService ·
+func NewProductService(pm *model.All, gen *generator.KeyGenerator) ProductService {
+	return &productService{
+		model:  pm,
+		keyGen: gen,
+	}
+}
+func (p *productService) Create(product *models.Product) error {
+	product.ProductKey, _ = p.keyGen.GenRandomKey(product.ID)
+	return p.model.Product.Create(product)
+}
+
+func (p *productService) Delete(pro *models.Product) error {
+	return p.model.Product.Delete(pro)
+}
+
+func (p *productService) Update(pro *models.Product) (models.Product, error) {
+	return p.model.Product.Update(pro)
+}
+
+func (p *productService) GetVendorProducts(vendorid uint, pi, ps int, name string) ([]models.Product, int, error) {
+	return p.model.Product.GetVendorProducts(vendorid, pi, ps, name)
+}

+ 76 - 0
services/knowoapi/services/user.go

@@ -0,0 +1,76 @@
+package services
+
+import (
+	"sparrow/pkg/generator"
+	"sparrow/pkg/models"
+	"sparrow/pkg/utils"
+	"sparrow/services/knowoapi/model"
+)
+
+// UserService 用户业务接口
+type UserService interface {
+	// 登陆
+	Login(*models.LoginRequest) (bool, *models.User, error)
+	// 注册
+	Register(*models.Reqrequest) (*models.User, error)
+	// 修改密码
+	ModifyPassword(int, string, string) (bool, error)
+	// check name exsits
+	CheckUserName(string) (bool, error)
+	// check phone number exsits
+	CheckPhone(string) (bool, error)
+	// check email exsits
+	CheckEmail(string) (bool, error)
+}
+
+type userservice struct {
+	model  *model.All
+	keyGen *generator.KeyGenerator
+}
+
+// NewUserService ``
+func NewUserService(all *model.All, keyGen *generator.KeyGenerator) UserService {
+	return userservice{
+		model:  all,
+		keyGen: keyGen,
+	}
+}
+
+func (a userservice) Login(user *models.LoginRequest) (bool, *models.User, error) {
+	user.Password = utils.Md5(user.Password + model.SignedString)
+	return a.model.User.LoginCheck(user.UserName, user.Password)
+}
+
+func (a userservice) Register(user *models.Reqrequest) (*models.User, error) {
+	_u := &models.User{}
+	_u.UserKey, _ = a.keyGen.GenRandomKey(_u.ID)
+	_u.UserPass = utils.Md5(user.PassWord + model.SignedString)
+	_u.UserName = user.UserName
+	_u.Phone = user.Phone
+	_u.Email = user.Email
+	vedor := &models.Vendor{
+		VendorName: user.VendorName,
+	}
+	vedor.VendorKey, _ = a.keyGen.GenRandomKey(vedor.ID)
+	err := a.model.Vendor.Create(vedor)
+	if err != nil {
+		return nil, err
+	}
+	_u.VendorID = vedor.ID
+	return _u, a.model.User.Create(_u)
+}
+func (a userservice) ModifyPassword(userid int, new, old string) (bool, error) {
+	old = utils.Md5(old + model.SignedString)
+	new = utils.Md5(new + model.SignedString)
+	return a.model.User.UpdatePassword(userid, new, old)
+}
+
+func (a userservice) CheckUserName(name string) (bool, error) {
+	return a.model.User.CheckUserName(name)
+}
+func (a userservice) CheckPhone(name string) (bool, error) {
+	return a.model.User.CheckPhone(name)
+}
+func (a userservice) CheckEmail(name string) (bool, error) {
+	return a.model.User.CheckEmail(name)
+}

+ 0 - 3
services/registry/db.go

@@ -29,9 +29,6 @@ var (
 	confDBPass = flag.String(flagDBPass, "", "databse password.")
 )
 
-// DB db
-var DB *gorm.DB
-
 func getDB() (*gorm.DB, error) {
 	db, err := mysql.GetClient(*confDBHost, *confDBPort, *confDBName, *confDBUser, *confDBPass)
 	if err != nil {

+ 152 - 0
services/registry/product.go

@@ -0,0 +1,152 @@
+package main
+
+import (
+	"errors"
+	"fmt"
+	"sparrow/pkg/models"
+	"sparrow/pkg/rpcs"
+	"sparrow/pkg/server"
+)
+
// SaveProduct 如果product.ID为空则会添加,否则更新相应产品信息
// SaveProduct creates the product when product.ID is zero, otherwise
// updates it. On create, the first Save obtains the auto-assigned ID,
// a product key is derived from that ID, and the second Save persists
// the key. Any cached copy is invalidated so readers reload from the DB.
func (r *Registry) SaveProduct(product *models.Product, reply *models.Product) error {
	db, err := getDB()
	if err != nil {
		return err
	}

	if product.ID == 0 {
		// create product
		// First save: insert the record so the database assigns product.ID.
		err = db.Save(product).Error
		if err != nil {
			return err
		}

		// The product key encodes the newly assigned ID.
		key, err := r.keygen.GenRandomKey(product.ID)
		if err != nil {
			return err
		}

		product.ProductKey = key
	}

	// Second save: persists the key on create, or applies the update.
	err = db.Save(product).Error
	if err != nil {
		return err
	}

	// Drop any stale cache entry; FindProduct/ValidateProduct repopulate it.
	cache := getCache()
	cacheKey := fmt.Sprintf("Product:%v", product.ID)
	if _, ok := cache.Get(cacheKey); ok {
		cache.Delete(cacheKey)
	}

	// Copy the stored product into the RPC reply.
	setProduct(reply, product)

	return nil
}
+
+// DelProduct 删除一个产品
+func (r *Registry) DelProduct(product *models.Product, reply *models.Product) error {
+	// clear cache
+	cache := getCache()
+	key := fmt.Sprintf("Product:%v", product.ID)
+	if _, ok := cache.Get(key); ok {
+		cache.Delete(key)
+	}
+	// clear db
+	db, err := getDB()
+	if err != nil {
+		return err
+	}
+	err = db.Delete(product).Error
+	if err != nil {
+		server.Log.Errorf("delete product error: %s", err.Error())
+		return errDbActFailt
+	}
+	return nil
+}
+
+// GetProducts 获取当前用户的产品
+func (r *Registry) GetProducts(args *rpcs.ArgsProductList, reply map[string]interface{}) error {
+	db, err := getDB()
+	if err != nil {
+		return err
+	}
+	tx := db.Where("1=1")
+	if args.ProductName != "" {
+		tx = tx.Where("product_name like ?", "%"+args.ProductName+"%")
+	}
+	var datas []models.Product
+	var total int
+	err = tx.Limit(args.Ps).Offset((args.Pi - 1) * args.Ps).Find(&datas).Error
+	tx.Model(&models.Product{}).Count(&total)
+	if err != nil {
+		return err
+	}
+	reply = map[string]interface{}{
+		"list":  datas,
+		"total": total,
+	}
+	return nil
+}
+
// FindProduct will find product by specified ID
// Results are served from the in-process cache when available; on a
// cache miss the record is loaded from the database and a private copy
// is cached, so later mutations of reply cannot corrupt the cache.
func (r *Registry) FindProduct(id int32, reply *models.Product) error {
	db, err := getDB()
	if err != nil {
		return err
	}

	cache := getCache()
	cacheKey := fmt.Sprintf("Product:%v", id)
	if cacheValue, ok := cache.Get(cacheKey); ok {
		// Cache hit: copy the cached product into the RPC reply.
		product := cacheValue.(*models.Product)
		setProduct(reply, product)
	} else {
		err = db.First(reply, id).Error
		if err != nil {
			return err
		}
		// Cache a detached copy, not the caller's reply pointer.
		var storage models.Product
		storage = *reply
		cache.Set(cacheKey, &storage)
	}

	return nil
}
+
+// ValidProduct try to validate the given product key.
+// if success, it will reply the corresponding product
+func (r *Registry) ValidateProduct(key string, reply *models.Product) error {
+	db, err := getDB()
+	if err != nil {
+		return err
+	}
+
+	id, err := r.keygen.DecodeIdFromRandomKey(key)
+	if err != nil {
+		return err
+	}
+
+	cache := getCache()
+	cacheKey := fmt.Sprintf("Product:%v", id)
+	if cacheValue, ok := cache.Get(cacheKey); ok {
+		product := cacheValue.(*models.Product)
+		setProduct(reply, product)
+	} else {
+		err = db.First(reply, id).Error
+		if err != nil {
+			return err
+		}
+		var storage models.Product
+		storage = *reply
+		cache.Set(cacheKey, &storage)
+	}
+
+	if reply.ProductKey != key {
+		return errors.New("product key not match.")
+	}
+
+	return nil
+}

+ 4 - 110
services/registry/registry.go

@@ -15,6 +15,8 @@ const (
 
 var confAESKey = flag.String(flagAESKey, "", "use your own aes encryting key.")
 
+var errDbActFailt = errors.New("数据库操作失败")
+
 type Registry struct {
 	keygen *generator.KeyGenerator
 }
@@ -107,51 +109,13 @@ func (r *Registry) SaveVendor(vendor *models.Vendor, reply *models.Vendor) error
 	if _, ok := cache.Get(cacheKey); ok {
 		cache.Delete(cacheKey)
 	}
-
+	// write cache
+	cache.Set(cacheKey, vendor)
 	setVendor(reply, vendor)
 
 	return nil
 }
 
-// SaveProduct will create a product if the ID field is not initialized
-// if ID field is initialized, it will update the conresponding product.
-func (r *Registry) SaveProduct(product *models.Product, reply *models.Product) error {
-	db, err := getDB()
-	if err != nil {
-		return err
-	}
-
-	if product.ID == 0 {
-		// create product
-		err = db.Save(product).Error
-		if err != nil {
-			return err
-		}
-
-		key, err := r.keygen.GenRandomKey(product.ID)
-		if err != nil {
-			return err
-		}
-
-		product.ProductKey = key
-	}
-
-	err = db.Save(product).Error
-	if err != nil {
-		return err
-	}
-
-	cache := getCache()
-	cacheKey := fmt.Sprintf("Product:%v", product.ID)
-	if _, ok := cache.Get(cacheKey); ok {
-		cache.Delete(cacheKey)
-	}
-
-	setProduct(reply, product)
-
-	return nil
-}
-
 // SaveApplication will create a application if the ID field is not initialized
 // if ID field is initialized, it will update the conresponding application.
 func (r *Registry) SaveApplication(app *models.Application, reply *models.Application) error {
@@ -260,16 +224,6 @@ func (r *Registry) GetVendors(noarg int, reply *[]models.Vendor) error {
 	return db.Find(reply).Error
 }
 
-// GetProducts will get all products in the platform.
-func (r *Registry) GetProducts(noarg int, reply *[]models.Product) error {
-	db, err := getDB()
-	if err != nil {
-		return err
-	}
-
-	return db.Find(reply).Error
-}
-
 // GetApplications will get all applications in the platform.
 func (r *Registry) GetApplications(noarg int, reply *[]models.Application) error {
 	db, err := getDB()
@@ -280,31 +234,6 @@ func (r *Registry) GetApplications(noarg int, reply *[]models.Application) error
 	return db.Find(reply).Error
 }
 
-// FindProduct will find product by specified ID
-func (r *Registry) FindProduct(id int32, reply *models.Product) error {
-	db, err := getDB()
-	if err != nil {
-		return err
-	}
-
-	cache := getCache()
-	cacheKey := fmt.Sprintf("Product:%v", id)
-	if cacheValue, ok := cache.Get(cacheKey); ok {
-		product := cacheValue.(*models.Product)
-		setProduct(reply, product)
-	} else {
-		err = db.First(reply, id).Error
-		if err != nil {
-			return err
-		}
-		var storage models.Product
-		storage = *reply
-		cache.Set(cacheKey, &storage)
-	}
-
-	return nil
-}
-
 // FindAppliation will find product by specified ID
 func (r *Registry) FindApplication(id int32, reply *models.Application) error {
 	db, err := getDB()
@@ -330,41 +259,6 @@ func (r *Registry) FindApplication(id int32, reply *models.Application) error {
 	return nil
 }
 
-// ValidProduct try to validate the given product key.
-// if success, it will reply the corresponding product
-func (r *Registry) ValidateProduct(key string, reply *models.Product) error {
-	db, err := getDB()
-	if err != nil {
-		return err
-	}
-
-	id, err := r.keygen.DecodeIdFromRandomKey(key)
-	if err != nil {
-		return err
-	}
-
-	cache := getCache()
-	cacheKey := fmt.Sprintf("Product:%v", id)
-	if cacheValue, ok := cache.Get(cacheKey); ok {
-		product := cacheValue.(*models.Product)
-		setProduct(reply, product)
-	} else {
-		err = db.First(reply, id).Error
-		if err != nil {
-			return err
-		}
-		var storage models.Product
-		storage = *reply
-		cache.Set(cacheKey, &storage)
-	}
-
-	if reply.ProductKey != key {
-		return errors.New("product key not match.")
-	}
-
-	return nil
-}
-
 // RegisterDevice try to register a device to our platform.
 // if the device has already been registered,
 // the registration will success return the registered device before.

+ 2 - 40
services/registry/registry_test.go

@@ -144,20 +144,7 @@ func testApplication(t *testing.T, r *Registry) {
 		t.Error("wrong key should fail product key validation.")
 	}
 }
-func testRegister(t *testing.T, r *Registry) {
-	args := &models.User{
-		UserName: "lijian",
-		UserPass: "Lijian",
-		Phone:    "15275410996",
-		Email:    "Email@qq1.com",
-	}
-	reply := models.User{}
-	err := r.Register(args, &reply)
-	if err != nil {
-		t.Fatal(err)
-	}
-	t.Log(args)
-}
+
 func testDevice(t *testing.T, r *Registry) {
 	args := &rpcs.ArgsDeviceRegister{
 		ProductKey:    testProductKey,
@@ -210,28 +197,6 @@ func testDevice(t *testing.T, r *Registry) {
 	}
 	t.Log(device)
 }
-func testModifyPassword(t *testing.T, r *Registry) {
-	userid := uint(1)
-	pass := "lijian"
-	oldpass := "1234356"
-	reply := &models.User{}
-	err := r.ModifyPass(userid, oldpass, pass, reply)
-	if err != nil {
-		t.Fatal(err)
-	}
-	t.Log(reply)
-}
-func testLogin(t *testing.T, r *Registry) {
-	reply := &models.User{}
-	err := r.Login(&models.LoginRequest{
-		UserName: "lijian",
-		Password: "lijian",
-	}, reply)
-	if err != nil {
-		t.Fatal(err)
-	}
-	t.Fatal(reply)
-}
 func TestRegistry(t *testing.T) {
 	err := mysql.MigrateDatabase(defaultDBHost, defaultDBPort, defaultDBName, defaultDBUser, "123456")
 	if err != nil {
@@ -249,8 +214,5 @@ func TestRegistry(t *testing.T) {
 	//testVendor(t, r)
 	//testProduct(t, r)
 	//testApplication(t, r)
-	//testDevice(t, r)
-	//testRegister(t, r)
-	//testModifyPassword(t, r)
-	testLogin(t, r)
+	testDevice(t, r)
 }

+ 0 - 106
services/registry/user.go

@@ -1,106 +0,0 @@
-package main
-
-import (
-	"errors"
-	"sparrow/pkg/models"
-	"sparrow/pkg/rpcs"
-)
-
-// ErrUserNameExists 用户名已经存在
-var ErrUserNameExists = errors.New("用户名已经存在")
-
-// ErrPhoneExists 手机号已经存在
-var ErrPhoneExists = errors.New("手机号已经被注册过")
-
-// ErrEmailExists Email地址已经被注册
-var ErrEmailExists = errors.New("Email地址已经被使用")
-
-// ErrNameOrPassError 登录失败
-var ErrNameOrPassError = errors.New("用户名或密码不正确")
-
-// Login 用户登录
-func (r *Registry) Login(args *models.LoginRequest, reply *models.User) error {
-	db, err := getDB()
-	if err != nil {
-		return err
-	}
-
-	err = db.Where("(user_name = ? or phone = ? or email = ?) AND user_pass = ?",
-		args.UserName, args.UserName, args.UserName, mdd(args.Password)).First(&reply).Error
-	if err != nil {
-		return err
-	}
-	if reply.ID == 0 {
-		return ErrNameOrPassError
-	}
-	return nil
-}
-
-// Register 用户注册
-func (r *Registry) Register(args *models.User, reply *models.User) error {
-	db, err := getDB()
-	if err != nil {
-		return err
-	}
-	err = checkUserExists(args.UserName, args.Phone, args.Email)
-	if err != nil {
-		return err
-	}
-	args.UserKey, err = r.keygen.GenRandomKey(args.ID)
-	if err != nil {
-		return err
-	}
-	args.UserPass = mdd(args.UserPass)
-	err = db.Save(args).Error
-	if err != nil {
-		return err
-	}
-	reply = args
-	return nil
-}
-
-//ModifyPass 修改密码
-func (r *Registry) ModifyPass(args *rpcs.ArgsUserModifyPass, reply *models.User) error {
-	db, err := getDB()
-	if err != nil {
-		return err
-	}
-	pass := mdd(args.NewPass)
-	oldpass := mdd(args.OldPass)
-	var count int
-	err = db.Model(&reply).Where(map[string]interface{}{
-		"id":        args.UserID,
-		"user_pass": oldpass,
-	}).Update("user_pass", pass).Count(&count).Error
-	if err != nil {
-		return ErrNameOrPassError
-	}
-	if count == 0 {
-		return ErrNameOrPassError
-	}
-	return nil
-}
-
-//SendVerifyCode 发送手机验证码
-
-//check username, phone or email exists
-func checkUserExists(name, phone, email string) error {
-	db, err := getDB()
-	if err != nil {
-		return err
-	}
-	reply := &models.User{}
-	db.First(reply, map[string]interface{}{"user_name": name})
-	if reply.ID != 0 {
-		return ErrUserNameExists
-	}
-	db.First(reply, map[string]interface{}{"phone": phone})
-	if reply.ID != 0 {
-		return ErrPhoneExists
-	}
-	db.First(reply, map[string]interface{}{"email": email})
-	if reply.ID != 0 {
-		return ErrEmailExists
-	}
-	return nil
-}

+ 0 - 11
services/registry/utils.go

@@ -1,20 +1,9 @@
 package main
 
 import (
-	"crypto/md5"
-	"encoding/hex"
-	"fmt"
 	"strconv"
 )
 
 func genDeviceIdentifier(vendor int32, product uint, device string) string {
 	return strconv.FormatInt(int64(vendor), 16) + "-" + strconv.FormatInt(int64(product), 16) + "-" + device
 }
-
-// mdd md5加密
-func mdd(s string) string {
-	h := md5.New()
-	h.Write([]byte(s))
-	cipherStr := h.Sum(nil)
-	return fmt.Sprintf("%s", hex.EncodeToString(cipherStr))
-}

+ 3 - 0
vendor/github.com/BurntSushi/toml/COMPATIBLE

@@ -0,0 +1,3 @@
+Compatible with TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
+

+ 14 - 0
vendor/github.com/BurntSushi/toml/COPYING

@@ -0,0 +1,14 @@
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+                    Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. You just DO WHAT THE FUCK YOU WANT TO.
+

+ 19 - 0
vendor/github.com/BurntSushi/toml/Makefile

@@ -0,0 +1,19 @@
+install:
+	go install ./...
+
+test: install
+	go test -v
+	toml-test toml-test-decoder
+	toml-test -encoder toml-test-encoder
+
+fmt:
+	gofmt -w *.go */*.go
+	colcheck *.go */*.go
+
+tags:
+	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
+
+push:
+	git push origin master
+	git push github master
+

+ 509 - 0
vendor/github.com/BurntSushi/toml/decode.go

@@ -0,0 +1,509 @@
+package toml
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"reflect"
+	"strings"
+	"time"
+)
+
+func e(format string, args ...interface{}) error {
+	return fmt.Errorf("toml: "+format, args...)
+}
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+	UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+	_, err := Decode(string(p), v)
+	return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+// When using the various `Decode*` functions, the type `Primitive` may
+// be given to any value, and its decoding will be delayed.
+//
+// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+//
+// The underlying representation of a `Primitive` value is subject to change.
+// Do not rely on it.
+//
+// N.B. Primitive values are still parsed, so using them will only avoid
+// the overhead of reflection. They can be useful when you don't know the
+// exact type of TOML data until run time.
+type Primitive struct {
+	undecoded interface{}
+	context   Key
+}
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]bool)}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md.context = primValue.context
+	defer func() { md.context = nil }()
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decode will decode the contents of `data` in TOML format into a pointer
+// `v`.
+//
+// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
+// used interchangeably.)
+//
+// TOML arrays of tables correspond to either a slice of structs or a slice
+// of maps.
+//
+// TOML datetimes correspond to Go `time.Time` values.
+//
+// All other TOML types (float, string, int, bool and array) correspond
+// to the obvious Go types.
+//
+// An exception to the above rules is if a type implements the
+// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
+// (floats, strings, integers, booleans and datetimes) will be converted to
+// a byte string and given to the value's UnmarshalText method. See the
+// Unmarshaler example for a demonstration with time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go
+// struct. The special `toml` struct tag may be used to map TOML keys to
+// struct fields that don't match the key name exactly. (See the example.)
+// A case insensitive match to struct names will be tried if an exact match
+// can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there
+// may exist TOML values that cannot be placed into your representation, and
+// there may be parts of your representation that do not correspond to
+// TOML values. This loose mapping can be made stricter by using the IsDefined
+// and/or Undecoded methods on the MetaData returned.
+//
+// This decoder will not handle cyclic types. If a cyclic type is passed,
+// `Decode` will not terminate.
+func Decode(data string, v interface{}) (MetaData, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+	}
+	if rv.IsNil() {
+		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+	}
+	p, err := parse(data)
+	if err != nil {
+		return MetaData{}, err
+	}
+	md := MetaData{
+		p.mapping, p.types, p.ordered,
+		make(map[string]bool, len(p.ordered)), nil,
+	}
+	return md, md.unify(p.mapping, indirect(rv))
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at `fpath` and decode it for you.
+func DecodeFile(fpath string, v interface{}) (MetaData, error) {
+	bs, err := ioutil.ReadFile(fpath)
+	if err != nil {
+		return MetaData{}, err
+	}
+	return Decode(string(bs), v)
+}
+
+// DecodeReader is just like Decode, except it will consume all bytes
+// from the reader and decode it for you.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+	bs, err := ioutil.ReadAll(r)
+	if err != nil {
+		return MetaData{}, err
+	}
+	return Decode(string(bs), v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+
+	// Special case. Look for a `Primitive` value.
+	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+		// Save the undecoded data and the key context into the primitive
+		// value.
+		context := make(Key, len(md.context))
+		copy(context, md.context)
+		rv.Set(reflect.ValueOf(Primitive{
+			undecoded: data,
+			context:   context,
+		}))
+		return nil
+	}
+
+	// Special case. Unmarshaler Interface support.
+	if rv.CanAddr() {
+		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+			return v.UnmarshalTOML(data)
+		}
+	}
+
+	// Special case. Handle time.Time values specifically.
+	// TODO: Remove this code when we decide to drop support for Go 1.1.
+	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
+	// interfaces.
+	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
+		return md.unifyDatetime(data, rv)
+	}
+
+	// Special case. Look for a value satisfying the TextUnmarshaler interface.
+	if v, ok := rv.Interface().(TextUnmarshaler); ok {
+		return md.unifyText(data, v)
+	}
+	// BUG(burntsushi)
+	// The behavior here is incorrect whenever a Go type satisfies the
+	// encoding.TextUnmarshaler interface but also corresponds to a TOML
+	// hash or array. In particular, the unmarshaler should only be applied
+	// to primitive TOML values. But at this point, it will be applied to
+	// all kinds of values and produce an incorrect error whenever those values
+	// are hashes or arrays (including arrays of tables).
+
+	k := rv.Kind()
+
+	// laziness
+	if k >= reflect.Int && k <= reflect.Uint64 {
+		return md.unifyInt(data, rv)
+	}
+	switch k {
+	case reflect.Ptr:
+		elem := reflect.New(rv.Type().Elem())
+		err := md.unify(data, reflect.Indirect(elem))
+		if err != nil {
+			return err
+		}
+		rv.Set(elem)
+		return nil
+	case reflect.Struct:
+		return md.unifyStruct(data, rv)
+	case reflect.Map:
+		return md.unifyMap(data, rv)
+	case reflect.Array:
+		return md.unifyArray(data, rv)
+	case reflect.Slice:
+		return md.unifySlice(data, rv)
+	case reflect.String:
+		return md.unifyString(data, rv)
+	case reflect.Bool:
+		return md.unifyBool(data, rv)
+	case reflect.Interface:
+		// we only support empty interfaces.
+		if rv.NumMethod() > 0 {
+			return e("unsupported type %s", rv.Type())
+		}
+		return md.unifyAnything(data, rv)
+	case reflect.Float32:
+		fallthrough
+	case reflect.Float64:
+		return md.unifyFloat64(data, rv)
+	}
+	return e("unsupported type %s", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		if mapping == nil {
+			return nil
+		}
+		return e("type mismatch for %s: expected table but found %T",
+			rv.Type().String(), mapping)
+	}
+
+	for key, datum := range tmap {
+		var f *field
+		fields := cachedTypeFields(rv.Type())
+		for i := range fields {
+			ff := &fields[i]
+			if ff.name == key {
+				f = ff
+				break
+			}
+			if f == nil && strings.EqualFold(ff.name, key) {
+				f = ff
+			}
+		}
+		if f != nil {
+			subv := rv
+			for _, i := range f.index {
+				subv = indirect(subv.Field(i))
+			}
+			if isUnifiable(subv) {
+				md.decoded[md.context.add(key).String()] = true
+				md.context = append(md.context, key)
+				if err := md.unify(datum, subv); err != nil {
+					return err
+				}
+				md.context = md.context[0 : len(md.context)-1]
+			} else if f.name != "" {
+				// Bad user! No soup for you!
+				return e("cannot write unexported field %s.%s",
+					rv.Type().String(), f.name)
+			}
+		}
+	}
+	return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		if tmap == nil {
+			return nil
+		}
+		return badtype("map", mapping)
+	}
+	if rv.IsNil() {
+		rv.Set(reflect.MakeMap(rv.Type()))
+	}
+	for k, v := range tmap {
+		md.decoded[md.context.add(k).String()] = true
+		md.context = append(md.context, k)
+
+		rvkey := indirect(reflect.New(rv.Type().Key()))
+		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+		if err := md.unify(v, rvval); err != nil {
+			return err
+		}
+		md.context = md.context[0 : len(md.context)-1]
+
+		rvkey.SetString(k)
+		rv.SetMapIndex(rvkey, rvval)
+	}
+	return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		if !datav.IsValid() {
+			return nil
+		}
+		return badtype("slice", data)
+	}
+	sliceLen := datav.Len()
+	if sliceLen != rv.Len() {
+		return e("expected array length %d; got TOML array of length %d",
+			rv.Len(), sliceLen)
+	}
+	return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		if !datav.IsValid() {
+			return nil
+		}
+		return badtype("slice", data)
+	}
+	n := datav.Len()
+	if rv.IsNil() || rv.Cap() < n {
+		rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+	}
+	rv.SetLen(n)
+	return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+	sliceLen := data.Len()
+	for i := 0; i < sliceLen; i++ {
+		v := data.Index(i).Interface()
+		sliceval := indirect(rv.Index(i))
+		if err := md.unify(v, sliceval); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+	if _, ok := data.(time.Time); ok {
+		rv.Set(reflect.ValueOf(data))
+		return nil
+	}
+	return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+	if s, ok := data.(string); ok {
+		rv.SetString(s)
+		return nil
+	}
+	return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(float64); ok {
+		switch rv.Kind() {
+		case reflect.Float32:
+			fallthrough
+		case reflect.Float64:
+			rv.SetFloat(num)
+		default:
+			panic("bug")
+		}
+		return nil
+	}
+	return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(int64); ok {
+		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+			switch rv.Kind() {
+			case reflect.Int, reflect.Int64:
+				// No bounds checking necessary.
+			case reflect.Int8:
+				if num < math.MinInt8 || num > math.MaxInt8 {
+					return e("value %d is out of range for int8", num)
+				}
+			case reflect.Int16:
+				if num < math.MinInt16 || num > math.MaxInt16 {
+					return e("value %d is out of range for int16", num)
+				}
+			case reflect.Int32:
+				if num < math.MinInt32 || num > math.MaxInt32 {
+					return e("value %d is out of range for int32", num)
+				}
+			}
+			rv.SetInt(num)
+		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+			unum := uint64(num)
+			switch rv.Kind() {
+			case reflect.Uint, reflect.Uint64:
+				// No bounds checking necessary.
+			case reflect.Uint8:
+				if num < 0 || unum > math.MaxUint8 {
+					return e("value %d is out of range for uint8", num)
+				}
+			case reflect.Uint16:
+				if num < 0 || unum > math.MaxUint16 {
+					return e("value %d is out of range for uint16", num)
+				}
+			case reflect.Uint32:
+				if num < 0 || unum > math.MaxUint32 {
+					return e("value %d is out of range for uint32", num)
+				}
+			}
+			rv.SetUint(unum)
+		} else {
+			panic("unreachable")
+		}
+		return nil
+	}
+	return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+	if b, ok := data.(bool); ok {
+		rv.SetBool(b)
+		return nil
+	}
+	return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+	rv.Set(reflect.ValueOf(data))
+	return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+	var s string
+	switch sdata := data.(type) {
+	case TextMarshaler:
+		text, err := sdata.MarshalText()
+		if err != nil {
+			return err
+		}
+		s = string(text)
+	case fmt.Stringer:
+		s = sdata.String()
+	case string:
+		s = sdata
+	case bool:
+		s = fmt.Sprintf("%v", sdata)
+	case int64:
+		s = fmt.Sprintf("%d", sdata)
+	case float64:
+		s = fmt.Sprintf("%f", sdata)
+	default:
+		return badtype("primitive (string-like)", data)
+	}
+	if err := v.UnmarshalText([]byte(s)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+	return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr {
+		if v.CanSet() {
+			pv := v.Addr()
+			if _, ok := pv.Interface().(TextUnmarshaler); ok {
+				return pv
+			}
+		}
+		return v
+	}
+	if v.IsNil() {
+		v.Set(reflect.New(v.Type().Elem()))
+	}
+	return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+	if rv.CanSet() {
+		return true
+	}
+	if _, ok := rv.Interface().(TextUnmarshaler); ok {
+		return true
+	}
+	return false
+}
+
+func badtype(expected string, data interface{}) error {
+	return e("cannot load TOML value of type %T into a Go %s", data, expected)
+}

+ 121 - 0
vendor/github.com/BurntSushi/toml/decode_meta.go

@@ -0,0 +1,121 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	keys    []Key
+	decoded map[string]bool
+	context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the key given exists in the TOML data. The key
+// should be specified hierarchially. e.g.,
+//
+//	// access the TOML key 'a.b.c'
+//	IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+	if len(key) == 0 {
+		return false
+	}
+
+	var hash map[string]interface{}
+	var ok bool
+	var hashOrVal interface{} = md.mapping
+	for _, k := range key {
+		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+			return false
+		}
+		if hashOrVal, ok = hash[k]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that
+// does not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+	fullkey := strings.Join(key, ".")
+	if typ, ok := md.types[fullkey]; ok {
+		return typ.typeString()
+	}
+	return ""
+}
+
+// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
+// to get values of this type.
+type Key []string
+
+func (k Key) String() string {
+	return strings.Join(k, ".")
+}
+
+func (k Key) maybeQuotedAll() string {
+	var ss []string
+	for i := range k {
+		ss = append(ss, k.maybeQuoted(i))
+	}
+	return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+	quote := false
+	for _, c := range k[i] {
+		if !isBareKeyChar(c) {
+			quote = true
+			break
+		}
+	}
+	if quote {
+		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+	}
+	return k[i]
+}
+
+func (k Key) add(piece string) Key {
+	newKey := make(Key, len(k)+1)
+	copy(newKey, k)
+	newKey[len(k)] = piece
+	return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific.
+//
+// The list will have the same order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+	return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+	undecoded := make([]Key, 0, len(md.keys))
+	for _, key := range md.keys {
+		if !md.decoded[key.String()] {
+			undecoded = append(undecoded, key)
+		}
+	}
+	return undecoded
+}

+ 27 - 0
vendor/github.com/BurntSushi/toml/doc.go

@@ -0,0 +1,27 @@
+/*
+Package toml provides facilities for decoding and encoding TOML configuration
+files via reflection. There is also support for delaying decoding with
+the Primitive type, and querying the set of keys in a TOML document with the
+MetaData type.
+
+The specification implemented: https://github.com/toml-lang/toml
+
+The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
+whether a file is a valid TOML document. It can also be used to print the
+type of each key in a TOML document.
+
+Testing
+
+There are two important types of tests used for this package. The first is
+contained inside '*_test.go' files and uses the standard Go unit testing
+framework. These tests are primarily devoted to holistically testing the
+decoder and encoder.
+
+The second type of testing is used to verify the implementation's adherence
+to the TOML specification. These tests have been factored into their own
+project: https://github.com/BurntSushi/toml-test
+
+The reason the tests are in a separate project is so that they can be used by
+any implementation of TOML. Namely, it is language agnostic.
+*/
+package toml

+ 568 - 0
vendor/github.com/BurntSushi/toml/encode.go

@@ -0,0 +1,568 @@
+package toml
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayMixedElementTypes = errors.New(
+		"toml: cannot encode array with mixed element types")
+	errArrayNilElement = errors.New(
+		"toml: cannot encode array with nil element")
+	errNonString = errors.New(
+		"toml: cannot encode a map with non-string key type")
+	errAnonNonStruct = errors.New(
+		"toml: cannot encode an anonymous field that is not a struct")
+	errArrayNoTable = errors.New(
+		"toml: TOML array element cannot contain a table")
+	errNoKey = errors.New(
+		"toml: top-level values must be Go maps or structs")
+	errAnything = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+	"\t", "\\t",
+	"\n", "\\n",
+	"\r", "\\r",
+	"\"", "\\\"",
+	"\\", "\\\\",
+)
+
+// Encoder controls the encoding of Go values to a TOML document to some
+// io.Writer.
+//
+// The indentation level can be controlled with the Indent field.
+type Encoder struct {
+	// A single indentation level. By default it is two spaces.
+	Indent string
+
+	// hasWritten is whether we have written any output to w yet.
+	hasWritten bool
+	w          *bufio.Writer
+}
+
+// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
+// given. By default, a single indentation level is 2 spaces.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the underlying
+// io.Writer. If the value given cannot be encoded to a valid TOML document,
+// then an error is returned.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. (If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.)
+//
+// When encoding TOML hashes (i.e., Go maps or structs), keys without any
+// sub-hashes are encoded first.
+//
+// If a Go map is encoded, then its keys are sorted alphabetically for
+// deterministic output. More control over this behavior may be provided if
+// there is demand for it.
+//
+// Encoding Go values without a corresponding TOML representation---like map
+// types with non-string keys---will cause an error to be returned. Similarly
+// for mixed arrays/slices, arrays/slices with nil elements, embedded
+// non-struct types and nested slices containing maps or structs.
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
+// and so is []map[string][]string.)
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case. Time needs to be in ISO8601 format.
+	// Special case. If we can marshal the type to text, then we used that.
+	// Basically, this prevents the encoder for handling these types as
+	// generic structs (or whatever the underlying type of a TextMarshaler is).
+	switch rv.Interface().(type) {
+	case time.Time, TextMarshaler:
+		enc.keyEqElement(key, rv)
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.keyEqElement(key, rv)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.keyEqElement(key, rv)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		panic(e("unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time:
+		// Special case time.Time as a primitive. Has to come before
+		// TextMarshaler below because time.Time implements
+		// encoding.TextMarshaler, but we need to always use UTC.
+		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+		return
+	case TextMarshaler:
+		// Special case. Use text marshaler if it's available for this value.
+		if s, err := v.MarshalText(); err != nil {
+			encPanic(err)
+		} else {
+			enc.writeQuoted(string(s))
+		}
+		return
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		enc.wf(strconv.FormatBool(rv.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64:
+		enc.wf(strconv.FormatInt(rv.Int(), 10))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16,
+		reflect.Uint32, reflect.Uint64:
+		enc.wf(strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Float32:
+		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+	case reflect.Float64:
+		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+	case reflect.Array, reflect.Slice:
+		enc.eArrayOrSliceElement(rv)
+	case reflect.Interface:
+		enc.eElement(rv.Elem())
+	case reflect.String:
+		enc.writeQuoted(rv.String())
+	default:
+		panic(e("unexpected primitive type: %s", rv.Kind()))
+	}
+}
+
+// By the TOML spec, all floats must have a decimal with at least one
+// number on either side.
+func floatAddDecimal(fstr string) string {
+	if !strings.Contains(fstr, ".") {
+		return fstr + ".0"
+	}
+	return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+	enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+	length := rv.Len()
+	enc.wf("[")
+	for i := 0; i < length; i++ {
+		elem := rv.Index(i)
+		enc.eElement(elem)
+		if i != length-1 {
+			enc.wf(", ")
+		}
+	}
+	enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	for i := 0; i < rv.Len(); i++ {
+		trv := rv.Index(i)
+		if isNil(trv) {
+			continue
+		}
+		panicIfInvalidKey(key)
+		enc.newline()
+		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+		enc.eMapOrStruct(key, trv)
+	}
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+	panicIfInvalidKey(key)
+	if len(key) == 1 {
+		// Output an extra newline between top-level tables.
+		// (The newline isn't written if nothing else has been written though.)
+		enc.newline()
+	}
+	if len(key) > 0 {
+		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+	}
+	enc.eMapOrStruct(key, rv)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+	switch rv := eindirect(rv); rv.Kind() {
+	case reflect.Map:
+		enc.eMap(key, rv)
+	case reflect.Struct:
+		enc.eStruct(key, rv)
+	default:
+		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+	}
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+	rt := rv.Type()
+	if rt.Key().Kind() != reflect.String {
+		encPanic(errNonString)
+	}
+
+	// Sort keys so that we have deterministic output. And write keys directly
+	// underneath this key first, before writing sub-structs or sub-maps.
+	var mapKeysDirect, mapKeysSub []string
+	for _, mapKey := range rv.MapKeys() {
+		k := mapKey.String()
+		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+			mapKeysSub = append(mapKeysSub, k)
+		} else {
+			mapKeysDirect = append(mapKeysDirect, k)
+		}
+	}
+
+	var writeMapKeys = func(mapKeys []string) {
+		sort.Strings(mapKeys)
+		for _, mapKey := range mapKeys {
+			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+			if isNil(mrv) {
+				// Don't write anything for nil fields.
+				continue
+			}
+			enc.encode(key.add(mapKey), mrv)
+		}
+	}
+	writeMapKeys(mapKeysDirect)
+	writeMapKeys(mapKeysSub)
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+	// Write keys for fields directly under this key first, because if we write
+	// a field that creates a new table, then all keys under it will be in that
+	// table (not the one we're writing here).
+	rt := rv.Type()
+	var fieldsDirect, fieldsSub [][]int
+	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			// skip unexported fields
+			if f.PkgPath != "" && !f.Anonymous {
+				continue
+			}
+			frv := rv.Field(i)
+			if f.Anonymous {
+				t := f.Type
+				switch t.Kind() {
+				case reflect.Struct:
+					// Treat anonymous struct fields with
+					// tag names as though they are not
+					// anonymous, like encoding/json does.
+					if getOptions(f.Tag).name == "" {
+						addFields(t, frv, f.Index)
+						continue
+					}
+				case reflect.Ptr:
+					if t.Elem().Kind() == reflect.Struct &&
+						getOptions(f.Tag).name == "" {
+						if !frv.IsNil() {
+							addFields(t.Elem(), frv.Elem(), f.Index)
+						}
+						continue
+					}
+					// Fall through to the normal field encoding logic below
+					// for non-struct anonymous fields.
+				}
+			}
+
+			if typeIsHash(tomlTypeOfGo(frv)) {
+				fieldsSub = append(fieldsSub, append(start, f.Index...))
+			} else {
+				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+			}
+		}
+	}
+	addFields(rt, rv, nil)
+
+	var writeFields = func(fields [][]int) {
+		for _, fieldIndex := range fields {
+			sft := rt.FieldByIndex(fieldIndex)
+			sf := rv.FieldByIndex(fieldIndex)
+			if isNil(sf) {
+				// Don't write anything for nil fields.
+				continue
+			}
+
+			opts := getOptions(sft.Tag)
+			if opts.skip {
+				continue
+			}
+			keyName := sft.Name
+			if opts.name != "" {
+				keyName = opts.name
+			}
+			if opts.omitempty && isEmpty(sf) {
+				continue
+			}
+			if opts.omitzero && isZero(sf) {
+				continue
+			}
+
+			enc.encode(key.add(keyName), sf)
+		}
+	}
+	writeFields(fieldsDirect)
+	writeFields(fieldsSub)
+}
+
+// tomlTypeName returns the TOML type name of the Go value's type. It is
+// used to determine whether the types of array elements are mixed (which is
+// forbidden). If the Go value is nil, then it is illegal for it to be an array
+// element, and valueIsNil is returned as true.
+
+// Returns the TOML type of a Go value. The type may be `nil`, which means
+// no concrete TOML type could be found.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		}
+		return tomlArray
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		switch rv.Interface().(type) {
+		case time.Time:
+			return tomlDatetime
+		case TextMarshaler:
+			return tomlString
+		default:
+			return tomlHash
+		}
+	default:
+		panic("unexpected reflect.Kind: " + rv.Kind().String())
+	}
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slize). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		elem := rv.Index(i)
+		switch elemType := tomlTypeOfGo(elem); {
+		case elemType == nil:
+			encPanic(errArrayNilElement)
+		case !typeEqual(firstType, elemType):
+			encPanic(errArrayMixedElementTypes)
+		}
+	}
+	// If we have a nested array, then we must make sure that the nested
+	// array contains ONLY primitives.
+	// This checks arbitrarily nested arrays.
+	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
+		nest := tomlArrayType(eindirect(rv.Index(0)))
+		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
+			encPanic(errArrayNoTable)
+		}
+	}
+	return firstType
+}
+
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
+
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Bool:
+		return !rv.Bool()
+	}
+	return false
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	panicIfInvalidKey(key)
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	enc.newline()
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return eindirect(v.Elem())
+	default:
+		return v
+	}
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
+
+func panicIfInvalidKey(key Key) {
+	for _, k := range key {
+		if len(k) == 0 {
+			encPanic(e("Key '%s' is not a valid table name. Key names "+
+				"cannot be empty.", key.maybeQuotedAll()))
+		}
+	}
+}
+
+func isValidKeyName(s string) bool {
+	return len(s) != 0
+}

+ 19 - 0
vendor/github.com/BurntSushi/toml/encoding_types.go

@@ -0,0 +1,19 @@
+// +build go1.2
+
+package toml
+
+// In order to support Go 1.1, we define our own TextMarshaler and
+// TextUnmarshaler types. For Go 1.2+, we just alias them with the
+// standard library interfaces.
+
+import (
+	"encoding"
+)
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler encoding.TextUnmarshaler

+ 18 - 0
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go

@@ -0,0 +1,18 @@
+// +build !go1.2
+
+package toml
+
+// These interfaces were introduced in Go 1.2, so we add them manually when
+// compiling for Go 1.1.
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler interface {
+	MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler interface {
+	UnmarshalText(text []byte) error
+}

+ 953 - 0
vendor/github.com/BurntSushi/toml/lex.go

@@ -0,0 +1,953 @@
+package toml
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+type itemType int
+
+const (
+	itemError itemType = iota
+	itemNIL            // used in the parser to indicate no type
+	itemEOF
+	itemText
+	itemString
+	itemRawString
+	itemMultilineString
+	itemRawMultilineString
+	itemBool
+	itemInteger
+	itemFloat
+	itemDatetime
+	itemArray // the start of an array
+	itemArrayEnd
+	itemTableStart
+	itemTableEnd
+	itemArrayTableStart
+	itemArrayTableEnd
+	itemKeyStart
+	itemCommentStart
+	itemInlineTableStart
+	itemInlineTableEnd
+)
+
+const (
+	eof              = 0
+	comma            = ','
+	tableStart       = '['
+	tableEnd         = ']'
+	arrayTableStart  = '['
+	arrayTableEnd    = ']'
+	tableSep         = '.'
+	keySep           = '='
+	arrayStart       = '['
+	arrayEnd         = ']'
+	commentStart     = '#'
+	stringStart      = '"'
+	stringEnd        = '"'
+	rawStringStart   = '\''
+	rawStringEnd     = '\''
+	inlineTableStart = '{'
+	inlineTableEnd   = '}'
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+	input string
+	start int
+	pos   int
+	line  int
+	state stateFn
+	items chan item
+
+	// Allow for backing up up to three runes.
+	// This is necessary because TOML contains 3-rune tokens (""" and ''').
+	prevWidths [3]int
+	nprev      int // how many of prevWidths are in use
+	// If we emit an eof, we can still back up, but it is not OK to call
+	// next again.
+	atEOF bool
+
+	// A stack of state functions used to maintain context.
+	// The idea is to reuse parts of the state machine in various places.
+	// For example, values can appear at the top level or within arbitrarily
+	// nested arrays. The last state on the stack is used after a value has
+	// been lexed. Similarly for comments.
+	stack []stateFn
+}
+
+type item struct {
+	typ  itemType
+	val  string
+	line int
+}
+
+func (lx *lexer) nextItem() item {
+	for {
+		select {
+		case item := <-lx.items:
+			return item
+		default:
+			lx.state = lx.state(lx)
+		}
+	}
+}
+
+func lex(input string) *lexer {
+	lx := &lexer{
+		input: input,
+		state: lexTop,
+		line:  1,
+		items: make(chan item, 10),
+		stack: make([]stateFn, 0, 10),
+	}
+	return lx
+}
+
+func (lx *lexer) push(state stateFn) {
+	lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+	if len(lx.stack) == 0 {
+		return lx.errorf("BUG in lexer: no states to pop")
+	}
+	last := lx.stack[len(lx.stack)-1]
+	lx.stack = lx.stack[0 : len(lx.stack)-1]
+	return last
+}
+
+func (lx *lexer) current() string {
+	return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+	lx.items <- item{typ, lx.current(), lx.line}
+	lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+	lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+	if lx.atEOF {
+		panic("next called after EOF")
+	}
+	if lx.pos >= len(lx.input) {
+		lx.atEOF = true
+		return eof
+	}
+
+	if lx.input[lx.pos] == '\n' {
+		lx.line++
+	}
+	lx.prevWidths[2] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[0]
+	if lx.nprev < 3 {
+		lx.nprev++
+	}
+	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+	lx.prevWidths[0] = w
+	lx.pos += w
+	return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+	lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called only twice between calls to next.
+func (lx *lexer) backup() {
+	if lx.atEOF {
+		lx.atEOF = false
+		return
+	}
+	if lx.nprev < 1 {
+		panic("backed up too far")
+	}
+	w := lx.prevWidths[0]
+	lx.prevWidths[0] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[2]
+	lx.nprev--
+	lx.pos -= w
+	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+		lx.line--
+	}
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+	if lx.next() == valid {
+		return true
+	}
+	lx.backup()
+	return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+	r := lx.next()
+	lx.backup()
+	return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+	for {
+		r := lx.next()
+		if pred(r) {
+			continue
+		}
+		lx.backup()
+		lx.ignore()
+		return
+	}
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Note that any value that is a character is escaped if it's a special
+// character (newlines, tabs, etc.).
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+	lx.items <- item{
+		itemError,
+		fmt.Sprintf(format, values...),
+		lx.line,
+	}
+	return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+	switch r {
+	case commentStart:
+		lx.push(lexTop)
+		return lexCommentStart
+	case tableStart:
+		return lexTableStart
+	case eof:
+		if lx.pos > lx.start {
+			return lx.errorf("unexpected EOF")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == commentStart:
+		// a comment will read to a newline for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		lx.emit(itemEOF)
+		return nil
+	}
+	return lx.errorf("expected a top-level item to end with a newline, "+
+		"comment, or EOF, but got %q instead", r)
+}
+
+// lexTable lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == arrayTableStart {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != arrayTableEnd {
+		return lx.errorf("expected end of table array name delimiter %q, "+
+			"but got %q instead", arrayTableEnd, r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == tableEnd || r == eof:
+		return lx.errorf("unexpected end of table name " +
+			"(table names cannot be empty)")
+	case r == tableSep:
+		return lx.errorf("unexpected table separator " +
+			"(table names cannot be empty)")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexValue // reuse string lexing
+	default:
+		return lexBareTableName
+	}
+}
+
+// lexBareTableName lexes the name of a table. It assumes that at least one
+// valid character for the table has already been read.
+func lexBareTableName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareTableName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lexTableNameEnd
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == tableSep:
+		lx.ignore()
+		return lexTableNameStart
+	case r == tableEnd:
+		return lx.pop()
+	default:
+		return lx.errorf("expected '.' or ']' to end table name, "+
+			"but got %q instead", r)
+	}
+}
+
+// lexKeyStart consumes a key name up until the first non-whitespace character.
+// lexKeyStart will ignore whitespace.
+func lexKeyStart(lx *lexer) stateFn {
+	r := lx.peek()
+	switch {
+	case r == keySep:
+		return lx.errorf("unexpected key separator %q", keySep)
+	case isWhitespace(r) || isNL(r):
+		lx.next()
+		return lexSkip(lx, lexKeyStart)
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.emit(itemKeyStart)
+		lx.push(lexKeyEnd)
+		return lexValue // reuse string lexing
+	default:
+		lx.ignore()
+		lx.emit(itemKeyStart)
+		return lexBareKey
+	}
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isBareKeyChar(r):
+		return lexBareKey
+	case isWhitespace(r):
+		lx.backup()
+		lx.emit(itemText)
+		return lexKeyEnd
+	case r == keySep:
+		lx.backup()
+		lx.emit(itemText)
+		return lexKeyEnd
+	default:
+		return lx.errorf("bare keys cannot contain %q", r)
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case r == keySep:
+		return lexSkip(lx, lexValue)
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	default:
+		return lx.errorf("expected key separator %q, but got %q instead",
+			keySep, r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the next is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	}
+	switch r {
+	case arrayStart:
+		lx.ignore()
+		lx.emit(itemArray)
+		return lexArrayValue
+	case inlineTableStart:
+		lx.ignore()
+		lx.emit(itemInlineTableStart)
+		return lexInlineTableValue
+	case stringStart:
+		if lx.accept(stringStart) {
+			if lx.accept(stringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the '"'
+		return lexString
+	case rawStringStart:
+		if lx.accept(rawStringStart) {
+			if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case '+', '-':
+		return lexNumberStart
+	case '.': // special error case, be kind to users
+		return lx.errorf("floats must start with a digit, not '.'")
+	}
+	if unicode.IsLetter(r) {
+		// Be permissive here; lexBool will give a nice error if the
+		// user wrote something like
+		//   x = foo
+		// (i.e. not 'true' or 'false' but is something else word-like.)
+		lx.backup()
+		return lexBool
+	}
+	return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValue)
+	case r == commentStart:
+		lx.push(lexArrayValue)
+		return lexCommentStart
+	case r == comma:
+		return lx.errorf("unexpected comma")
+	case r == arrayEnd:
+		// NOTE(caleb): The spec isn't clear about whether you can have
+		// a trailing comma or not, so we'll allow it.
+		return lexArrayEnd
+	}
+
+	lx.backup()
+	lx.push(lexArrayValueEnd)
+	return lexValue
+}
+
+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValueEnd)
+	case r == commentStart:
+		lx.push(lexArrayValueEnd)
+		return lexCommentStart
+	case r == comma:
+		lx.ignore()
+		return lexArrayValue // move on to the next value
+	case r == arrayEnd:
+		return lexArrayEnd
+	}
+	return lx.errorf(
+		"expected a comma or array terminator %q, but got %q instead",
+		arrayEnd, r,
+	)
+}
+
+// lexArrayEnd finishes the lexing of an array.
+// It assumes that a ']' has just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemArrayEnd)
+	return lx.pop()
+}
+
+// lexInlineTableValue consumes one key/value pair in an inline table.
+// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
+func lexInlineTableValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexInlineTableValue)
+	case isNL(r):
+		return lx.errorf("newlines not allowed within inline tables")
+	case r == commentStart:
+		lx.push(lexInlineTableValue)
+		return lexCommentStart
+	case r == comma:
+		return lx.errorf("unexpected comma")
+	case r == inlineTableEnd:
+		return lexInlineTableEnd
+	}
+	lx.backup()
+	lx.push(lexInlineTableValueEnd)
+	return lexKeyStart
+}
+
+// lexInlineTableValueEnd consumes everything between the end of an inline table
+// key/value pair and the next pair (or the end of the table):
+// it ignores whitespace and expects either a ',' or a '}'.
+func lexInlineTableValueEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexInlineTableValueEnd)
+	case isNL(r):
+		return lx.errorf("newlines not allowed within inline tables")
+	case r == commentStart:
+		lx.push(lexInlineTableValueEnd)
+		return lexCommentStart
+	case r == comma:
+		lx.ignore()
+		return lexInlineTableValue
+	case r == inlineTableEnd:
+		return lexInlineTableEnd
+	}
+	return lx.errorf("expected a comma or an inline table terminator %q, "+
+		"but got %q instead", inlineTableEnd, r)
+}
+
+// lexInlineTableEnd finishes the lexing of an inline table.
+// It assumes that a '}' has just been consumed.
+func lexInlineTableEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemInlineTableEnd)
+	return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == eof:
+		return lx.errorf("unexpected EOF")
+	case isNL(r):
+		return lx.errorf("strings cannot contain newlines")
+	case r == '\\':
+		lx.push(lexString)
+		return lexStringEscape
+	case r == stringEnd:
+		lx.backup()
+		lx.emit(itemString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+	switch lx.next() {
+	case eof:
+		return lx.errorf("unexpected EOF")
+	case '\\':
+		return lexMultilineStringEscape
+	case stringEnd:
+		if lx.accept(stringEnd) {
+			if lx.accept(stringEnd) {
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == eof:
+		return lx.errorf("unexpected EOF")
+	case isNL(r):
+		return lx.errorf("strings cannot contain newlines")
+	case r == rawStringEnd:
+		lx.backup()
+		lx.emit(itemRawString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+	switch lx.next() {
+	case eof:
+		return lx.errorf("unexpected EOF")
+	case rawStringEnd:
+		if lx.accept(rawStringEnd) {
+			if lx.accept(rawStringEnd) {
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemRawMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+	// Handle the special case first:
+	if isNL(lx.next()) {
+		return lexMultilineString
+	}
+	lx.backup()
+	lx.push(lexMultilineString)
+	return lexStringEscape(lx)
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case 'b':
+		fallthrough
+	case 't':
+		fallthrough
+	case 'n':
+		fallthrough
+	case 'f':
+		fallthrough
+	case 'r':
+		fallthrough
+	case '"':
+		fallthrough
+	case '\\':
+		return lx.pop()
+	case 'u':
+		return lexShortUnicodeEscape
+	case 'U':
+		return lexLongUnicodeEscape
+	}
+	return lx.errorf("invalid escape character %q; only the following "+
+		"escape characters are allowed: "+
+		`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 4; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf(`expected four hexadecimal digits after '\u', `+
+				"but got %q instead", lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 8; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf(`expected eight hexadecimal digits after '\U', `+
+				"but got %q instead", lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either an integer, a float, or datetime.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		return lexNumber
+	case 'e', 'E':
+		return lexFloat
+	case '.':
+		return lx.errorf("floats must start with a digit, not '.'")
+	}
+	return lx.errorf("expected a digit but got %q", r)
+}
+
+// lexNumberOrDate consumes either an integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '-':
+		return lexDatetime
+	case '_':
+		return lexNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDatetime
+	}
+	switch r {
+	case '-', 'T', ':', '.', 'Z':
+		return lexDatetime
+	}
+
+	lx.backup()
+	lx.emit(itemDatetime)
+	return lx.pop()
+}
+
+// lexNumberStart consumes either an integer or a float. It assumes that a sign
+// has already been read, but that *no* digits have been consumed.
+// lexNumberStart will move to the appropriate integer or float states.
+func lexNumberStart(lx *lexer) stateFn {
+	// We MUST see a digit. Even floats have to start with a digit.
+	r := lx.next()
+	if !isDigit(r) {
+		if r == '.' {
+			return lx.errorf("floats must start with a digit, not '.'")
+		}
+		return lx.errorf("expected a digit but got %q", r)
+	}
+	return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexNumber
+	}
+	switch r {
+	case '_':
+		return lexNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	r := lx.peek()
+	if isNL(r) || r == eof {
+		lx.emit(itemText)
+		return lx.pop()
+	}
+	lx.next()
+	return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+	return func(lx *lexer) stateFn {
+		lx.ignore()
+		return nextState
+	}
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+	return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+	return r == '\n' || r == '\r'
+}
+
+func isDigit(r rune) bool {
+	return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+	return (r >= '0' && r <= '9') ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune) bool {
+	return (r >= 'A' && r <= 'Z') ||
+		(r >= 'a' && r <= 'z') ||
+		(r >= '0' && r <= '9') ||
+		r == '_' ||
+		r == '-'
+}
+
+func (itype itemType) String() string {
+	switch itype {
+	case itemError:
+		return "Error"
+	case itemNIL:
+		return "NIL"
+	case itemEOF:
+		return "EOF"
+	case itemText:
+		return "Text"
+	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+		return "String"
+	case itemBool:
+		return "Bool"
+	case itemInteger:
+		return "Integer"
+	case itemFloat:
+		return "Float"
+	case itemDatetime:
+		return "DateTime"
+	case itemTableStart:
+		return "TableStart"
+	case itemTableEnd:
+		return "TableEnd"
+	case itemKeyStart:
+		return "KeyStart"
+	case itemArray:
+		return "Array"
+	case itemArrayEnd:
+		return "ArrayEnd"
+	case itemCommentStart:
+		return "CommentStart"
+	}
+	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}

+ 592 - 0
vendor/github.com/BurntSushi/toml/parse.go

@@ -0,0 +1,592 @@
+package toml
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+type parser struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	lx      *lexer
+
+	// A list of keys in the order that they appear in the TOML data.
+	ordered []Key
+
+	// the full key for the current hash in scope
+	context Key
+
+	// the base key name for everything except hashes
+	currentKey string
+
+	// rough approximation of line number
+	approxLine int
+
+	// A map of 'key.group.names' to whether they were created implicitly.
+	implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+	return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			if err, ok = r.(parseError); ok {
+				return
+			}
+			panic(r)
+		}
+	}()
+
+	p = &parser{
+		mapping:   make(map[string]interface{}),
+		types:     make(map[string]tomlType),
+		lx:        lex(data),
+		ordered:   make([]Key, 0),
+		implicits: make(map[string]bool),
+	}
+	for {
+		item := p.next()
+		if item.typ == itemEOF {
+			break
+		}
+		p.topLevel(item)
+	}
+
+	return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+		p.approxLine, p.current(), fmt.Sprintf(format, v...))
+	panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+	it := p.lx.nextItem()
+	if it.typ == itemError {
+		p.panicf("%s", it.val)
+	}
+	return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+	it := p.next()
+	p.assertEqual(typ, it.typ)
+	return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+	if expected != got {
+		p.bug("Expected '%s' but got '%s'.", expected, got)
+	}
+}
+
+func (p *parser) topLevel(item item) {
+	switch item.typ {
+	case itemCommentStart:
+		p.approxLine = item.line
+		p.expect(itemText)
+	case itemTableStart:
+		kg := p.next()
+		p.approxLine = kg.line
+
+		var key Key
+		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+			key = append(key, p.keyString(kg))
+		}
+		p.assertEqual(itemTableEnd, kg.typ)
+
+		p.establishContext(key, false)
+		p.setType("", tomlHash)
+		p.ordered = append(p.ordered, key)
+	case itemArrayTableStart:
+		kg := p.next()
+		p.approxLine = kg.line
+
+		var key Key
+		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+			key = append(key, p.keyString(kg))
+		}
+		p.assertEqual(itemArrayTableEnd, kg.typ)
+
+		p.establishContext(key, true)
+		p.setType("", tomlArrayHash)
+		p.ordered = append(p.ordered, key)
+	case itemKeyStart:
+		kname := p.next()
+		p.approxLine = kname.line
+		p.currentKey = p.keyString(kname)
+
+		val, typ := p.value(p.next())
+		p.setValue(p.currentKey, val)
+		p.setType(p.currentKey, typ)
+		p.ordered = append(p.ordered, p.context.add(p.currentKey))
+		p.currentKey = ""
+	default:
+		p.bug("Unexpected type at top level: %s", item.typ)
+	}
+}
+
+// Gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+	switch it.typ {
+	case itemText:
+		return it.val
+	case itemString, itemMultilineString,
+		itemRawString, itemRawMultilineString:
+		s, _ := p.value(it)
+		return s.(string)
+	default:
+		p.bug("Unexpected key type: %s", it.typ)
+		panic("unreachable")
+	}
+}
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item) (interface{}, tomlType) {
+	switch it.typ {
+	case itemString:
+		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+	case itemMultilineString:
+		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+	case itemRawString:
+		return it.val, p.typeOfPrimitive(it)
+	case itemRawMultilineString:
+		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+	case itemBool:
+		switch it.val {
+		case "true":
+			return true, p.typeOfPrimitive(it)
+		case "false":
+			return false, p.typeOfPrimitive(it)
+		}
+		p.bug("Expected boolean value, but got '%s'.", it.val)
+	case itemInteger:
+		if !numUnderscoresOK(it.val) {
+			p.panicf("Invalid integer %q: underscores must be surrounded by digits",
+				it.val)
+		}
+		val := strings.Replace(it.val, "_", "", -1)
+		num, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			// Distinguish integer values. Normally, it'd be a bug if the lexer
+			// provides an invalid integer, but it's possible that the number is
+			// out of range of valid values (which the lexer cannot determine).
+			// So mark the former as a bug but the latter as a legitimate user
+			// error.
+			if e, ok := err.(*strconv.NumError); ok &&
+				e.Err == strconv.ErrRange {
+
+				p.panicf("Integer '%s' is out of the range of 64-bit "+
+					"signed integers.", it.val)
+			} else {
+				p.bug("Expected integer value, but got '%s'.", it.val)
+			}
+		}
+		return num, p.typeOfPrimitive(it)
+	case itemFloat:
+		parts := strings.FieldsFunc(it.val, func(r rune) bool {
+			switch r {
+			case '.', 'e', 'E':
+				return true
+			}
+			return false
+		})
+		for _, part := range parts {
+			if !numUnderscoresOK(part) {
+				p.panicf("Invalid float %q: underscores must be "+
+					"surrounded by digits", it.val)
+			}
+		}
+		if !numPeriodsOK(it.val) {
+			// As a special case, numbers like '123.' or '1.e2',
+			// which are valid as far as Go/strconv are concerned,
+			// must be rejected because TOML says that a fractional
+			// part consists of '.' followed by 1+ digits.
+			p.panicf("Invalid float %q: '.' must be followed "+
+				"by one or more digits", it.val)
+		}
+		val := strings.Replace(it.val, "_", "", -1)
+		num, err := strconv.ParseFloat(val, 64)
+		if err != nil {
+			if e, ok := err.(*strconv.NumError); ok &&
+				e.Err == strconv.ErrRange {
+
+				p.panicf("Float '%s' is out of the range of 64-bit "+
+					"IEEE-754 floating-point numbers.", it.val)
+			} else {
+				p.panicf("Invalid float value: %q", it.val)
+			}
+		}
+		return num, p.typeOfPrimitive(it)
+	case itemDatetime:
+		var t time.Time
+		var ok bool
+		var err error
+		for _, format := range []string{
+			"2006-01-02T15:04:05Z07:00",
+			"2006-01-02T15:04:05",
+			"2006-01-02",
+		} {
+			t, err = time.ParseInLocation(format, it.val, time.Local)
+			if err == nil {
+				ok = true
+				break
+			}
+		}
+		if !ok {
+			p.panicf("Invalid TOML Datetime: %q.", it.val)
+		}
+		return t, p.typeOfPrimitive(it)
+	case itemArray:
+		array := make([]interface{}, 0)
+		types := make([]tomlType, 0)
+
+		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+			if it.typ == itemCommentStart {
+				p.expect(itemText)
+				continue
+			}
+
+			val, typ := p.value(it)
+			array = append(array, val)
+			types = append(types, typ)
+		}
+		return array, p.typeOfArray(types)
+	case itemInlineTableStart:
+		var (
+			hash         = make(map[string]interface{})
+			outerContext = p.context
+			outerKey     = p.currentKey
+		)
+
+		p.context = append(p.context, p.currentKey)
+		p.currentKey = ""
+		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+			if it.typ != itemKeyStart {
+				p.bug("Expected key start but instead found %q, around line %d",
+					it.val, p.approxLine)
+			}
+			if it.typ == itemCommentStart {
+				p.expect(itemText)
+				continue
+			}
+
+			// retrieve key
+			k := p.next()
+			p.approxLine = k.line
+			kname := p.keyString(k)
+
+			// retrieve value
+			p.currentKey = kname
+			val, typ := p.value(p.next())
+			// make sure we keep metadata up to date
+			p.setType(kname, typ)
+			p.ordered = append(p.ordered, p.context.add(p.currentKey))
+			hash[kname] = val
+		}
+		p.context = outerContext
+		p.currentKey = outerKey
+		return hash, tomlHash
+	}
+	p.bug("Unexpected value type: %s", it.typ)
+	panic("unreachable")
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+	accept := false
+	for _, r := range s {
+		if r == '_' {
+			if !accept {
+				return false
+			}
+			accept = false
+			continue
+		}
+		accept = true
+	}
+	return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+	period := false
+	for _, r := range s {
+		if period && !isDigit(r) {
+			return false
+		}
+		period = r == '.'
+	}
+	return !period
+}
+
+// establishContext sets the current context of the parser,
+// where the context is either a hash or an array of hashes. Which one is
+// set depends on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) establishContext(key Key, array bool) {
+	var ok bool
+
+	// Always start at the top level and drill down for our context.
+	hashContext := p.mapping
+	keyContext := make(Key, 0)
+
+	// We only need implicit hashes for key[0:-1]
+	for _, k := range key[0 : len(key)-1] {
+		_, ok = hashContext[k]
+		keyContext = append(keyContext, k)
+
+		// No key? Make an implicit hash and move on.
+		if !ok {
+			p.addImplicit(keyContext)
+			hashContext[k] = make(map[string]interface{})
+		}
+
+		// If the hash context is actually an array of tables, then set
+		// the hash context to the last element in that array.
+		//
+		// Otherwise, it better be a table, since this MUST be a key group (by
+		// virtue of it not being the last element in a key).
+		switch t := hashContext[k].(type) {
+		case []map[string]interface{}:
+			hashContext = t[len(t)-1]
+		case map[string]interface{}:
+			hashContext = t
+		default:
+			p.panicf("Key '%s' was already created as a hash.", keyContext)
+		}
+	}
+
+	p.context = keyContext
+	if array {
+		// If this is the first element for this array, then allocate a new
+		// list of tables for it.
+		k := key[len(key)-1]
+		if _, ok := hashContext[k]; !ok {
+			hashContext[k] = make([]map[string]interface{}, 0, 5)
+		}
+
+		// Add a new table. But make sure the key hasn't already been used
+		// for something else.
+		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+			hashContext[k] = append(hash, make(map[string]interface{}))
+		} else {
+			p.panicf("Key '%s' was already created and cannot be used as "+
+				"an array.", keyContext)
+		}
+	} else {
+		p.setValue(key[len(key)-1], make(map[string]interface{}))
+	}
+	p.context = append(p.context, key[len(key)-1])
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, account for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+	var tmpHash interface{}
+	var ok bool
+
+	hash := p.mapping
+	keyContext := make(Key, 0)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+		if tmpHash, ok = hash[k]; !ok {
+			p.bug("Context for key '%s' has not been established.", keyContext)
+		}
+		switch t := tmpHash.(type) {
+		case []map[string]interface{}:
+			// The context is a table of hashes. Pick the most recent table
+			// defined as the current hash.
+			hash = t[len(t)-1]
+		case map[string]interface{}:
+			hash = t
+		default:
+			p.bug("Expected hash to have type 'map[string]interface{}', but "+
+				"it has '%T' instead.", tmpHash)
+		}
+	}
+	keyContext = append(keyContext, key)
+
+	if _, ok := hash[key]; ok {
+		// Typically, if the given key has already been set, then we have
+		// to raise an error since duplicate keys are disallowed. However,
+		// it's possible that a key was previously defined implicitly. In this
+		// case, it is allowed to be redefined concretely. (See the
+		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+		//
+		// But we have to make sure to stop marking it as an implicit. (So that
+		// another redefinition provokes an error.)
+		//
+		// Note that since it has already been defined (as a hash), we don't
+		// want to overwrite it. So our business is done.
+		if p.isImplicit(keyContext) {
+			p.removeImplicit(keyContext)
+			return
+		}
+
+		// Otherwise, we have a concrete key trying to override a previous
+		// key, which is *always* wrong.
+		p.panicf("Key '%s' has already been defined.", keyContext)
+	}
+	hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+	keyContext := make(Key, 0, len(p.context)+1)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+	}
+	if len(key) > 0 { // allow type setting for hashes
+		keyContext = append(keyContext, key)
+	}
+	p.types[keyContext.String()] = typ
+}
+
+// addImplicit sets the given Key as having been created implicitly.
+func (p *parser) addImplicit(key Key) {
+	p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+	p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+	return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+	if len(p.currentKey) == 0 {
+		return p.context.String()
+	}
+	if len(p.context) == 0 {
+		return p.currentKey
+	}
+	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+	if len(s) == 0 || s[0] != '\n' {
+		return s
+	}
+	return s[1:]
+}
+
+func stripEscapedWhitespace(s string) string {
+	esc := strings.Split(s, "\\\n")
+	if len(esc) > 1 {
+		for i := 1; i < len(esc); i++ {
+			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+		}
+	}
+	return strings.Join(esc, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+	var replaced []rune
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+			return ""
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+			replaced = append(replaced, escaped)
+			r += 9
+		}
+	}
+	return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+	s := string(bs)
+	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+	if err != nil {
+		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+			"lexer claims it's OK: %s", s, err)
+	}
+	if !utf8.ValidRune(rune(hex)) {
+		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+	}
+	return rune(hex)
+}
+
+func isStringType(ty itemType) bool {
+	return ty == itemString || ty == itemMultilineString ||
+		ty == itemRawString || ty == itemRawMultilineString
+}

+ 1 - 0
vendor/github.com/BurntSushi/toml/session.vim

@@ -0,0 +1 @@
+au BufWritePost *.go silent!make tags > /dev/null 2>&1

+ 91 - 0
vendor/github.com/BurntSushi/toml/type_check.go

@@ -0,0 +1,91 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be militating
+// toward adding real composite types.
+type tomlType interface {
+	typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+	if t1 == nil || t2 == nil {
+		return false
+	}
+	return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+	return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+	return btype.typeString()
+}
+
+var (
+	tomlInteger   tomlBaseType = "Integer"
+	tomlFloat     tomlBaseType = "Float"
+	tomlDatetime  tomlBaseType = "Datetime"
+	tomlString    tomlBaseType = "String"
+	tomlBool      tomlBaseType = "Bool"
+	tomlArray     tomlBaseType = "Array"
+	tomlHash      tomlBaseType = "Hash"
+	tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+	switch lexItem.typ {
+	case itemInteger:
+		return tomlInteger
+	case itemFloat:
+		return tomlFloat
+	case itemDatetime:
+		return tomlDatetime
+	case itemString:
+		return tomlString
+	case itemMultilineString:
+		return tomlString
+	case itemRawString:
+		return tomlString
+	case itemRawMultilineString:
+		return tomlString
+	case itemBool:
+		return tomlBool
+	}
+	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+	panic("unreachable")
+}
+
+// typeOfArray returns a tomlType for an array given a list of types of its
+// values.
+//
+// In the current spec, if an array is homogeneous, then its type is always
+// "Array". If the array is not homogeneous, an error is generated.
+func (p *parser) typeOfArray(types []tomlType) tomlType {
+	// Empty arrays are cool.
+	if len(types) == 0 {
+		return tomlArray
+	}
+
+	theType := types[0]
+	for _, t := range types[1:] {
+		if !typeEqual(theType, t) {
+			p.panicf("Array contains values of type '%s' and '%s', but "+
+				"arrays must be homogeneous.", theType, t)
+		}
+	}
+	return tomlArray
+}

+ 242 - 0
vendor/github.com/BurntSushi/toml/type_fields.go

@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
// A field represents a single field found in a struct. typeFields produces
// one per visible struct field; byName/byIndex order them, and
// dominantField resolves conflicts between embedded fields.
type field struct {
	name  string       // the name of the field (`toml` tag included)
	tag   bool         // whether field has a `toml` tag
	index []int        // represents the depth of an anonymous field
	typ   reflect.Type // the type of the field
}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next. A count > 1
	// means the same embedded type was queued more than once at that depth,
	// making its fields ambiguous (see the annihilation logic below).
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				opts := getOptions(sf.Tag)
				if opts.skip {
					continue
				}
				// index is the path of struct-field indices from the root
				// type down to this field.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := opts.name != ""
					name := opts.name
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
// fieldCache memoizes typeFields results per reflect.Type; the embedded
// RWMutex guards the lazily-initialized map m.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}

+ 27 - 0
vendor/github.com/Joker/jade/LICENSE.md

@@ -0,0 +1,27 @@
+Copyright (c) 2015, Joker
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of jade nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 193 - 0
vendor/github.com/Joker/jade/config.go

@@ -0,0 +1,193 @@
+package jade
+
+//go:generate stringer -type=itemType,NodeType -trimprefix=item -output=config_string.go
+
+var TabSize = 4
+
+var (
+	golang_mode  = false
+	tag__bgn     = "<%s%s>"
+	tag__end     = "</%s>"
+	tag__void    = "<%s%s/>"
+	tag__arg     = ` %s="{{ print %s }}"`
+	tag__arg_str = ` %s="%s"`
+	tag__arg_add = `%s " " %s`
+	tag__arg_bgn = ""
+	tag__arg_end = ""
+
+	cond__if     = "{{ if %s }}"
+	cond__unless = "{{ if not %s }}"
+	cond__case   = "{{/* switch %s */}}"
+	cond__while  = "{{ range %s }}"
+	cond__for    = "{{/* %s, %s */}}{{ range %s }}"
+	cond__end    = "{{ end }}"
+
+	cond__for_if   = "{{ if gt len %s 0 }}{{/* %s, %s */}}{{ range %s }}"
+	code__for_else = "{{ end }}{{ else }}"
+
+	code__longcode  = "{{/* %s */}}"
+	code__buffered  = "{{ %s }}"
+	code__unescaped = "{{ %s }}"
+	code__else      = "{{ else }}"
+	code__else_if   = "{{ else if %s }}"
+	code__case_when = "{{/* case %s: */}}"
+	code__case_def  = "{{/* default: */}}"
+	code__mix_block = "{{/* block */}}"
+
+	text__str     = "%s"
+	text__comment = "<!--%s -->"
+
+	mixin__bgn       = "\n%s"
+	mixin__end       = ""
+	mixin__var_bgn   = "{{/* var ("
+	mixin__var       = " %s = %s "
+	mixin__var_rest  = " %s = %#v "
+	mixin__var_block = " block = `%s` "
+	mixin__var_end   = ") */}}\n"
+)
+
+type itemType int8
+
+const (
+	itemError itemType = iota // error occurred; value is text of error
+	itemEOF
+
+	itemEndL
+	itemIdent
+	itemEmptyLine // empty line
+
+	itemText // plain text
+
+	itemComment // html comment
+	itemHTMLTag // html <tag>
+	itemDoctype // Doctype tag
+
+	itemDiv           // html div for . or #
+	itemTag           // html tag
+	itemTagInline     // inline tags
+	itemTagEnd        // for <tag />
+	itemTagVoid       // self-closing tags
+	itemTagVoidInline // inline + self-closing tags
+
+	itemID    // id    attribute
+	itemClass // class attribute
+
+	itemAttrStart
+	itemAttrEnd
+	itemAttr
+	itemAttrSpace
+	itemAttrComma
+	itemAttrEqual
+	itemAttrEqualUn
+
+	itemFilter
+	itemFilterText
+
+	// itemKeyword // used only to delimit the keywords
+
+	itemInclude
+	itemExtends
+	itemBlock
+	itemBlockAppend
+	itemBlockPrepend
+	itemMixin
+	itemMixinCall
+	itemMixinBlock
+
+	itemCode
+	itemCodeBuffered
+	itemCodeUnescaped
+
+	itemIf
+	itemElse
+	itemElseIf
+	itemUnless
+
+	itemEach
+	itemWhile
+	itemFor
+	itemForIfNotContain
+	itemForElse
+
+	itemCase
+	itemCaseWhen
+	itemCaseDefault
+)
+
+var key = map[string]itemType{
+	"include": itemInclude,
+	"extends": itemExtends,
+	"block":   itemBlock,
+	"append":  itemBlockAppend,
+	"prepend": itemBlockPrepend,
+	"mixin":   itemMixin,
+
+	"if":      itemIf,
+	"else":    itemElse,
+	"unless":  itemUnless,
+	"for":     itemFor,
+	"each":    itemEach,
+	"while":   itemWhile,
+	"case":    itemCase,
+	"when":    itemCaseWhen,
+	"default": itemCaseDefault,
+
+	"doctype": itemDoctype,
+
+	"a":       itemTagInline,
+	"abbr":    itemTagInline,
+	"acronym": itemTagInline,
+	"b":       itemTagInline,
+	"code":    itemTagInline,
+	"em":      itemTagInline,
+	"font":    itemTagInline,
+	"i":       itemTagInline,
+	"ins":     itemTagInline,
+	"kbd":     itemTagInline,
+	"map":     itemTagInline,
+	"samp":    itemTagInline,
+	"small":   itemTagInline,
+	"span":    itemTagInline,
+	"strong":  itemTagInline,
+	"sub":     itemTagInline,
+	"sup":     itemTagInline,
+
+	"area":    itemTagVoid,
+	"base":    itemTagVoid,
+	"col":     itemTagVoid,
+	"command": itemTagVoid,
+	"embed":   itemTagVoid,
+	"hr":      itemTagVoid,
+	"input":   itemTagVoid,
+	"keygen":  itemTagVoid,
+	"link":    itemTagVoid,
+	"meta":    itemTagVoid,
+	"param":   itemTagVoid,
+	"source":  itemTagVoid,
+	"track":   itemTagVoid,
+	"wbr":     itemTagVoid,
+
+	"br":  itemTagVoidInline,
+	"img": itemTagVoidInline,
+}
+
+// NodeType identifies the type of a parse tree node.
+type NodeType int
+
+// Type returns itself and provides an easy default implementation
+// for embedding in a Node. Embedded in all non-trivial Nodes.
+func (t NodeType) Type() NodeType {
+	return t
+}
+
+const (
+	NodeText NodeType = iota
+	NodeList
+	NodeTag
+	NodeCode
+	NodeCond
+	NodeString
+	NodeDoctype
+	NodeMixin
+	NodeBlock
+)

+ 27 - 0
vendor/github.com/Joker/jade/config_string.go

@@ -0,0 +1,27 @@
+// Code generated by "stringer -type=itemType,NodeType -trimprefix=item -output=config_string.go"; DO NOT EDIT.
+
+package jade
+
+import "strconv"
+
+const _itemType_name = "ErrorEOFEndLIdentEmptyLineTextCommentHTMLTagDoctypeDivTagTagInlineTagEndTagVoidTagVoidInlineIDClassAttrStartAttrEndAttrAttrSpaceAttrCommaAttrEqualAttrEqualUnFilterFilterTextIncludeExtendsBlockBlockAppendBlockPrependMixinMixinCallMixinBlockCodeCodeBufferedCodeUnescapedIfElseElseIfUnlessEachWhileForForIfNotContainForElseCaseCaseWhenCaseDefault"
+
+var _itemType_index = [...]uint16{0, 5, 8, 12, 17, 26, 30, 37, 44, 51, 54, 57, 66, 72, 79, 92, 94, 99, 108, 115, 119, 128, 137, 146, 157, 163, 173, 180, 187, 192, 203, 215, 220, 229, 239, 243, 255, 268, 270, 274, 280, 286, 290, 295, 298, 313, 320, 324, 332, 343}
+
+func (i itemType) String() string {
+	if i < 0 || i >= itemType(len(_itemType_index)-1) {
+		return "itemType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _itemType_name[_itemType_index[i]:_itemType_index[i+1]]
+}
+
+const _NodeType_name = "NodeTextNodeListNodeTagNodeCodeNodeCondNodeStringNodeDoctypeNodeMixinNodeBlock"
+
+var _NodeType_index = [...]uint8{0, 8, 16, 23, 31, 39, 49, 60, 69, 78}
+
+func (i NodeType) String() string {
+	if i < 0 || i >= NodeType(len(_NodeType_index)-1) {
+		return "NodeType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _NodeType_name[_NodeType_index[i]:_NodeType_index[i+1]]
+}

+ 661 - 0
vendor/github.com/Joker/jade/jade_lex.go

@@ -0,0 +1,661 @@
+package jade
+
+import (
+	"strings"
+)
+
+func lexIndents(l *lexer) stateFn {
+	d := l.indents()
+	if d == -1 {
+		l.depth = 0
+		l.emit(itemEmptyLine)
+	} else {
+		l.depth = d
+		l.emit(itemIdent)
+	}
+	return lexTags
+}
+func (l *lexer) indents() (depth int) {
+	for {
+		switch l.next() {
+		case ' ':
+			depth += 1
+		case '\t':
+			depth += TabSize
+		case '\r':
+			// skip
+		case '\n':
+			return -1
+		default:
+			l.backup()
+			return
+		}
+	}
+}
+
+func lexEndLine(l *lexer) stateFn {
+	switch r := l.next(); {
+	case r == '\r':
+		if l.next() == '\n' {
+			l.emit(itemEndL)
+			return lexIndents
+		}
+		return l.errorf("lexTags: standalone '\\r' ")
+	case r == '\n':
+		l.emit(itemEndL)
+		return lexIndents
+	case r == eof:
+		l.depth = 0
+		l.emit(itemEOF)
+		return nil
+	default:
+		return l.errorf("lexEndLine: unexpected token %#U `%s`", r, string(r))
+	}
+}
+
+// lexTags scans tags.
+func lexTags(l *lexer) stateFn {
+	switch r := l.next(); {
+
+	case isEndOfLine(r), r == eof:
+		l.backup()
+		return lexEndLine
+	case r == ' ' || r == '\t':
+		l.backup()
+		return lexIndents
+	//
+	//
+	case r == '.':
+		n := l.skipSpaces()
+		if n == 0 {
+			l.emit(itemDiv)
+			return lexClass
+		}
+		if n == -1 {
+			l.ignore()
+			return lexLongText
+		}
+		return l.errorf("lexTags: class name cannot start with a space.")
+	case r == '#':
+		l.emit(itemDiv)
+		return lexID
+	case r == ':':
+		l.ignore()
+		if l.emitWordByType(itemFilter) {
+			return lexFilter
+		}
+		return l.errorf("lexTags: expect filter name")
+	case r == '|':
+		r = l.next()
+		if r != ' ' {
+			l.backup()
+		}
+		l.ignore()
+		return lexText
+	case r == '<':
+		l.emitLineByType(itemHTMLTag)
+		return lexEndLine
+	case r == '+':
+		l.skipSpaces()
+		l.ignore()
+		if l.emitWordByType(itemMixinCall) {
+			return lexAfterTag
+		}
+		return l.errorf("lexTags: expect mixin name")
+	case r == '/':
+		return lexComment
+	case r == '-':
+		l.ignore()
+		return lexCode
+	case r == '=':
+		l.skipSpaces()
+		l.ignore()
+		l.emitLineByType(itemCodeBuffered)
+		return lexEndLine
+	case r == '!':
+		np := l.next()
+		if np == '=' {
+			l.skipSpaces()
+			l.ignore()
+			l.emitLineByType(itemCodeUnescaped)
+			return lexEndLine
+		}
+		if np == '!' && l.next() == '!' && l.depth == 0 {
+			if l.skipSpaces() != -1 {
+				l.ignore()
+				l.emitLineByType(itemDoctype)
+				return lexEndLine
+			}
+		}
+		return l.errorf("expect '=' after '!'")
+	case isAlphaNumeric(r):
+		l.backup()
+		return lexTagName
+	default:
+		return l.errorf("lexTags: unexpected token %#U `%s`", r, string(r))
+	}
+}
+
+//
+//
+
+func lexID(l *lexer) stateFn {
+	if l.emitWordByType(itemID) {
+		return lexAfterTag
+	}
+	return l.errorf("lexID: expect id name")
+}
+func lexClass(l *lexer) stateFn {
+	if l.emitWordByType(itemClass) {
+		return lexAfterTag
+	}
+	return l.errorf("lexClass: expect class name")
+}
+
// lexFilter consumes the filter's indented body as one raw text chunk and
// emits it as itemFilterText.
func lexFilter(l *lexer) stateFn {
	l.multiline()
	l.emit(itemFilterText)
	return lexIndents
}
+
+func lexCode(l *lexer) stateFn {
+	if l.skipSpaces() == -1 {
+		l.multiline()
+		l.emit(itemCode)
+		return lexIndents
+	} else {
+		l.ignore()
+		l.emitLineByType(itemCode)
+		return lexEndLine
+	}
+}
+func lexComment(l *lexer) stateFn {
+	sp := l.next()
+	tp := l.peek()
+	if sp == '/' {
+		if tp == '-' {
+			l.multiline()
+			l.ignore()
+			return lexIndents
+		} else {
+			l.ignore()
+			l.multiline()
+			l.emit(itemComment)
+			return lexIndents
+		}
+	}
+	return l.errorf("lexComment: unexpected token '%#U' expect '/'", sp)
+}
+
+//
+//
+
+func lexText(l *lexer) stateFn {
+	if l.skipSpaces() == -1 {
+		l.ignore()
+		return lexEndLine
+	}
+	return text(l)
+}
// lexLongText enters dot-block ("long text") mode before lexing text, so
// that following deeper-indented lines are treated as text too.
func lexLongText(l *lexer) stateFn {
	l.longtext = true
	return text(l)
}
+func text(l *lexer) stateFn {
+	for {
+		switch r := l.next(); {
+		case r == '\\':
+			l.next()
+			continue
+		case r == '#':
+			sp := l.peek()
+			if sp == '[' {
+				l.backup()
+				if l.pos > l.start {
+					l.emit(itemText)
+				}
+				l.next()
+				l.next()
+				l.skipSpaces()
+				l.interpolation += 1
+				l.depth += 1
+				// l.emit(itemInterpolation)
+				l.ignore()
+				return lexTags
+			}
+			if sp == '{' {
+				l.interpol(itemCodeBuffered)
+			}
+		case r == '$':
+			sp := l.peek()
+			if sp == '{' {
+				l.interpol(itemCodeBuffered)
+			}
+		case r == '!':
+			sp := l.peek()
+			if sp == '{' {
+				l.interpol(itemCodeUnescaped)
+			}
+		case r == ']':
+			if l.interpolation > 0 {
+				l.backup()
+				if l.pos > l.start {
+					l.emit(itemText)
+				}
+				l.next()
+				// l.emit(itemInterpolationEnd)
+				l.ignore()
+				l.interpolation -= 1
+				l.depth -= 1
+			}
+		case r == eof:
+			l.backup()
+			l.emit(itemText)
+			return lexEndLine
+		case r == '\n':
+			if l.longtext {
+				var (
+					indent int
+					pos    Pos
+				)
+				l.backup()
+				pos = l.pos
+				l.next()
+				indent = l.indents()
+				if indent != -1 {
+					if indent < l.depth {
+						l.pos = pos
+						if l.pos > l.start {
+							l.emit(itemText)
+						}
+						l.longtext = false
+						return lexIndents
+					}
+				} else {
+					l.backup()
+				}
+			} else {
+				l.backup()
+				if l.pos > l.start {
+					l.emit(itemText)
+				}
+				return lexIndents
+			}
+		}
+	}
+}
+func (l *lexer) interpol(item itemType) {
+	l.backup()
+	if l.pos > l.start {
+		l.emit(itemText)
+	}
+	l.next()
+	l.next()
+	l.skipSpaces()
+	l.ignore()
+Loop:
+	for {
+		switch r := l.next(); {
+		case r == '`':
+			l.toStopRune('`', false)
+		case r == '"':
+			l.toStopRune('"', false)
+		case r == '\'':
+			l.toStopRune('\'', false)
+		case r == '\n', r == eof:
+			l.backup()
+			l.errorf("interpolation error: expect '}'")
+			return
+		case r == '}':
+			break Loop
+		}
+	}
+	l.backup()
+	l.emit(item)
+	l.next()
+	l.ignore()
+}
+
+func lexTagName(l *lexer) stateFn {
+	for {
+		switch r := l.next(); {
+		case isAlphaNumeric(r):
+			// absorb.
+		default:
+			l.backup()
+			word := l.input[l.start:l.pos]
+			if w, ok := key[word]; ok {
+				switch w {
+				case itemElse:
+					l.emit(w)
+					l.skipSpaces()
+					l.ignore()
+					return lexTags
+				case itemDoctype, itemExtends:
+					if l.depth == 0 {
+						ss := l.skipSpaces()
+						l.ignore()
+						if ss != -1 {
+							l.emitLineByType(w)
+						} else if w == itemDoctype {
+							l.emit(w)
+						} else {
+							return l.errorf("lexTagName: itemExtends need path ")
+						}
+						return lexEndLine
+					} else {
+						l.emit(itemTag)
+					}
+				case itemBlock:
+					sp := l.skipSpaces()
+					l.ignore()
+					if sp == -1 {
+						l.emit(itemMixinBlock)
+					} else if strings.HasPrefix(l.input[l.pos:], "prepend ") {
+						l.toStopRune(' ', true)
+						l.skipSpaces()
+						l.ignore()
+						l.emitLineByType(itemBlockPrepend)
+					} else if strings.HasPrefix(l.input[l.pos:], "append ") {
+						l.toStopRune(' ', true)
+						l.skipSpaces()
+						l.ignore()
+						l.emitLineByType(itemBlockAppend)
+					} else {
+						l.emitLineByType(itemBlock)
+					}
+					return lexEndLine
+				case itemBlockAppend, itemBlockPrepend,
+					itemIf, itemUnless, itemCase,
+					itemEach, itemWhile, itemFor,
+					itemInclude:
+
+					l.skipSpaces()
+					l.ignore()
+					l.emitLineByType(w)
+					return lexEndLine
+				case itemMixin:
+					l.skipSpaces()
+					l.ignore()
+					l.emitWordByType(w)
+					return lexAfterTag
+				case itemCaseWhen:
+					l.skipSpaces()
+					l.ignore()
+					l.toStopRune(':', true)
+					l.emit(w)
+					return lexAfterTag
+				default:
+					l.emit(w)
+				}
+			} else {
+				l.emit(itemTag)
+			}
+			return lexAfterTag
+		}
+	}
+}
+
+func lexAfterTag(l *lexer) stateFn {
+	switch r := l.next(); {
+	case r == '(':
+		l.emit(itemAttrStart)
+		return lexAttr
+	case r == '/':
+		l.emit(itemTagEnd)
+		return lexAfterTag
+	case r == ':':
+		l.skipSpaces()
+		l.ignore()
+		l.depth += 1
+		return lexTags
+	case r == ' ' || r == '\t':
+		l.ignore()
+		l.depth += 1
+		return lexText
+	case r == ']':
+		if l.interpolation > 0 {
+			l.ignore()
+			if l.pos > l.start {
+				l.emit(itemText)
+			}
+			l.interpolation -= 1
+			l.depth -= 1
+			if l.longtext {
+				return lexLongText
+			} else {
+				return lexText
+			}
+		}
+		return l.errorf("lexAfterTag: %#U", r)
+	case r == '=':
+		l.skipSpaces()
+		l.ignore()
+		l.depth += 1
+		l.emitLineByType(itemCodeBuffered)
+		return lexEndLine
+	case r == '!':
+		if l.next() == '=' {
+			l.skipSpaces()
+			l.ignore()
+			l.depth += 1
+			l.emitLineByType(itemCodeUnescaped)
+			return lexEndLine
+		}
+		return l.errorf("expect '=' after '!'")
+	case r == '#':
+		l.ignore()
+		return lexID
+	case r == '&':
+		l.toStopRune(')', false)
+		l.ignore() // TODO: now ignore div(data-bar="foo")&attributes({'data-foo': 'baz'})
+		return lexAfterTag
+	case r == '.':
+		switch l.skipSpaces() {
+		case 0:
+			l.ignore()
+			return lexClass
+		case -1:
+			if sp := l.next(); sp != eof {
+				l.ignore()
+				l.depth += 1
+				return lexLongText
+			}
+			return lexEndLine
+		default:
+			l.ignore()
+			l.depth += 1
+			return lexText
+		}
+	case isEndOfLine(r), r == eof:
+		l.backup()
+		return lexEndLine
+	default:
+		return l.errorf("lexAfterTag: %#U", r)
+	}
+}
+
+//
+//
+
+func lexAttr(l *lexer) stateFn {
+	b1, b2, b3 := 0, 0, 0
+	for {
+		switch r := l.next(); {
+		case r == '"' || r == '\'':
+			l.toStopRune(r, false)
+		case r == '`':
+			for {
+				r = l.next()
+				if r == '`' {
+					break
+				}
+			}
+		case r == '(':
+			b1 += 1
+		case r == ')':
+			b1 -= 1
+			if b1 == -1 {
+				if b2 != 0 || b3 != 0 {
+					return l.errorf("lexAttrName: mismatched bracket")
+				}
+				l.backup()
+				if l.pos > l.start {
+					l.emit(itemAttr)
+				}
+				l.next()
+				l.emit(itemAttrEnd)
+				return lexAfterTag
+			}
+		case r == '[':
+			b2 += 1
+		case r == ']':
+			b2 -= 1
+			if b2 == -1 {
+				return l.errorf("lexAttrName: mismatched bracket '['")
+			}
+		case r == '{':
+			b3 += 1
+		case r == '}':
+			b3 -= 1
+			if b3 == -1 {
+				return l.errorf("lexAttrName: mismatched bracket '{'")
+			}
+		case r == ' ' || r == '\t':
+			l.backup()
+			if l.pos > l.start {
+				l.emit(itemAttr)
+			}
+			l.skipSpaces()
+			l.emit(itemAttrSpace)
+		case r == '=':
+			if l.peek() == '=' {
+				l.toStopRune(' ', true)
+				l.emit(itemAttr)
+				continue
+			}
+			l.backup()
+			l.emit(itemAttr)
+			l.next()
+			l.emit(itemAttrEqual)
+		case r == '!':
+			if l.peek() == '=' {
+				l.backup()
+				l.emit(itemAttr)
+				l.next()
+				l.next()
+				l.emit(itemAttrEqualUn)
+			}
+		case r == ',' || r == '\n':
+			if b1 == 0 && b2 == 0 && b3 == 0 {
+				l.backup()
+				if l.pos > l.start {
+					l.emit(itemAttr)
+				}
+				l.next()
+				l.emit(itemAttrComma)
+			}
+		case r == eof:
+			return l.errorf("lexAttr: expected ')'")
+		}
+	}
+}
+
+//
+//
+//
+//
+//
+//
+//
+//
+//
+//
+
+func (l *lexer) emitWordByType(item itemType) bool {
+	for {
+		if !isAlphaNumeric(l.next()) {
+			l.backup()
+			break
+		}
+	}
+	if l.pos > l.start {
+		l.emit(item)
+		return true
+	}
+	return false
+}
+
+func (l *lexer) emitLineByType(item itemType) bool {
+	var r rune
+	for {
+		r = l.next()
+		if r == '\n' || r == '\r' || r == eof {
+			l.backup()
+			if l.pos > l.start {
+				l.emit(item)
+				return true
+			}
+			return false
+		}
+	}
+}
+
+//
+
+func (l *lexer) skipSpaces() (out int) {
+	for {
+		switch l.next() {
+		case ' ', '\t':
+			out += 1
+		case '\n', eof:
+			l.backup()
+			return -1
+		default:
+			l.backup()
+			return
+		}
+	}
+}
+
+func (l *lexer) toStopRune(stopRune rune, backup bool) {
+	for {
+		switch r := l.next(); {
+		case r == stopRune:
+			if backup {
+				l.backup()
+			}
+			return
+		case r == eof || r == '\r' || r == '\n':
+			l.backup()
+			return
+		}
+	}
+}
+
// multiline advances over every following line whose indentation is deeper
// than the current depth, leaving l.pos just before the newline that ends
// the block (or at EOF). Blank lines (indents() == -1) are consumed as part
// of the block.
func (l *lexer) multiline() {
	var (
		indent int
		pos    Pos
	)
	for {
		switch r := l.next(); {
		case r == '\n':
			// Remember where the newline was so we can rewind to it if
			// the next line turns out to end the block.
			l.backup()
			pos = l.pos
			l.next()
			indent = l.indents()
			if indent != -1 {
				if indent <= l.depth {
					l.pos = pos
					return
				}
			} else {
				// Blank line: step back onto its newline and keep scanning.
				l.backup()
			}
		case r == eof:
			l.backup()
			return
		}
	}
}

+ 556 - 0
vendor/github.com/Joker/jade/jade_node.go

@@ -0,0 +1,556 @@
+package jade
+
+import (
+	"bytes"
+	"fmt"
+	"go/parser"
+	"html"
+	"io"
+	"log"
+	"regexp"
+	"strings"
+)
+
+type TagNode struct {
+	NodeType
+	Pos
+	tr       *Tree
+	Nodes    []Node
+	AttrName []string
+	AttrCode []string
+	TagName  string
+	tagType  itemType
+	tab      int
+}
+
+func (t *Tree) newTag(pos Pos, name string, tagType itemType) *TagNode {
+	return &TagNode{tr: t, NodeType: NodeTag, Pos: pos, TagName: name, tagType: tagType, tab: t.tab}
+}
+
+func (l *TagNode) append(n Node) {
+	l.Nodes = append(l.Nodes, n)
+}
+
+func (l *TagNode) tree() *Tree {
+	return l.tr
+}
+
+func (l *TagNode) attr(a, b string) {
+	for k, v := range l.AttrName {
+		if v == a {
+			l.AttrCode[k] = fmt.Sprintf(tag__arg_add, l.AttrCode[k], b)
+			return
+		}
+	}
+
+	l.AttrName = append(l.AttrName, a)
+	l.AttrCode = append(l.AttrCode, b)
+}
+
+func codeStrFmt(a string) (string, bool) {
+	var (
+		str   = []rune(a)
+		lng   = len(str)
+		first = str[0]
+		last  = str[lng-1]
+		unesc = false
+	)
+	if first == 'ߐ' { // FIXME temporarily ߐ - [AttrEqualUn] Unescaped flag set in parseAttributes()
+		str = append(str[:0], str[1:]...)
+		lng -= 1
+		first = str[0]
+		last = str[lng-1]
+		unesc = true
+	}
+	switch first {
+	case '"', '\'':
+		if first == last {
+			for k, v := range str[1 : lng-1] {
+				if v == first && str[k] != '\\' {
+					return "", false
+				}
+			}
+			if unesc {
+				return string(str[1 : lng-1]), true
+			}
+			return html.EscapeString(string(str[1 : lng-1])), true
+		}
+	case '`':
+		if first == last {
+			if !strings.ContainsAny(string(str[1:lng-1]), "`") {
+				if unesc {
+					return string(str[1 : lng-1]), true
+				}
+				return html.EscapeString(string(str[1 : lng-1])), true
+			}
+		}
+	}
+	return "", false
+}
+
// queryRe matches a ternary `cond ? then : else` expression. Compiled once
// at package scope instead of on every query call (regexp compilation is
// expensive and this runs per tag attribute).
var queryRe = regexp.MustCompile(`^(.+)\?(.+):(.+)$`)

// query rewrites a JS-style ternary expression into a call to the template
// helper qf(cond, then, else). It succeeds only when the input matches the
// ternary shape and all three pieces parse as Go expressions; otherwise it
// reports ok == false.
func query(a string) (string, bool) {
	match := queryRe.FindStringSubmatch(a)
	if len(match) != 4 {
		return "", false
	}
	for _, v := range match[1:4] {
		if _, err := parser.ParseExpr(v); err != nil {
			return "", false
		}
	}
	return "qf(" + match[1] + ", " + match[2] + ", " + match[3] + ")", true
}
+
+func (l *TagNode) String() string {
+	var b = new(bytes.Buffer)
+	l.WriteIn(b)
+	return b.String()
+}
+func (l *TagNode) WriteIn(b io.Writer) {
+	var (
+		attr = new(bytes.Buffer)
+	)
+	if len(l.AttrName) > 0 {
+		fmt.Fprint(attr, tag__arg_bgn)
+		for k, name := range l.AttrName {
+			if arg, ok := codeStrFmt(l.AttrCode[k]); ok {
+				fmt.Fprintf(attr, tag__arg_str, name, arg)
+			} else if !golang_mode {
+				fmt.Fprintf(attr, tag__arg, name, l.AttrCode[k])
+			} else if _, err := parser.ParseExpr(l.AttrCode[k]); err == nil {
+				fmt.Fprintf(attr, tag__arg, name, l.Pos, l.AttrCode[k])
+			} else if arg, ok := query(l.AttrCode[k]); ok {
+				fmt.Fprintf(attr, tag__arg, name, l.Pos, arg)
+			} else {
+				log.Fatalln("Error tag attribute value ==> ", l.AttrCode[k])
+			}
+		}
+		fmt.Fprint(attr, tag__arg_end)
+	}
+	switch l.tagType {
+	case itemTagVoid:
+		fmt.Fprintf(b, tag__void, l.TagName, attr)
+	case itemTagVoidInline:
+		fmt.Fprintf(b, tag__void, l.TagName, attr)
+	default:
+		fmt.Fprintf(b, tag__bgn, l.TagName, attr)
+		for _, inner := range l.Nodes {
+			inner.WriteIn(b)
+		}
+		fmt.Fprintf(b, tag__end, l.TagName)
+	}
+}
+
// CopyTag returns a copy of the tag node. Child Nodes are copied deeply
// (via each node's Copy), but AttrName/AttrCode are assigned directly and
// so share backing arrays with the receiver.
// NOTE(review): mutating attributes on the copy via attr() may therefore
// alias the original's slices — confirm this is intended before relying
// on copies being independent.
func (l *TagNode) CopyTag() *TagNode {
	if l == nil {
		return l
	}
	n := l.tr.newTag(l.Pos, l.TagName, l.tagType)
	n.tab = l.tab
	n.AttrCode = l.AttrCode
	n.AttrName = l.AttrName
	for _, elem := range l.Nodes {
		n.append(elem.Copy())
	}
	return n
}
+
+func (l *TagNode) Copy() Node {
+	return l.CopyTag()
+}
+
+//
+//
+
+type CondNode struct {
+	NodeType
+	Pos
+	tr       *Tree
+	Nodes    []Node
+	cond     string
+	condType itemType
+	tab      int
+}
+
+func (t *Tree) newCond(pos Pos, cond string, condType itemType) *CondNode {
+	return &CondNode{tr: t, NodeType: NodeCond, Pos: pos, cond: cond, condType: condType, tab: t.tab}
+}
+
+func (l *CondNode) append(n Node) {
+	l.Nodes = append(l.Nodes, n)
+}
+
+func (l *CondNode) tree() *Tree {
+	return l.tr
+}
+
+func (l *CondNode) String() string {
+	var b = new(bytes.Buffer)
+	l.WriteIn(b)
+	return b.String()
+}
+func (l *CondNode) WriteIn(b io.Writer) {
+	switch l.condType {
+	case itemIf:
+		fmt.Fprintf(b, cond__if, l.cond)
+	case itemUnless:
+		fmt.Fprintf(b, cond__unless, l.cond)
+	case itemCase:
+		fmt.Fprintf(b, cond__case, l.cond)
+	case itemWhile:
+		fmt.Fprintf(b, cond__while, l.cond)
+	case itemFor, itemEach:
+		if k, v, name, ok := l.parseForArgs(); ok {
+			fmt.Fprintf(b, cond__for, k, v, name)
+		} else {
+			fmt.Fprintf(b, "\n{{ Error malformed each: %s }}", l.cond)
+		}
+	case itemForIfNotContain:
+		if k, v, name, ok := l.parseForArgs(); ok {
+			fmt.Fprintf(b, cond__for_if, name, k, v, name)
+		} else {
+			fmt.Fprintf(b, "\n{{ Error malformed each: %s }}", l.cond)
+		}
+	default:
+		fmt.Fprintf(b, "{{ Error Cond %s }}", l.cond)
+	}
+
+	for _, n := range l.Nodes {
+		n.WriteIn(b)
+	}
+
+	fmt.Fprint(b, cond__end)
+}
+
+func (l *CondNode) parseForArgs() (k, v, name string, ok bool) {
+	sp := strings.SplitN(l.cond, " in ", 2)
+	if len(sp) != 2 {
+		return
+	}
+	name = strings.Trim(sp[1], " ")
+	re := regexp.MustCompile(`^(\w+)\s*,\s*(\w+)$`)
+	kv := re.FindAllStringSubmatch(strings.Trim(sp[0], " "), -1)
+	if len(kv) == 1 && len(kv[0]) == 3 {
+		k = kv[0][2]
+		v = kv[0][1]
+		ok = true
+		return
+	}
+	r2 := regexp.MustCompile(`^\w+$`)
+	kv2 := r2.FindAllString(strings.Trim(sp[0], " "), -1)
+	if len(kv2) == 1 {
+		k = "_"
+		v = kv2[0]
+		ok = true
+		return
+	}
+	return
+}
+
+func (l *CondNode) CopyCond() *CondNode {
+	if l == nil {
+		return l
+	}
+	n := l.tr.newCond(l.Pos, l.cond, l.condType)
+	n.tab = l.tab
+	for _, elem := range l.Nodes {
+		n.append(elem.Copy())
+	}
+	return n
+}
+
+func (l *CondNode) Copy() Node {
+	return l.CopyCond()
+}
+
+//
+//
+
+type CodeNode struct {
+	NodeType
+	Pos
+	tr       *Tree
+	codeType itemType
+	Code     []byte // The text; may span newlines.
+	tab      int
+}
+
+func (t *Tree) newCode(pos Pos, text string, codeType itemType) *CodeNode {
+	return &CodeNode{tr: t, NodeType: NodeCode, Pos: pos, Code: []byte(text), codeType: codeType, tab: t.tab}
+}
+
+func (t *CodeNode) String() string {
+	var b = new(bytes.Buffer)
+	t.WriteIn(b)
+	return b.String()
+}
+func (t *CodeNode) WriteIn(b io.Writer) {
+	switch t.codeType {
+	case itemCode:
+		fmt.Fprintf(b, code__longcode, t.Code)
+	case itemCodeBuffered:
+		if !golang_mode {
+			fmt.Fprintf(b, code__buffered, t.Code)
+		} else if cb, ok := codeStrFmt(string(t.Code)); ok {
+			fmt.Fprintf(b, code__buffered, t.Pos, `"`+cb+`"`)
+		} else {
+			fmt.Fprintf(b, code__buffered, t.Pos, t.Code)
+		}
+	case itemCodeUnescaped:
+		fmt.Fprintf(b, code__unescaped, t.Code)
+	case itemElse:
+		fmt.Fprintf(b, code__else)
+	case itemElseIf:
+		fmt.Fprintf(b, code__else_if, t.Code)
+	case itemForElse:
+		fmt.Fprintf(b, code__for_else)
+	case itemCaseWhen:
+		fmt.Fprintf(b, code__case_when, t.Code)
+	case itemCaseDefault:
+		fmt.Fprintf(b, code__case_def)
+	case itemMixinBlock:
+		fmt.Fprintf(b, code__mix_block)
+	default:
+		fmt.Fprintf(b, "{{ Error Code %s }}", t.Code)
+	}
+}
+
+func (t *CodeNode) tree() *Tree {
+	return t.tr
+}
+
+func (t *CodeNode) Copy() Node {
+	return &CodeNode{tr: t.tr, NodeType: NodeCode, Pos: t.Pos, codeType: t.codeType, Code: append([]byte{}, t.Code...), tab: t.tab}
+}
+
+//
+//
+
+type BlockNode struct {
+	NodeType
+	Pos
+	tr        *Tree
+	blockType itemType
+	Name      string
+	tab       int
+}
+
+func (t *Tree) newBlock(pos Pos, name string, textType itemType) *BlockNode {
+	return &BlockNode{tr: t, NodeType: NodeBlock, Pos: pos, Name: name, blockType: textType, tab: t.tab}
+}
+
+func (t *BlockNode) String() string {
+	var b = new(bytes.Buffer)
+	t.WriteIn(b)
+	return b.String()
+}
+func (t *BlockNode) WriteIn(b io.Writer) {
+	var (
+		out_blk         = t.tr.block[t.Name]
+		out_pre, ok_pre = t.tr.block[t.Name+"_prepend"]
+		out_app, ok_app = t.tr.block[t.Name+"_append"]
+	)
+	if ok_pre {
+		out_pre.WriteIn(b)
+	}
+	out_blk.WriteIn(b)
+
+	if ok_app {
+		out_app.WriteIn(b)
+	}
+}
+
+func (t *BlockNode) tree() *Tree {
+	return t.tr
+}
+
+func (t *BlockNode) Copy() Node {
+	return &BlockNode{tr: t.tr, NodeType: NodeBlock, Pos: t.Pos, blockType: t.blockType, Name: t.Name, tab: t.tab}
+}
+
+//
+//
+
+type TextNode struct {
+	NodeType
+	Pos
+	tr       *Tree
+	textType itemType
+	Text     []byte // The text; may span newlines.
+	tab      int
+}
+
+func (t *Tree) newText(pos Pos, text string, textType itemType) *TextNode {
+	return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text), textType: textType, tab: t.tab}
+}
+
+func (t *TextNode) String() string {
+	var b = new(bytes.Buffer)
+	t.WriteIn(b)
+	return b.String()
+}
+func (t *TextNode) WriteIn(b io.Writer) {
+	switch t.textType {
+	case itemComment:
+		fmt.Fprintf(b, text__comment, t.Text)
+	default:
+		fmt.Fprintf(b, text__str, t.Text)
+	}
+}
+
+func (t *TextNode) tree() *Tree {
+	return t.tr
+}
+
+func (t *TextNode) Copy() Node {
+	return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, textType: t.textType, Text: append([]byte{}, t.Text...), tab: t.tab}
+}
+
+//
+//
+
+type MixinNode struct {
+	NodeType
+	Pos
+	tr        *Tree
+	Nodes     []Node
+	AttrName  []string
+	AttrCode  []string
+	AttrRest  []string
+	MixinName string
+	block     string
+	tagType   itemType
+	tab       int
+}
+
+func (t *Tree) newMixin(pos Pos) *MixinNode {
+	return &MixinNode{tr: t, NodeType: NodeMixin, Pos: pos, tab: t.tab}
+}
+
+func (l *MixinNode) append(n Node) {
+	l.Nodes = append(l.Nodes, n)
+}
+
+func (l *MixinNode) attr(a, b string) {
+	l.AttrName = append(l.AttrName, a)
+	l.AttrCode = append(l.AttrCode, b)
+}
+
+func (l *MixinNode) tree() *Tree {
+	return l.tr
+}
+
+func (l *MixinNode) String() string {
+	var b = new(bytes.Buffer)
+	l.WriteIn(b)
+	return b.String()
+}
+func (l *MixinNode) WriteIn(b io.Writer) {
+	var (
+		attr = new(bytes.Buffer)
+		an   = len(l.AttrName)
+		rest = len(l.AttrRest)
+	)
+
+	if an > 0 {
+		fmt.Fprintf(attr, mixin__var_bgn)
+		fmt.Fprintf(attr, mixin__var_block, l.block)
+		if rest > 0 {
+			fmt.Fprintf(attr, mixin__var_rest, strings.TrimLeft(l.AttrName[an-1], "."), l.AttrRest)
+			l.AttrName = l.AttrName[:an-1]
+		}
+		for k, name := range l.AttrName {
+			fmt.Fprintf(attr, mixin__var, name, l.AttrCode[k])
+		}
+		fmt.Fprintf(attr, mixin__var_end)
+	}
+
+	fmt.Fprintf(b, mixin__bgn, attr)
+	for _, n := range l.Nodes {
+		n.WriteIn(b)
+	}
+	fmt.Fprintf(b, mixin__end)
+}
+
+func (l *MixinNode) CopyMixin() *MixinNode {
+	if l == nil {
+		return l
+	}
+	n := l.tr.newMixin(l.Pos)
+	n.tab = l.tab
+	for _, elem := range l.Nodes {
+		n.append(elem.Copy())
+	}
+	return n
+}
+
+func (l *MixinNode) Copy() Node {
+	return l.CopyMixin()
+}
+
+//
+//
+
+type DoctypeNode struct {
+	NodeType
+	Pos
+	tr      *Tree
+	doctype string
+}
+
+func (t *Tree) newDoctype(pos Pos, text string) *DoctypeNode {
+	doc := ""
+	txt := strings.Trim(text, " ")
+	if len(txt) > 0 {
+		sls := strings.SplitN(txt, " ", 2)
+		switch sls[0] {
+		case "5", "html":
+			doc = `<!DOCTYPE html%s>`
+		case "xml":
+			doc = `<?xml version="1.0" encoding="utf-8"%s ?>`
+		case "1.1", "xhtml":
+			doc = `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"%s>`
+		case "basic":
+			doc = `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd"%s>`
+		case "strict":
+			doc = `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"%s>`
+		case "frameset":
+			doc = `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd"%s>`
+		case "transitional":
+			doc = `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"%s>`
+		case "mobile":
+			doc = `<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd"%s>`
+		case "4", "4strict":
+			doc = `<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"%s>`
+		case "4frameset":
+			doc = `<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd"%s>`
+		case "4transitional":
+			doc = `<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"%s>`
+		}
+		if doc == "" {
+			doc = fmt.Sprintf("<!DOCTYPE %s>", txt)
+		} else if doc != "" && len(sls) == 2 {
+			doc = fmt.Sprintf(doc, " "+sls[1])
+		} else {
+			doc = fmt.Sprintf(doc, "")
+		}
+	} else {
+		doc = `<!DOCTYPE html>`
+	}
+	return &DoctypeNode{tr: t, NodeType: NodeDoctype, Pos: pos, doctype: doc}
+}
+func (d *DoctypeNode) String() string {
+	return fmt.Sprintf(text__str, d.doctype)
+}
+func (d *DoctypeNode) WriteIn(b io.Writer) {
+	b.Write([]byte(d.doctype))
+}
+func (d *DoctypeNode) tree() *Tree {
+	return d.tr
+}
+func (d *DoctypeNode) Copy() Node {
+	return &DoctypeNode{tr: d.tr, NodeType: NodeDoctype, Pos: d.Pos, doctype: d.doctype}
+}

+ 450 - 0
vendor/github.com/Joker/jade/jade_parse.go

@@ -0,0 +1,450 @@
+package jade
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+func (t *Tree) topParse() {
+	t.Root = t.newList(t.peek().pos)
+	var (
+		ext   bool
+		token = t.nextNonSpace()
+	)
+	if token.typ == itemExtends {
+		ext = true
+		t.Root.append(t.parseSubFile(token.val))
+		token = t.nextNonSpace()
+	}
+	for {
+		switch token.typ {
+		case itemInclude:
+			t.Root.append(t.parseInclude(token))
+		case itemBlock, itemBlockPrepend, itemBlockAppend:
+			if ext {
+				t.parseBlock(token)
+			} else {
+				t.Root.append(t.parseBlock(token))
+			}
+		case itemMixin:
+			t.mixin[token.val] = t.parseMixin(token)
+		case itemEOF:
+			return
+		case itemExtends:
+			t.errorf(`Declaration of template inheritance ("extends") should be the first thing in the file. There can only be one extends statement per file.`)
+		case itemError:
+			t.errorf("%s line: %d\n", token.val, token.line)
+		default:
+			if ext {
+				t.errorf(`Only import, named blocks and mixins can appear at the top level of an extending template`)
+			}
+			t.Root.append(t.hub(token))
+		}
+		token = t.nextNonSpace()
+	}
+}
+
+func (t *Tree) hub(token item) (n Node) {
+	for {
+		switch token.typ {
+		case itemDiv:
+			token.val = "div"
+			fallthrough
+		case itemTag, itemTagInline, itemTagVoid, itemTagVoidInline:
+			return t.parseTag(token)
+		case itemText, itemComment, itemHTMLTag:
+			return t.newText(token.pos, token.val, token.typ)
+		case itemCode, itemCodeBuffered, itemCodeUnescaped, itemMixinBlock:
+			return t.newCode(token.pos, token.val, token.typ)
+		case itemIf, itemUnless:
+			return t.parseIf(token)
+		case itemFor, itemEach, itemWhile:
+			return t.parseFor(token)
+		case itemCase:
+			return t.parseCase(token)
+		case itemBlock, itemBlockPrepend, itemBlockAppend:
+			return t.parseBlock(token)
+		case itemMixinCall:
+			return t.parseMixinUse(token)
+		case itemInclude:
+			return t.parseInclude(token)
+		case itemDoctype:
+			return t.newDoctype(token.pos, token.val)
+		case itemFilter, itemFilterText:
+			return t.parseFilter(token)
+		case itemError:
+			t.errorf("Error lex: %s line: %d\n", token.val, token.line)
+		default:
+			t.errorf(`Error hub(): unexpected token  "%s"  type  "%s"`, token.val, token.typ)
+		}
+	}
+}
+
+func (t *Tree) parseFilter(tk item) Node {
+	// TODO add golang filters
+	return t.newList(tk.pos)
+}
+
+func (t *Tree) parseTag(tk item) Node {
+	var (
+		deep = tk.depth
+		tag  = t.newTag(tk.pos, tk.val, tk.typ)
+	)
+Loop:
+	for {
+		switch token := t.nextNonSpace(); {
+		case token.depth > deep:
+			if tag.tagType == itemTagVoid || tag.tagType == itemTagVoidInline {
+				break Loop
+			}
+			t.tab++
+			tag.append(t.hub(token))
+			t.tab--
+		case token.depth == deep:
+			switch token.typ {
+			case itemClass:
+				tag.attr("class", `"`+token.val+`"`)
+			case itemID:
+				tag.attr("id", `"`+token.val+`"`)
+			case itemAttrStart:
+				t.parseAttributes(tag)
+			case itemTagEnd:
+				tag.tagType = itemTagVoid
+				return tag
+			default:
+				break Loop
+			}
+		default:
+			break Loop
+		}
+	}
+	t.backup()
+	return tag
+}
+
+type pAttr interface {
+	attr(string, string)
+}
+
+func (t *Tree) parseAttributes(tag pAttr) {
+	var (
+		aname string
+		equal bool
+		unesc bool
+		stack = make([]string, 0, 4)
+	)
+	for {
+		switch token := t.next(); token.typ {
+		case itemAttrSpace:
+			// skip
+		case itemAttr:
+			switch {
+			case aname == "":
+				aname = token.val
+			case aname != "" && !equal:
+				tag.attr(aname, `"`+aname+`"`)
+				aname = token.val
+			case aname != "" && equal:
+				if unesc {
+					stack = append(stack, "ߐ"+token.val)
+					unesc = false
+				} else {
+					stack = append(stack, token.val)
+				}
+			}
+		case itemAttrEqualUn:
+			unesc = true
+			fallthrough
+		case itemAttrEqual:
+			equal = true
+			switch len_stack := len(stack); {
+			case len_stack == 0 && aname != "":
+				// skip
+			case len_stack > 1 && aname != "":
+				tag.attr(aname, strings.Join(stack[:len(stack)-1], " "))
+
+				aname = stack[len(stack)-1]
+				stack = stack[:0]
+			case len_stack == 1 && aname == "":
+				aname = stack[0]
+				stack = stack[:0]
+			default:
+				t.errorf("unexpected '='")
+			}
+		case itemAttrComma:
+			equal = false
+			switch len_stack := len(stack); {
+			case len_stack > 0 && aname != "":
+				tag.attr(aname, strings.Join(stack, " "))
+				aname = ""
+				stack = stack[:0]
+			case len_stack == 0 && aname != "":
+				tag.attr(aname, `"`+aname+`"`)
+				aname = ""
+			}
+		case itemAttrEnd:
+			switch len_stack := len(stack); {
+			case len_stack > 0 && aname != "":
+				tag.attr(aname, strings.Join(stack, " "))
+			case len_stack > 0 && aname == "":
+				for _, a := range stack {
+					tag.attr(a, a)
+				}
+			case len_stack == 0 && aname != "":
+				tag.attr(aname, `"`+aname+`"`)
+			}
+			return
+		default:
+			t.errorf("unexpected %s", token.val)
+		}
+	}
+}
+
+func (t *Tree) parseIf(tk item) Node {
+	var (
+		deep = tk.depth
+		cond = t.newCond(tk.pos, tk.val, tk.typ)
+	)
+Loop:
+	for {
+		switch token := t.nextNonSpace(); {
+		case token.depth > deep:
+			t.tab++
+			cond.append(t.hub(token))
+			t.tab--
+		case token.depth == deep:
+			switch token.typ {
+			case itemElse:
+				ni := t.peek()
+				if ni.typ == itemIf {
+					token = t.next()
+					cond.append(t.newCode(token.pos, token.val, itemElseIf))
+				} else {
+					cond.append(t.newCode(token.pos, token.val, token.typ))
+				}
+			default:
+				break Loop
+			}
+		default:
+			break Loop
+		}
+	}
+	t.backup()
+	return cond
+}
+
+func (t *Tree) parseFor(tk item) Node {
+	var (
+		deep = tk.depth
+		cond = t.newCond(tk.pos, tk.val, tk.typ)
+	)
+Loop:
+	for {
+		switch token := t.nextNonSpace(); {
+		case token.depth > deep:
+			t.tab++
+			cond.append(t.hub(token))
+			t.tab--
+		case token.depth == deep:
+			if token.typ == itemElse {
+				cond.condType = itemForIfNotContain
+				cond.append(t.newCode(token.pos, token.val, itemForElse))
+			} else {
+				break Loop
+			}
+		default:
+			break Loop
+		}
+	}
+	t.backup()
+	return cond
+}
+
+func (t *Tree) parseCase(tk item) Node {
+	var (
+		deep   = tk.depth
+		_case_ = t.newCond(tk.pos, tk.val, tk.typ)
+	)
+	for {
+		if token := t.nextNonSpace(); token.depth > deep {
+			switch token.typ {
+			case itemCaseWhen, itemCaseDefault:
+				_case_.append(t.newCode(token.pos, token.val, token.typ))
+			default:
+				t.tab++
+				_case_.append(t.hub(token))
+				t.tab--
+			}
+		} else {
+			break
+		}
+	}
+	t.backup()
+	return _case_
+}
+
+func (t *Tree) parseMixin(tk item) *MixinNode {
+	var (
+		deep  = tk.depth
+		mixin = t.newMixin(tk.pos)
+	)
+Loop:
+	for {
+		switch token := t.nextNonSpace(); {
+		case token.depth > deep:
+			t.tab++
+			mixin.append(t.hub(token))
+			t.tab--
+		case token.depth == deep:
+			if token.typ == itemAttrStart {
+				t.parseAttributes(mixin)
+			} else {
+				break Loop
+			}
+		default:
+			break Loop
+		}
+	}
+	t.backup()
+	return mixin
+}
+
+func (t *Tree) parseMixinUse(tk item) Node {
+	tMix, ok := t.mixin[tk.val]
+	if !ok {
+		t.errorf(`Mixin "%s" must be declared before use.`, tk.val)
+	}
+	var (
+		deep  = tk.depth
+		mixin = tMix.CopyMixin()
+	)
+Loop:
+	for {
+		switch token := t.nextNonSpace(); {
+		case token.depth > deep:
+			t.tab++
+			mixin.append(t.hub(token))
+			t.tab--
+		case token.depth == deep:
+			if token.typ == itemAttrStart {
+				t.parseAttributes(mixin)
+			} else {
+				break Loop
+			}
+		default:
+			break Loop
+		}
+	}
+	t.backup()
+
+	use := len(mixin.AttrName)
+	tpl := len(tMix.AttrName)
+	switch {
+	case use < tpl:
+		i := 0
+		diff := tpl - use
+		mixin.AttrCode = append(mixin.AttrCode, make([]string, diff)...) // Extend slice
+		for index := 0; index < diff; index++ {
+			i = tpl - index - 1
+			if tMix.AttrName[i] != tMix.AttrCode[i] {
+				mixin.AttrCode[i] = tMix.AttrCode[i]
+			} else {
+				mixin.AttrCode[i] = `""`
+			}
+		}
+		mixin.AttrName = tMix.AttrName
+	case use > tpl:
+		if tpl <= 0 {
+			break
+		}
+		if strings.HasPrefix(tMix.AttrName[tpl-1], "...") {
+			mixin.AttrRest = mixin.AttrCode[tpl-1:]
+		}
+		mixin.AttrCode = mixin.AttrCode[:tpl]
+		mixin.AttrName = tMix.AttrName
+	case use == tpl:
+		mixin.AttrName = tMix.AttrName
+	}
+	return mixin
+}
+
+func (t *Tree) parseBlock(tk item) *BlockNode {
+	block := t.newList(tk.pos)
+	for {
+		token := t.nextNonSpace()
+		if token.depth > tk.depth {
+			block.append(t.hub(token))
+		} else {
+			break
+		}
+	}
+	t.backup()
+	var suf string
+	switch tk.typ {
+	case itemBlockPrepend:
+		suf = "_prepend"
+	case itemBlockAppend:
+		suf = "_append"
+	}
+	t.block[tk.val+suf] = block
+	return t.newBlock(tk.pos, tk.val, tk.typ)
+}
+
+func (t *Tree) parseInclude(tk item) *ListNode {
+	switch ext := filepath.Ext(tk.val); ext {
+	case ".jade", ".pug", "":
+		return t.parseSubFile(tk.val)
+	case ".js", ".css", ".tpl", ".md":
+		ln := t.newList(tk.pos)
+		ln.append(t.newText(tk.pos, t.read(tk.val), itemText))
+		return ln
+	default:
+		t.errorf(`file extension is not supported`)
+		return nil
+	}
+}
+
+func (t *Tree) parseSubFile(path string) *ListNode {
+	var incTree = New(path)
+	incTree.tab = t.tab
+	incTree.block = t.block
+	incTree.mixin = t.mixin
+	_, err := incTree.Parse(t.read(path))
+	if err != nil {
+		t.errorf(`%s`, err)
+	}
+	return incTree.Root
+}
+
+func (t *Tree) read(path string) string {
+	var (
+		bb  []byte
+		ext string
+		err error
+	)
+	switch ext = filepath.Ext(path); ext {
+	case ".jade", ".pug", ".js", ".css", ".tpl", ".md":
+		bb, err = ioutil.ReadFile(path)
+	case "":
+		if _, err = os.Stat(path + ".jade"); os.IsNotExist(err) {
+			if _, err = os.Stat(path + ".pug"); os.IsNotExist(err) {
+				t.errorf(`".jade" or ".pug" file required`)
+			} else {
+				ext = ".pug"
+			}
+		} else {
+			ext = ".jade"
+		}
+		bb, err = ioutil.ReadFile(path + ext)
+	default:
+		t.errorf(`file extension  %s  is not supported`, ext)
+	}
+	if err != nil {
+		dir, _ := os.Getwd()
+		t.errorf(`%s  work dir: %s `, err, dir)
+	}
+
+	return string(bb)
+}

+ 220 - 0
vendor/github.com/Joker/jade/lex.go

@@ -0,0 +1,220 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jade
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+	typ   itemType // The type of this item.
+	pos   Pos      // The starting position, in bytes, of this item in the input string.
+	val   string   // The value of this item.
+	line  int      // The line number at the start of this item.
+	depth int
+}
+
+func (i item) String() string {
+	switch {
+	case i.typ == itemEOF:
+		return "EOF"
+	case i.typ == itemError:
+		return i.val
+	// case i.typ > itemKeyword:
+	// 	return fmt.Sprintf("<%s>", i.val)
+	case len(i.val) > 10:
+		return fmt.Sprintf("%.10q...", i.val)
+	}
+	return fmt.Sprintf("%q", i.val)
+}
+
+const (
+	eof        = -1
+	spaceChars = " \t\r\n" // These are the space characters defined by Go itself.
+)
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+	name  string    // the name of the input; used only for error reports
+	input string    // the string being scanned
+	pos   Pos       // current position in the input
+	start Pos       // start position of this item
+	width Pos       // width of last rune read from input
+	items chan item // channel of scanned items
+	line  int       // 1+number of newlines seen
+
+	depth         int  // current tag depth
+	interpolation int  // interpolation depth
+	longtext      bool // long text flag
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+	if int(l.pos) >= len(l.input) {
+		l.width = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+	l.width = Pos(w)
+	l.pos += l.width
+	if r == '\n' {
+		l.line++
+	}
+	return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+	r := l.next()
+	l.backup()
+	return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+	l.pos -= l.width
+	// Correct newline count.
+	if l.width == 1 && l.input[l.pos] == '\n' {
+		l.line--
+	}
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+	l.items <- item{t, l.start, l.input[l.start:l.pos], l.line, l.depth}
+	// Some items contain text internally. If so, count their newlines.
+	switch t {
+	// case itemText, itemRawString, itemLeftDelim, itemRightDelim:
+	case itemText:
+		l.line += strings.Count(l.input[l.start:l.pos], "\n")
+	}
+	l.start = l.pos
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+	l.line += strings.Count(l.input[l.start:l.pos], "\n")
+	l.start = l.pos
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+	if strings.ContainsRune(valid, l.next()) {
+		return true
+	}
+	l.backup()
+	return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+	for strings.ContainsRune(valid, l.next()) {
+	}
+	l.backup()
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.line, l.depth}
+	return nil
+}
+
+// nextItem returns the next item from the input.
+// Called by the parser, not in the lexing goroutine.
+func (l *lexer) nextItem() item {
+	return <-l.items
+}
+
+// drain drains the output so the lexing goroutine will exit.
+// Called by the parser, not in the lexing goroutine.
+func (l *lexer) drain() {
+	for range l.items {
+	}
+}
+
+// lex creates a new scanner for the input string.
+func lex(name, input string) *lexer {
+	l := &lexer{
+		name:  name,
+		input: input,
+		items: make(chan item),
+		line:  1,
+	}
+	go l.run()
+	return l
+}
+
+func (l *lexer) run() {
+	for state := lexIndents; state != nil; {
+		state = state(l)
+	}
+	close(l.items)
+}
+
+// atTerminator reports whether the input is at valid termination character to
+// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
+// like "$x+2" not being acceptable without a space, in case we decide one
+// day to implement arithmetic.
+func (l *lexer) atTerminator() bool {
+	r := l.peek()
+	if isSpace(r) || isEndOfLine(r) {
+		return true
+	}
+	switch r {
+	case eof, '.', ',', '|', ':', ')', '(':
+		return true
+	}
+
+	return false
+}
+
+func (l *lexer) scanNumber() bool {
+	// Optional leading sign.
+	l.accept("+-")
+	// Is it hex?
+	digits := "0123456789"
+	if l.accept("0") && l.accept("xX") {
+		digits = "0123456789abcdefABCDEF"
+	}
+	l.acceptRun(digits)
+	if l.accept(".") {
+		l.acceptRun(digits)
+	}
+	if l.accept("eE") {
+		l.accept("+-")
+		l.acceptRun("0123456789")
+	}
+	// Is it imaginary?
+	l.accept("i")
+	// Next thing mustn't be alphanumeric.
+	if isAlphaNumeric(l.peek()) {
+		l.next()
+		return false
+	}
+	return true
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+// isEndOfLine reports whether r is an end-of-line character.
+func isEndOfLine(r rune) bool {
+	return r == '\r' || r == '\n'
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+	return r == '_' || r == '-' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}

+ 86 - 0
vendor/github.com/Joker/jade/node.go

@@ -0,0 +1,86 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jade
+
+import (
+	"bytes"
+	"io"
+)
+
+// var textFormat = "%s" // Changed to "%q" in tests for better error messages.
+
+// A Node is an element in the parse tree. The interface is trivial.
+// The interface contains an unexported method so that only
+// types local to this package can satisfy it.
+type Node interface {
+	Type() NodeType
+	String() string
+	WriteIn(io.Writer)
+	// Copy does a deep copy of the Node and all its components.
+	// To avoid type assertions, some XxxNodes also have specialized
+	// CopyXxx methods that return *XxxNode.
+	Copy() Node
+	Position() Pos // byte position of start of node in full original input string
+	// tree returns the containing *Tree.
+	// It is unexported so all implementations of Node are in this package.
+	tree() *Tree
+}
+
+// Pos represents a byte position in the original input text from which
+// this template was parsed.
+type Pos int
+
+func (p Pos) Position() Pos {
+	return p
+}
+
+// Nodes.
+
+// ListNode holds a sequence of nodes.
+type ListNode struct {
+	NodeType
+	Pos
+	tr    *Tree
+	Nodes []Node // The element nodes in lexical order.
+	tab   int
+}
+
+func (t *Tree) newList(pos Pos) *ListNode {
+	return &ListNode{tr: t, NodeType: NodeList, Pos: pos, tab: t.tab}
+}
+
+func (l *ListNode) append(n Node) {
+	l.Nodes = append(l.Nodes, n)
+}
+
+func (l *ListNode) tree() *Tree {
+	return l.tr
+}
+
+func (l *ListNode) String() string {
+	b := new(bytes.Buffer)
+	l.WriteIn(b)
+	return b.String()
+}
+func (l *ListNode) WriteIn(b io.Writer) {
+	for _, n := range l.Nodes {
+		n.WriteIn(b)
+	}
+}
+
+func (l *ListNode) CopyList() *ListNode {
+	if l == nil {
+		return l
+	}
+	n := l.tr.newList(l.Pos)
+	for _, elem := range l.Nodes {
+		n.append(elem.Copy())
+	}
+	return n
+}
+
+func (l *ListNode) Copy() Node {
+	return l.CopyList()
+}

+ 146 - 0
vendor/github.com/Joker/jade/parse.go

@@ -0,0 +1,146 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jade
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// Tree is the representation of a single parsed template.
+type Tree struct {
+	Name string    // name of the template represented by the tree.
+	Root *ListNode // top-level root of the tree.
+	text string    // text parsed to create the template (or its parent)
+
+	// Parsing only; cleared after parse.
+	lex       *lexer
+	token     [3]item // three-token lookahead for parser.
+	peekCount int
+	tab       int // depth of focus
+
+	mixin map[string]*MixinNode
+	block map[string]*ListNode
+}
+
+// Copy returns a copy of the Tree. Any parsing state is discarded.
+func (t *Tree) Copy() *Tree {
+	if t == nil {
+		return nil
+	}
+	return &Tree{
+		Name: t.Name,
+		Root: t.Root.CopyList(),
+		text: t.text,
+	}
+}
+
+// next returns the next token.
+func (t *Tree) next() item {
+	if t.peekCount > 0 {
+		t.peekCount--
+	} else {
+		t.token[0] = t.lex.nextItem()
+	}
+	return t.token[t.peekCount]
+}
+
+// backup backs the input stream up one token.
+func (t *Tree) backup() {
+	t.peekCount++
+}
+
+// backup2 backs the input stream up two tokens.
+// The zeroth token is already there.
+func (t *Tree) backup2(t1 item) {
+	t.token[1] = t1
+	t.peekCount = 2
+}
+
+// backup3 backs the input stream up three tokens
+// The zeroth token is already there.
+func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
+	t.token[1] = t1
+	t.token[2] = t2
+	t.peekCount = 3
+}
+
+// peek returns but does not consume the next token.
+func (t *Tree) peek() item {
+	if t.peekCount > 0 {
+		return t.token[t.peekCount-1]
+	}
+	t.peekCount = 1
+	t.token[0] = t.lex.nextItem()
+	return t.token[0]
+}
+
+// nextNonSpace returns the next non-space token.
+func (t *Tree) nextNonSpace() (token item) {
+	for {
+		token = t.next()
+		if token.typ != itemIdent && token.typ != itemEndL && token.typ != itemEmptyLine {
+			break
+		}
+	}
+	// fmt.Println("\t\tnextNonSpace", token.val)
+	return token
+}
+
+// peekNonSpace returns but does not consume the next non-space token.
+func (t *Tree) peekNonSpace() (token item) {
+	for {
+		token = t.next()
+		if token.typ != itemIdent && token.typ != itemEndL && token.typ != itemEmptyLine {
+			break
+		}
+	}
+	t.backup()
+	return token
+}
+
+// errorf formats the error and terminates processing.
+func (t *Tree) errorf(format string, args ...interface{}) {
+	t.Root = nil
+	format = fmt.Sprintf("template:%d: %s", t.token[0].line, format)
+	panic(fmt.Errorf(format, args...))
+}
+
+//
+//
+//
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (t *Tree) recover(errp *error) {
+	e := recover()
+	if e != nil {
+		if _, ok := e.(runtime.Error); ok {
+			panic(e)
+		}
+		if t != nil {
+			t.lex.drain()
+			t.lex = nil
+		}
+		*errp = e.(error)
+	}
+}
+
+func (t *Tree) Parse(text string) (tree *Tree, err error) {
+	defer t.recover(&err)
+	t.lex = lex(t.Name, text)
+	t.text = text
+	t.topParse()
+	t.lex = nil
+	return t, nil
+}
+
+// New allocates a new parse tree with the given name.
+func New(name string) *Tree {
+	return &Tree{
+		Name:  name,
+		mixin: map[string]*MixinNode{},
+		block: map[string]*ListNode{},
+	}
+}

+ 58 - 0
vendor/github.com/Joker/jade/template.go

@@ -0,0 +1,58 @@
+// Jade.go - template engine. Package implements Jade-lang templates for generating Go html/template output.
+package jade
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+)
+
+/*
+Parse parses the template definition string to construct a representation of the template for execution.
+
+Trivial usage:
+
+	package main
+
+	import (
+		"fmt"
+		"github.com/Joker/jade"
+	)
+
+	func main() {
+		tpl, err := jade.Parse("tpl_name", "doctype 5: html: body: p Hello world!")
+		if err != nil {
+			fmt.Printf("Parse error: %v", err)
+			return
+		}
+
+		fmt.Printf( "Output:\n\n%s", tpl  )
+	}
+
+Output:
+
+	<!DOCTYPE html><html><body><p>Hello world!</p></body></html>
+*/
+func Parse(name, text string) (string, error) {
+	outTpl, err := New(name).Parse(text)
+	if err != nil {
+		return "", err
+	}
+	b := new(bytes.Buffer)
+	outTpl.WriteIn(b)
+	return b.String(), nil
+}
+
+// ParseFile parse the jade template file in given filename
+func ParseFile(filename string) (string, error) {
+	bs, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return "", err
+	}
+	return Parse(filepath.Base(filename), string(bs))
+}
+
+func (t *Tree) WriteIn(b io.Writer) {
+	t.Root.WriteIn(b)
+}

+ 20 - 0
vendor/github.com/Shopify/goreferrer/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Steven Normore
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 4147 - 0
vendor/github.com/Shopify/goreferrer/default_rules.go

@@ -0,0 +1,4147 @@
+package goreferrer
+
+import (
+	"strings"
+)
+
+// DefaultRules is the package's ready-to-use referrer rule set, populated
+// at init time from the embedded defaultRules JSON plus a fixed table of
+// mobile-app user-agent rules.
+var DefaultRules RuleSet
+
+func init() {
+	// defaultRules is a compile-time constant, so a parse failure here is
+	// a programmer error in the embedded JSON; panicking at init is the
+	// conventional response.
+	domainRules, err := LoadJsonDomainRules(strings.NewReader(defaultRules))
+	if err != nil {
+		panic(err)
+	}
+
+	DefaultRules = RuleSet{
+		DomainRules: domainRules,
+		// UaRules maps user-agent substrings for in-app browsers to a
+		// synthetic referrer, since those apps send no Referer header.
+		UaRules: map[string]UaRule{
+			"Twitter": UaRule{
+				Url:    "twitter://twitter.com",
+				Domain: "twitter",
+				Tld:    "com",
+			},
+			"Pinterest": UaRule{
+				Url:    "pinterest://pinterest.com",
+				Domain: "pinterest",
+				Tld:    "com",
+			},
+			"Facebook": UaRule{
+				Url:    "facebook://facebook.com",
+				Domain: "facebook",
+				Tld:    "com",
+			},
+			// "FBAV" is the Facebook app's user-agent token; it maps to
+			// the same synthetic referrer as "Facebook".
+			"FBAV": UaRule{
+				Url:    "facebook://facebook.com",
+				Domain: "facebook",
+				Tld:    "com",
+			},
+		},
+	}
+}
+
+const defaultRules = `
+{
+    "email": {
+        "AOL Mail": {
+            "domains": [
+                "mail.aol.com",
+                "cpw.mail.aol.com"
+            ]
+        },
+        "Gmail": {
+            "domains": [
+                "mail.google.com"
+            ]
+        },
+        "Xfinity":{
+            "domains": [
+                "web.mail.comcast.net"
+            ]
+        },
+        "Orange Webmail": {
+            "domains": [
+                "orange.fr/webmail"
+            ]
+        },
+        "Outlook.com": {
+            "domains": [
+                "mail.live.com",
+                "outlook.live.com",
+                "blu180.mail.live.com",
+                "col130.mail.live.com",
+                "blu184.mail.live.com",
+                "bay179.mail.live.com",
+                "col131.mail.live.com",
+                "blu179.mail.live.com",
+                "bay180.mail.live.com",
+                "blu182.mail.live.com",
+                "blu181.mail.live.com",
+                "bay182.mail.live.com",
+                "snt149.mail.live.com",
+                "bay181.mail.live.com",
+                "col129.mail.live.com",
+                "snt148.mail.live.com",
+                "snt147.mail.live.com",
+                "snt146.mail.live.com",
+                "snt153.mail.live.com",
+                "snt152.mail.live.com",
+                "snt150.mail.live.com",
+                "snt151.mail.live.com",
+                "col128.mail.live.com",
+                "blu185.mail.live.com",
+                "dub125.mail.live.com",
+                "dub128.mail.live.com",
+                "dub127.mail.live.com",
+                "dub131.mail.live.com",
+                "col125.mail.live.com",
+                "dub130.mail.live.com",
+                "blu172.mail.live.com",
+                "bay169.mail.live.com",
+                "blu175.mail.live.com",
+                "blu173.mail.live.com",
+                "bay176.mail.live.com",
+                "blu176.mail.live.com",
+                "col126.mail.live.com",
+                "col127.mail.live.com",
+                "blu177.mail.live.com",
+                "blu174.mail.live.com",
+                "bay174.mail.live.com",
+                "bay172.mail.live.com",
+                "blu169.mail.live.com",
+                "bay177.mail.live.com",
+                "blu178.mail.live.com",
+                "blu171.mail.live.com",
+                "dub126.mail.live.com",
+                "blu168.mail.live.com",
+                "bay173.mail.live.com",
+                "bay175.mail.live.com",
+                "bay178.mail.live.com",
+                "bay168.mail.live.com",
+                "bay167.mail.live.com",
+                "blu170.mail.live.com",
+                "dub124.mail.live.com",
+                "dub122.mail.live.com",
+                "dub121.mail.live.com",
+                "dub129.mail.live.com",
+                "dub114.mail.live.com",
+                "dub110.mail.live.com",
+                "dub111.mail.live.com",
+                "dub113.mail.live.com",
+                "dub109.mail.live.com",
+                "dub120.mail.live.com",
+                "dub115.mail.live.com",
+                "dub123.mail.live.com",
+                "dub119.mail.live.com",
+                "dub118.mail.live.com",
+                "dub112.mail.live.com",
+                "dub117.mail.live.com",
+                "dub116.mail.live.com",
+                "blu183.mail.live.com"
+            ]
+        },
+        "Yahoo! Mail": {
+            "domains": [
+                "mail.yahoo.net",
+                "mail.yahoo.com",
+                "mail.yahoo.co.uk"
+            ]
+        },
+        "MailChimp": {
+            "domains": [
+                "list-manage.com",
+                "list-manage1.com",
+                "list-manage2.com",
+                "list-manage3.com",
+                "list-manage4.com",
+                "list-manage5.com",
+                "list-manage6.com",
+                "list-manage7.com",
+                "list-manage8.com",
+                "list-manage9.com"
+            ]
+        }
+    },
+    "search": {
+        "1.cz": {
+            "domains": [
+                "1.cz"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "1und1": {
+            "domains": [
+                "search.1und1.de"
+            ],
+            "parameters": [
+                "su"
+            ]
+        },
+        "ABCs\u00f8k": {
+            "domains": [
+                "abcsolk.no",
+                "verden.abcsok.no"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "AOL": {
+            "domains": [
+                "search.aol.com",
+                "search.aol.ca",
+                "m.search.aol.com",
+                "search.aol.it",
+                "aolsearch.aol.com",
+                "www.aolrecherche.aol.fr",
+                "www.aolrecherches.aol.fr",
+                "www.aolimages.aol.fr",
+                "aim.search.aol.com",
+                "www.recherche.aol.fr",
+                "find.web.aol.com",
+                "recherche.aol.ca",
+                "aolsearch.aol.co.uk",
+                "search.aol.co.uk",
+                "aolrecherche.aol.fr",
+                "sucheaol.aol.de",
+                "suche.aol.de",
+                "suche.aolsvc.de",
+                "aolbusqueda.aol.com.mx",
+                "alicesuche.aol.de",
+                "alicesuchet.aol.de",
+                "suchet2.aol.de",
+                "search.hp.my.aol.com.au",
+                "search.hp.my.aol.de",
+                "search.hp.my.aol.it",
+                "search-intl.netscape.com",
+                "www.aol.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "APOLL07": {
+            "domains": [
+                "apollo7.de"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Abacho": {
+            "domains": [
+                "www.abacho.de",
+                "www.abacho.com",
+                "www.abacho.co.uk",
+                "www.se.abacho.com",
+                "www.tr.abacho.com",
+                "www.abacho.at",
+                "www.abacho.fr",
+                "www.abacho.es",
+                "www.abacho.ch",
+                "www.abacho.it"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Acoon": {
+            "domains": [
+                "www.acoon.de"
+            ],
+            "parameters": [
+                "begriff"
+            ]
+        },
+        "Alexa": {
+            "domains": [
+                "alexa.com",
+                "search.toolbars.alexa.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Alice Adsl": {
+            "domains": [
+                "rechercher.aliceadsl.fr"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "AllTheWeb": {
+            "domains": [
+                "www.alltheweb.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Altavista": {
+            "domains": [
+                "www.altavista.com",
+                "search.altavista.com",
+                "listings.altavista.com",
+                "altavista.de",
+                "altavista.fr",
+                "be-nl.altavista.com",
+                "be-fr.altavista.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Apollo Latvia": {
+            "domains": [
+                "apollo.lv/portal/search/"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Amazon": {
+            "domains": [
+                "amazon.com",
+                "amazon.co.uk",
+                "amazon.ca",
+                "amazon.de",
+                "amazon.fr",
+                "amazonaws.com",
+                "amazon.co.jp",
+                "amazon.es",
+                "amazon.it",
+                "amazon.in"
+            ],
+            "parameters": [
+                "field-keywords"
+            ]
+        },
+        "Apontador": {
+            "domains": [
+                "apontador.com.br",
+                "www.apontador.com.br"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Aport": {
+            "domains": [
+                "sm.aport.ru"
+            ],
+            "parameters": [
+                "r"
+            ]
+        },
+        "Arcor": {
+            "domains": [
+                "www.arcor.de"
+            ],
+            "parameters": [
+                "Keywords"
+            ]
+        },
+        "Arianna": {
+            "domains": [
+                "arianna.libero.it",
+                "www.arianna.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Ask": {
+            "domains": [
+                "ask.com",
+                "web.ask.com",
+                "int.ask.com",
+                "mws.ask.com",
+                "uk.ask.com",
+                "images.ask.com",
+                "ask.reference.com",
+                "www.askkids.com",
+                "iwon.ask.com",
+                "www.ask.co.uk",
+                "www.qbyrd.com",
+                "search-results.com",
+                "uk.search-results.com",
+                "www.search-results.com",
+                "int.search-results.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Atlas": {
+            "domains": [
+                "searchatlas.centrum.cz"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Austronaut": {
+            "domains": [
+                "www2.austronaut.at",
+                "www1.astronaut.at"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Babylon": {
+            "domains": [
+                "search.babylon.com",
+                "searchassist.babylon.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Baidu": {
+            "domains": [
+                "www.baidu.com",
+                "www1.baidu.com",
+                "zhidao.baidu.com",
+                "tieba.baidu.com",
+                "news.baidu.com",
+                "web.gougou.com",
+                "m.baidu.com",
+                "image.baidu.com",
+                "tieba.baidu.com",
+                "fanyi.baidu.com",
+                "zhidao.baidu.com",
+                "www.baidu.co.th",
+                "m5.baidu.com",
+                "m.siteapp.baidu.com"
+            ],
+            "parameters": [
+                "wd",
+                "word",
+                "kw",
+                "k"
+            ]
+        },
+        "Biglobe": {
+            "domains": [
+                "cgi.search.biglobe.ne.jp"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Bing": {
+            "domains": [
+                "bing.com",
+                "www.bing.com",
+                "msnbc.msn.com",
+                "dizionario.it.msn.com",
+                "cc.bingj.com",
+                "m.bing.com"
+            ],
+            "parameters": [
+                "q",
+                "Q"
+            ]
+        },
+        "Bing Images": {
+            "domains": [
+                "bing.com/images/search",
+                "www.bing.com/images/search"
+            ],
+            "parameters": [
+                "q",
+                "Q"
+            ]
+        },
+        "Blogdigger": {
+            "domains": [
+                "www.blogdigger.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Blogpulse": {
+            "domains": [
+                "www.blogpulse.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Bluewin": {
+            "domains": [
+                "search.bluewin.ch"
+            ],
+            "parameters": [
+                "searchTerm"
+            ]
+        },
+        "Centrum": {
+            "domains": [
+                "serach.centrum.cz",
+                "morfeo.centrum.cz"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Charter": {
+            "domains": [
+                "www.charter.net"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Clix": {
+            "domains": [
+                "pesquisa.clix.pt"
+            ],
+            "parameters": [
+                "question"
+            ]
+        },
+        "Comcast": {
+            "domains": [
+                "search.comcast.net",
+                "comcast.net",
+                "xfinity.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Compuserve": {
+            "domains": [
+                "websearch.cs.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Conduit": {
+            "domains": [
+                "search.conduit.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Crawler": {
+            "domains": [
+                "www.crawler.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Cuil": {
+            "domains": [
+                "www.cuil.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Daemon search": {
+            "domains": [
+                "daemon-search.com",
+                "my.daemon-search.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "DasOertliche": {
+            "domains": [
+                "www.dasoertliche.de"
+            ],
+            "parameters": [
+                "kw"
+            ]
+        },
+        "DasTelefonbuch": {
+            "domains": [
+                "www1.dastelefonbuch.de"
+            ],
+            "parameters": [
+                "kw"
+            ]
+        },
+        "Daum": {
+            "domains": [
+                "search.daum.net"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Delfi": {
+            "domains": [
+                "otsing.delfi.ee"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Delfi latvia": {
+            "domains": [
+                "smart.delfi.lv"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Digg": {
+            "domains": [
+                "digg.com"
+            ],
+            "parameters": [
+                "s"
+            ]
+        },
+        "DuckDuckGo": {
+            "domains": [
+                "duckduckgo.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Ecosia": {
+            "domains": [
+                "ecosia.org"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "El Mundo": {
+            "domains": [
+                "ariadna.elmundo.es"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Eniro": {
+            "domains": [
+                "www.eniro.se"
+            ],
+            "parameters": [
+                "q",
+                "search_word"
+            ]
+        },
+        "Eurip": {
+            "domains": [
+                "www.eurip.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Euroseek": {
+            "domains": [
+                "www.euroseek.com"
+            ],
+            "parameters": [
+                "string"
+            ]
+        },
+        "Everyclick": {
+            "domains": [
+                "www.everyclick.com"
+            ],
+            "parameters": [
+                "keyword"
+            ]
+        },
+        "Exalead": {
+            "domains": [
+                "www.exalead.fr",
+                "www.exalead.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Excite": {
+            "domains": [
+                "search.excite.it",
+                "search.excite.fr",
+                "search.excite.de",
+                "search.excite.co.uk",
+                "serach.excite.es",
+                "search.excite.nl",
+                "msxml.excite.com",
+                "www.excite.co.jp"
+            ],
+            "parameters": [
+                "q",
+                "search"
+            ]
+        },
+        "Fast Browser Search": {
+            "domains": [
+                "www.fastbrowsersearch.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Fireball": {
+            "domains": [
+                "www.fireball.de"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Firstfind": {
+            "domains": [
+                "www.firstsfind.com"
+            ],
+            "parameters": [
+                "qry"
+            ]
+        },
+        "Fixsuche": {
+            "domains": [
+                "www.fixsuche.de"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Flix": {
+            "domains": [
+                "www.flix.de"
+            ],
+            "parameters": [
+                "keyword"
+            ]
+        },
+        "Forestle": {
+            "domains": [
+                "forestle.org",
+                "www.forestle.org",
+                "forestle.mobi"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Francite": {
+            "domains": [
+                "recherche.francite.com"
+            ],
+            "parameters": [
+                "name"
+            ]
+        },
+        "Free": {
+            "domains": [
+                "search.free.fr",
+                "search1-2.free.fr",
+                "search1-1.free.fr"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Freecause": {
+            "domains": [
+                "search.freecause.com"
+            ],
+            "parameters": [
+                "p"
+            ]
+        },
+        "Freenet": {
+            "domains": [
+                "suche.freenet.de"
+            ],
+            "parameters": [
+                "query",
+                "Keywords"
+            ]
+        },
+        "FriendFeed": {
+            "domains": [
+                "friendfeed.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "GAIS": {
+            "domains": [
+                "gais.cs.ccu.edu.tw"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "GMX": {
+            "domains": [
+                "suche.gmx.net"
+            ],
+            "parameters": [
+                "su"
+            ]
+        },
+        "Geona": {
+            "domains": [
+                "geona.net"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Gigablast": {
+            "domains": [
+                "www.gigablast.com",
+                "dir.gigablast.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Gnadenmeer": {
+            "domains": [
+                "www.gnadenmeer.de"
+            ],
+            "parameters": [
+                "keyword"
+            ]
+        },
+        "Gomeo": {
+            "domains": [
+                "www.gomeo.com"
+            ],
+            "parameters": [
+                "Keywords"
+            ]
+        },
+        "Google": {
+            "domains": [
+                "www.google.com",
+                "www.google.ac",
+                "www.google.ad",
+                "www.google.al",
+                "www.google.com.af",
+                "www.google.com.ag",
+                "www.google.com.ai",
+                "www.google.am",
+                "www.google.it.ao",
+                "www.google.com.ar",
+                "www.google.as",
+                "www.google.at",
+                "www.google.com.au",
+                "www.google.az",
+                "www.google.ba",
+                "www.google.com.bd",
+                "www.google.be",
+                "www.google.bf",
+                "www.google.bg",
+                "www.google.com.bh",
+                "www.google.bi",
+                "www.google.bj",
+                "www.google.com.bn",
+                "www.google.com.bo",
+                "www.google.com.br",
+                "www.google.bs",
+                "www.google.co.bw",
+                "www.google.com.by",
+                "www.google.by",
+                "www.google.com.bz",
+                "www.google.ca",
+                "www.google.com.kh",
+                "www.google.cc",
+                "www.google.cd",
+                "www.google.cf",
+                "www.google.cat",
+                "www.google.cg",
+                "www.google.ch",
+                "www.google.ci",
+                "www.google.co.ck",
+                "www.google.cl",
+                "www.google.cm",
+                "www.google.cn",
+                "www.google.com.co",
+                "www.google.co.cr",
+                "www.google.com.cu",
+                "www.google.cv",
+                "www.google.com.cy",
+                "www.google.cz",
+                "www.google.de",
+                "www.google.dj",
+                "www.google.dk",
+                "www.google.dm",
+                "www.google.com.do",
+                "www.google.dz",
+                "www.google.com.ec",
+                "www.google.ee",
+                "www.google.com.eg",
+                "www.google.es",
+                "www.google.com.et",
+                "www.google.fi",
+                "www.google.com.fj",
+                "www.google.fm",
+                "www.google.fr",
+                "www.google.ga",
+                "www.google.gd",
+                "www.google.ge",
+                "www.google.gf",
+                "www.google.gg",
+                "www.google.com.gh",
+                "www.google.com.gi",
+                "www.google.gl",
+                "www.google.gm",
+                "www.google.gp",
+                "www.google.gr",
+                "www.google.com.gt",
+                "www.google.gy",
+                "www.google.com.hk",
+                "www.google.hn",
+                "www.google.hr",
+                "www.google.ht",
+                "www.google.hu",
+                "www.google.co.id",
+                "www.google.iq",
+                "www.google.ie",
+                "www.google.co.il",
+                "www.google.im",
+                "www.google.co.in",
+                "www.google.io",
+                "www.google.is",
+                "www.google.it",
+                "www.google.je",
+                "www.google.com.jm",
+                "www.google.jo",
+                "www.google.co.jp",
+                "www.google.co.ke",
+                "www.google.com.kh",
+                "www.google.ki",
+                "www.google.kg",
+                "www.google.co.kr",
+                "www.google.com.kw",
+                "www.google.kz",
+                "www.google.la",
+                "www.google.com.lb",
+                "www.google.com.lc",
+                "www.google.li",
+                "www.google.lk",
+                "www.google.co.ls",
+                "www.google.lt",
+                "www.google.lu",
+                "www.google.lv",
+                "www.google.com.ly",
+                "www.google.co.ma",
+                "www.google.md",
+                "www.google.me",
+                "www.google.mg",
+                "www.google.mk",
+                "www.google.ml",
+                "www.google.mn",
+                "www.google.ms",
+                "www.google.com.mt",
+                "www.google.mu",
+                "www.google.mv",
+                "www.google.mw",
+                "www.google.com.mx",
+                "www.google.com.my",
+                "www.google.co.mz",
+                "www.google.com.na",
+                "www.google.ne",
+                "www.google.com.nf",
+                "www.google.com.ng",
+                "www.google.com.ni",
+                "www.google.nl",
+                "www.google.no",
+                "www.google.com.np",
+                "www.google.nr",
+                "www.google.nu",
+                "www.google.co.nz",
+                "www.google.com.om",
+                "www.google.com.pa",
+                "www.google.com.pe",
+                "www.google.com.ph",
+                "www.google.com.pk",
+                "www.google.pl",
+                "www.google.pn",
+                "www.google.com.pr",
+                "www.google.ps",
+                "www.google.pt",
+                "www.google.com.py",
+                "www.google.com.qa",
+                "www.google.ro",
+                "www.google.rs",
+                "www.google.ru",
+                "www.google.rw",
+                "www.google.com.sa",
+                "www.google.com.sb",
+                "www.google.sc",
+                "www.google.se",
+                "www.google.com.sg",
+                "www.google.sh",
+                "www.google.si",
+                "www.google.sk",
+                "www.google.com.sl",
+                "www.google.sn",
+                "www.google.sm",
+                "www.google.so",
+                "www.google.st",
+                "www.google.com.sv",
+                "www.google.td",
+                "www.google.tg",
+                "www.google.co.th",
+                "www.google.com.tj",
+                "www.google.tk",
+                "www.google.tl",
+                "www.google.tm",
+                "www.google.to",
+                "www.google.com.tn",
+                "www.google.com.tr",
+                "www.google.tt",
+                "www.google.tn",
+                "www.google.com.tw",
+                "www.google.co.tz",
+                "www.google.com.ua",
+                "www.google.co.ug",
+                "www.google.ae",
+                "www.google.co.uk",
+                "www.google.us",
+                "www.google.com.uy",
+                "www.google.co.uz",
+                "www.google.com.vc",
+                "www.google.co.ve",
+                "www.google.vg",
+                "www.google.co.vi",
+                "www.google.com.vn",
+                "www.google.vu",
+                "www.google.ws",
+                "www.google.co.za",
+                "www.google.co.zm",
+                "www.google.co.zw",
+                "www.google.com.mm",
+                "www.google.sr",
+                "www.google.com.pg",
+                "www.google.bt",
+                "www.google.ng",
+                "www.google.com.iq",
+                "www.google.co.ao",
+                "google.com",
+                "google.ac",
+                "google.ad",
+                "google.al",
+                "google.com.af",
+                "google.com.ag",
+                "google.com.ai",
+                "google.am",
+                "google.it.ao",
+                "google.com.ar",
+                "google.as",
+                "google.at",
+                "google.com.au",
+                "google.az",
+                "google.ba",
+                "google.com.bd",
+                "google.be",
+                "google.bf",
+                "google.bg",
+                "google.com.bh",
+                "google.bi",
+                "google.bj",
+                "google.com.bn",
+                "google.com.bo",
+                "google.com.br",
+                "google.bs",
+                "google.co.bw",
+                "google.com.by",
+                "google.by",
+                "google.com.bz",
+                "google.ca",
+                "google.com.kh",
+                "google.cc",
+                "google.cd",
+                "google.cf",
+                "google.cat",
+                "google.cg",
+                "google.ch",
+                "google.ci",
+                "google.co.ck",
+                "google.cl",
+                "google.cm",
+                "google.cn",
+                "google.com.co",
+                "google.co.cr",
+                "google.com.cu",
+                "google.cv",
+                "google.com.cy",
+                "google.cz",
+                "google.de",
+                "google.dj",
+                "google.dk",
+                "google.dm",
+                "google.com.do",
+                "google.dz",
+                "google.com.ec",
+                "google.ee",
+                "google.com.eg",
+                "google.es",
+                "google.com.et",
+                "google.fi",
+                "google.com.fj",
+                "google.fm",
+                "google.fr",
+                "google.ga",
+                "google.gd",
+                "google.ge",
+                "google.gf",
+                "google.gg",
+                "google.com.gh",
+                "google.com.gi",
+                "google.gl",
+                "google.gm",
+                "google.gp",
+                "google.gr",
+                "google.com.gt",
+                "google.gy",
+                "google.com.hk",
+                "google.hn",
+                "google.hr",
+                "google.ht",
+                "google.hu",
+                "google.co.id",
+                "google.iq",
+                "google.ie",
+                "google.co.il",
+                "google.im",
+                "google.co.in",
+                "google.io",
+                "google.is",
+                "google.it",
+                "google.je",
+                "google.com.jm",
+                "google.jo",
+                "google.co.jp",
+                "google.co.ke",
+                "google.com.kh",
+                "google.ki",
+                "google.kg",
+                "google.co.kr",
+                "google.com.kw",
+                "google.kz",
+                "google.la",
+                "google.com.lb",
+                "google.com.lc",
+                "google.li",
+                "google.lk",
+                "google.co.ls",
+                "google.lt",
+                "google.lu",
+                "google.lv",
+                "google.com.ly",
+                "google.co.ma",
+                "google.md",
+                "google.me",
+                "google.mg",
+                "google.mk",
+                "google.ml",
+                "google.mn",
+                "google.ms",
+                "google.com.mt",
+                "google.mu",
+                "google.mv",
+                "google.mw",
+                "google.com.mx",
+                "google.com.my",
+                "google.co.mz",
+                "google.com.na",
+                "google.ne",
+                "google.com.nf",
+                "google.com.ng",
+                "google.com.ni",
+                "google.nl",
+                "google.no",
+                "google.com.np",
+                "google.nr",
+                "google.nu",
+                "google.co.nz",
+                "google.com.om",
+                "google.com.pa",
+                "google.com.pe",
+                "google.com.ph",
+                "google.com.pk",
+                "google.pl",
+                "google.pn",
+                "google.com.pr",
+                "google.ps",
+                "google.pt",
+                "google.com.py",
+                "google.com.qa",
+                "google.ro",
+                "google.rs",
+                "google.ru",
+                "google.rw",
+                "google.com.sa",
+                "google.com.sb",
+                "google.sc",
+                "google.se",
+                "google.com.sg",
+                "google.sh",
+                "google.si",
+                "google.sk",
+                "google.com.sl",
+                "google.sn",
+                "google.sm",
+                "google.so",
+                "google.st",
+                "google.com.sv",
+                "google.td",
+                "google.tg",
+                "google.tn",
+                "google.co.th",
+                "google.com.tj",
+                "google.tk",
+                "google.tl",
+                "google.tm",
+                "google.to",
+                "google.com.tn",
+                "google.com.tr",
+                "google.tt",
+                "google.com.tw",
+                "google.co.tz",
+                "google.com.ua",
+                "google.co.ug",
+                "google.ae",
+                "google.co.uk",
+                "google.us",
+                "google.com.uy",
+                "google.co.uz",
+                "google.com.vc",
+                "google.co.ve",
+                "google.vg",
+                "google.co.vi",
+                "google.com.vn",
+                "google.vu",
+                "google.ws",
+                "google.co.za",
+                "google.co.zm",
+                "google.co.zw",
+                "search.avg.com",
+                "isearch.avg.com",
+                "www.cnn.com",
+                "darkoogle.com",
+                "search.darkoogle.com",
+                "search.foxtab.com",
+                "www.gooofullsearch.com",
+                "search.hiyo.com",
+                "search.incredimail.com",
+                "search1.incredimail.com",
+                "search2.incredimail.com",
+                "search3.incredimail.com",
+                "search4.incredimail.com",
+                "search.incredibar.com",
+                "search.sweetim.com",
+                "www.fastweb.it",
+                "search.juno.com",
+                "find.tdc.dk",
+                "searchresults.verizon.com",
+                "search.walla.co.il",
+                "search.alot.com",
+                "www.googleearth.de",
+                "www.googleearth.fr",
+                "webcache.googleusercontent.com",
+                "encrypted.google.com",
+                "googlesyndicatedsearch.com",
+                "www.googleadservices.com"
+            ],
+            "parameters": [
+                "q",
+                "query",
+                "Keywords",
+                "*"
+            ]
+        },
+        "Google Blogsearch": {
+            "domains": [
+                "blogsearch.google.ac",
+                "blogsearch.google.ad",
+                "blogsearch.google.ae",
+                "blogsearch.google.am",
+                "blogsearch.google.as",
+                "blogsearch.google.at",
+                "blogsearch.google.az",
+                "blogsearch.google.ba",
+                "blogsearch.google.be",
+                "blogsearch.google.bf",
+                "blogsearch.google.bg",
+                "blogsearch.google.bi",
+                "blogsearch.google.bj",
+                "blogsearch.google.bs",
+                "blogsearch.google.by",
+                "blogsearch.google.ca",
+                "blogsearch.google.cat",
+                "blogsearch.google.cc",
+                "blogsearch.google.cd",
+                "blogsearch.google.cf",
+                "blogsearch.google.cg",
+                "blogsearch.google.ch",
+                "blogsearch.google.ci",
+                "blogsearch.google.cl",
+                "blogsearch.google.cm",
+                "blogsearch.google.cn",
+                "blogsearch.google.co.bw",
+                "blogsearch.google.co.ck",
+                "blogsearch.google.co.cr",
+                "blogsearch.google.co.id",
+                "blogsearch.google.co.il",
+                "blogsearch.google.co.in",
+                "blogsearch.google.co.jp",
+                "blogsearch.google.co.ke",
+                "blogsearch.google.co.kr",
+                "blogsearch.google.co.ls",
+                "blogsearch.google.co.ma",
+                "blogsearch.google.co.mz",
+                "blogsearch.google.co.nz",
+                "blogsearch.google.co.th",
+                "blogsearch.google.co.tz",
+                "blogsearch.google.co.ug",
+                "blogsearch.google.co.uk",
+                "blogsearch.google.co.uz",
+                "blogsearch.google.co.ve",
+                "blogsearch.google.co.vi",
+                "blogsearch.google.co.za",
+                "blogsearch.google.co.zm",
+                "blogsearch.google.co.zw",
+                "blogsearch.google.com",
+                "blogsearch.google.com.af",
+                "blogsearch.google.com.ag",
+                "blogsearch.google.com.ai",
+                "blogsearch.google.com.ar",
+                "blogsearch.google.com.au",
+                "blogsearch.google.com.bd",
+                "blogsearch.google.com.bh",
+                "blogsearch.google.com.bn",
+                "blogsearch.google.com.bo",
+                "blogsearch.google.com.br",
+                "blogsearch.google.com.by",
+                "blogsearch.google.com.bz",
+                "blogsearch.google.com.co",
+                "blogsearch.google.com.cu",
+                "blogsearch.google.com.cy",
+                "blogsearch.google.com.do",
+                "blogsearch.google.com.ec",
+                "blogsearch.google.com.eg",
+                "blogsearch.google.com.et",
+                "blogsearch.google.com.fj",
+                "blogsearch.google.com.gh",
+                "blogsearch.google.com.gi",
+                "blogsearch.google.com.gt",
+                "blogsearch.google.com.hk",
+                "blogsearch.google.com.jm",
+                "blogsearch.google.com.kh",
+                "blogsearch.google.com.kw",
+                "blogsearch.google.com.lb",
+                "blogsearch.google.com.lc",
+                "blogsearch.google.com.ly",
+                "blogsearch.google.com.mt",
+                "blogsearch.google.com.mx",
+                "blogsearch.google.com.my",
+                "blogsearch.google.com.na",
+                "blogsearch.google.com.nf",
+                "blogsearch.google.com.ng",
+                "blogsearch.google.com.ni",
+                "blogsearch.google.com.np",
+                "blogsearch.google.com.om",
+                "blogsearch.google.com.pa",
+                "blogsearch.google.com.pe",
+                "blogsearch.google.com.ph",
+                "blogsearch.google.com.pk",
+                "blogsearch.google.com.pr",
+                "blogsearch.google.com.py",
+                "blogsearch.google.com.qa",
+                "blogsearch.google.com.sa",
+                "blogsearch.google.com.sb",
+                "blogsearch.google.com.sg",
+                "blogsearch.google.com.sl",
+                "blogsearch.google.com.sv",
+                "blogsearch.google.com.tj",
+                "blogsearch.google.com.tn",
+                "blogsearch.google.com.tr",
+                "blogsearch.google.com.tw",
+                "blogsearch.google.com.ua",
+                "blogsearch.google.com.uy",
+                "blogsearch.google.com.vc",
+                "blogsearch.google.com.vn",
+                "blogsearch.google.cv",
+                "blogsearch.google.cz",
+                "blogsearch.google.de",
+                "blogsearch.google.dj",
+                "blogsearch.google.dk",
+                "blogsearch.google.dm",
+                "blogsearch.google.dz",
+                "blogsearch.google.ee",
+                "blogsearch.google.es",
+                "blogsearch.google.fi",
+                "blogsearch.google.fm",
+                "blogsearch.google.fr",
+                "blogsearch.google.ga",
+                "blogsearch.google.gd",
+                "blogsearch.google.ge",
+                "blogsearch.google.gf",
+                "blogsearch.google.gg",
+                "blogsearch.google.gl",
+                "blogsearch.google.gm",
+                "blogsearch.google.gp",
+                "blogsearch.google.gr",
+                "blogsearch.google.gy",
+                "blogsearch.google.hn",
+                "blogsearch.google.hr",
+                "blogsearch.google.ht",
+                "blogsearch.google.hu",
+                "blogsearch.google.ie",
+                "blogsearch.google.im",
+                "blogsearch.google.io",
+                "blogsearch.google.iq",
+                "blogsearch.google.is",
+                "blogsearch.google.it",
+                "blogsearch.google.it.ao",
+                "blogsearch.google.je",
+                "blogsearch.google.jo",
+                "blogsearch.google.kg",
+                "blogsearch.google.ki",
+                "blogsearch.google.kz",
+                "blogsearch.google.la",
+                "blogsearch.google.li",
+                "blogsearch.google.lk",
+                "blogsearch.google.lt",
+                "blogsearch.google.lu",
+                "blogsearch.google.lv",
+                "blogsearch.google.md",
+                "blogsearch.google.me",
+                "blogsearch.google.mg",
+                "blogsearch.google.mk",
+                "blogsearch.google.ml",
+                "blogsearch.google.mn",
+                "blogsearch.google.ms",
+                "blogsearch.google.mu",
+                "blogsearch.google.mv",
+                "blogsearch.google.mw",
+                "blogsearch.google.ne",
+                "blogsearch.google.nl",
+                "blogsearch.google.no",
+                "blogsearch.google.nr",
+                "blogsearch.google.nu",
+                "blogsearch.google.pl",
+                "blogsearch.google.pn",
+                "blogsearch.google.ps",
+                "blogsearch.google.pt",
+                "blogsearch.google.ro",
+                "blogsearch.google.rs",
+                "blogsearch.google.ru",
+                "blogsearch.google.rw",
+                "blogsearch.google.sc",
+                "blogsearch.google.se",
+                "blogsearch.google.sh",
+                "blogsearch.google.si",
+                "blogsearch.google.sk",
+                "blogsearch.google.sm",
+                "blogsearch.google.sn",
+                "blogsearch.google.so",
+                "blogsearch.google.st",
+                "blogsearch.google.td",
+                "blogsearch.google.tg",
+                "blogsearch.google.tk",
+                "blogsearch.google.tl",
+                "blogsearch.google.tm",
+                "blogsearch.google.to",
+                "blogsearch.google.tt",
+                "blogsearch.google.us",
+                "blogsearch.google.vg",
+                "blogsearch.google.vu",
+                "blogsearch.google.ws"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Google Images": {
+            "domains": [
+                "google.ac/imgres",
+                "google.ad/imgres",
+                "google.ae/imgres",
+                "google.am/imgres",
+                "google.as/imgres",
+                "google.at/imgres",
+                "google.az/imgres",
+                "google.ba/imgres",
+                "google.be/imgres",
+                "google.bf/imgres",
+                "google.bg/imgres",
+                "google.bi/imgres",
+                "google.bj/imgres",
+                "google.bs/imgres",
+                "google.by/imgres",
+                "google.ca/imgres",
+                "google.cat/imgres",
+                "google.cc/imgres",
+                "google.cd/imgres",
+                "google.cf/imgres",
+                "google.cg/imgres",
+                "google.ch/imgres",
+                "google.ci/imgres",
+                "google.cl/imgres",
+                "google.cm/imgres",
+                "google.cn/imgres",
+                "google.co.bw/imgres",
+                "google.co.ck/imgres",
+                "google.co.cr/imgres",
+                "google.co.id/imgres",
+                "google.co.il/imgres",
+                "google.co.in/imgres",
+                "google.co.jp/imgres",
+                "google.co.ke/imgres",
+                "google.co.kr/imgres",
+                "google.co.ls/imgres",
+                "google.co.ma/imgres",
+                "google.co.mz/imgres",
+                "google.co.nz/imgres",
+                "google.co.th/imgres",
+                "google.co.tz/imgres",
+                "google.co.ug/imgres",
+                "google.co.uk/imgres",
+                "google.co.uz/imgres",
+                "google.co.ve/imgres",
+                "google.co.vi/imgres",
+                "google.co.za/imgres",
+                "google.co.zm/imgres",
+                "google.co.zw/imgres",
+                "google.com/imgres",
+                "google.com.af/imgres",
+                "google.com.ag/imgres",
+                "google.com.ai/imgres",
+                "google.com.ar/imgres",
+                "google.com.au/imgres",
+                "google.com.bd/imgres",
+                "google.com.bh/imgres",
+                "google.com.bn/imgres",
+                "google.com.bo/imgres",
+                "google.com.br/imgres",
+                "google.com.by/imgres",
+                "google.com.bz/imgres",
+                "google.com.co/imgres",
+                "google.com.cu/imgres",
+                "google.com.cy/imgres",
+                "google.com.do/imgres",
+                "google.com.ec/imgres",
+                "google.com.eg/imgres",
+                "google.com.et/imgres",
+                "google.com.fj/imgres",
+                "google.com.gh/imgres",
+                "google.com.gi/imgres",
+                "google.com.gt/imgres",
+                "google.com.hk/imgres",
+                "google.com.jm/imgres",
+                "google.com.kh/imgres",
+                "google.com.kw/imgres",
+                "google.com.lb/imgres",
+                "google.com.lc/imgres",
+                "google.com.ly/imgres",
+                "google.com.mt/imgres",
+                "google.com.mx/imgres",
+                "google.com.my/imgres",
+                "google.com.na/imgres",
+                "google.com.nf/imgres",
+                "google.com.ng/imgres",
+                "google.com.ni/imgres",
+                "google.com.np/imgres",
+                "google.com.om/imgres",
+                "google.com.pa/imgres",
+                "google.com.pe/imgres",
+                "google.com.ph/imgres",
+                "google.com.pk/imgres",
+                "google.com.pr/imgres",
+                "google.com.py/imgres",
+                "google.com.qa/imgres",
+                "google.com.sa/imgres",
+                "google.com.sb/imgres",
+                "google.com.sg/imgres",
+                "google.com.sl/imgres",
+                "google.com.sv/imgres",
+                "google.com.tj/imgres",
+                "google.com.tn/imgres",
+                "google.com.tr/imgres",
+                "google.com.tw/imgres",
+                "google.com.ua/imgres",
+                "google.com.uy/imgres",
+                "google.com.vc/imgres",
+                "google.com.vn/imgres",
+                "google.cv/imgres",
+                "google.cz/imgres",
+                "google.de/imgres",
+                "google.dj/imgres",
+                "google.dk/imgres",
+                "google.dm/imgres",
+                "google.dz/imgres",
+                "google.ee/imgres",
+                "google.es/imgres",
+                "google.fi/imgres",
+                "google.fm/imgres",
+                "google.fr/imgres",
+                "google.ga/imgres",
+                "google.gd/imgres",
+                "google.ge/imgres",
+                "google.gf/imgres",
+                "google.gg/imgres",
+                "google.gl/imgres",
+                "google.gm/imgres",
+                "google.gp/imgres",
+                "google.gr/imgres",
+                "google.gy/imgres",
+                "google.hn/imgres",
+                "google.hr/imgres",
+                "google.ht/imgres",
+                "google.hu/imgres",
+                "google.ie/imgres",
+                "google.im/imgres",
+                "google.io/imgres",
+                "google.iq/imgres",
+                "google.is/imgres",
+                "google.it/imgres",
+                "google.it.ao/imgres",
+                "google.je/imgres",
+                "google.jo/imgres",
+                "google.kg/imgres",
+                "google.ki/imgres",
+                "google.kz/imgres",
+                "google.la/imgres",
+                "google.li/imgres",
+                "google.lk/imgres",
+                "google.lt/imgres",
+                "google.lu/imgres",
+                "google.lv/imgres",
+                "google.md/imgres",
+                "google.me/imgres",
+                "google.mg/imgres",
+                "google.mk/imgres",
+                "google.ml/imgres",
+                "google.mn/imgres",
+                "google.ms/imgres",
+                "google.mu/imgres",
+                "google.mv/imgres",
+                "google.mw/imgres",
+                "google.ne/imgres",
+                "google.nl/imgres",
+                "google.no/imgres",
+                "google.nr/imgres",
+                "google.nu/imgres",
+                "google.pl/imgres",
+                "google.pn/imgres",
+                "google.ps/imgres",
+                "google.pt/imgres",
+                "google.ro/imgres",
+                "google.rs/imgres",
+                "google.ru/imgres",
+                "google.rw/imgres",
+                "google.sc/imgres",
+                "google.se/imgres",
+                "google.sh/imgres",
+                "google.si/imgres",
+                "google.sk/imgres",
+                "google.sm/imgres",
+                "google.sn/imgres",
+                "google.so/imgres",
+                "google.st/imgres",
+                "google.td/imgres",
+                "google.tg/imgres",
+                "google.tk/imgres",
+                "google.tl/imgres",
+                "google.tm/imgres",
+                "google.to/imgres",
+                "google.tt/imgres",
+                "google.us/imgres",
+                "google.vg/imgres",
+                "google.vu/imgres",
+                "images.google.ws",
+                "images.google.ac",
+                "images.google.ad",
+                "images.google.ae",
+                "images.google.am",
+                "images.google.as",
+                "images.google.at",
+                "images.google.az",
+                "images.google.ba",
+                "images.google.be",
+                "images.google.bf",
+                "images.google.bg",
+                "images.google.bi",
+                "images.google.bj",
+                "images.google.bs",
+                "images.google.by",
+                "images.google.ca",
+                "images.google.cat",
+                "images.google.cc",
+                "images.google.cd",
+                "images.google.cf",
+                "images.google.cg",
+                "images.google.ch",
+                "images.google.ci",
+                "images.google.cl",
+                "images.google.cm",
+                "images.google.cn",
+                "images.google.co.bw",
+                "images.google.co.ck",
+                "images.google.co.cr",
+                "images.google.co.id",
+                "images.google.co.il",
+                "images.google.co.in",
+                "images.google.co.jp",
+                "images.google.co.ke",
+                "images.google.co.kr",
+                "images.google.co.ls",
+                "images.google.co.ma",
+                "images.google.co.mz",
+                "images.google.co.nz",
+                "images.google.co.th",
+                "images.google.co.tz",
+                "images.google.co.ug",
+                "images.google.co.uk",
+                "images.google.co.uz",
+                "images.google.co.ve",
+                "images.google.co.vi",
+                "images.google.co.za",
+                "images.google.co.zm",
+                "images.google.co.zw",
+                "images.google.com",
+                "images.google.com.af",
+                "images.google.com.ag",
+                "images.google.com.ai",
+                "images.google.com.ar",
+                "images.google.com.au",
+                "images.google.com.bd",
+                "images.google.com.bh",
+                "images.google.com.bn",
+                "images.google.com.bo",
+                "images.google.com.br",
+                "images.google.com.by",
+                "images.google.com.bz",
+                "images.google.com.co",
+                "images.google.com.cu",
+                "images.google.com.cy",
+                "images.google.com.do",
+                "images.google.com.ec",
+                "images.google.com.eg",
+                "images.google.com.et",
+                "images.google.com.fj",
+                "images.google.com.gh",
+                "images.google.com.gi",
+                "images.google.com.gt",
+                "images.google.com.hk",
+                "images.google.com.jm",
+                "images.google.com.kh",
+                "images.google.com.kw",
+                "images.google.com.lb",
+                "images.google.com.lc",
+                "images.google.com.ly",
+                "images.google.com.mt",
+                "images.google.com.mx",
+                "images.google.com.my",
+                "images.google.com.na",
+                "images.google.com.nf",
+                "images.google.com.ng",
+                "images.google.com.ni",
+                "images.google.com.np",
+                "images.google.com.om",
+                "images.google.com.pa",
+                "images.google.com.pe",
+                "images.google.com.ph",
+                "images.google.com.pk",
+                "images.google.com.pr",
+                "images.google.com.py",
+                "images.google.com.qa",
+                "images.google.com.sa",
+                "images.google.com.sb",
+                "images.google.com.sg",
+                "images.google.com.sl",
+                "images.google.com.sv",
+                "images.google.com.tj",
+                "images.google.com.tn",
+                "images.google.com.tr",
+                "images.google.com.tw",
+                "images.google.com.ua",
+                "images.google.com.uy",
+                "images.google.com.vc",
+                "images.google.com.vn",
+                "images.google.cv",
+                "images.google.cz",
+                "images.google.de",
+                "images.google.dj",
+                "images.google.dk",
+                "images.google.dm",
+                "images.google.dz",
+                "images.google.ee",
+                "images.google.es",
+                "images.google.fi",
+                "images.google.fm",
+                "images.google.fr",
+                "images.google.ga",
+                "images.google.gd",
+                "images.google.ge",
+                "images.google.gf",
+                "images.google.gg",
+                "images.google.gl",
+                "images.google.gm",
+                "images.google.gp",
+                "images.google.gr",
+                "images.google.gy",
+                "images.google.hn",
+                "images.google.hr",
+                "images.google.ht",
+                "images.google.hu",
+                "images.google.ie",
+                "images.google.im",
+                "images.google.io",
+                "images.google.iq",
+                "images.google.is",
+                "images.google.it",
+                "images.google.it.ao",
+                "images.google.je",
+                "images.google.jo",
+                "images.google.kg",
+                "images.google.ki",
+                "images.google.kz",
+                "images.google.la",
+                "images.google.li",
+                "images.google.lk",
+                "images.google.lt",
+                "images.google.lu",
+                "images.google.lv",
+                "images.google.md",
+                "images.google.me",
+                "images.google.mg",
+                "images.google.mk",
+                "images.google.ml",
+                "images.google.mn",
+                "images.google.ms",
+                "images.google.mu",
+                "images.google.mv",
+                "images.google.mw",
+                "images.google.ne",
+                "images.google.nl",
+                "images.google.no",
+                "images.google.nr",
+                "images.google.nu",
+                "images.google.pl",
+                "images.google.pn",
+                "images.google.ps",
+                "images.google.pt",
+                "images.google.ro",
+                "images.google.rs",
+                "images.google.ru",
+                "images.google.rw",
+                "images.google.sc",
+                "images.google.se",
+                "images.google.sh",
+                "images.google.si",
+                "images.google.sk",
+                "images.google.sm",
+                "images.google.sn",
+                "images.google.so",
+                "images.google.st",
+                "images.google.td",
+                "images.google.tg",
+                "images.google.tk",
+                "images.google.tl",
+                "images.google.tm",
+                "images.google.to",
+                "images.google.tt",
+                "images.google.us",
+                "images.google.vg",
+                "images.google.vu",
+                "images.google.ws"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Google News": {
+            "domains": [
+                "news.google.ac",
+                "news.google.ad",
+                "news.google.ae",
+                "news.google.am",
+                "news.google.as",
+                "news.google.at",
+                "news.google.az",
+                "news.google.ba",
+                "news.google.be",
+                "news.google.bf",
+                "news.google.bg",
+                "news.google.bi",
+                "news.google.bj",
+                "news.google.bs",
+                "news.google.by",
+                "news.google.ca",
+                "news.google.cat",
+                "news.google.cc",
+                "news.google.cd",
+                "news.google.cf",
+                "news.google.cg",
+                "news.google.ch",
+                "news.google.ci",
+                "news.google.cl",
+                "news.google.cm",
+                "news.google.cn",
+                "news.google.co.bw",
+                "news.google.co.ck",
+                "news.google.co.cr",
+                "news.google.co.id",
+                "news.google.co.il",
+                "news.google.co.in",
+                "news.google.co.jp",
+                "news.google.co.ke",
+                "news.google.co.kr",
+                "news.google.co.ls",
+                "news.google.co.ma",
+                "news.google.co.mz",
+                "news.google.co.nz",
+                "news.google.co.th",
+                "news.google.co.tz",
+                "news.google.co.ug",
+                "news.google.co.uk",
+                "news.google.co.uz",
+                "news.google.co.ve",
+                "news.google.co.vi",
+                "news.google.co.za",
+                "news.google.co.zm",
+                "news.google.co.zw",
+                "news.google.com",
+                "news.google.com.af",
+                "news.google.com.ag",
+                "news.google.com.ai",
+                "news.google.com.ar",
+                "news.google.com.au",
+                "news.google.com.bd",
+                "news.google.com.bh",
+                "news.google.com.bn",
+                "news.google.com.bo",
+                "news.google.com.br",
+                "news.google.com.by",
+                "news.google.com.bz",
+                "news.google.com.co",
+                "news.google.com.cu",
+                "news.google.com.cy",
+                "news.google.com.do",
+                "news.google.com.ec",
+                "news.google.com.eg",
+                "news.google.com.et",
+                "news.google.com.fj",
+                "news.google.com.gh",
+                "news.google.com.gi",
+                "news.google.com.gt",
+                "news.google.com.hk",
+                "news.google.com.jm",
+                "news.google.com.kh",
+                "news.google.com.kw",
+                "news.google.com.lb",
+                "news.google.com.lc",
+                "news.google.com.ly",
+                "news.google.com.mt",
+                "news.google.com.mx",
+                "news.google.com.my",
+                "news.google.com.na",
+                "news.google.com.nf",
+                "news.google.com.ng",
+                "news.google.com.ni",
+                "news.google.com.np",
+                "news.google.com.om",
+                "news.google.com.pa",
+                "news.google.com.pe",
+                "news.google.com.ph",
+                "news.google.com.pk",
+                "news.google.com.pr",
+                "news.google.com.py",
+                "news.google.com.qa",
+                "news.google.com.sa",
+                "news.google.com.sb",
+                "news.google.com.sg",
+                "news.google.com.sl",
+                "news.google.com.sv",
+                "news.google.com.tj",
+                "news.google.com.tn",
+                "news.google.com.tr",
+                "news.google.com.tw",
+                "news.google.com.ua",
+                "news.google.com.uy",
+                "news.google.com.vc",
+                "news.google.com.vn",
+                "news.google.cv",
+                "news.google.cz",
+                "news.google.de",
+                "news.google.dj",
+                "news.google.dk",
+                "news.google.dm",
+                "news.google.dz",
+                "news.google.ee",
+                "news.google.es",
+                "news.google.fi",
+                "news.google.fm",
+                "news.google.fr",
+                "news.google.ga",
+                "news.google.gd",
+                "news.google.ge",
+                "news.google.gf",
+                "news.google.gg",
+                "news.google.gl",
+                "news.google.gm",
+                "news.google.gp",
+                "news.google.gr",
+                "news.google.gy",
+                "news.google.hn",
+                "news.google.hr",
+                "news.google.ht",
+                "news.google.hu",
+                "news.google.ie",
+                "news.google.im",
+                "news.google.io",
+                "news.google.iq",
+                "news.google.is",
+                "news.google.it",
+                "news.google.it.ao",
+                "news.google.je",
+                "news.google.jo",
+                "news.google.kg",
+                "news.google.ki",
+                "news.google.kz",
+                "news.google.la",
+                "news.google.li",
+                "news.google.lk",
+                "news.google.lt",
+                "news.google.lu",
+                "news.google.lv",
+                "news.google.md",
+                "news.google.me",
+                "news.google.mg",
+                "news.google.mk",
+                "news.google.ml",
+                "news.google.mn",
+                "news.google.ms",
+                "news.google.mu",
+                "news.google.mv",
+                "news.google.mw",
+                "news.google.ne",
+                "news.google.nl",
+                "news.google.no",
+                "news.google.nr",
+                "news.google.nu",
+                "news.google.pl",
+                "news.google.pn",
+                "news.google.ps",
+                "news.google.pt",
+                "news.google.ro",
+                "news.google.rs",
+                "news.google.ru",
+                "news.google.rw",
+                "news.google.sc",
+                "news.google.se",
+                "news.google.sh",
+                "news.google.si",
+                "news.google.sk",
+                "news.google.sm",
+                "news.google.sn",
+                "news.google.so",
+                "news.google.st",
+                "news.google.td",
+                "news.google.tg",
+                "news.google.tk",
+                "news.google.tl",
+                "news.google.tm",
+                "news.google.to",
+                "news.google.tt",
+                "news.google.us",
+                "news.google.vg",
+                "news.google.vu",
+                "news.google.ws"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Google Product Search": {
+            "domains": [
+                "google.ac/products",
+                "google.ad/products",
+                "google.ae/products",
+                "google.am/products",
+                "google.as/products",
+                "google.at/products",
+                "google.az/products",
+                "google.ba/products",
+                "google.be/products",
+                "google.bf/products",
+                "google.bg/products",
+                "google.bi/products",
+                "google.bj/products",
+                "google.bs/products",
+                "google.by/products",
+                "google.ca/products",
+                "google.cat/products",
+                "google.cc/products",
+                "google.cd/products",
+                "google.cf/products",
+                "google.cg/products",
+                "google.ch/products",
+                "google.ci/products",
+                "google.cl/products",
+                "google.cm/products",
+                "google.cn/products",
+                "google.co.bw/products",
+                "google.co.ck/products",
+                "google.co.cr/products",
+                "google.co.id/products",
+                "google.co.il/products",
+                "google.co.in/products",
+                "google.co.jp/products",
+                "google.co.ke/products",
+                "google.co.kr/products",
+                "google.co.ls/products",
+                "google.co.ma/products",
+                "google.co.mz/products",
+                "google.co.nz/products",
+                "google.co.th/products",
+                "google.co.tz/products",
+                "google.co.ug/products",
+                "google.co.uk/products",
+                "google.co.uz/products",
+                "google.co.ve/products",
+                "google.co.vi/products",
+                "google.co.za/products",
+                "google.co.zm/products",
+                "google.co.zw/products",
+                "google.com/products",
+                "google.com.af/products",
+                "google.com.ag/products",
+                "google.com.ai/products",
+                "google.com.ar/products",
+                "google.com.au/products",
+                "google.com.bd/products",
+                "google.com.bh/products",
+                "google.com.bn/products",
+                "google.com.bo/products",
+                "google.com.br/products",
+                "google.com.by/products",
+                "google.com.bz/products",
+                "google.com.co/products",
+                "google.com.cu/products",
+                "google.com.cy/products",
+                "google.com.do/products",
+                "google.com.ec/products",
+                "google.com.eg/products",
+                "google.com.et/products",
+                "google.com.fj/products",
+                "google.com.gh/products",
+                "google.com.gi/products",
+                "google.com.gt/products",
+                "google.com.hk/products",
+                "google.com.jm/products",
+                "google.com.kh/products",
+                "google.com.kw/products",
+                "google.com.lb/products",
+                "google.com.lc/products",
+                "google.com.ly/products",
+                "google.com.mt/products",
+                "google.com.mx/products",
+                "google.com.my/products",
+                "google.com.na/products",
+                "google.com.nf/products",
+                "google.com.ng/products",
+                "google.com.ni/products",
+                "google.com.np/products",
+                "google.com.om/products",
+                "google.com.pa/products",
+                "google.com.pe/products",
+                "google.com.ph/products",
+                "google.com.pk/products",
+                "google.com.pr/products",
+                "google.com.py/products",
+                "google.com.qa/products",
+                "google.com.sa/products",
+                "google.com.sb/products",
+                "google.com.sg/products",
+                "google.com.sl/products",
+                "google.com.sv/products",
+                "google.com.tj/products",
+                "google.com.tn/products",
+                "google.com.tr/products",
+                "google.com.tw/products",
+                "google.com.ua/products",
+                "google.com.uy/products",
+                "google.com.vc/products",
+                "google.com.vn/products",
+                "google.cv/products",
+                "google.cz/products",
+                "google.de/products",
+                "google.dj/products",
+                "google.dk/products",
+                "google.dm/products",
+                "google.dz/products",
+                "google.ee/products",
+                "google.es/products",
+                "google.fi/products",
+                "google.fm/products",
+                "google.fr/products",
+                "google.ga/products",
+                "google.gd/products",
+                "google.ge/products",
+                "google.gf/products",
+                "google.gg/products",
+                "google.gl/products",
+                "google.gm/products",
+                "google.gp/products",
+                "google.gr/products",
+                "google.gy/products",
+                "google.hn/products",
+                "google.hr/products",
+                "google.ht/products",
+                "google.hu/products",
+                "google.ie/products",
+                "google.im/products",
+                "google.io/products",
+                "google.iq/products",
+                "google.is/products",
+                "google.it/products",
+                "google.it.ao/products",
+                "google.je/products",
+                "google.jo/products",
+                "google.kg/products",
+                "google.ki/products",
+                "google.kz/products",
+                "google.la/products",
+                "google.li/products",
+                "google.lk/products",
+                "google.lt/products",
+                "google.lu/products",
+                "google.lv/products",
+                "google.md/products",
+                "google.me/products",
+                "google.mg/products",
+                "google.mk/products",
+                "google.ml/products",
+                "google.mn/products",
+                "google.ms/products",
+                "google.mu/products",
+                "google.mv/products",
+                "google.mw/products",
+                "google.ne/products",
+                "google.nl/products",
+                "google.no/products",
+                "google.nr/products",
+                "google.nu/products",
+                "google.pl/products",
+                "google.pn/products",
+                "google.ps/products",
+                "google.pt/products",
+                "google.ro/products",
+                "google.rs/products",
+                "google.ru/products",
+                "google.rw/products",
+                "google.sc/products",
+                "google.se/products",
+                "google.sh/products",
+                "google.si/products",
+                "google.sk/products",
+                "google.sm/products",
+                "google.sn/products",
+                "google.so/products",
+                "google.st/products",
+                "google.td/products",
+                "google.tg/products",
+                "google.tk/products",
+                "google.tl/products",
+                "google.tm/products",
+                "google.to/products",
+                "google.tt/products",
+                "google.us/products",
+                "google.vg/products",
+                "google.vu/products",
+                "google.ws/products",
+                "www.google.ac/products",
+                "www.google.ad/products",
+                "www.google.ae/products",
+                "www.google.am/products",
+                "www.google.as/products",
+                "www.google.at/products",
+                "www.google.az/products",
+                "www.google.ba/products",
+                "www.google.be/products",
+                "www.google.bf/products",
+                "www.google.bg/products",
+                "www.google.bi/products",
+                "www.google.bj/products",
+                "www.google.bs/products",
+                "www.google.by/products",
+                "www.google.ca/products",
+                "www.google.cat/products",
+                "www.google.cc/products",
+                "www.google.cd/products",
+                "www.google.cf/products",
+                "www.google.cg/products",
+                "www.google.ch/products",
+                "www.google.ci/products",
+                "www.google.cl/products",
+                "www.google.cm/products",
+                "www.google.cn/products",
+                "www.google.co.bw/products",
+                "www.google.co.ck/products",
+                "www.google.co.cr/products",
+                "www.google.co.id/products",
+                "www.google.co.il/products",
+                "www.google.co.in/products",
+                "www.google.co.jp/products",
+                "www.google.co.ke/products",
+                "www.google.co.kr/products",
+                "www.google.co.ls/products",
+                "www.google.co.ma/products",
+                "www.google.co.mz/products",
+                "www.google.co.nz/products",
+                "www.google.co.th/products",
+                "www.google.co.tz/products",
+                "www.google.co.ug/products",
+                "www.google.co.uk/products",
+                "www.google.co.uz/products",
+                "www.google.co.ve/products",
+                "www.google.co.vi/products",
+                "www.google.co.za/products",
+                "www.google.co.zm/products",
+                "www.google.co.zw/products",
+                "www.google.com/products",
+                "www.google.com.af/products",
+                "www.google.com.ag/products",
+                "www.google.com.ai/products",
+                "www.google.com.ar/products",
+                "www.google.com.au/products",
+                "www.google.com.bd/products",
+                "www.google.com.bh/products",
+                "www.google.com.bn/products",
+                "www.google.com.bo/products",
+                "www.google.com.br/products",
+                "www.google.com.by/products",
+                "www.google.com.bz/products",
+                "www.google.com.co/products",
+                "www.google.com.cu/products",
+                "www.google.com.cy/products",
+                "www.google.com.do/products",
+                "www.google.com.ec/products",
+                "www.google.com.eg/products",
+                "www.google.com.et/products",
+                "www.google.com.fj/products",
+                "www.google.com.gh/products",
+                "www.google.com.gi/products",
+                "www.google.com.gt/products",
+                "www.google.com.hk/products",
+                "www.google.com.jm/products",
+                "www.google.com.kh/products",
+                "www.google.com.kw/products",
+                "www.google.com.lb/products",
+                "www.google.com.lc/products",
+                "www.google.com.ly/products",
+                "www.google.com.mt/products",
+                "www.google.com.mx/products",
+                "www.google.com.my/products",
+                "www.google.com.na/products",
+                "www.google.com.nf/products",
+                "www.google.com.ng/products",
+                "www.google.com.ni/products",
+                "www.google.com.np/products",
+                "www.google.com.om/products",
+                "www.google.com.pa/products",
+                "www.google.com.pe/products",
+                "www.google.com.ph/products",
+                "www.google.com.pk/products",
+                "www.google.com.pr/products",
+                "www.google.com.py/products",
+                "www.google.com.qa/products",
+                "www.google.com.sa/products",
+                "www.google.com.sb/products",
+                "www.google.com.sg/products",
+                "www.google.com.sl/products",
+                "www.google.com.sv/products",
+                "www.google.com.tj/products",
+                "www.google.com.tn/products",
+                "www.google.com.tr/products",
+                "www.google.com.tw/products",
+                "www.google.com.ua/products",
+                "www.google.com.uy/products",
+                "www.google.com.vc/products",
+                "www.google.com.vn/products",
+                "www.google.cv/products",
+                "www.google.cz/products",
+                "www.google.de/products",
+                "www.google.dj/products",
+                "www.google.dk/products",
+                "www.google.dm/products",
+                "www.google.dz/products",
+                "www.google.ee/products",
+                "www.google.es/products",
+                "www.google.fi/products",
+                "www.google.fm/products",
+                "www.google.fr/products",
+                "www.google.ga/products",
+                "www.google.gd/products",
+                "www.google.ge/products",
+                "www.google.gf/products",
+                "www.google.gg/products",
+                "www.google.gl/products",
+                "www.google.gm/products",
+                "www.google.gp/products",
+                "www.google.gr/products",
+                "www.google.gy/products",
+                "www.google.hn/products",
+                "www.google.hr/products",
+                "www.google.ht/products",
+                "www.google.hu/products",
+                "www.google.ie/products",
+                "www.google.im/products",
+                "www.google.io/products",
+                "www.google.iq/products",
+                "www.google.is/products",
+                "www.google.it/products",
+                "www.google.it.ao/products",
+                "www.google.je/products",
+                "www.google.jo/products",
+                "www.google.kg/products",
+                "www.google.ki/products",
+                "www.google.kz/products",
+                "www.google.la/products",
+                "www.google.li/products",
+                "www.google.lk/products",
+                "www.google.lt/products",
+                "www.google.lu/products",
+                "www.google.lv/products",
+                "www.google.md/products",
+                "www.google.me/products",
+                "www.google.mg/products",
+                "www.google.mk/products",
+                "www.google.ml/products",
+                "www.google.mn/products",
+                "www.google.ms/products",
+                "www.google.mu/products",
+                "www.google.mv/products",
+                "www.google.mw/products",
+                "www.google.ne/products",
+                "www.google.nl/products",
+                "www.google.no/products",
+                "www.google.nr/products",
+                "www.google.nu/products",
+                "www.google.pl/products",
+                "www.google.pn/products",
+                "www.google.ps/products",
+                "www.google.pt/products",
+                "www.google.ro/products",
+                "www.google.rs/products",
+                "www.google.ru/products",
+                "www.google.rw/products",
+                "www.google.sc/products",
+                "www.google.se/products",
+                "www.google.sh/products",
+                "www.google.si/products",
+                "www.google.sk/products",
+                "www.google.sm/products",
+                "www.google.sn/products",
+                "www.google.so/products",
+                "www.google.st/products",
+                "www.google.td/products",
+                "www.google.tg/products",
+                "www.google.tk/products",
+                "www.google.tl/products",
+                "www.google.tm/products",
+                "www.google.to/products",
+                "www.google.tt/products",
+                "www.google.us/products",
+                "www.google.vg/products",
+                "www.google.vu/products",
+                "www.google.ws/products"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Google Video": {
+            "domains": [
+                "video.google.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Goyellow.de": {
+            "domains": [
+                "www.goyellow.de"
+            ],
+            "parameters": [
+                "MDN"
+            ]
+        },
+        "Gule Sider": {
+            "domains": [
+                "www.gulesider.no"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "HighBeam": {
+            "domains": [
+                "www.highbeam.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Hit-Parade": {
+            "domains": [
+                "req.-hit-parade.com",
+                "class.hit-parade.com",
+                "www.hit-parade.com"
+            ],
+            "parameters": [
+                "p7"
+            ]
+        },
+        "Holmes": {
+            "domains": [
+                "holmes.ge"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Hooseek.com": {
+            "domains": [
+                "www.hooseek.com"
+            ],
+            "parameters": [
+                "recherche"
+            ]
+        },
+        "Hotbot": {
+            "domains": [
+                "www.hotbot.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Haosou": {
+            "domains": [
+                "www.haosou.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "I-play": {
+            "domains": [
+                "start.iplay.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "ICQ": {
+            "domains": [
+                "www.icq.com",
+                "search.icq.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "IXquick": {
+            "domains": [
+                "ixquick.com",
+                "www.eu.ixquick.com",
+                "ixquick.de",
+                "www.ixquick.de",
+                "us.ixquick.com",
+                "s1.us.ixquick.com",
+                "s2.us.ixquick.com",
+                "s3.us.ixquick.com",
+                "s4.us.ixquick.com",
+                "s5.us.ixquick.com",
+                "eu.ixquick.com",
+                "s8-eu.ixquick.com",
+                "s1-eu.ixquick.de"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Icerocket": {
+            "domains": [
+                "blogs.icerocket.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Ilse": {
+            "domains": [
+                "www.ilse.nl"
+            ],
+            "parameters": [
+                "search_for"
+            ]
+        },
+        "InfoSpace": {
+            "domains": [
+                "infospace.com",
+                "dogpile.com",
+                "www.dogpile.com",
+                "metacrawler.com",
+                "webfetch.com",
+                "webcrawler.com",
+                "search.kiwee.com",
+                "isearch.babylon.com",
+                "start.facemoods.com",
+                "search.magnetic.com",
+                "search.searchcompletion.com",
+                "clusty.com"
+            ],
+            "parameters": [
+                "q",
+                "s"
+            ]
+        },
+        "Inbox": {
+            "domains": [
+                "inbox.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Info": {
+            "domains": [
+                "info.com"
+            ],
+            "parameters": [
+                "qkw"
+            ]
+        },
+        "Interia": {
+            "domains": [
+                "www.google.interia.pl"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Jungle Key": {
+            "domains": [
+                "junglekey.com",
+                "junglekey.fr"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Jungle Spider": {
+            "domains": [
+                "www.jungle-spider.de"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Jyxo": {
+            "domains": [
+                "jyxo.1188.cz"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Kataweb": {
+            "domains": [
+                "www.kataweb.it"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Kvasir": {
+            "domains": [
+                "www.kvasir.no"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "La Toile Du Quebec Via Google": {
+            "domains": [
+                "www.toile.com",
+                "web.toile.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Latne": {
+            "domains": [
+                "www.latne.lv"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Lo.st": {
+            "domains": [
+                "lo.st"
+            ],
+            "parameters": [
+                "x_query"
+            ]
+        },
+        "Looksmart": {
+            "domains": [
+                "www.looksmart.com"
+            ],
+            "parameters": [
+                "key"
+            ]
+        },
+        "Lycos": {
+            "domains": [
+                "search.lycos.com",
+                "www.lycos.com",
+                "lycos.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Mail.ru": {
+            "domains": [
+                "go.mail.ru"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Mamma": {
+            "domains": [
+                "www.mamma.com",
+                "mamma75.mamma.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Meinestadt": {
+            "domains": [
+                "www.meinestadt.de"
+            ],
+            "parameters": [
+                "words"
+            ]
+        },
+        "Meta": {
+            "domains": [
+                "meta.ua"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "MetaCrawler.de": {
+            "domains": [
+                "s1.metacrawler.de",
+                "s2.metacrawler.de",
+                "s3.metacrawler.de"
+            ],
+            "parameters": [
+                "qry"
+            ]
+        },
+        "Metager": {
+            "domains": [
+                "meta.rrzn.uni-hannover.de",
+                "www.metager.de"
+            ],
+            "parameters": [
+                "eingabe"
+            ]
+        },
+        "Metager2": {
+            "domains": [
+                "metager2.de"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Mister Wong": {
+            "domains": [
+                "www.mister-wong.com",
+                "www.mister-wong.de"
+            ],
+            "parameters": [
+                "Keywords"
+            ]
+        },
+        "Monstercrawler": {
+            "domains": [
+                "www.monstercrawler.com"
+            ],
+            "parameters": [
+                "qry"
+            ]
+        },
+        "Mozbot": {
+            "domains": [
+                "www.mozbot.fr",
+                "www.mozbot.co.uk",
+                "www.mozbot.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "MySearch": {
+            "domains": [
+                "www.mysearch.com",
+                "ms114.mysearch.com",
+                "ms146.mysearch.com",
+                "kf.mysearch.myway.com",
+                "ki.mysearch.myway.com",
+                "search.myway.com",
+                "search.mywebsearch.com"
+            ],
+            "parameters": [
+                "searchfor",
+                "searchFor"
+            ]
+        },
+        "Najdi": {
+            "domains": [
+                "www.najdi.si"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Nate": {
+            "domains": [
+                "search.nate.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Naver": {
+            "domains": [
+                "search.naver.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Needtofind": {
+            "domains": [
+                "ko.search.need2find.com"
+            ],
+            "parameters": [
+                "searchfor"
+            ]
+        },
+        "Neti": {
+            "domains": [
+                "www.neti.ee"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Nifty": {
+            "domains": [
+                "search.nifty.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Nigma": {
+            "domains": [
+                "nigma.ru"
+            ],
+            "parameters": [
+                "s"
+            ]
+        },
+        "Onet": {
+            "domains": [
+                "szukaj.onet.pl"
+            ],
+            "parameters": [
+                "qt"
+            ]
+        },
+        "Online.no": {
+            "domains": [
+                "online.no"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Opplysningen 1881": {
+            "domains": [
+                "www.1881.no"
+            ],
+            "parameters": [
+                "Query"
+            ]
+        },
+        "Orange": {
+            "domains": [
+                "busca.orange.es",
+                "search.orange.co.uk"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Paperball": {
+            "domains": [
+                "www.paperball.de"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "PeoplePC": {
+            "domains": [
+                "search.peoplepc.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Picsearch": {
+            "domains": [
+                "www.picsearch.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Plazoo": {
+            "domains": [
+                "www.plazoo.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Poisk.ru": {
+            "domains": [
+                "www.poisk.ru"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "PriceRunner": {
+            "domains": [
+                "www.pricerunner.co.uk"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Qualigo": {
+            "domains": [
+                "www.qualigo.at",
+                "www.qualigo.ch",
+                "www.qualigo.de",
+                "www.qualigo.nl"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "RPMFind": {
+            "domains": [
+                "rpmfind.net",
+                "fr2.rpmfind.net"
+            ],
+            "parameters": [
+                "rpmfind.net",
+                "fr2.rpmfind.net"
+            ]
+        },
+        "Rakuten": {
+            "domains": [
+                "websearch.rakuten.co.jp"
+            ],
+            "parameters": [
+                "qt"
+            ]
+        },
+        "Rambler": {
+            "domains": [
+                "nova.rambler.ru"
+            ],
+            "parameters": [
+                "query",
+                "words"
+            ]
+        },
+        "Road Runner Search": {
+            "domains": [
+                "search.rr.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Sapo": {
+            "domains": [
+                "pesquisa.sapo.pt"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Search.ch": {
+            "domains": [
+                "www.search.ch"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Search.com": {
+            "domains": [
+                "www.search.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "SearchCanvas": {
+            "domains": [
+                "www.searchcanvas.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Searchalot": {
+            "domains": [
+                "searchalot.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "SearchLock": {
+            "domains": [
+                "searchlock.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Searchy": {
+            "domains": [
+                "www.searchy.co.uk"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Seznam": {
+            "domains": [
+                "search.seznam.cz"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Sharelook": {
+            "domains": [
+                "www.sharelook.fr"
+            ],
+            "parameters": [
+                "keyword"
+            ]
+        },
+        "Skynet": {
+            "domains": [
+                "www.skynet.be"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Softonic": {
+            "domains": [
+                "search.softonic.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Sogou": {
+            "domains": [
+                "www.sougou.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Startpagina": {
+            "domains": [
+                "startgoogle.startpagina.nl"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Startsiden": {
+            "domains": [
+                "www.startsiden.no"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Suchmaschine.com": {
+            "domains": [
+                "www.suchmaschine.com"
+            ],
+            "parameters": [
+                "suchstr"
+            ]
+        },
+        "Suchnase": {
+            "domains": [
+                "www.suchnase.de"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Superpages": {
+            "domains": [
+                "superpages.com"
+            ],
+            "parameters": [
+                "C"
+            ]
+        },
+        "T-Online": {
+            "domains": [
+                "suche.t-online.de",
+                "brisbane.t-online.de",
+                "navigationshilfe.t-online.de"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "TalkTalk": {
+            "domains": [
+                "www.talktalk.co.uk"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Technorati": {
+            "domains": [
+                "technorati.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Teoma": {
+            "domains": [
+                "www.teoma.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Terra": {
+            "domains": [
+                "buscador.terra.es",
+                "buscador.terra.cl",
+                "buscador.terra.com.br"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Tiscali": {
+            "domains": [
+                "search.tiscali.it",
+                "search-dyn.tiscali.it",
+                "hledani.tiscali.cz"
+            ],
+            "parameters": [
+                "q",
+                "key"
+            ]
+        },
+        "Tixuma": {
+            "domains": [
+                "www.tixuma.de"
+            ],
+            "parameters": [
+                "sc"
+            ]
+        },
+        "Toolbarhome": {
+            "domains": [
+                "www.toolbarhome.com",
+                "vshare.toolbarhome.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Trouvez.com": {
+            "domains": [
+                "www.trouvez.com"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "TrovaRapido": {
+            "domains": [
+                "www.trovarapido.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Trusted-Search": {
+            "domains": [
+                "www.trusted--search.com"
+            ],
+            "parameters": [
+                "w"
+            ]
+        },
+        "Twingly": {
+            "domains": [
+                "www.twingly.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "URL.ORGanizier": {
+            "domains": [
+                "www.url.org"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Vinden": {
+            "domains": [
+                "www.vinden.nl"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Vindex": {
+            "domains": [
+                "www.vindex.nl",
+                "search.vindex.nl"
+            ],
+            "parameters": [
+                "search_for"
+            ]
+        },
+        "Virgilio": {
+            "domains": [
+                "ricerca.virgilio.it",
+                "ricercaimmagini.virgilio.it",
+                "ricercavideo.virgilio.it",
+                "ricercanews.virgilio.it",
+                "mobile.virgilio.it"
+            ],
+            "parameters": [
+                "qs"
+            ]
+        },
+        "Voila": {
+            "domains": [
+                "search.ke.voila.fr",
+                "www.lemoteur.fr"
+            ],
+            "parameters": [
+                "rdata"
+            ]
+        },
+        "Volny": {
+            "domains": [
+                "web.volny.cz"
+            ],
+            "parameters": [
+                "search"
+            ]
+        },
+        "WWW": {
+            "domains": [
+                "search.www.ee"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Walhello": {
+            "domains": [
+                "www.walhello.info",
+                "www.walhello.com",
+                "www.walhello.de",
+                "www.walhello.nl"
+            ],
+            "parameters": [
+                "key"
+            ]
+        },
+        "Web.de": {
+            "domains": [
+                "suche.web.de"
+            ],
+            "parameters": [
+                "su"
+            ]
+        },
+        "Web.nl": {
+            "domains": [
+                "www.web.nl"
+            ],
+            "parameters": [
+                "zoekwoord"
+            ]
+        },
+        "WebSearch": {
+            "domains": [
+                "www.websearch.com"
+            ],
+            "parameters": [
+                "qkw",
+                "q"
+            ]
+        },
+        "Weborama": {
+            "domains": [
+                "www.weborama.com"
+            ],
+            "parameters": [
+                "QUERY"
+            ]
+        },
+        "Winamp": {
+            "domains": [
+                "search.winamp.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Wirtualna Polska": {
+            "domains": [
+                "szukaj.wp.pl"
+            ],
+            "parameters": [
+                "szukaj"
+            ]
+        },
+        "Witch": {
+            "domains": [
+                "www.witch.de"
+            ],
+            "parameters": [
+                "search"
+            ]
+        },
+        "X-recherche": {
+            "domains": [
+                "www.x-recherche.com"
+            ],
+            "parameters": [
+                "MOTS"
+            ]
+        },
+        "Yahoo!": {
+            "domains": [
+                "search.yahoo.com",
+                "yahoo.com",
+                "ar.search.yahoo.com",
+                "ar.yahoo.com",
+                "au.search.yahoo.com",
+                "au.yahoo.com",
+                "br.search.yahoo.com",
+                "br.yahoo.com",
+                "cade.searchde.yahoo.com",
+                "cade.yahoo.com",
+                "chinese.searchinese.yahoo.com",
+                "chinese.yahoo.com",
+                "cn.search.yahoo.com",
+                "cn.yahoo.com",
+                "de.search.yahoo.com",
+                "de.yahoo.com",
+                "dk.search.yahoo.com",
+                "dk.yahoo.com",
+                "es.search.yahoo.com",
+                "es.yahoo.com",
+                "espanol.searchpanol.yahoo.com",
+                "espanol.searchpanol.yahoo.com",
+                "espanol.yahoo.com",
+                "espanol.yahoo.com",
+                "fr.search.yahoo.com",
+                "fr.yahoo.com",
+                "ie.search.yahoo.com",
+                "ie.yahoo.com",
+                "it.search.yahoo.com",
+                "it.yahoo.com",
+                "kr.search.yahoo.com",
+                "kr.yahoo.com",
+                "mx.search.yahoo.com",
+                "mx.yahoo.com",
+                "no.search.yahoo.com",
+                "no.yahoo.com",
+                "nz.search.yahoo.com",
+                "nz.yahoo.com",
+                "one.cn.yahoo.com",
+                "one.searchn.yahoo.com",
+                "qc.search.yahoo.com",
+                "qc.search.yahoo.com",
+                "qc.search.yahoo.com",
+                "qc.yahoo.com",
+                "qc.yahoo.com",
+                "se.search.yahoo.com",
+                "se.search.yahoo.com",
+                "se.yahoo.com",
+                "search.searcharch.yahoo.com",
+                "search.yahoo.com",
+                "uk.search.yahoo.com",
+                "uk.yahoo.com",
+                "www.yahoo.co.jp",
+                "search.yahoo.co.jp",
+                "www.cercato.it",
+                "search.offerbox.com",
+                "ys.mirostart.com",
+                "image.search.yahoo.co.jp",
+                "m.chiebukuro.yahoo.co.jp",
+                "detail.chiebukuro.yahoo.co.jp"
+            ],
+            "parameters": [
+                "p",
+                "q"
+            ]
+        },
+        "Yahoo! Images": {
+            "domains": [
+                "image.yahoo.cn",
+                "images.search.yahoo.com"
+            ],
+            "parameters": [
+                "p",
+                "q"
+            ]
+        },
+        "Yam": {
+            "domains": [
+                "search.yam.com"
+            ],
+            "parameters": [
+                "k"
+            ]
+        },
+        "Yandex": {
+            "domains": [
+                "yandex.ru",
+                "yandex.ua",
+                "yandex.com",
+                "www.yandex.ru",
+                "www.yandex.ua",
+                "www.yandex.com"
+            ],
+            "parameters": [
+                "text"
+            ]
+        },
+        "Yandex Images": {
+            "domains": [
+                "images.yandex.ru",
+                "images.yandex.ua",
+                "images.yandex.com"
+            ],
+            "parameters": [
+                "text"
+            ]
+        },
+        "Yasni": {
+            "domains": [
+                "www.yasni.de",
+                "www.yasni.com",
+                "www.yasni.co.uk",
+                "www.yasni.ch",
+                "www.yasni.at"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "Yatedo": {
+            "domains": [
+                "www.yatedo.com",
+                "www.yatedo.fr"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Yellowpages": {
+            "domains": [
+                "www.yellowpages.com",
+                "www.yellowpages.com.au",
+                "www.yellowpages.ca"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Yippy": {
+            "domains": [
+                "search.yippy.com"
+            ],
+            "parameters": [
+                "q",
+                "query"
+            ]
+        },
+        "YouGoo": {
+            "domains": [
+                "www.yougoo.fr"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Zapmeta": {
+            "domains": [
+                "www.zapmeta.com",
+                "www.zapmeta.nl",
+                "www.zapmeta.de",
+                "uk.zapmeta.com"
+            ],
+            "parameters": [
+                "q",
+                "query"
+            ]
+        },
+        "Zhongsou": {
+            "domains": [
+                "p.zhongsou.com"
+            ],
+            "parameters": [
+                "w"
+            ]
+        },
+        "Zoek": {
+            "domains": [
+                "www3.zoek.nl"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Zoeken": {
+            "domains": [
+                "www.zoeken.nl"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "Zoohoo": {
+            "domains": [
+                "zoohoo.cz"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "all.by": {
+            "domains": [
+                "all.by"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "arama": {
+            "domains": [
+                "arama.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "blekko": {
+            "domains": [
+                "blekko.com"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "canoe.ca": {
+            "domains": [
+                "web.canoe.ca"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "dmoz": {
+            "domains": [
+                "dmoz.org",
+                "editors.dmoz.org"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "earthlink": {
+            "domains": [
+                "search.earthlink.net"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "eo": {
+            "domains": [
+                "eo.st"
+            ],
+            "parameters": [
+                "x_query"
+            ]
+        },
+        "goo": {
+            "domains": [
+                "search.goo.ne.jp",
+                "ocnsearch.goo.ne.jp"
+            ],
+            "parameters": [
+                "MT"
+            ]
+        },
+        "maailm": {
+            "domains": [
+                "www.maailm.com"
+            ],
+            "parameters": [
+                "tekst"
+            ]
+        },
+        "qip": {
+            "domains": [
+                "search.qip.ru"
+            ],
+            "parameters": [
+                "query"
+            ]
+        },
+        "soso.com": {
+            "domains": [
+                "www.soso.com"
+            ],
+            "parameters": [
+                "w"
+            ]
+        },
+        "suche.info": {
+            "domains": [
+                "suche.info"
+            ],
+            "parameters": [
+                "q"
+            ]
+        },
+        "uol.com.br": {
+            "domains": [
+                "busca.uol.com.br"
+            ],
+            "parameters": [
+                "q"
+            ]
+        }
+    },
+    "social": {
+        "Badoo": {
+            "domains": [
+                "badoo.com"
+            ]
+        },
+        "Bebo": {
+            "domains": [
+                "bebo.com"
+            ]
+        },
+        "BlackPlanet": {
+            "domains": [
+                "blackplanet.com"
+            ]
+        },
+        "Bloglovin'": {
+            "domains": [
+                "bloglovin.com"
+            ]
+        },
+        "Buzznet": {
+            "domains": [
+                "wayn.com"
+            ]
+        },
+        "Classmates": {
+            "domains": [
+                "classmates.com"
+            ]
+        },
+        "Cyworld": {
+            "domains": [
+                "global.cyworld.com"
+            ]
+        },
+        "DeviantArt":{
+            "domains": [
+                "deviantart.com"
+            ]
+        },
+        "Douban": {
+            "domains": [
+                "douban.com"
+            ]
+        },
+        "Facebook": {
+            "domains": [
+                "facebook.com",
+                "fb.me"
+            ]
+        },
+        "Flickr": {
+            "domains": [
+                "flickr.com"
+            ]
+        },
+        "Flixster": {
+            "domains": [
+                "flixster.com"
+            ]
+        },
+        "Flipboard": {
+            "domains": [
+                "flipboard.com"
+            ]
+        },
+        "Fotolog": {
+            "domains": [
+                "fotolog.com"
+            ]
+        },
+        "Foursquare": {
+            "domains": [
+                "foursquare.com"
+            ]
+        },
+        "Friends Reunited": {
+            "domains": [
+                "friendsreunited.com"
+            ]
+        },
+        "Friendster": {
+            "domains": [
+                "friendster.com"
+            ]
+        },
+        "Gaia Online": {
+            "domains": [
+                "gaiaonline.com"
+            ]
+        },
+        "Geni": {
+            "domains": [
+                "geni.com"
+            ]
+        },
+        "GitHub": {
+            "domains": [
+                "github.com"
+            ]
+        },
+        "Google+": {
+            "domains": [
+                "url.google.com",
+                "plus.google.com",
+                "plus.url.google.com"
+            ]
+        },
+        "Habbo": {
+            "domains": [
+                "habbo.com"
+            ]
+        },
+        "Hacker News": {
+            "domains": [
+                "news.ycombinator.com"
+            ]
+        },
+        "Hyves": {
+            "domains": [
+                "hyves.nl"
+            ]
+        },
+        "Iconosquare": {
+            "domains": [
+                "iconosquare.com"
+            ]
+        },
+        "Identi.ca": {
+            "domains": [
+                "identi.ca"
+            ]
+        },
+        "Imgur": {
+            "domains": [
+                "imgur.com"
+            ]
+        },
+        "Instagram": {
+            "domains": [
+                "instagram.com"
+            ]
+        },
+        "Last.fm": {
+            "domains": [
+                "lastfm.ru"
+            ]
+        },
+        "LinkedIn": {
+            "domains": [
+                "linkedin.com",
+                "lnkd.in"
+            ]
+        },
+        "LiveJournal": {
+            "domains": [
+                "livejournal.ru"
+            ]
+        },
+        "Mail.ru": {
+            "domains": [
+                "my.mail.ru"
+            ]
+        },
+        "Medium": {
+            "domains": [
+                "medium.com"
+            ]
+        },
+        "Meetup": {
+            "domains": [
+                "meetup.com"
+            ]
+        },
+        "Messenger": {
+            "domains": [
+                "messenger.com"
+            ]
+        },
+        "Mixi": {
+            "domains": [
+                "mixi.jp"
+            ]
+        },
+        "MoiKrug.ru": {
+            "domains": [
+                "moikrug.ru"
+            ]
+        },
+        "Multiply": {
+            "domains": [
+                "multiply.com"
+            ]
+        },
+        "MyHeritage": {
+            "domains": [
+                "myheritage.com"
+            ]
+        },
+        "MyLife": {
+            "domains": [
+                "mylife.ru"
+            ]
+        },
+        "Myspace": {
+            "domains": [
+                "myspace.com"
+            ]
+        },
+        "Nasza-klasa.pl": {
+            "domains": [
+                "nk.pl"
+            ]
+        },
+        "Netlog": {
+            "domains": [
+                "netlog.com"
+            ]
+        },
+        "Odnoklassniki": {
+            "domains": [
+                "odnoklassniki.ru"
+            ]
+        },
+        "Orkut": {
+            "domains": [
+                "orkut.com"
+            ]
+        },
+        "Paper.li": {
+            "domains": [
+                "paper.li"
+            ]
+        },
+        "Pinterest": {
+            "domains": [
+                "pinterest.com"
+            ]
+        },
+        "Plaxo": {
+            "domains": [
+                "plaxo.com"
+            ]
+        },
+        "Polyvore": {
+            "domains": [
+                "polyvore.com"
+            ]
+        },
+        "Qzone": {
+            "domains": [
+                "qzone.qq.com"
+            ]
+        },
+        "Reddit": {
+            "domains": [
+                "reddit.com"
+            ]
+        },
+        "Renren": {
+            "domains": [
+                "renren.com"
+            ]
+        },
+        "Skyrock": {
+            "domains": [
+                "skyrock.com"
+            ]
+        },
+        "Sonico.com": {
+            "domains": [
+                "sonico.com"
+            ]
+        },
+        "SourceForge": {
+            "domains": [
+                "sourceforge.net"
+            ]
+        },
+        "StackOverflow": {
+            "domains": [
+                "stackoverflow.com"
+            ]
+        },
+        "StudiVZ": {
+            "domains": [
+                "studivz.net"
+            ]
+        },
+        "StumbleUpon": {
+            "domains": [
+                "stumbleupon.com"
+            ]
+        },
+        "Tagged": {
+            "domains": [
+                "login.tagged.com"
+            ]
+        },
+        "Taringa!": {
+            "domains": [
+                "taringa.net"
+            ]
+        },
+        "Tuenti": {
+            "domains": [
+                "tuenti.com"
+            ]
+        },
+        "Tumblr": {
+            "domains": [
+                "tumblr.com",
+                "umblr.com"
+            ]
+        },
+        "Twitter": {
+            "domains": [
+                "twitter.com",
+                "t.co"
+            ]
+        },
+        "Twitch":{
+          "domains": [
+                "twitch.tv"
+          ]
+        },
+        "Viadeo": {
+            "domains": [
+                "viadeo.com"
+            ]
+        },
+        "Vimeo": {
+            "domains": [
+                "vimeo.com"
+            ]
+        },
+        "Vkontakte": {
+            "domains": [
+                "vk.com",
+                "vkontakte.ru"
+            ]
+        },
+        "Wanelo": {
+          "domains": [
+             "wanelo.com"
+            ]
+        },
+        "WAYN": {
+            "domains": [
+                "wayn.com"
+            ]
+        },
+        "WeeWorld": {
+            "domains": [
+                "weeworld.com"
+            ]
+        },
+        "Weibo": {
+            "domains": [
+                "weibo.com",
+                "t.cn"
+            ]
+        },
+        "Windows Live Spaces": {
+            "domains": [
+                "login.live.com"
+            ]
+        },
+        "XING": {
+            "domains": [
+                "xing.com"
+            ]
+        },
+        "Xanga": {
+            "domains": [
+                "xanga.com"
+            ]
+        },
+        "hi5": {
+            "domains": [
+                "hi5.com"
+            ]
+        },
+        "myYearbook": {
+            "domains": [
+                "myyearbook.com"
+            ]
+        },
+        "vKruguDruzei.ru": {
+            "domains": [
+                "vkrugudruzei.ru"
+            ]
+        },
+        "YouTube": {
+            "domains": [
+                "youtube.com",
+                "youtu.be"
+            ]
+        }
+    },
+    "unknown": {
+        "Google": {
+            "domains": [
+                "support.google.com",
+                "developers.google.com",
+                "maps.google.com",
+                "accounts.google.com",
+                "drive.google.com",
+                "sites.google.com",
+                "groups.google.com",
+                "groups.google.co.uk",
+                "news.google.co.uk"
+            ]
+        },
+        "Yahoo!": {
+            "domains": [
+                "finance.yahoo.com",
+                "news.yahoo.com",
+                "eurosport.yahoo.com",
+                "sports.yahoo.com",
+                "astrology.yahoo.com",
+                "travel.yahoo.com",
+                "answers.yahoo.com",
+                "screen.yahoo.com",
+                "weather.yahoo.com",
+                "messenger.yahoo.com",
+                "games.yahoo.com",
+                "shopping.yahoo.net",
+                "movies.yahoo.com",
+                "cars.yahoo.com",
+                "lifestyle.yahoo.com",
+                "omg.yahoo.com",
+                "match.yahoo.net"
+            ]
+        }
+    }
+}
+`

+ 76 - 0
vendor/github.com/Shopify/goreferrer/referrer.go

@@ -0,0 +1,76 @@
+package goreferrer
+
+type ReferrerType int
+
+const (
+	Invalid ReferrerType = iota
+	Indirect
+	Direct
+	Email
+	Search
+	Social
+)
+
+func (r ReferrerType) String() string {
+	switch r {
+	default:
+		return "invalid"
+	case Indirect:
+		return "indirect"
+	case Direct:
+		return "direct"
+	case Email:
+		return "email"
+	case Search:
+		return "search"
+	case Social:
+		return "social"
+	}
+}
+
+type Referrer struct {
+	Type       ReferrerType
+	Label      string
+	URL        string
+	Subdomain  string
+	Domain     string
+	Tld        string
+	Path       string
+	Query      string
+	GoogleType GoogleSearchType
+}
+
+func (r *Referrer) RegisteredDomain() string {
+	if r.Domain != "" && r.Tld != "" {
+		return r.Domain + "." + r.Tld
+	}
+
+	return ""
+}
+
+func (r *Referrer) Host() string {
+	if r.Subdomain != "" {
+		return r.Subdomain + "." + r.RegisteredDomain()
+	}
+
+	return r.RegisteredDomain()
+}
+
+type GoogleSearchType int
+
+const (
+	NotGoogleSearch GoogleSearchType = iota
+	OrganicSearch
+	Adwords
+)
+
+func (g GoogleSearchType) String() string {
+	switch g {
+	default:
+		return "not google search"
+	case OrganicSearch:
+		return "organic google search"
+	case Adwords:
+		return "google adwords referrer"
+	}
+}

+ 53 - 0
vendor/github.com/Shopify/goreferrer/rich_url.go

@@ -0,0 +1,53 @@
+package goreferrer
+
+import (
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/publicsuffix"
+)
+
+type richUrl struct {
+	*url.URL
+	Subdomain string
+	Domain    string
+	Tld       string
+}
+
+func parseRichUrl(s string) (*richUrl, bool) {
+	u, err := url.Parse(s)
+	if err != nil {
+		return nil, false
+	}
+
+	// assume a default scheme of http://
+	if u.Scheme == "" {
+		s = "http://" + s
+		u, err = url.Parse(s)
+		if err != nil {
+			return nil, false
+		}
+	}
+
+	tld, _ := publicsuffix.PublicSuffix(u.Host)
+	if tld == "" || len(u.Host)-len(tld) < 2 {
+		return nil, false
+	}
+
+	hostWithoutTld := u.Host[:len(u.Host)-len(tld)-1]
+	lastDot := strings.LastIndex(hostWithoutTld, ".")
+	if lastDot == -1 {
+		return &richUrl{URL: u, Domain: hostWithoutTld, Tld: tld}, true
+	}
+
+	return &richUrl{
+		URL:       u,
+		Subdomain: hostWithoutTld[:lastDot],
+		Domain:    hostWithoutTld[lastDot+1:],
+		Tld:       tld,
+	}, true
+}
+
+func (u *richUrl) RegisteredDomain() string {
+	return u.Domain + "." + u.Tld
+}

+ 206 - 0
vendor/github.com/Shopify/goreferrer/rules.go

@@ -0,0 +1,206 @@
+package goreferrer
+
+import (
+	"encoding/json"
+	"io"
+	"net/url"
+	"path"
+	"strings"
+)
+
+type DomainRule struct {
+	Type       ReferrerType
+	Label      string
+	Domain     string
+	Parameters []string
+}
+
+type UaRule struct {
+	Url    string
+	Domain string
+	Tld    string
+}
+
+func (u UaRule) RegisteredDomain() string {
+	if u.Domain == "" || u.Tld == "" {
+		return ""
+	}
+
+	return u.Domain + "." + u.Tld
+}
+
+type RuleSet struct {
+	DomainRules map[string]DomainRule
+	UaRules     map[string]UaRule
+}
+
+func NewRuleSet() RuleSet {
+	return RuleSet{
+		DomainRules: make(map[string]DomainRule),
+		UaRules:     make(map[string]UaRule),
+	}
+}
+
+func (r RuleSet) Merge(other RuleSet) {
+	for k, v := range other.DomainRules {
+		r.DomainRules[k] = v
+	}
+	for k, v := range other.UaRules {
+		r.UaRules[k] = v
+	}
+}
+
+func (r RuleSet) Parse(URL string) Referrer {
+	return r.ParseWith(URL, nil, "")
+}
+
+func (r RuleSet) ParseWith(URL string, domains []string, agent string) Referrer {
+	ref := Referrer{
+		Type: Indirect,
+		URL:  strings.Trim(URL, " \t\r\n"),
+	}
+
+	uaRule := r.getUaRule(agent)
+	if ref.URL == "" {
+		ref.URL = uaRule.Url
+	}
+	if ref.URL == "" {
+		ref.Type = Direct
+		return ref
+	}
+
+	u, ok := parseRichUrl(ref.URL)
+	if !ok {
+		ref.Type = Invalid
+		return ref
+	}
+
+	ref.Subdomain = u.Subdomain
+	ref.Domain = u.Domain
+	ref.Tld = u.Tld
+	ref.Path = cleanPath(u.Path)
+
+	if ref.Domain == "" {
+		ref.Domain = uaRule.Domain
+	}
+	if ref.Tld == "" {
+		ref.Tld = uaRule.Tld
+	}
+
+	for _, domain := range domains {
+		if u.Host == domain {
+			ref.Type = Direct
+			return ref
+		}
+	}
+
+	variations := []string{
+		path.Join(u.Host, u.Path),
+		path.Join(u.RegisteredDomain(), u.Path),
+		u.Host,
+		u.RegisteredDomain(),
+	}
+
+	for _, host := range variations {
+		domainRule, exists := r.DomainRules[host]
+		if !exists {
+			continue
+		}
+
+		query := getQuery(u.Query(), domainRule.Parameters)
+		if query == "" {
+			values, err := url.ParseQuery(u.Fragment)
+			if err == nil {
+				query = getQuery(values, domainRule.Parameters)
+			}
+		}
+
+		ref.Type = domainRule.Type
+		ref.Label = domainRule.Label
+		ref.Query = query
+		ref.GoogleType = googleSearchType(ref)
+		return ref
+	}
+
+	ref.Label = strings.Title(u.Domain)
+	return ref
+}
+
+func (r *RuleSet) getUaRule(agent string) UaRule {
+	for pattern, rule := range r.UaRules {
+		if strings.Contains(agent, pattern) {
+			return rule
+		}
+	}
+
+	return UaRule{}
+}
+
+func getQuery(values url.Values, params []string) string {
+	for _, param := range params {
+		query := values.Get(param)
+		if query != "" {
+			return query
+		}
+	}
+
+	return ""
+}
+
+func googleSearchType(ref Referrer) GoogleSearchType {
+	if ref.Type != Search || !strings.Contains(ref.Label, "Google") {
+		return NotGoogleSearch
+	}
+
+	if strings.HasPrefix(ref.Path, "/aclk") || strings.HasPrefix(ref.Path, "/pagead/aclk") {
+		return Adwords
+	}
+
+	return OrganicSearch
+}
+
+func cleanPath(path string) string {
+	if i := strings.Index(path, ";"); i != -1 {
+		return path[:i]
+	}
+	return path
+}
+
+type jsonRule struct {
+	Domains    []string
+	Parameters []string
+}
+
+type jsonRules struct {
+	Email  map[string]jsonRule
+	Search map[string]jsonRule
+	Social map[string]jsonRule
+}
+
+func LoadJsonDomainRules(reader io.Reader) (map[string]DomainRule, error) {
+	var decoded jsonRules
+	if err := json.NewDecoder(reader).Decode(&decoded); err != nil {
+		return nil, err
+	}
+
+	rules := NewRuleSet()
+	rules.Merge(extractRules(decoded.Email, Email))
+	rules.Merge(extractRules(decoded.Search, Search))
+	rules.Merge(extractRules(decoded.Social, Social))
+	return rules.DomainRules, nil
+}
+
+func extractRules(ruleMap map[string]jsonRule, Type ReferrerType) RuleSet {
+	rules := NewRuleSet()
+	for label, jsonRule := range ruleMap {
+		for _, domain := range jsonRule.Domains {
+			rules.DomainRules[domain] = DomainRule{
+				Type:       Type,
+				Label:      label,
+				Domain:     domain,
+				Parameters: jsonRule.Parameters,
+			}
+		}
+	}
+	return rules
+}

+ 22 - 0
vendor/github.com/aymerick/raymond/LICENSE

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Aymerick JEHANNE
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 785 - 0
vendor/github.com/aymerick/raymond/ast/node.go

@@ -0,0 +1,785 @@
+// Package ast provides structures to represent a handlebars Abstract Syntax Tree, and a Visitor interface to visit that tree.
+package ast
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// References:
+//   - https://github.com/wycats/handlebars.js/blob/master/lib/handlebars/compiler/ast.js
+//   - https://github.com/wycats/handlebars.js/blob/master/docs/compiler-api.md
+//   - https://github.com/golang/go/blob/master/src/text/template/parse/node.go
+
+// Node is an element in the AST.
+type Node interface {
+	// node type
+	Type() NodeType
+
+	// location of node in original input string
+	Location() Loc
+
+	// string representation, used for debugging
+	String() string
+
+	// accepts visitor
+	Accept(Visitor) interface{}
+}
+
+// Visitor is the interface to visit an AST.
+type Visitor interface {
+	VisitProgram(*Program) interface{}
+
+	// statements
+	VisitMustache(*MustacheStatement) interface{}
+	VisitBlock(*BlockStatement) interface{}
+	VisitPartial(*PartialStatement) interface{}
+	VisitContent(*ContentStatement) interface{}
+	VisitComment(*CommentStatement) interface{}
+
+	// expressions
+	VisitExpression(*Expression) interface{}
+	VisitSubExpression(*SubExpression) interface{}
+	VisitPath(*PathExpression) interface{}
+
+	// literals
+	VisitString(*StringLiteral) interface{}
+	VisitBoolean(*BooleanLiteral) interface{}
+	VisitNumber(*NumberLiteral) interface{}
+
+	// miscellaneous
+	VisitHash(*Hash) interface{}
+	VisitHashPair(*HashPair) interface{}
+}
+
+// NodeType represents an AST Node type.
+type NodeType int
+
+// Type returns itself, and permits struct includers to satisfy that part of Node interface.
+func (t NodeType) Type() NodeType {
+	return t
+}
+
+const (
+	// NodeProgram is the program node
+	NodeProgram NodeType = iota
+
+	// NodeMustache is the mustache statement node
+	NodeMustache
+
+	// NodeBlock is the block statement node
+	NodeBlock
+
+	// NodePartial is the partial statement node
+	NodePartial
+
+	// NodeContent is the content statement node
+	NodeContent
+
+	// NodeComment is the comment statement node
+	NodeComment
+
+	// NodeExpression is the expression node
+	NodeExpression
+
+	// NodeSubExpression is the subexpression node
+	NodeSubExpression
+
+	// NodePath is the expression path node
+	NodePath
+
+	// NodeBoolean is the literal boolean node
+	NodeBoolean
+
+	// NodeNumber is the literal number node
+	NodeNumber
+
+	// NodeString is the literal string node
+	NodeString
+
+	// NodeHash is the hash node
+	NodeHash
+
+	// NodeHashPair is the hash pair node
+	NodeHashPair
+)
+
+// Loc represents the position of a parsed node in source file.
+type Loc struct {
+	Pos  int // Byte position
+	Line int // Line number
+}
+
+// Location returns itself, and permits struct includers to satisfy that part of Node interface.
+func (l Loc) Location() Loc {
+	return l
+}
+
+// Strip describes node whitespace management.
+type Strip struct {
+	Open  bool
+	Close bool
+
+	OpenStandalone   bool
+	CloseStandalone  bool
+	InlineStandalone bool
+}
+
+// NewStrip instanciates a Strip for given open and close mustaches.
+func NewStrip(openStr, closeStr string) *Strip {
+	return &Strip{
+		Open:  (len(openStr) > 2) && openStr[2] == '~',
+		Close: (len(closeStr) > 2) && closeStr[len(closeStr)-3] == '~',
+	}
+}
+
+// NewStripForStr instanciates a Strip for given tag.
+func NewStripForStr(str string) *Strip {
+	return &Strip{
+		Open:  (len(str) > 2) && str[2] == '~',
+		Close: (len(str) > 2) && str[len(str)-3] == '~',
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (s *Strip) String() string {
+	return fmt.Sprintf("Open: %t, Close: %t, OpenStandalone: %t, CloseStandalone: %t, InlineStandalone: %t", s.Open, s.Close, s.OpenStandalone, s.CloseStandalone, s.InlineStandalone)
+}
+
+//
+// Program
+//
+
+// Program represents a program node.
+type Program struct {
+	NodeType
+	Loc
+
+	Body        []Node // [ Statement ... ]
+	BlockParams []string
+	Chained     bool
+
+	// whitespace management
+	Strip *Strip
+}
+
+// NewProgram instanciates a new program node.
+func NewProgram(pos int, line int) *Program {
+	return &Program{
+		NodeType: NodeProgram,
+		Loc:      Loc{pos, line},
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *Program) String() string {
+	return fmt.Sprintf("Program{Pos: %d}", node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *Program) Accept(visitor Visitor) interface{} {
+	return visitor.VisitProgram(node)
+}
+
+// AddStatement adds given statement to program.
+func (node *Program) AddStatement(statement Node) {
+	node.Body = append(node.Body, statement)
+}
+
+//
+// Mustache Statement
+//
+
+// MustacheStatement represents a mustache node.
+type MustacheStatement struct {
+	NodeType
+	Loc
+
+	Unescaped  bool
+	Expression *Expression
+
+	// whitespace management
+	Strip *Strip
+}
+
+// NewMustacheStatement instanciates a new mustache node.
+func NewMustacheStatement(pos int, line int, unescaped bool) *MustacheStatement {
+	return &MustacheStatement{
+		NodeType:  NodeMustache,
+		Loc:       Loc{pos, line},
+		Unescaped: unescaped,
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *MustacheStatement) String() string {
+	return fmt.Sprintf("Mustache{Pos: %d}", node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *MustacheStatement) Accept(visitor Visitor) interface{} {
+	return visitor.VisitMustache(node)
+}
+
+//
+// Block Statement
+//
+
+// BlockStatement represents a block node.
+type BlockStatement struct {
+	NodeType
+	Loc
+
+	Expression *Expression
+
+	Program *Program
+	Inverse *Program
+
+	// whitespace management
+	OpenStrip    *Strip
+	InverseStrip *Strip
+	CloseStrip   *Strip
+}
+
+// NewBlockStatement instanciates a new block node.
+func NewBlockStatement(pos int, line int) *BlockStatement {
+	return &BlockStatement{
+		NodeType: NodeBlock,
+		Loc:      Loc{pos, line},
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *BlockStatement) String() string {
+	return fmt.Sprintf("Block{Pos: %d}", node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *BlockStatement) Accept(visitor Visitor) interface{} {
+	return visitor.VisitBlock(node)
+}
+
+//
+// Partial Statement
+//
+
+// PartialStatement represents a partial node.
+type PartialStatement struct {
+	NodeType
+	Loc
+
+	Name   Node   // PathExpression | SubExpression
+	Params []Node // [ Expression ... ]
+	Hash   *Hash
+
+	// whitespace management
+	Strip  *Strip
+	Indent string
+}
+
+// NewPartialStatement instanciates a new partial node.
+func NewPartialStatement(pos int, line int) *PartialStatement {
+	return &PartialStatement{
+		NodeType: NodePartial,
+		Loc:      Loc{pos, line},
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *PartialStatement) String() string {
+	return fmt.Sprintf("Partial{Name:%s, Pos:%d}", node.Name, node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *PartialStatement) Accept(visitor Visitor) interface{} {
+	return visitor.VisitPartial(node)
+}
+
+//
+// Content Statement
+//
+
+// ContentStatement represents a content node.
+type ContentStatement struct {
+	NodeType
+	Loc
+
+	Value    string
+	Original string
+
+	// whitespace management
+	RightStripped bool
+	LeftStripped  bool
+}
+
+// NewContentStatement instanciates a new content node.
+func NewContentStatement(pos int, line int, val string) *ContentStatement {
+	return &ContentStatement{
+		NodeType: NodeContent,
+		Loc:      Loc{pos, line},
+
+		Value:    val,
+		Original: val,
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *ContentStatement) String() string {
+	return fmt.Sprintf("Content{Value:'%s', Pos:%d}", node.Value, node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *ContentStatement) Accept(visitor Visitor) interface{} {
+	return visitor.VisitContent(node)
+}
+
+//
+// Comment Statement
+//
+
+// CommentStatement represents a comment node.
+type CommentStatement struct {
+	NodeType
+	Loc
+
+	Value string
+
+	// whitespace management
+	Strip *Strip
+}
+
+// NewCommentStatement instanciates a new comment node.
+func NewCommentStatement(pos int, line int, val string) *CommentStatement {
+	return &CommentStatement{
+		NodeType: NodeComment,
+		Loc:      Loc{pos, line},
+
+		Value: val,
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *CommentStatement) String() string {
+	return fmt.Sprintf("Comment{Value:'%s', Pos:%d}", node.Value, node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *CommentStatement) Accept(visitor Visitor) interface{} {
+	return visitor.VisitComment(node)
+}
+
+//
+// Expression
+//
+
+// Expression represents an expression node.
+type Expression struct {
+	NodeType
+	Loc
+
+	Path   Node   // PathExpression | StringLiteral | BooleanLiteral | NumberLiteral
+	Params []Node // [ Expression ... ]
+	Hash   *Hash
+}
+
+// NewExpression instanciates a new expression node.
+func NewExpression(pos int, line int) *Expression {
+	return &Expression{
+		NodeType: NodeExpression,
+		Loc:      Loc{pos, line},
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *Expression) String() string {
+	return fmt.Sprintf("Expr{Path:%s, Pos:%d}", node.Path, node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *Expression) Accept(visitor Visitor) interface{} {
+	return visitor.VisitExpression(node)
+}
+
+// HelperName returns helper name, or an empty string if this expression can't be a helper.
+func (node *Expression) HelperName() string {
+	path, ok := node.Path.(*PathExpression)
+	if !ok {
+		return ""
+	}
+
+	if path.Data || (len(path.Parts) != 1) || (path.Depth > 0) || path.Scoped {
+		return ""
+	}
+
+	return path.Parts[0]
+}
+
+// FieldPath returns path expression representing a field path, or nil if this is not a field path.
+func (node *Expression) FieldPath() *PathExpression {
+	path, ok := node.Path.(*PathExpression)
+	if !ok {
+		return nil
+	}
+
+	return path
+}
+
+// LiteralStr returns the string representation of literal value, with a boolean set to false if this is not a literal.
+func (node *Expression) LiteralStr() (string, bool) {
+	return LiteralStr(node.Path)
+}
+
+// Canonical returns the canonical form of expression node as a string.
+func (node *Expression) Canonical() string {
+	if str, ok := HelperNameStr(node.Path); ok {
+		return str
+	}
+
+	return ""
+}
+
+// HelperNameStr returns the string representation of a helper name, with a boolean set to false if this is not a valid helper name.
+//
+// helperName : path | dataName | STRING | NUMBER | BOOLEAN | UNDEFINED | NULL
+func HelperNameStr(node Node) (string, bool) {
+	// PathExpression
+	if str, ok := PathExpressionStr(node); ok {
+		return str, ok
+	}
+
+	// Literal
+	if str, ok := LiteralStr(node); ok {
+		return str, ok
+	}
+
+	return "", false
+}
+
+// PathExpressionStr returns the string representation of path expression value, with a boolean set to false if this is not a path expression.
+func PathExpressionStr(node Node) (string, bool) {
+	if path, ok := node.(*PathExpression); ok {
+		result := path.Original
+
+		// "[foo bar]"" => "foo bar"
+		if (len(result) >= 2) && (result[0] == '[') && (result[len(result)-1] == ']') {
+			result = result[1 : len(result)-1]
+		}
+
+		return result, true
+	}
+
+	return "", false
+}
+
+// LiteralStr returns the string representation of literal value, with a boolean set to false if this is not a literal.
+func LiteralStr(node Node) (string, bool) {
+	if lit, ok := node.(*StringLiteral); ok {
+		return lit.Value, true
+	}
+
+	if lit, ok := node.(*BooleanLiteral); ok {
+		return lit.Canonical(), true
+	}
+
+	if lit, ok := node.(*NumberLiteral); ok {
+		return lit.Canonical(), true
+	}
+
+	return "", false
+}
+
+//
+// SubExpression
+//
+
+// SubExpression represents a subexpression node.
+type SubExpression struct {
+	NodeType
+	Loc
+
+	Expression *Expression
+}
+
+// NewSubExpression instanciates a new subexpression node.
+func NewSubExpression(pos int, line int) *SubExpression {
+	return &SubExpression{
+		NodeType: NodeSubExpression,
+		Loc:      Loc{pos, line},
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *SubExpression) String() string {
+	return fmt.Sprintf("Sexp{Path:%s, Pos:%d}", node.Expression.Path, node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *SubExpression) Accept(visitor Visitor) interface{} {
+	return visitor.VisitSubExpression(node)
+}
+
+//
+// Path Expression
+//
+
+// PathExpression represents a path expression node.
+type PathExpression struct {
+	NodeType
+	Loc
+
+	Original string
+	Depth    int
+	Parts    []string
+	Data     bool
+	Scoped   bool
+}
+
+// NewPathExpression instanciates a new path expression node.
+func NewPathExpression(pos int, line int, data bool) *PathExpression {
+	result := &PathExpression{
+		NodeType: NodePath,
+		Loc:      Loc{pos, line},
+
+		Data: data,
+	}
+
+	if data {
+		result.Original = "@"
+	}
+
+	return result
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *PathExpression) String() string {
+	return fmt.Sprintf("Path{Original:'%s', Pos:%d}", node.Original, node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *PathExpression) Accept(visitor Visitor) interface{} {
+	return visitor.VisitPath(node)
+}
+
+// Part adds path part.
+func (node *PathExpression) Part(part string) {
+	node.Original += part
+
+	switch part {
+	case "..":
+		node.Depth++
+		node.Scoped = true
+	case ".", "this":
+		node.Scoped = true
+	default:
+		node.Parts = append(node.Parts, part)
+	}
+}
+
+// Sep adds path separator.
+func (node *PathExpression) Sep(separator string) {
+	node.Original += separator
+}
+
+// IsDataRoot returns true if path expression is @root.
+func (node *PathExpression) IsDataRoot() bool {
+	return node.Data && (node.Parts[0] == "root")
+}
+
+//
+// String Literal
+//
+
+// StringLiteral represents a string node.
+type StringLiteral struct {
+	NodeType
+	Loc
+
+	Value string
+}
+
+// NewStringLiteral instanciates a new string node.
+func NewStringLiteral(pos int, line int, val string) *StringLiteral {
+	return &StringLiteral{
+		NodeType: NodeString,
+		Loc:      Loc{pos, line},
+
+		Value: val,
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *StringLiteral) String() string {
+	return fmt.Sprintf("String{Value:'%s', Pos:%d}", node.Value, node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *StringLiteral) Accept(visitor Visitor) interface{} {
+	return visitor.VisitString(node)
+}
+
+//
+// Boolean Literal
+//
+
+// BooleanLiteral represents a boolean node.
+type BooleanLiteral struct {
+	NodeType
+	Loc
+
+	Value    bool
+	Original string
+}
+
+// NewBooleanLiteral instanciates a new boolean node.
+func NewBooleanLiteral(pos int, line int, val bool, original string) *BooleanLiteral {
+	return &BooleanLiteral{
+		NodeType: NodeBoolean,
+		Loc:      Loc{pos, line},
+
+		Value:    val,
+		Original: original,
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *BooleanLiteral) String() string {
+	return fmt.Sprintf("Boolean{Value:%s, Pos:%d}", node.Canonical(), node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *BooleanLiteral) Accept(visitor Visitor) interface{} {
+	return visitor.VisitBoolean(node)
+}
+
+// Canonical returns the canonical form of boolean node as a string (ie. "true" | "false").
+func (node *BooleanLiteral) Canonical() string {
+	if node.Value {
+		return "true"
+	}
+
+	return "false"
+}
+
+//
+// Number Literal
+//
+
+// NumberLiteral represents a number node.
+type NumberLiteral struct {
+	NodeType
+	Loc
+
+	Value    float64
+	IsInt    bool
+	Original string
+}
+
+// NewNumberLiteral instanciates a new number node.
+func NewNumberLiteral(pos int, line int, val float64, isInt bool, original string) *NumberLiteral {
+	return &NumberLiteral{
+		NodeType: NodeNumber,
+		Loc:      Loc{pos, line},
+
+		Value:    val,
+		IsInt:    isInt,
+		Original: original,
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *NumberLiteral) String() string {
+	return fmt.Sprintf("Number{Value:%s, Pos:%d}", node.Canonical(), node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *NumberLiteral) Accept(visitor Visitor) interface{} {
+	return visitor.VisitNumber(node)
+}
+
+// Canonical returns the canonical form of number node as a string (eg: "12", "-1.51").
+func (node *NumberLiteral) Canonical() string {
+	prec := -1
+	if node.IsInt {
+		prec = 0
+	}
+	return strconv.FormatFloat(node.Value, 'f', prec, 64)
+}
+
+// Number returns an integer or a float.
+func (node *NumberLiteral) Number() interface{} {
+	if node.IsInt {
+		return int(node.Value)
+	}
+
+	return node.Value
+}
+
+//
+// Hash
+//
+
+// Hash represents a hash node.
+type Hash struct {
+	NodeType
+	Loc
+
+	Pairs []*HashPair
+}
+
+// NewHash instanciates a new hash node.
+func NewHash(pos int, line int) *Hash {
+	return &Hash{
+		NodeType: NodeHash,
+		Loc:      Loc{pos, line},
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *Hash) String() string {
+	result := fmt.Sprintf("Hash{[%d", node.Loc.Pos)
+
+	for i, p := range node.Pairs {
+		if i > 0 {
+			result += ", "
+		}
+		result += p.String()
+	}
+
+	return result + fmt.Sprintf("], Pos:%d}", node.Loc.Pos)
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *Hash) Accept(visitor Visitor) interface{} {
+	return visitor.VisitHash(node)
+}
+
+//
+// HashPair
+//
+
+// HashPair represents a hash pair node.
+type HashPair struct {
+	NodeType
+	Loc
+
+	Key string
+	Val Node // Expression
+}
+
+// NewHashPair instanciates a new hash pair node.
+func NewHashPair(pos int, line int) *HashPair {
+	return &HashPair{
+		NodeType: NodeHashPair,
+		Loc:      Loc{pos, line},
+	}
+}
+
+// String returns a string representation of receiver that can be used for debugging.
+func (node *HashPair) String() string {
+	return node.Key + "=" + node.Val.String()
+}
+
+// Accept is the receiver entry point for visitors.
+func (node *HashPair) Accept(visitor Visitor) interface{} {
+	return visitor.VisitHashPair(node)
+}

+ 279 - 0
vendor/github.com/aymerick/raymond/ast/print.go

@@ -0,0 +1,279 @@
+package ast
+
+import (
+	"fmt"
+	"strings"
+)
+
+// printVisitor implements the Visitor interface to print a AST.
+type printVisitor struct {
+	buf   string
+	depth int
+
+	original bool
+	inBlock  bool
+}
+
+func newPrintVisitor() *printVisitor {
+	return &printVisitor{}
+}
+
+// Print returns a string representation of given AST, that can be used for debugging purpose.
+func Print(node Node) string {
+	visitor := newPrintVisitor()
+	node.Accept(visitor)
+	return visitor.output()
+}
+
+func (v *printVisitor) output() string {
+	return v.buf
+}
+
+func (v *printVisitor) indent() {
+	for i := 0; i < v.depth; {
+		v.buf += "  "
+		i++
+	}
+}
+
+func (v *printVisitor) str(val string) {
+	v.buf += val
+}
+
+func (v *printVisitor) nl() {
+	v.str("\n")
+}
+
+func (v *printVisitor) line(val string) {
+	v.indent()
+	v.str(val)
+	v.nl()
+}
+
+//
+// Visitor interface
+//
+
+// Statements
+
+// VisitProgram implements corresponding Visitor interface method
+func (v *printVisitor) VisitProgram(node *Program) interface{} {
+	if len(node.BlockParams) > 0 {
+		v.line("BLOCK PARAMS: [ " + strings.Join(node.BlockParams, " ") + " ]")
+	}
+
+	for _, n := range node.Body {
+		n.Accept(v)
+	}
+
+	return nil
+}
+
+// VisitMustache implements corresponding Visitor interface method
+func (v *printVisitor) VisitMustache(node *MustacheStatement) interface{} {
+	v.indent()
+	v.str("{{ ")
+
+	node.Expression.Accept(v)
+
+	v.str(" }}")
+	v.nl()
+
+	return nil
+}
+
+// VisitBlock implements corresponding Visitor interface method
+func (v *printVisitor) VisitBlock(node *BlockStatement) interface{} {
+	v.inBlock = true
+
+	v.line("BLOCK:")
+	v.depth++
+
+	node.Expression.Accept(v)
+
+	if node.Program != nil {
+		v.line("PROGRAM:")
+		v.depth++
+		node.Program.Accept(v)
+		v.depth--
+	}
+
+	if node.Inverse != nil {
+		// if node.Program != nil {
+		// 	v.depth++
+		// }
+
+		v.line("{{^}}")
+		v.depth++
+		node.Inverse.Accept(v)
+		v.depth--
+
+		// if node.Program != nil {
+		// 	v.depth--
+		// }
+	}
+
+	v.inBlock = false
+
+	return nil
+}
+
+// VisitPartial implements corresponding Visitor interface method
+func (v *printVisitor) VisitPartial(node *PartialStatement) interface{} {
+	v.indent()
+	v.str("{{> PARTIAL:")
+
+	v.original = true
+	node.Name.Accept(v)
+	v.original = false
+
+	if len(node.Params) > 0 {
+		v.str(" ")
+		node.Params[0].Accept(v)
+	}
+
+	// hash
+	if node.Hash != nil {
+		v.str(" ")
+		node.Hash.Accept(v)
+	}
+
+	v.str(" }}")
+	v.nl()
+
+	return nil
+}
+
+// VisitContent implements corresponding Visitor interface method
+func (v *printVisitor) VisitContent(node *ContentStatement) interface{} {
+	v.line("CONTENT[ '" + node.Value + "' ]")
+
+	return nil
+}
+
+// VisitComment implements corresponding Visitor interface method
+func (v *printVisitor) VisitComment(node *CommentStatement) interface{} {
+	v.line("{{! '" + node.Value + "' }}")
+
+	return nil
+}
+
+// Expressions
+
+// VisitExpression implements corresponding Visitor interface method
+func (v *printVisitor) VisitExpression(node *Expression) interface{} {
+	if v.inBlock {
+		v.indent()
+	}
+
+	// path
+	node.Path.Accept(v)
+
+	// params
+	v.str(" [")
+	for i, n := range node.Params {
+		if i > 0 {
+			v.str(", ")
+		}
+		n.Accept(v)
+	}
+	v.str("]")
+
+	// hash
+	if node.Hash != nil {
+		v.str(" ")
+		node.Hash.Accept(v)
+	}
+
+	if v.inBlock {
+		v.nl()
+	}
+
+	return nil
+}
+
+// VisitSubExpression implements corresponding Visitor interface method
+func (v *printVisitor) VisitSubExpression(node *SubExpression) interface{} {
+	node.Expression.Accept(v)
+
+	return nil
+}
+
+// VisitPath implements corresponding Visitor interface method
+func (v *printVisitor) VisitPath(node *PathExpression) interface{} {
+	if v.original {
+		v.str(node.Original)
+	} else {
+		path := strings.Join(node.Parts, "/")
+
+		result := ""
+		if node.Data {
+			result += "@"
+		}
+
+		v.str(result + "PATH:" + path)
+	}
+
+	return nil
+}
+
+// Literals
+
+// VisitString implements corresponding Visitor interface method
+func (v *printVisitor) VisitString(node *StringLiteral) interface{} {
+	if v.original {
+		v.str(node.Value)
+	} else {
+		v.str("\"" + node.Value + "\"")
+	}
+
+	return nil
+}
+
+// VisitBoolean implements corresponding Visitor interface method
+func (v *printVisitor) VisitBoolean(node *BooleanLiteral) interface{} {
+	if v.original {
+		v.str(node.Original)
+	} else {
+		v.str(fmt.Sprintf("BOOLEAN{%s}", node.Canonical()))
+	}
+
+	return nil
+}
+
+// VisitNumber implements corresponding Visitor interface method
+func (v *printVisitor) VisitNumber(node *NumberLiteral) interface{} {
+	if v.original {
+		v.str(node.Original)
+	} else {
+		v.str(fmt.Sprintf("NUMBER{%s}", node.Canonical()))
+	}
+
+	return nil
+}
+
+// Miscellaneous
+
+// VisitHash implements corresponding Visitor interface method
+func (v *printVisitor) VisitHash(node *Hash) interface{} {
+	v.str("HASH{")
+
+	for i, p := range node.Pairs {
+		if i > 0 {
+			v.str(", ")
+		}
+		p.Accept(v)
+	}
+
+	v.str("}")
+
+	return nil
+}
+
+// VisitHashPair implements corresponding Visitor interface method
+func (v *printVisitor) VisitHashPair(node *HashPair) interface{} {
+	v.str(node.Key + "=")
+	node.Val.Accept(v)
+
+	return nil
+}

+ 95 - 0
vendor/github.com/aymerick/raymond/data_frame.go

@@ -0,0 +1,95 @@
+package raymond
+
+import "reflect"
+
+// DataFrame represents a private data frame.
+//
+// Cf. private variables documentation at: http://handlebarsjs.com/block_helpers.html
+type DataFrame struct {
+	parent *DataFrame
+	data   map[string]interface{}
+}
+
+// NewDataFrame instanciates a new private data frame.
+func NewDataFrame() *DataFrame {
+	return &DataFrame{
+		data: make(map[string]interface{}),
+	}
+}
+
+// Copy instanciates a new private data frame with receiver as parent.
+func (p *DataFrame) Copy() *DataFrame {
+	result := NewDataFrame()
+
+	for k, v := range p.data {
+		result.data[k] = v
+	}
+
+	result.parent = p
+
+	return result
+}
+
+// newIterDataFrame instanciates a new private data frame with receiver as parent and with iteration data set (@index, @key, @first, @last)
+func (p *DataFrame) newIterDataFrame(length int, i int, key interface{}) *DataFrame {
+	result := p.Copy()
+
+	result.Set("index", i)
+	result.Set("key", key)
+	result.Set("first", i == 0)
+	result.Set("last", i == length-1)
+
+	return result
+}
+
+// Set sets a data value.
+func (p *DataFrame) Set(key string, val interface{}) {
+	p.data[key] = val
+}
+
+// Get gets a data value.
+func (p *DataFrame) Get(key string) interface{} {
+	return p.find([]string{key})
+}
+
+// find gets a deep data value
+//
+// @todo This is NOT consistent with the way we resolve data in template (cf. `evalDataPathExpression()`) ! FIX THAT !
+func (p *DataFrame) find(parts []string) interface{} {
+	data := p.data
+
+	for i, part := range parts {
+		val := data[part]
+		if val == nil {
+			return nil
+		}
+
+		if i == len(parts)-1 {
+			// found
+			return val
+		}
+
+		valValue := reflect.ValueOf(val)
+		if valValue.Kind() != reflect.Map {
+			// not found
+			return nil
+		}
+
+		// continue
+		data = mapStringInterface(valValue)
+	}
+
+	// not found
+	return nil
+}
+
+// mapStringInterface converts any `map` to `map[string]interface{}`
+func mapStringInterface(value reflect.Value) map[string]interface{} {
+	result := make(map[string]interface{})
+
+	for _, key := range value.MapKeys() {
+		result[strValue(key)] = value.MapIndex(key).Interface()
+	}
+
+	return result
+}

+ 65 - 0
vendor/github.com/aymerick/raymond/escape.go

@@ -0,0 +1,65 @@
+package raymond
+
+import (
+	"bytes"
+	"strings"
+)
+
+//
+// That whole file is borrowed from https://github.com/golang/go/tree/master/src/html/escape.go
+//
+// With changes:
+//    &#39 => &apos;
+//    &#34 => &quot;
+//
+// To stay in sync with JS implementation, and make mustache tests pass.
+//
+
+type writer interface {
+	WriteString(string) (int, error)
+}
+
+const escapedChars = `&'<>"`
+
+func escape(w writer, s string) error {
+	i := strings.IndexAny(s, escapedChars)
+	for i != -1 {
+		if _, err := w.WriteString(s[:i]); err != nil {
+			return err
+		}
+		var esc string
+		switch s[i] {
+		case '&':
+			esc = "&amp;"
+		case '\'':
+			esc = "&apos;"
+		case '<':
+			esc = "&lt;"
+		case '>':
+			esc = "&gt;"
+		case '"':
+			esc = "&quot;"
+		default:
+			panic("unrecognized escape character")
+		}
+		s = s[i+1:]
+		if _, err := w.WriteString(esc); err != nil {
+			return err
+		}
+		i = strings.IndexAny(s, escapedChars)
+	}
+	_, err := w.WriteString(s)
+	return err
+}
+
+// Escape escapes special HTML characters.
+//
+// It can be used by helpers that return a SafeString and that need to escape some content by themselves.
+func Escape(s string) string {
+	if strings.IndexAny(s, escapedChars) == -1 {
+		return s
+	}
+	var buf bytes.Buffer
+	escape(&buf, s)
+	return buf.String()
+}

+ 1005 - 0
vendor/github.com/aymerick/raymond/eval.go

@@ -0,0 +1,1005 @@
+package raymond
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/aymerick/raymond/ast"
+)
+
+var (
+	// @note borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
+	errorType       = reflect.TypeOf((*error)(nil)).Elem()
+	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+	zero reflect.Value
+)
+
+// evalVisitor evaluates a handlebars template with context
+type evalVisitor struct {
+	tpl *Template
+
+	// contexts stack
+	ctx []reflect.Value
+
+	// current data frame (chained with parent)
+	dataFrame *DataFrame
+
+	// block parameters stack
+	blockParams []map[string]interface{}
+
+	// block statements stack
+	blocks []*ast.BlockStatement
+
+	// expressions stack
+	exprs []*ast.Expression
+
+	// memoize expressions that were function calls
+	exprFunc map[*ast.Expression]bool
+
+	// used for info on panic
+	curNode ast.Node
+}
+
+// NewEvalVisitor instanciate a new evaluation visitor with given context and initial private data frame
+//
+// If privData is nil, then a default data frame is created
+func newEvalVisitor(tpl *Template, ctx interface{}, privData *DataFrame) *evalVisitor {
+	frame := privData
+	if frame == nil {
+		frame = NewDataFrame()
+	}
+
+	return &evalVisitor{
+		tpl:       tpl,
+		ctx:       []reflect.Value{reflect.ValueOf(ctx)},
+		dataFrame: frame,
+		exprFunc:  make(map[*ast.Expression]bool),
+	}
+}
+
+// at sets current node
+func (v *evalVisitor) at(node ast.Node) {
+	v.curNode = node
+}
+
+//
+// Contexts stack
+//
+
+// pushCtx pushes new context to the stack
+func (v *evalVisitor) pushCtx(ctx reflect.Value) {
+	v.ctx = append(v.ctx, ctx)
+}
+
+// popCtx pops last context from stack
+func (v *evalVisitor) popCtx() reflect.Value {
+	if len(v.ctx) == 0 {
+		return zero
+	}
+
+	var result reflect.Value
+	result, v.ctx = v.ctx[len(v.ctx)-1], v.ctx[:len(v.ctx)-1]
+
+	return result
+}
+
+// rootCtx returns root context
+func (v *evalVisitor) rootCtx() reflect.Value {
+	return v.ctx[0]
+}
+
+// curCtx returns current context
+func (v *evalVisitor) curCtx() reflect.Value {
+	return v.ancestorCtx(0)
+}
+
+// ancestorCtx returns ancestor context
+func (v *evalVisitor) ancestorCtx(depth int) reflect.Value {
+	index := len(v.ctx) - 1 - depth
+	if index < 0 {
+		return zero
+	}
+
+	return v.ctx[index]
+}
+
+//
+// Private data frame
+//
+
+// setDataFrame sets new data frame
+func (v *evalVisitor) setDataFrame(frame *DataFrame) {
+	v.dataFrame = frame
+}
+
+// popDataFrame sets back parent data frame
+func (v *evalVisitor) popDataFrame() {
+	v.dataFrame = v.dataFrame.parent
+}
+
+//
+// Block Parameters stack
+//
+
+// pushBlockParams pushes new block params to the stack
+func (v *evalVisitor) pushBlockParams(params map[string]interface{}) {
+	v.blockParams = append(v.blockParams, params)
+}
+
+// popBlockParams pops last block params from stack
+func (v *evalVisitor) popBlockParams() map[string]interface{} {
+	var result map[string]interface{}
+
+	if len(v.blockParams) == 0 {
+		return result
+	}
+
+	result, v.blockParams = v.blockParams[len(v.blockParams)-1], v.blockParams[:len(v.blockParams)-1]
+	return result
+}
+
+// blockParam iterates on stack to find given block parameter, and returns its value or nil if not founc
+func (v *evalVisitor) blockParam(name string) interface{} {
+	for i := len(v.blockParams) - 1; i >= 0; i-- {
+		for k, v := range v.blockParams[i] {
+			if name == k {
+				return v
+			}
+		}
+	}
+
+	return nil
+}
+
+//
+// Blocks stack
+//
+
+// pushBlock pushes new block statement to stack
+func (v *evalVisitor) pushBlock(block *ast.BlockStatement) {
+	v.blocks = append(v.blocks, block)
+}
+
+// popBlock pops last block statement from stack
+func (v *evalVisitor) popBlock() *ast.BlockStatement {
+	if len(v.blocks) == 0 {
+		return nil
+	}
+
+	var result *ast.BlockStatement
+	result, v.blocks = v.blocks[len(v.blocks)-1], v.blocks[:len(v.blocks)-1]
+
+	return result
+}
+
+// curBlock returns current block statement
+func (v *evalVisitor) curBlock() *ast.BlockStatement {
+	if len(v.blocks) == 0 {
+		return nil
+	}
+
+	return v.blocks[len(v.blocks)-1]
+}
+
+//
+// Expressions stack
+//
+
+// pushExpr pushes new expression to stack
+func (v *evalVisitor) pushExpr(expression *ast.Expression) {
+	v.exprs = append(v.exprs, expression)
+}
+
+// popExpr pops last expression from stack
+func (v *evalVisitor) popExpr() *ast.Expression {
+	if len(v.exprs) == 0 {
+		return nil
+	}
+
+	var result *ast.Expression
+	result, v.exprs = v.exprs[len(v.exprs)-1], v.exprs[:len(v.exprs)-1]
+
+	return result
+}
+
+// curExpr returns current expression
+func (v *evalVisitor) curExpr() *ast.Expression {
+	if len(v.exprs) == 0 {
+		return nil
+	}
+
+	return v.exprs[len(v.exprs)-1]
+}
+
+//
+// Error functions
+//
+
+// errPanic panics
+func (v *evalVisitor) errPanic(err error) {
+	panic(fmt.Errorf("Evaluation error: %s\nCurrent node:\n\t%s", err, v.curNode))
+}
+
+// errorf panics with a custom message
+func (v *evalVisitor) errorf(format string, args ...interface{}) {
+	v.errPanic(fmt.Errorf(format, args...))
+}
+
+//
+// Evaluation
+//
+
+// evalProgram eEvaluates program with given context and returns string result
+func (v *evalVisitor) evalProgram(program *ast.Program, ctx interface{}, data *DataFrame, key interface{}) string {
+	blockParams := make(map[string]interface{})
+
+	// compute block params
+	if len(program.BlockParams) > 0 {
+		blockParams[program.BlockParams[0]] = ctx
+	}
+
+	if (len(program.BlockParams) > 1) && (key != nil) {
+		blockParams[program.BlockParams[1]] = key
+	}
+
+	// push contexts
+	if len(blockParams) > 0 {
+		v.pushBlockParams(blockParams)
+	}
+
+	ctxVal := reflect.ValueOf(ctx)
+	if ctxVal.IsValid() {
+		v.pushCtx(ctxVal)
+	}
+
+	if data != nil {
+		v.setDataFrame(data)
+	}
+
+	// evaluate program
+	result, _ := program.Accept(v).(string)
+
+	// pop contexts
+	if data != nil {
+		v.popDataFrame()
+	}
+
+	if ctxVal.IsValid() {
+		v.popCtx()
+	}
+
+	if len(blockParams) > 0 {
+		v.popBlockParams()
+	}
+
+	return result
+}
+
+// evalPath evaluates all path parts with given context
+func (v *evalVisitor) evalPath(ctx reflect.Value, parts []string, exprRoot bool) (reflect.Value, bool) {
+	partResolved := false
+
+	for i := 0; i < len(parts); i++ {
+		part := parts[i]
+
+		// "[foo bar]"" => "foo bar"
+		if (len(part) >= 2) && (part[0] == '[') && (part[len(part)-1] == ']') {
+			part = part[1 : len(part)-1]
+		}
+
+		ctx = v.evalField(ctx, part, exprRoot)
+		if !ctx.IsValid() {
+			break
+		}
+
+		// we resolved at least one part of path
+		partResolved = true
+	}
+
+	return ctx, partResolved
+}
+
+// evalField evaluates field with given context
+func (v *evalVisitor) evalField(ctx reflect.Value, fieldName string, exprRoot bool) reflect.Value {
+	result := zero
+
+	ctx, _ = indirect(ctx)
+	if !ctx.IsValid() {
+		return result
+	}
+
+	// check if this is a method call
+	result, isMeth := v.evalMethod(ctx, fieldName, exprRoot)
+	if !isMeth {
+		switch ctx.Kind() {
+		case reflect.Struct:
+			// example: firstName => FirstName
+			expFieldName := strings.Title(fieldName)
+
+			// check if struct have this field and that it is exported
+			if tField, ok := ctx.Type().FieldByName(expFieldName); ok && (tField.PkgPath == "") {
+				// struct field
+				result = ctx.FieldByIndex(tField.Index)
+				break
+			}
+
+			// attempts to find template variable name as a struct tag
+			result = v.evalStructTag(ctx, fieldName)
+		case reflect.Map:
+			nameVal := reflect.ValueOf(fieldName)
+			if nameVal.Type().AssignableTo(ctx.Type().Key()) {
+				// map key
+				result = ctx.MapIndex(nameVal)
+			}
+		case reflect.Array, reflect.Slice:
+			if i, err := strconv.Atoi(fieldName); (err == nil) && (i < ctx.Len()) {
+				result = ctx.Index(i)
+			}
+		}
+	}
+
+	// check if result is a function
+	result, _ = indirect(result)
+	if result.Kind() == reflect.Func {
+		result = v.evalFieldFunc(fieldName, result, exprRoot)
+	}
+
+	return result
+}
+
+// evalFieldFunc tries to evaluate given method name, and a boolean to indicate if this was a method call
+func (v *evalVisitor) evalMethod(ctx reflect.Value, name string, exprRoot bool) (reflect.Value, bool) {
+	if ctx.Kind() != reflect.Interface && ctx.CanAddr() {
+		ctx = ctx.Addr()
+	}
+
+	method := ctx.MethodByName(name)
+	if !method.IsValid() {
+		// example: subject() => Subject()
+		method = ctx.MethodByName(strings.Title(name))
+	}
+
+	if !method.IsValid() {
+		return zero, false
+	}
+
+	return v.evalFieldFunc(name, method, exprRoot), true
+}
+
+// evalFieldFunc evaluates given function
+func (v *evalVisitor) evalFieldFunc(name string, funcVal reflect.Value, exprRoot bool) reflect.Value {
+	ensureValidHelper(name, funcVal)
+
+	var options *Options
+	if exprRoot {
+		// create function arg with all params/hash
+		expr := v.curExpr()
+		options = v.helperOptions(expr)
+
+		// ok, that expression was a function call
+		v.exprFunc[expr] = true
+	} else {
+		// we are not at root of expression, so we are a parameter... and we don't like
+		// infinite loops caused by trying to parse ourself forever
+		options = newEmptyOptions(v)
+	}
+
+	return v.callFunc(name, funcVal, options)
+}
+
+// evalStructTag checks for the existence of a struct tag containing the
+// name of the variable in the template. This allows for a template variable to
+// be separated from the field in the struct.
+func (v *evalVisitor) evalStructTag(ctx reflect.Value, name string) reflect.Value {
+	val := reflect.ValueOf(ctx.Interface())
+
+	for i := 0; i < val.NumField(); i++ {
+		field := val.Type().Field(i)
+		tag := field.Tag.Get("handlebars")
+		if tag == name {
+			return val.Field(i)
+		}
+	}
+
+	return zero
+}
+
+// findBlockParam returns node's block parameter
+func (v *evalVisitor) findBlockParam(node *ast.PathExpression) (string, interface{}) {
+	if len(node.Parts) > 0 {
+		name := node.Parts[0]
+		if value := v.blockParam(name); value != nil {
+			return name, value
+		}
+	}
+
+	return "", nil
+}
+
+// evalPathExpression evaluates a path expression
+func (v *evalVisitor) evalPathExpression(node *ast.PathExpression, exprRoot bool) interface{} {
+	var result interface{}
+
+	if name, value := v.findBlockParam(node); value != nil {
+		// block parameter value
+
+		// We push a new context so we can evaluate the path expression (note: this may be a bad idea).
+		//
+		// Example:
+		//   {{#foo as |bar|}}
+		//     {{bar.baz}}
+		//   {{/foo}}
+		//
+		// With data:
+		//   {"foo": {"baz": "bat"}}
+		newCtx := map[string]interface{}{name: value}
+
+		v.pushCtx(reflect.ValueOf(newCtx))
+		result = v.evalCtxPathExpression(node, exprRoot)
+		v.popCtx()
+	} else {
+		ctxTried := false
+
+		if node.IsDataRoot() {
+			// context path
+			result = v.evalCtxPathExpression(node, exprRoot)
+
+			ctxTried = true
+		}
+
+		if (result == nil) && node.Data {
+			// if it is @root, then we tried to evaluate with root context but nothing was found
+			// so let's try with private data
+
+			// private data
+			result = v.evalDataPathExpression(node, exprRoot)
+		}
+
+		if (result == nil) && !ctxTried {
+			// context path
+			result = v.evalCtxPathExpression(node, exprRoot)
+		}
+	}
+
+	return result
+}
+
+// evalDataPathExpression evaluates a private data path expression
+func (v *evalVisitor) evalDataPathExpression(node *ast.PathExpression, exprRoot bool) interface{} {
+	// find data frame
+	frame := v.dataFrame
+	for i := node.Depth; i > 0; i-- {
+		if frame.parent == nil {
+			return nil
+		}
+		frame = frame.parent
+	}
+
+	// resolve data
+	// @note Can be changed to v.evalCtx() as context can't be an array
+	result, _ := v.evalCtxPath(reflect.ValueOf(frame.data), node.Parts, exprRoot)
+	return result
+}
+
+// evalCtxPathExpression evaluates a context path expression
+func (v *evalVisitor) evalCtxPathExpression(node *ast.PathExpression, exprRoot bool) interface{} {
+	v.at(node)
+
+	if node.IsDataRoot() {
+		// `@root` - remove the first part
+		parts := node.Parts[1:len(node.Parts)]
+
+		result, _ := v.evalCtxPath(v.rootCtx(), parts, exprRoot)
+		return result
+	}
+
+	return v.evalDepthPath(node.Depth, node.Parts, exprRoot)
+}
+
+// evalDepthPath iterates on contexts, starting at given depth, until there is one that resolve given path parts
+func (v *evalVisitor) evalDepthPath(depth int, parts []string, exprRoot bool) interface{} {
+	var result interface{}
+	partResolved := false
+
+	ctx := v.ancestorCtx(depth)
+
+	for (result == nil) && ctx.IsValid() && (depth <= len(v.ctx) && !partResolved) {
+		// try with context
+		result, partResolved = v.evalCtxPath(ctx, parts, exprRoot)
+
+		// As soon as we find the first part of a path, we must not try to resolve with parent context if result is finally `nil`
+		// Reference: "Dotted Names - Context Precedence" mustache test
+		if !partResolved && (result == nil) {
+			// try with previous context
+			depth++
+			ctx = v.ancestorCtx(depth)
+		}
+	}
+
+	return result
+}
+
+// evalCtxPath evaluates path with given context
+func (v *evalVisitor) evalCtxPath(ctx reflect.Value, parts []string, exprRoot bool) (interface{}, bool) {
+	var result interface{}
+	partResolved := false
+
+	switch ctx.Kind() {
+	case reflect.Array, reflect.Slice:
+		// Array context
+		var results []interface{}
+
+		for i := 0; i < ctx.Len(); i++ {
+			value, _ := v.evalPath(ctx.Index(i), parts, exprRoot)
+			if value.IsValid() {
+				results = append(results, value.Interface())
+			}
+		}
+
+		result = results
+	default:
+		// NOT array context
+		var value reflect.Value
+
+		value, partResolved = v.evalPath(ctx, parts, exprRoot)
+		if value.IsValid() {
+			result = value.Interface()
+		}
+	}
+
+	return result, partResolved
+}
+
+//
+// Helpers
+//
+
+// isHelperCall returns true if given expression is a helper call
+func (v *evalVisitor) isHelperCall(node *ast.Expression) bool {
+	if helperName := node.HelperName(); helperName != "" {
+		return v.findHelper(helperName) != zero
+	}
+	return false
+}
+
+// findHelper finds given helper
+func (v *evalVisitor) findHelper(name string) reflect.Value {
+	// check template helpers
+	if h := v.tpl.findHelper(name); h != zero {
+		return h
+	}
+
+	// check global helpers
+	return findHelper(name)
+}
+
+// callFunc calls function with given options
+func (v *evalVisitor) callFunc(name string, funcVal reflect.Value, options *Options) reflect.Value {
+	params := options.Params()
+
+	funcType := funcVal.Type()
+
+	// @todo Is there a better way to do that ?
+	strType := reflect.TypeOf("")
+	boolType := reflect.TypeOf(true)
+
+	// check parameters number
+	addOptions := false
+	numIn := funcType.NumIn()
+
+	if numIn == len(params)+1 {
+		lastArgType := funcType.In(numIn - 1)
+		if reflect.TypeOf(options).AssignableTo(lastArgType) {
+			addOptions = true
+		}
+	}
+
+	if !addOptions && (len(params) != numIn) {
+		v.errorf("Helper '%s' called with wrong number of arguments, needed %d but got %d", name, numIn, len(params))
+	}
+
+	// check and collect arguments
+	args := make([]reflect.Value, numIn)
+	for i, param := range params {
+		arg := reflect.ValueOf(param)
+		argType := funcType.In(i)
+
+		if !arg.IsValid() {
+			if canBeNil(argType) {
+				arg = reflect.Zero(argType)
+			} else if argType.Kind() == reflect.String {
+				arg = reflect.ValueOf("")
+			} else {
+				// @todo Maybe we can panic on that
+				return reflect.Zero(strType)
+			}
+		}
+
+		if !arg.Type().AssignableTo(argType) {
+			if strType.AssignableTo(argType) {
+				// convert parameter to string
+				arg = reflect.ValueOf(strValue(arg))
+			} else if boolType.AssignableTo(argType) {
+				// convert parameter to bool
+				val, _ := isTrueValue(arg)
+				arg = reflect.ValueOf(val)
+			} else {
+				v.errorf("Helper %s called with argument %d with type %s but it should be %s", name, i, arg.Type(), argType)
+			}
+		}
+
+		args[i] = arg
+	}
+
+	if addOptions {
+		args[numIn-1] = reflect.ValueOf(options)
+	}
+
+	result := funcVal.Call(args)
+
+	return result[0]
+}
+
+// callHelper invoqs helper function for given expression node
+func (v *evalVisitor) callHelper(name string, helper reflect.Value, node *ast.Expression) interface{} {
+	result := v.callFunc(name, helper, v.helperOptions(node))
+	if !result.IsValid() {
+		return nil
+	}
+
+	// @todo We maybe want to ensure here that helper returned a string or a SafeString
+	return result.Interface()
+}
+
+// helperOptions computes helper options argument from an expression
+func (v *evalVisitor) helperOptions(node *ast.Expression) *Options {
+	var params []interface{}
+	var hash map[string]interface{}
+
+	for _, paramNode := range node.Params {
+		param := paramNode.Accept(v)
+		params = append(params, param)
+	}
+
+	if node.Hash != nil {
+		hash, _ = node.Hash.Accept(v).(map[string]interface{})
+	}
+
+	return newOptions(v, params, hash)
+}
+
+//
+// Partials
+//
+
+// findPartial finds given partial
+func (v *evalVisitor) findPartial(name string) *partial {
+	// check template partials
+	if p := v.tpl.findPartial(name); p != nil {
+		return p
+	}
+
+	// check global partials
+	return findPartial(name)
+}
+
+// partialContext computes partial context
+func (v *evalVisitor) partialContext(node *ast.PartialStatement) reflect.Value {
+	if nb := len(node.Params); nb > 1 {
+		v.errorf("Unsupported number of partial arguments: %d", nb)
+	}
+
+	if (len(node.Params) > 0) && (node.Hash != nil) {
+		v.errorf("Passing both context and named parameters to a partial is not allowed")
+	}
+
+	if len(node.Params) == 1 {
+		return reflect.ValueOf(node.Params[0].Accept(v))
+	}
+
+	if node.Hash != nil {
+		hash, _ := node.Hash.Accept(v).(map[string]interface{})
+		return reflect.ValueOf(hash)
+	}
+
+	return zero
+}
+
+// evalPartial evaluates a partial
+func (v *evalVisitor) evalPartial(p *partial, node *ast.PartialStatement) string {
+	// get partial template
+	partialTpl, err := p.template()
+	if err != nil {
+		v.errPanic(err)
+	}
+
+	// push partial context
+	ctx := v.partialContext(node)
+	if ctx.IsValid() {
+		v.pushCtx(ctx)
+	}
+
+	// evaluate partial template
+	result, _ := partialTpl.program.Accept(v).(string)
+
+	// ident partial
+	result = indentLines(result, node.Indent)
+
+	if ctx.IsValid() {
+		v.popCtx()
+	}
+
+	return result
+}
+
+// indentLines indents all lines of given string
+func indentLines(str string, indent string) string {
+	if indent == "" {
+		return str
+	}
+
+	var indented []string
+
+	lines := strings.Split(str, "\n")
+	for i, line := range lines {
+		if (i == (len(lines) - 1)) && (line == "") {
+			// input string ends with a new line
+			indented = append(indented, line)
+		} else {
+			indented = append(indented, indent+line)
+		}
+	}
+
+	return strings.Join(indented, "\n")
+}
+
+//
+// Functions
+//
+
+// wasFuncCall returns true if given expression was a function call
+func (v *evalVisitor) wasFuncCall(node *ast.Expression) bool {
+	// check if expression was tagged as a function call
+	return v.exprFunc[node]
+}
+
+//
+// Visitor interface
+//
+
+// Statements
+
+// VisitProgram implements corresponding Visitor interface method
+func (v *evalVisitor) VisitProgram(node *ast.Program) interface{} {
+	v.at(node)
+
+	buf := new(bytes.Buffer)
+
+	for _, n := range node.Body {
+		if str := Str(n.Accept(v)); str != "" {
+			if _, err := buf.Write([]byte(str)); err != nil {
+				v.errPanic(err)
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+// VisitMustache implements corresponding Visitor interface method
+func (v *evalVisitor) VisitMustache(node *ast.MustacheStatement) interface{} {
+	v.at(node)
+
+	// evaluate expression
+	expr := node.Expression.Accept(v)
+
+	// check if this is a safe string
+	isSafe := isSafeString(expr)
+
+	// get string value
+	str := Str(expr)
+	if !isSafe && !node.Unescaped {
+		// escape html
+		str = Escape(str)
+	}
+
+	return str
+}
+
+// VisitBlock implements corresponding Visitor interface method
+func (v *evalVisitor) VisitBlock(node *ast.BlockStatement) interface{} {
+	v.at(node)
+
+	v.pushBlock(node)
+
+	var result interface{}
+
+	// evaluate expression
+	expr := node.Expression.Accept(v)
+
+	if v.isHelperCall(node.Expression) || v.wasFuncCall(node.Expression) {
+		// it is the responsibility of the helper/function to evaluate block
+		result = expr
+	} else {
+		val := reflect.ValueOf(expr)
+
+		truth, _ := isTrueValue(val)
+		if truth {
+			if node.Program != nil {
+				switch val.Kind() {
+				case reflect.Array, reflect.Slice:
+					concat := ""
+
+					// Array context
+					for i := 0; i < val.Len(); i++ {
+						// Computes new private data frame
+						frame := v.dataFrame.newIterDataFrame(val.Len(), i, nil)
+
+						// Evaluate program
+						concat += v.evalProgram(node.Program, val.Index(i).Interface(), frame, i)
+					}
+
+					result = concat
+				default:
+					// NOT array
+					result = v.evalProgram(node.Program, expr, nil, nil)
+				}
+			}
+		} else if node.Inverse != nil {
+			result, _ = node.Inverse.Accept(v).(string)
+		}
+	}
+
+	v.popBlock()
+
+	return result
+}
+
+// VisitPartial implements corresponding Visitor interface method
+func (v *evalVisitor) VisitPartial(node *ast.PartialStatement) interface{} {
+	v.at(node)
+
+	// partialName: helperName | sexpr
+	name, ok := ast.HelperNameStr(node.Name)
+	if !ok {
+		if subExpr, ok := node.Name.(*ast.SubExpression); ok {
+			name, _ = subExpr.Accept(v).(string)
+		}
+	}
+
+	if name == "" {
+		v.errorf("Unexpected partial name: %q", node.Name)
+	}
+
+	partial := v.findPartial(name)
+	if partial == nil {
+		v.errorf("Partial not found: %s", name)
+	}
+
+	return v.evalPartial(partial, node)
+}
+
+// VisitContent implements corresponding Visitor interface method
+func (v *evalVisitor) VisitContent(node *ast.ContentStatement) interface{} {
+	v.at(node)
+
+	// write content as is
+	return node.Value
+}
+
+// VisitComment implements corresponding Visitor interface method
+func (v *evalVisitor) VisitComment(node *ast.CommentStatement) interface{} {
+	v.at(node)
+
+	// ignore comments
+	return ""
+}
+
+// Expressions
+
+// VisitExpression implements corresponding Visitor interface method
+func (v *evalVisitor) VisitExpression(node *ast.Expression) interface{} {
+	v.at(node)
+
+	var result interface{}
+	done := false
+
+	v.pushExpr(node)
+
+	// helper call
+	if helperName := node.HelperName(); helperName != "" {
+		if helper := v.findHelper(helperName); helper != zero {
+			result = v.callHelper(helperName, helper, node)
+			done = true
+		}
+	}
+
+	if !done {
+		// literal
+		if literal, ok := node.LiteralStr(); ok {
+			if val := v.evalField(v.curCtx(), literal, true); val.IsValid() {
+				result = val.Interface()
+				done = true
+			}
+		}
+	}
+
+	if !done {
+		// field path
+		if path := node.FieldPath(); path != nil {
+			// @todo Find a cleaner way ! Don't break the pattern !
+			// this is an exception to visitor pattern, because we need to pass the info
+			// that this path is at root of current expression
+			if val := v.evalPathExpression(path, true); val != nil {
+				result = val
+			}
+		}
+	}
+
+	v.popExpr()
+
+	return result
+}
+
+// VisitSubExpression implements corresponding Visitor interface method
+func (v *evalVisitor) VisitSubExpression(node *ast.SubExpression) interface{} {
+	v.at(node)
+
+	return node.Expression.Accept(v)
+}
+
+// VisitPath implements corresponding Visitor interface method
+func (v *evalVisitor) VisitPath(node *ast.PathExpression) interface{} {
+	return v.evalPathExpression(node, false)
+}
+
+// Literals
+
+// VisitString implements corresponding Visitor interface method
+func (v *evalVisitor) VisitString(node *ast.StringLiteral) interface{} {
+	v.at(node)
+
+	return node.Value
+}
+
+// VisitBoolean implements corresponding Visitor interface method
+func (v *evalVisitor) VisitBoolean(node *ast.BooleanLiteral) interface{} {
+	v.at(node)
+
+	return node.Value
+}
+
+// VisitNumber implements corresponding Visitor interface method
+func (v *evalVisitor) VisitNumber(node *ast.NumberLiteral) interface{} {
+	v.at(node)
+
+	return node.Number()
+}
+
+// Miscellaneous
+
+// VisitHash implements corresponding Visitor interface method
+func (v *evalVisitor) VisitHash(node *ast.Hash) interface{} {
+	v.at(node)
+
+	result := make(map[string]interface{})
+
+	for _, pair := range node.Pairs {
+		if value := pair.Accept(v); value != nil {
+			result[pair.Key] = value
+		}
+	}
+
+	return result
+}
+
+// VisitHashPair implements corresponding Visitor interface method
+func (v *evalVisitor) VisitHashPair(node *ast.HashPair) interface{} {
+	v.at(node)
+
+	return node.Val.Accept(v)
+}

+ 382 - 0
vendor/github.com/aymerick/raymond/helper.go

@@ -0,0 +1,382 @@
+package raymond
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"sync"
+)
+
+// Options represents the options argument provided to helpers and context functions.
+type Options struct {
+	// evaluation visitor
+	eval *evalVisitor
+
+	// params
+	params []interface{}
+	hash   map[string]interface{}
+}
+
+// helpers stores all globally registered helpers
+var helpers = make(map[string]reflect.Value)
+
+// protects global helpers
+var helpersMutex sync.RWMutex
+
+func init() {
+	// register builtin helpers
+	RegisterHelper("if", ifHelper)
+	RegisterHelper("unless", unlessHelper)
+	RegisterHelper("with", withHelper)
+	RegisterHelper("each", eachHelper)
+	RegisterHelper("log", logHelper)
+	RegisterHelper("lookup", lookupHelper)
+	RegisterHelper("equal", equalHelper)
+}
+
+// RegisterHelper registers a global helper. That helper will be available to all templates.
+func RegisterHelper(name string, helper interface{}) {
+	helpersMutex.Lock()
+	defer helpersMutex.Unlock()
+
+	if helpers[name] != zero {
+		panic(fmt.Errorf("Helper already registered: %s", name))
+	}
+
+	val := reflect.ValueOf(helper)
+	ensureValidHelper(name, val)
+
+	helpers[name] = val
+}
+
+// RegisterHelpers registers several global helpers. Those helpers will be available to all templates.
+func RegisterHelpers(helpers map[string]interface{}) {
+	for name, helper := range helpers {
+		RegisterHelper(name, helper)
+	}
+}
+
+// ensureValidHelper panics if given helper is not valid
+func ensureValidHelper(name string, funcValue reflect.Value) {
+	if funcValue.Kind() != reflect.Func {
+		panic(fmt.Errorf("Helper must be a function: %s", name))
+	}
+
+	funcType := funcValue.Type()
+
+	if funcType.NumOut() != 1 {
+		panic(fmt.Errorf("Helper function must return a string or a SafeString: %s", name))
+	}
+
+	// @todo Check if first returned value is a string, SafeString or interface{} ?
+}
+
+// findHelper finds a globally registered helper
+func findHelper(name string) reflect.Value {
+	helpersMutex.RLock()
+	defer helpersMutex.RUnlock()
+
+	return helpers[name]
+}
+
+// newOptions instanciates a new Options
+func newOptions(eval *evalVisitor, params []interface{}, hash map[string]interface{}) *Options {
+	return &Options{
+		eval:   eval,
+		params: params,
+		hash:   hash,
+	}
+}
+
+// newEmptyOptions instanciates a new empty Options
+func newEmptyOptions(eval *evalVisitor) *Options {
+	return &Options{
+		eval: eval,
+		hash: make(map[string]interface{}),
+	}
+}
+
+//
+// Context Values
+//
+
+// Value returns field value from current context.
+func (options *Options) Value(name string) interface{} {
+	value := options.eval.evalField(options.eval.curCtx(), name, false)
+	if !value.IsValid() {
+		return nil
+	}
+
+	return value.Interface()
+}
+
+// ValueStr returns string representation of field value from current context.
+func (options *Options) ValueStr(name string) string {
+	return Str(options.Value(name))
+}
+
+// Ctx returns current evaluation context.
+func (options *Options) Ctx() interface{} {
+	return options.eval.curCtx().Interface()
+}
+
+//
+// Hash Arguments
+//
+
+// HashProp returns hash property.
+func (options *Options) HashProp(name string) interface{} {
+	return options.hash[name]
+}
+
+// HashStr returns string representation of hash property.
+func (options *Options) HashStr(name string) string {
+	return Str(options.hash[name])
+}
+
+// Hash returns entire hash.
+func (options *Options) Hash() map[string]interface{} {
+	return options.hash
+}
+
+//
+// Parameters
+//
+
+// Param returns parameter at given position.
+func (options *Options) Param(pos int) interface{} {
+	if len(options.params) > pos {
+		return options.params[pos]
+	}
+
+	return nil
+}
+
+// ParamStr returns string representation of parameter at given position.
+func (options *Options) ParamStr(pos int) string {
+	return Str(options.Param(pos))
+}
+
+// Params returns all parameters.
+func (options *Options) Params() []interface{} {
+	return options.params
+}
+
+//
+// Private data
+//
+
+// Data returns private data value.
+func (options *Options) Data(name string) interface{} {
+	return options.eval.dataFrame.Get(name)
+}
+
+// DataStr returns string representation of private data value.
+func (options *Options) DataStr(name string) string {
+	return Str(options.eval.dataFrame.Get(name))
+}
+
+// DataFrame returns current private data frame.
+func (options *Options) DataFrame() *DataFrame {
+	return options.eval.dataFrame
+}
+
+// NewDataFrame instanciates a new data frame that is a copy of current evaluation data frame.
+//
+// Parent of returned data frame is set to current evaluation data frame.
+func (options *Options) NewDataFrame() *DataFrame {
+	return options.eval.dataFrame.Copy()
+}
+
+// newIterDataFrame instanciates a new data frame and set iteration specific vars
+func (options *Options) newIterDataFrame(length int, i int, key interface{}) *DataFrame {
+	return options.eval.dataFrame.newIterDataFrame(length, i, key)
+}
+
+//
+// Evaluation
+//
+
+// evalBlock evaluates block with given context, private data and iteration key
+func (options *Options) evalBlock(ctx interface{}, data *DataFrame, key interface{}) string {
+	result := ""
+
+	if block := options.eval.curBlock(); (block != nil) && (block.Program != nil) {
+		result = options.eval.evalProgram(block.Program, ctx, data, key)
+	}
+
+	return result
+}
+
+// Fn evaluates block with current evaluation context.
+func (options *Options) Fn() string {
+	return options.evalBlock(nil, nil, nil)
+}
+
+// FnCtxData evaluates block with given context and private data frame.
+func (options *Options) FnCtxData(ctx interface{}, data *DataFrame) string {
+	return options.evalBlock(ctx, data, nil)
+}
+
+// FnWith evaluates block with given context.
+func (options *Options) FnWith(ctx interface{}) string {
+	return options.evalBlock(ctx, nil, nil)
+}
+
+// FnData evaluates block with given private data frame.
+func (options *Options) FnData(data *DataFrame) string {
+	return options.evalBlock(nil, data, nil)
+}
+
+// Inverse evaluates "else block".
+func (options *Options) Inverse() string {
+	result := ""
+	if block := options.eval.curBlock(); (block != nil) && (block.Inverse != nil) {
+		result, _ = block.Inverse.Accept(options.eval).(string)
+	}
+
+	return result
+}
+
+// Eval evaluates field for given context.
+func (options *Options) Eval(ctx interface{}, field string) interface{} {
+	if ctx == nil {
+		return nil
+	}
+
+	if field == "" {
+		return nil
+	}
+
+	val := options.eval.evalField(reflect.ValueOf(ctx), field, false)
+	if !val.IsValid() {
+		return nil
+	}
+
+	return val.Interface()
+}
+
+//
+// Misc
+//
+
+// isIncludableZero returns true if 'includeZero' option is set and first param is the number 0
+func (options *Options) isIncludableZero() bool {
+	b, ok := options.HashProp("includeZero").(bool)
+	if ok && b {
+		nb, ok := options.Param(0).(int)
+		if ok && nb == 0 {
+			return true
+		}
+	}
+
+	return false
+}
+
+//
+// Builtin helpers
+//
+
+// #if block helper
+func ifHelper(conditional interface{}, options *Options) interface{} {
+	if options.isIncludableZero() || IsTrue(conditional) {
+		return options.Fn()
+	}
+
+	return options.Inverse()
+}
+
+// #unless block helper
+func unlessHelper(conditional interface{}, options *Options) interface{} {
+	if options.isIncludableZero() || IsTrue(conditional) {
+		return options.Inverse()
+	}
+
+	return options.Fn()
+}
+
+// #with block helper
+func withHelper(context interface{}, options *Options) interface{} {
+	if IsTrue(context) {
+		return options.FnWith(context)
+	}
+
+	return options.Inverse()
+}
+
+// #each block helper
+func eachHelper(context interface{}, options *Options) interface{} {
+	if !IsTrue(context) {
+		return options.Inverse()
+	}
+
+	result := ""
+
+	val := reflect.ValueOf(context)
+	switch val.Kind() {
+	case reflect.Array, reflect.Slice:
+		for i := 0; i < val.Len(); i++ {
+			// computes private data
+			data := options.newIterDataFrame(val.Len(), i, nil)
+
+			// evaluates block
+			result += options.evalBlock(val.Index(i).Interface(), data, i)
+		}
+	case reflect.Map:
+		// note: a go hash is not ordered, so result may vary, this behaviour differs from the JS implementation
+		keys := val.MapKeys()
+		for i := 0; i < len(keys); i++ {
+			key := keys[i].Interface()
+			ctx := val.MapIndex(keys[i]).Interface()
+
+			// computes private data
+			data := options.newIterDataFrame(len(keys), i, key)
+
+			// evaluates block
+			result += options.evalBlock(ctx, data, key)
+		}
+	case reflect.Struct:
+		var exportedFields []int
+
+		// collect exported fields only
+		for i := 0; i < val.NumField(); i++ {
+			if tField := val.Type().Field(i); tField.PkgPath == "" {
+				exportedFields = append(exportedFields, i)
+			}
+		}
+
+		for i, fieldIndex := range exportedFields {
+			key := val.Type().Field(fieldIndex).Name
+			ctx := val.Field(fieldIndex).Interface()
+
+			// computes private data
+			data := options.newIterDataFrame(len(exportedFields), i, key)
+
+			// evaluates block
+			result += options.evalBlock(ctx, data, key)
+		}
+	}
+
+	return result
+}
+
+// #log helper
+func logHelper(message string) interface{} {
+	log.Print(message)
+	return ""
+}
+
+// #lookup helper
+func lookupHelper(obj interface{}, field string, options *Options) interface{} {
+	return Str(options.Eval(obj, field))
+}
+
+// #equal helper
+// Ref: https://github.com/aymerick/raymond/issues/7
+func equalHelper(a interface{}, b interface{}, options *Options) interface{} {
+	if Str(a) == Str(b) {
+		return options.Fn()
+	}
+
+	return ""
+}

+ 639 - 0
vendor/github.com/aymerick/raymond/lexer/lexer.go

@@ -0,0 +1,639 @@
+// Package lexer provides a handlebars tokenizer.
+package lexer
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// References:
+//   - https://github.com/wycats/handlebars.js/blob/master/src/handlebars.l
+//   - https://github.com/golang/go/blob/master/src/text/template/parse/lex.go
+
+const (
+	// Mustaches detection
+	escapedEscapedOpenMustache  = "\\\\{{"
+	escapedOpenMustache         = "\\{{"
+	openMustache                = "{{"
+	closeMustache               = "}}"
+	closeStripMustache          = "~}}"
+	closeUnescapedStripMustache = "}~}}"
+)
+
+const eof = -1
+
+// lexFunc represents a function that returns the next lexer function.
+type lexFunc func(*Lexer) lexFunc
+
+// Lexer is a lexical analyzer.
+type Lexer struct {
+	input    string     // input to scan
+	name     string     // lexer name, used for testing purpose
+	tokens   chan Token // channel of scanned tokens
+	nextFunc lexFunc    // the next function to execute
+
+	pos   int // current byte position in input string
+	line  int // current line position in input string
+	width int // size of last rune scanned from input string
+	start int // start position of the token we are scanning
+
+	// the shameful contextual properties needed because `nextFunc` is not enough
+	closeComment *regexp.Regexp // regexp to scan close of current comment
+	rawBlock     bool           // are we parsing a raw block content ?
+}
+
+var (
+	lookheadChars        = `[\s` + regexp.QuoteMeta("=~}/)|") + `]`
+	literalLookheadChars = `[\s` + regexp.QuoteMeta("~})") + `]`
+
+	// characters not allowed in an identifier
+	unallowedIDChars = " \n\t!\"#%&'()*+,./;<=>@[\\]^`{|}~"
+
+	// regular expressions
+	rID                  = regexp.MustCompile(`^[^` + regexp.QuoteMeta(unallowedIDChars) + `]+`)
+	rDotID               = regexp.MustCompile(`^\.` + lookheadChars)
+	rTrue                = regexp.MustCompile(`^true` + literalLookheadChars)
+	rFalse               = regexp.MustCompile(`^false` + literalLookheadChars)
+	rOpenRaw             = regexp.MustCompile(`^\{\{\{\{`)
+	rCloseRaw            = regexp.MustCompile(`^\}\}\}\}`)
+	rOpenEndRaw          = regexp.MustCompile(`^\{\{\{\{/`)
+	rOpenEndRawLookAhead = regexp.MustCompile(`\{\{\{\{/`)
+	rOpenUnescaped       = regexp.MustCompile(`^\{\{~?\{`)
+	rCloseUnescaped      = regexp.MustCompile(`^\}~?\}\}`)
+	rOpenBlock           = regexp.MustCompile(`^\{\{~?#`)
+	rOpenEndBlock        = regexp.MustCompile(`^\{\{~?/`)
+	rOpenPartial         = regexp.MustCompile(`^\{\{~?>`)
+	// {{^}} or {{else}}
+	rInverse          = regexp.MustCompile(`^(\{\{~?\^\s*~?\}\}|\{\{~?\s*else\s*~?\}\})`)
+	rOpenInverse      = regexp.MustCompile(`^\{\{~?\^`)
+	rOpenInverseChain = regexp.MustCompile(`^\{\{~?\s*else`)
+	// {{ or {{&
+	rOpen            = regexp.MustCompile(`^\{\{~?&?`)
+	rClose           = regexp.MustCompile(`^~?\}\}`)
+	rOpenBlockParams = regexp.MustCompile(`^as\s+\|`)
+	// {{!--  ... --}}
+	rOpenCommentDash  = regexp.MustCompile(`^\{\{~?!--\s*`)
+	rCloseCommentDash = regexp.MustCompile(`^\s*--~?\}\}`)
+	// {{! ... }}
+	rOpenComment  = regexp.MustCompile(`^\{\{~?!\s*`)
+	rCloseComment = regexp.MustCompile(`^\s*~?\}\}`)
+)
+
+// Scan scans given input.
+//
+// Tokens can then be fetched sequentially thanks to NextToken() function on returned lexer.
+func Scan(input string) *Lexer {
+	return scanWithName(input, "")
+}
+
+// scanWithName scans given input, with a name used for testing
+//
+// Tokens can then be fetched sequentially thanks to NextToken() function on returned lexer.
+func scanWithName(input string, name string) *Lexer {
+	result := &Lexer{
+		input:  input,
+		name:   name,
+		tokens: make(chan Token),
+		line:   1,
+	}
+
+	go result.run()
+
+	return result
+}
+
+// Collect scans and collect all tokens.
+//
+// This should be used for debugging purpose only. You should use Scan() and lexer.NextToken() functions instead.
+func Collect(input string) []Token {
+	var result []Token
+
+	l := Scan(input)
+	for {
+		token := l.NextToken()
+		result = append(result, token)
+
+		if token.Kind == TokenEOF || token.Kind == TokenError {
+			break
+		}
+	}
+
+	return result
+}
+
+// NextToken returns the next scanned token.
+func (l *Lexer) NextToken() Token {
+	result := <-l.tokens
+
+	return result
+}
+
+// run starts lexical analysis
+func (l *Lexer) run() {
+	for l.nextFunc = lexContent; l.nextFunc != nil; {
+		l.nextFunc = l.nextFunc(l)
+	}
+}
+
+// next returns next character from input, or eof if there is nothing left to scan
+func (l *Lexer) next() rune {
+	if l.pos >= len(l.input) {
+		l.width = 0
+		return eof
+	}
+
+	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+	l.width = w
+	l.pos += l.width
+
+	return r
+}
+
+func (l *Lexer) produce(kind TokenKind, val string) {
+	l.tokens <- Token{kind, val, l.start, l.line}
+
+	// scanning a new token
+	l.start = l.pos
+
+	// update line number
+	l.line += strings.Count(val, "\n")
+}
+
+// emit emits a new scanned token
+func (l *Lexer) emit(kind TokenKind) {
+	l.produce(kind, l.input[l.start:l.pos])
+}
+
+// emitContent emits scanned content
+func (l *Lexer) emitContent() {
+	if l.pos > l.start {
+		l.emit(TokenContent)
+	}
+}
+
+// emitString emits a scanned string
+func (l *Lexer) emitString(delimiter rune) {
+	str := l.input[l.start:l.pos]
+
+	// replace escaped delimiters
+	str = strings.Replace(str, "\\"+string(delimiter), string(delimiter), -1)
+
+	l.produce(TokenString, str)
+}
+
+// peek returns but does not consume the next character in the input
+func (l *Lexer) peek() rune {
+	r := l.next()
+	l.backup()
+	return r
+}
+
+// backup steps back one character
+//
+// WARNING: Can only be called once per call of next
+func (l *Lexer) backup() {
+	l.pos -= l.width
+}
+
+// ignore skips all characters that have been scanned up to current position
+func (l *Lexer) ignore() {
+	l.start = l.pos
+}
+
+// accept scans the next character if it is included in given string
+func (l *Lexer) accept(valid string) bool {
+	if strings.IndexRune(valid, l.next()) >= 0 {
+		return true
+	}
+
+	l.backup()
+
+	return false
+}
+
+// acceptRun scans all following characters that are part of given string
+func (l *Lexer) acceptRun(valid string) {
+	for strings.IndexRune(valid, l.next()) >= 0 {
+	}
+
+	l.backup()
+}
+
+// errorf emits an error token
+func (l *Lexer) errorf(format string, args ...interface{}) lexFunc {
+	l.tokens <- Token{TokenError, fmt.Sprintf(format, args...), l.start, l.line}
+	return nil
+}
+
+// isString returns true if content at current scanning position starts with given string
+func (l *Lexer) isString(str string) bool {
+	return strings.HasPrefix(l.input[l.pos:], str)
+}
+
+// findRegexp returns the first string from current scanning position that matches given regular expression
+func (l *Lexer) findRegexp(r *regexp.Regexp) string {
+	return r.FindString(l.input[l.pos:])
+}
+
+// indexRegexp returns the index of the first string from current scanning position that matches given regular expression
+//
+// It returns -1 if not found
+func (l *Lexer) indexRegexp(r *regexp.Regexp) int {
+	loc := r.FindStringIndex(l.input[l.pos:])
+	if loc == nil {
+		return -1
+	}
+	return loc[0]
+}
+
+// lexContent scans content (ie: not between mustaches)
+func lexContent(l *Lexer) lexFunc {
+	var next lexFunc
+
+	if l.rawBlock {
+		if i := l.indexRegexp(rOpenEndRawLookAhead); i != -1 {
+			// {{{{/
+			l.rawBlock = false
+			l.pos += i
+
+			next = lexOpenMustache
+		} else {
+			return l.errorf("Unclosed raw block")
+		}
+	} else if l.isString(escapedEscapedOpenMustache) {
+		// \\{{
+
+		// emit content with only one escaped escape
+		l.next()
+		l.emitContent()
+
+		// ignore second escaped escape
+		l.next()
+		l.ignore()
+
+		next = lexContent
+	} else if l.isString(escapedOpenMustache) {
+		// \{{
+		next = lexEscapedOpenMustache
+	} else if str := l.findRegexp(rOpenCommentDash); str != "" {
+		// {{!--
+		l.closeComment = rCloseCommentDash
+
+		next = lexComment
+	} else if str := l.findRegexp(rOpenComment); str != "" {
+		// {{!
+		l.closeComment = rCloseComment
+
+		next = lexComment
+	} else if l.isString(openMustache) {
+		// {{
+		next = lexOpenMustache
+	}
+
+	if next != nil {
+		// emit scanned content
+		l.emitContent()
+
+		// scan next token
+		return next
+	}
+
+	// scan next rune
+	if l.next() == eof {
+		// emit scanned content
+		l.emitContent()
+
+		// this is over
+		l.emit(TokenEOF)
+		return nil
+	}
+
+	// continue content scanning
+	return lexContent
+}
+
+// lexEscapedOpenMustache scans \{{
+func lexEscapedOpenMustache(l *Lexer) lexFunc {
+	// ignore escape character
+	l.next()
+	l.ignore()
+
+	// scan mustaches
+	for l.peek() == '{' {
+		l.next()
+	}
+
+	return lexContent
+}
+
+// lexOpenMustache scans {{
+func lexOpenMustache(l *Lexer) lexFunc {
+	var str string
+	var tok TokenKind
+
+	nextFunc := lexExpression
+
+	if str = l.findRegexp(rOpenEndRaw); str != "" {
+		tok = TokenOpenEndRawBlock
+	} else if str = l.findRegexp(rOpenRaw); str != "" {
+		tok = TokenOpenRawBlock
+		l.rawBlock = true
+	} else if str = l.findRegexp(rOpenUnescaped); str != "" {
+		tok = TokenOpenUnescaped
+	} else if str = l.findRegexp(rOpenBlock); str != "" {
+		tok = TokenOpenBlock
+	} else if str = l.findRegexp(rOpenEndBlock); str != "" {
+		tok = TokenOpenEndBlock
+	} else if str = l.findRegexp(rOpenPartial); str != "" {
+		tok = TokenOpenPartial
+	} else if str = l.findRegexp(rInverse); str != "" {
+		tok = TokenInverse
+		nextFunc = lexContent
+	} else if str = l.findRegexp(rOpenInverse); str != "" {
+		tok = TokenOpenInverse
+	} else if str = l.findRegexp(rOpenInverseChain); str != "" {
+		tok = TokenOpenInverseChain
+	} else if str = l.findRegexp(rOpen); str != "" {
+		tok = TokenOpen
+	} else {
+		// this is rotten
+		panic("Current pos MUST be an opening mustache")
+	}
+
+	l.pos += len(str)
+	l.emit(tok)
+
+	return nextFunc
+}
+
+// lexCloseMustache scans }} or ~}}
+func lexCloseMustache(l *Lexer) lexFunc {
+	var str string
+	var tok TokenKind
+
+	if str = l.findRegexp(rCloseRaw); str != "" {
+		// }}}}
+		tok = TokenCloseRawBlock
+	} else if str = l.findRegexp(rCloseUnescaped); str != "" {
+		// }}}
+		tok = TokenCloseUnescaped
+	} else if str = l.findRegexp(rClose); str != "" {
+		// }}
+		tok = TokenClose
+	} else {
+		// this is rotten
+		panic("Current pos MUST be a closing mustache")
+	}
+
+	l.pos += len(str)
+	l.emit(tok)
+
+	return lexContent
+}
+
+// lexExpression scans inside mustaches
+func lexExpression(l *Lexer) lexFunc {
+	// search close mustache delimiter
+	if l.isString(closeMustache) || l.isString(closeStripMustache) || l.isString(closeUnescapedStripMustache) {
+		return lexCloseMustache
+	}
+
+	// search some patterns before advancing scanning position
+
+	// "as |"
+	if str := l.findRegexp(rOpenBlockParams); str != "" {
+		l.pos += len(str)
+		l.emit(TokenOpenBlockParams)
+		return lexExpression
+	}
+
+	// ..
+	if l.isString("..") {
+		l.pos += len("..")
+		l.emit(TokenID)
+		return lexExpression
+	}
+
+	// .
+	if str := l.findRegexp(rDotID); str != "" {
+		l.pos += len(".")
+		l.emit(TokenID)
+		return lexExpression
+	}
+
+	// true
+	if str := l.findRegexp(rTrue); str != "" {
+		l.pos += len("true")
+		l.emit(TokenBoolean)
+		return lexExpression
+	}
+
+	// false
+	if str := l.findRegexp(rFalse); str != "" {
+		l.pos += len("false")
+		l.emit(TokenBoolean)
+		return lexExpression
+	}
+
+	// let's scan next character
+	switch r := l.next(); {
+	case r == eof:
+		return l.errorf("Unclosed expression")
+	case isIgnorable(r):
+		return lexIgnorable
+	case r == '(':
+		l.emit(TokenOpenSexpr)
+	case r == ')':
+		l.emit(TokenCloseSexpr)
+	case r == '=':
+		l.emit(TokenEquals)
+	case r == '@':
+		l.emit(TokenData)
+	case r == '"' || r == '\'':
+		l.backup()
+		return lexString
+	case r == '/' || r == '.':
+		l.emit(TokenSep)
+	case r == '|':
+		l.emit(TokenCloseBlockParams)
+	case r == '+' || r == '-' || (r >= '0' && r <= '9'):
+		l.backup()
+		return lexNumber
+	case r == '[':
+		return lexPathLiteral
+	case strings.IndexRune(unallowedIDChars, r) < 0:
+		l.backup()
+		return lexIdentifier
+	default:
+		return l.errorf("Unexpected character in expression: '%c'", r)
+	}
+
+	return lexExpression
+}
+
+// lexComment scans {{!-- or {{!
+func lexComment(l *Lexer) lexFunc {
+	if str := l.findRegexp(l.closeComment); str != "" {
+		l.pos += len(str)
+		l.emit(TokenComment)
+
+		return lexContent
+	}
+
+	if r := l.next(); r == eof {
+		return l.errorf("Unclosed comment")
+	}
+
+	return lexComment
+}
+
+// lexIgnorable scans all following ignorable characters
+func lexIgnorable(l *Lexer) lexFunc {
+	for isIgnorable(l.peek()) {
+		l.next()
+	}
+	l.ignore()
+
+	return lexExpression
+}
+
+// lexString scans a string
+func lexString(l *Lexer) lexFunc {
+	// get string delimiter
+	delim := l.next()
+	var prev rune
+
+	// ignore delimiter
+	l.ignore()
+
+	for {
+		r := l.next()
+		if r == eof || r == '\n' {
+			return l.errorf("Unterminated string")
+		}
+
+		if (r == delim) && (prev != '\\') {
+			break
+		}
+
+		prev = r
+	}
+
+	// remove end delimiter
+	l.backup()
+
+	// emit string
+	l.emitString(delim)
+
+	// skip end delimiter
+	l.next()
+	l.ignore()
+
+	return lexExpression
+}
+
+// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
+// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
+// and "089" - but when it's wrong the input is invalid and the parser (via
+// strconv) will notice.
+//
+// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/parse/lex.go
+func lexNumber(l *Lexer) lexFunc {
+	if !l.scanNumber() {
+		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+	}
+	if sign := l.peek(); sign == '+' || sign == '-' {
+		// Complex: 1+2i. No spaces, must end in 'i'.
+		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
+			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+		}
+		l.emit(TokenNumber)
+	} else {
+		l.emit(TokenNumber)
+	}
+	return lexExpression
+}
+
+// scanNumber scans a number
+//
+// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/parse/lex.go
+func (l *Lexer) scanNumber() bool {
+	// Optional leading sign.
+	l.accept("+-")
+
+	// Is it hex?
+	digits := "0123456789"
+
+	if l.accept("0") && l.accept("xX") {
+		digits = "0123456789abcdefABCDEF"
+	}
+
+	l.acceptRun(digits)
+
+	if l.accept(".") {
+		l.acceptRun(digits)
+	}
+
+	if l.accept("eE") {
+		l.accept("+-")
+		l.acceptRun("0123456789")
+	}
+
+	// Is it imaginary?
+	l.accept("i")
+
+	// Next thing mustn't be alphanumeric.
+	if isAlphaNumeric(l.peek()) {
+		l.next()
+		return false
+	}
+
+	return true
+}
+
+// lexIdentifier scans an ID
+func lexIdentifier(l *Lexer) lexFunc {
+	str := l.findRegexp(rID)
+	if len(str) == 0 {
+		// this is rotten
+		panic("Identifier expected")
+	}
+
+	l.pos += len(str)
+	l.emit(TokenID)
+
+	return lexExpression
+}
+
+// lexPathLiteral scans an [ID]
+func lexPathLiteral(l *Lexer) lexFunc {
+	for {
+		r := l.next()
+		if r == eof || r == '\n' {
+			return l.errorf("Unterminated path literal")
+		}
+
+		if r == ']' {
+			break
+		}
+	}
+
+	l.emit(TokenID)
+
+	return lexExpression
+}
+
+// isIgnorable returns true if given character is ignorable (ie. whitespace or line feed)
+func isIgnorable(r rune) bool {
+	return r == ' ' || r == '\t' || r == '\n'
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+//
+// NOTE borrowed from https://github.com/golang/go/tree/master/src/text/template/parse/lex.go
+func isAlphaNumeric(r rune) bool {
+	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}

+ 183 - 0
vendor/github.com/aymerick/raymond/lexer/token.go

@@ -0,0 +1,183 @@
+package lexer
+
+import "fmt"
+
+const (
+	// TokenError represents an error
+	TokenError TokenKind = iota
+
+	// TokenEOF represents an End Of File
+	TokenEOF
+
+	//
+	// Mustache delimiters
+	//
+
+	// TokenOpen is the OPEN token
+	TokenOpen
+
+	// TokenClose is the CLOSE token
+	TokenClose
+
+	// TokenOpenRawBlock is the OPEN_RAW_BLOCK token
+	TokenOpenRawBlock
+
+	// TokenCloseRawBlock is the CLOSE_RAW_BLOCK token
+	TokenCloseRawBlock
+
+	// TokenOpenEndRawBlock is the END_RAW_BLOCK token
+	TokenOpenEndRawBlock
+
+	// TokenOpenUnescaped is the OPEN_UNESCAPED token
+	TokenOpenUnescaped
+
+	// TokenCloseUnescaped is the CLOSE_UNESCAPED token
+	TokenCloseUnescaped
+
+	// TokenOpenBlock is the OPEN_BLOCK token
+	TokenOpenBlock
+
+	// TokenOpenEndBlock is the OPEN_ENDBLOCK token
+	TokenOpenEndBlock
+
+	// TokenInverse is the INVERSE token
+	TokenInverse
+
+	// TokenOpenInverse is the OPEN_INVERSE token
+	TokenOpenInverse
+
+	// TokenOpenInverseChain is the OPEN_INVERSE_CHAIN token
+	TokenOpenInverseChain
+
+	// TokenOpenPartial is the OPEN_PARTIAL token
+	TokenOpenPartial
+
+	// TokenComment is the COMMENT token
+	TokenComment
+
+	//
+	// Inside mustaches
+	//
+
+	// TokenOpenSexpr is the OPEN_SEXPR token
+	TokenOpenSexpr
+
+	// TokenCloseSexpr is the CLOSE_SEXPR token
+	TokenCloseSexpr
+
+	// TokenEquals is the EQUALS token
+	TokenEquals
+
+	// TokenData is the DATA token
+	TokenData
+
+	// TokenSep is the SEP token
+	TokenSep
+
+	// TokenOpenBlockParams is the OPEN_BLOCK_PARAMS token
+	TokenOpenBlockParams
+
+	// TokenCloseBlockParams is the CLOSE_BLOCK_PARAMS token
+	TokenCloseBlockParams
+
+	//
+	// Tokens with content
+	//
+
+	// TokenContent is the CONTENT token
+	TokenContent
+
+	// TokenID is the ID token
+	TokenID
+
+	// TokenString is the STRING token
+	TokenString
+
+	// TokenNumber is the NUMBER token
+	TokenNumber
+
+	// TokenBoolean is the BOOLEAN token
+	TokenBoolean
+)
+
+const (
+	// Option to generate token position in its string representation
+	dumpTokenPos = false
+
+	// Option to generate values for all token kinds for their string representations
+	dumpAllTokensVal = true
+)
+
+// TokenKind represents a Token type.
+type TokenKind int
+
+// Token represents a scanned token.
+type Token struct {
+	Kind TokenKind // Token kind
+	Val  string    // Token value
+
+	Pos  int // Byte position in input string
+	Line int // Line number in input string
+}
+
+// tokenName permits to display token name given token type
+var tokenName = map[TokenKind]string{
+	TokenError:            "Error",
+	TokenEOF:              "EOF",
+	TokenContent:          "Content",
+	TokenComment:          "Comment",
+	TokenOpen:             "Open",
+	TokenClose:            "Close",
+	TokenOpenUnescaped:    "OpenUnescaped",
+	TokenCloseUnescaped:   "CloseUnescaped",
+	TokenOpenBlock:        "OpenBlock",
+	TokenOpenEndBlock:     "OpenEndBlock",
+	TokenOpenRawBlock:     "OpenRawBlock",
+	TokenCloseRawBlock:    "CloseRawBlock",
+	TokenOpenEndRawBlock:  "OpenEndRawBlock",
+	TokenOpenBlockParams:  "OpenBlockParams",
+	TokenCloseBlockParams: "CloseBlockParams",
+	TokenInverse:          "Inverse",
+	TokenOpenInverse:      "OpenInverse",
+	TokenOpenInverseChain: "OpenInverseChain",
+	TokenOpenPartial:      "OpenPartial",
+	TokenOpenSexpr:        "OpenSexpr",
+	TokenCloseSexpr:       "CloseSexpr",
+	TokenID:               "ID",
+	TokenEquals:           "Equals",
+	TokenString:           "String",
+	TokenNumber:           "Number",
+	TokenBoolean:          "Boolean",
+	TokenData:             "Data",
+	TokenSep:              "Sep",
+}
+
+// String returns the token kind string representation for debugging.
+func (k TokenKind) String() string {
+	s := tokenName[k]
+	if s == "" {
+		return fmt.Sprintf("Token-%d", int(k))
+	}
+	return s
+}
+
+// String returns the token string representation for debugging.
+func (t Token) String() string {
+	result := ""
+
+	if dumpTokenPos {
+		result += fmt.Sprintf("%d:", t.Pos)
+	}
+
+	result += fmt.Sprintf("%s", t.Kind)
+
+	if (dumpAllTokensVal || (t.Kind >= TokenContent)) && len(t.Val) > 0 {
+		if len(t.Val) > 100 {
+			result += fmt.Sprintf("{%.20q...}", t.Val)
+		} else {
+			result += fmt.Sprintf("{%q}", t.Val)
+		}
+	}
+
+	return result
+}

+ 846 - 0
vendor/github.com/aymerick/raymond/parser/parser.go

@@ -0,0 +1,846 @@
+// Package parser provides a handlebars syntax analyser. It consumes the tokens provided by the lexer to build an AST.
+package parser
+
+import (
+	"fmt"
+	"regexp"
+	"runtime"
+	"strconv"
+
+	"github.com/aymerick/raymond/ast"
+	"github.com/aymerick/raymond/lexer"
+)
+
+// References:
+//   - https://github.com/wycats/handlebars.js/blob/master/src/handlebars.yy
+//   - https://github.com/golang/go/blob/master/src/text/template/parse/parse.go
+
+// parser is a syntax analyzer.
+type parser struct {
+	// Lexer
+	lex *lexer.Lexer
+
+	// Root node
+	root ast.Node
+
+	// Tokens parsed but not consumed yet
+	tokens []*lexer.Token
+
+	// All tokens have been retrieved from lexer
+	lexOver bool
+}
+
+var (
+	rOpenComment  = regexp.MustCompile(`^\{\{~?!-?-?`)
+	rCloseComment = regexp.MustCompile(`-?-?~?\}\}$`)
+	rOpenAmp      = regexp.MustCompile(`^\{\{~?&`)
+)
+
+// new instantiates a new parser
+func new(input string) *parser {
+	return &parser{
+		lex: lexer.Scan(input),
+	}
+}
+
+// Parse analyzes given input and returns the AST root node.
+func Parse(input string) (result *ast.Program, err error) {
+	// recover error
+	defer errRecover(&err)
+
+	parser := new(input)
+
+	// parse
+	result = parser.parseProgram()
+
+	// check last token
+	token := parser.shift()
+	if token.Kind != lexer.TokenEOF {
+		// Parsing ended before EOF
+		errToken(token, "Syntax error")
+	}
+
+	// fix whitespaces
+	processWhitespaces(result)
+
+	// named returned values
+	return
+}
+
+// errRecover recovers parsing panic
+func errRecover(errp *error) {
+	e := recover()
+	if e != nil {
+		switch err := e.(type) {
+		case runtime.Error:
+			panic(e)
+		case error:
+			*errp = err
+		default:
+			panic(e)
+		}
+	}
+}
+
+// errPanic panics
+func errPanic(err error, line int) {
+	panic(fmt.Errorf("Parse error on line %d:\n%s", line, err))
+}
+
+// errNode panics with given node infos
+func errNode(node ast.Node, msg string) {
+	errPanic(fmt.Errorf("%s\nNode: %s", msg, node), node.Location().Line)
+}
+
+// errToken panics with given token infos
+func errToken(tok *lexer.Token, msg string) {
+	errPanic(fmt.Errorf("%s\nToken: %s", msg, tok), tok.Line)
+}
+
+// errExpected panics because of an unexpected token kind
+func errExpected(expect lexer.TokenKind, tok *lexer.Token) {
+	errPanic(fmt.Errorf("Expecting %s, got: '%s'", expect, tok), tok.Line)
+}
+
+// program : statement*
+func (p *parser) parseProgram() *ast.Program {
+	result := ast.NewProgram(p.next().Pos, p.next().Line)
+
+	for p.isStatement() {
+		result.AddStatement(p.parseStatement())
+	}
+
+	return result
+}
+
+// statement : mustache | block | rawBlock | partial | content | COMMENT
+func (p *parser) parseStatement() ast.Node {
+	var result ast.Node
+
+	tok := p.next()
+
+	switch tok.Kind {
+	case lexer.TokenOpen, lexer.TokenOpenUnescaped:
+		// mustache
+		result = p.parseMustache()
+	case lexer.TokenOpenBlock:
+		// block
+		result = p.parseBlock()
+	case lexer.TokenOpenInverse:
+		// block
+		result = p.parseInverse()
+	case lexer.TokenOpenRawBlock:
+		// rawBlock
+		result = p.parseRawBlock()
+	case lexer.TokenOpenPartial:
+		// partial
+		result = p.parsePartial()
+	case lexer.TokenContent:
+		// content
+		result = p.parseContent()
+	case lexer.TokenComment:
+		// COMMENT
+		result = p.parseComment()
+	}
+
+	return result
+}
+
+// isStatement returns true if next token starts a statement
+func (p *parser) isStatement() bool {
+	if !p.have(1) {
+		return false
+	}
+
+	switch p.next().Kind {
+	case lexer.TokenOpen, lexer.TokenOpenUnescaped, lexer.TokenOpenBlock,
+		lexer.TokenOpenInverse, lexer.TokenOpenRawBlock, lexer.TokenOpenPartial,
+		lexer.TokenContent, lexer.TokenComment:
+		return true
+	}
+
+	return false
+}
+
+// content : CONTENT
+func (p *parser) parseContent() *ast.ContentStatement {
+	// CONTENT
+	tok := p.shift()
+	if tok.Kind != lexer.TokenContent {
+		// @todo This check can be removed if content is optional in a raw block
+		errExpected(lexer.TokenContent, tok)
+	}
+
+	return ast.NewContentStatement(tok.Pos, tok.Line, tok.Val)
+}
+
+// COMMENT
+func (p *parser) parseComment() *ast.CommentStatement {
+	// COMMENT
+	tok := p.shift()
+
+	value := rOpenComment.ReplaceAllString(tok.Val, "")
+	value = rCloseComment.ReplaceAllString(value, "")
+
+	result := ast.NewCommentStatement(tok.Pos, tok.Line, value)
+	result.Strip = ast.NewStripForStr(tok.Val)
+
+	return result
+}
+
+// param* hash?
+func (p *parser) parseExpressionParamsHash() ([]ast.Node, *ast.Hash) {
+	var params []ast.Node
+	var hash *ast.Hash
+
+	// params*
+	if p.isParam() {
+		params = p.parseParams()
+	}
+
+	// hash?
+	if p.isHashSegment() {
+		hash = p.parseHash()
+	}
+
+	return params, hash
+}
+
+// helperName param* hash?
+func (p *parser) parseExpression(tok *lexer.Token) *ast.Expression {
+	result := ast.NewExpression(tok.Pos, tok.Line)
+
+	// helperName
+	result.Path = p.parseHelperName()
+
+	// param* hash?
+	result.Params, result.Hash = p.parseExpressionParamsHash()
+
+	return result
+}
+
+// rawBlock : openRawBlock content endRawBlock
+// openRawBlock : OPEN_RAW_BLOCK helperName param* hash? CLOSE_RAW_BLOCK
+// endRawBlock : OPEN_END_RAW_BLOCK helperName CLOSE_RAW_BLOCK
+func (p *parser) parseRawBlock() *ast.BlockStatement {
+	// OPEN_RAW_BLOCK
+	tok := p.shift()
+
+	result := ast.NewBlockStatement(tok.Pos, tok.Line)
+
+	// helperName param* hash?
+	result.Expression = p.parseExpression(tok)
+
+	openName := result.Expression.Canonical()
+
+	// CLOSE_RAW_BLOCK
+	tok = p.shift()
+	if tok.Kind != lexer.TokenCloseRawBlock {
+		errExpected(lexer.TokenCloseRawBlock, tok)
+	}
+
+	// content
+	// @todo Is content mandatory in a raw block ?
+	content := p.parseContent()
+
+	program := ast.NewProgram(tok.Pos, tok.Line)
+	program.AddStatement(content)
+
+	result.Program = program
+
+	// OPEN_END_RAW_BLOCK
+	tok = p.shift()
+	if tok.Kind != lexer.TokenOpenEndRawBlock {
+		// should never happen as it is caught by lexer
+		errExpected(lexer.TokenOpenEndRawBlock, tok)
+	}
+
+	// helperName
+	endID := p.parseHelperName()
+
+	closeName, ok := ast.HelperNameStr(endID)
+	if !ok {
+		errNode(endID, "Erroneous closing expression")
+	}
+
+	if openName != closeName {
+		errNode(endID, fmt.Sprintf("%s doesn't match %s", openName, closeName))
+	}
+
+	// CLOSE_RAW_BLOCK
+	tok = p.shift()
+	if tok.Kind != lexer.TokenCloseRawBlock {
+		errExpected(lexer.TokenCloseRawBlock, tok)
+	}
+
+	return result
+}
+
+// block : openBlock program inverseChain? closeBlock
+func (p *parser) parseBlock() *ast.BlockStatement {
+	// openBlock
+	result, blockParams := p.parseOpenBlock()
+
+	// program
+	program := p.parseProgram()
+	program.BlockParams = blockParams
+	result.Program = program
+
+	// inverseChain?
+	if p.isInverseChain() {
+		result.Inverse = p.parseInverseChain()
+	}
+
+	// closeBlock
+	p.parseCloseBlock(result)
+
+	setBlockInverseStrip(result)
+
+	return result
+}
+
+// setBlockInverseStrip is called when parsing `block` (openBlock | openInverse) and `inverseChain`
+//
+// TODO: This was totally cargo culted ! CHECK THAT !
+//
+// cf. prepareBlock() in:
+//   https://github.com/wycats/handlebars.js/blob/master/lib/handlebars/compiler/helper.js
+func setBlockInverseStrip(block *ast.BlockStatement) {
+	if block.Inverse == nil {
+		return
+	}
+
+	if block.Inverse.Chained {
+		b, _ := block.Inverse.Body[0].(*ast.BlockStatement)
+		b.CloseStrip = block.CloseStrip
+	}
+
+	block.InverseStrip = block.Inverse.Strip
+}
+
+// block : openInverse program inverseAndProgram? closeBlock
+func (p *parser) parseInverse() *ast.BlockStatement {
+	// openInverse
+	result, blockParams := p.parseOpenBlock()
+
+	// program
+	program := p.parseProgram()
+
+	program.BlockParams = blockParams
+	result.Inverse = program
+
+	// inverseAndProgram?
+	if p.isInverse() {
+		result.Program = p.parseInverseAndProgram()
+	}
+
+	// closeBlock
+	p.parseCloseBlock(result)
+
+	setBlockInverseStrip(result)
+
+	return result
+}
+
+// helperName param* hash? blockParams?
+func (p *parser) parseOpenBlockExpression(tok *lexer.Token) (*ast.BlockStatement, []string) {
+	var blockParams []string
+
+	result := ast.NewBlockStatement(tok.Pos, tok.Line)
+
+	// helperName param* hash?
+	result.Expression = p.parseExpression(tok)
+
+	// blockParams?
+	if p.isBlockParams() {
+		blockParams = p.parseBlockParams()
+	}
+
+	// named returned values
+	return result, blockParams
+}
+
+// inverseChain : openInverseChain program inverseChain?
+//              | inverseAndProgram
+func (p *parser) parseInverseChain() *ast.Program {
+	if p.isInverse() {
+		// inverseAndProgram
+		return p.parseInverseAndProgram()
+	}
+
+	result := ast.NewProgram(p.next().Pos, p.next().Line)
+
+	// openInverseChain
+	block, blockParams := p.parseOpenBlock()
+
+	// program
+	program := p.parseProgram()
+
+	program.BlockParams = blockParams
+	block.Program = program
+
+	// inverseChain?
+	if p.isInverseChain() {
+		block.Inverse = p.parseInverseChain()
+	}
+
+	setBlockInverseStrip(block)
+
+	result.Chained = true
+	result.AddStatement(block)
+
+	return result
+}
+
+// Returns true if current token starts an inverse chain
+func (p *parser) isInverseChain() bool {
+	return p.isOpenInverseChain() || p.isInverse()
+}
+
+// inverseAndProgram : INVERSE program
+func (p *parser) parseInverseAndProgram() *ast.Program {
+	// INVERSE
+	tok := p.shift()
+
+	// program
+	result := p.parseProgram()
+	result.Strip = ast.NewStripForStr(tok.Val)
+
+	return result
+}
+
+// openBlock : OPEN_BLOCK helperName param* hash? blockParams? CLOSE
+// openInverse : OPEN_INVERSE helperName param* hash? blockParams? CLOSE
+// openInverseChain: OPEN_INVERSE_CHAIN helperName param* hash? blockParams? CLOSE
+func (p *parser) parseOpenBlock() (*ast.BlockStatement, []string) {
+	// OPEN_BLOCK | OPEN_INVERSE | OPEN_INVERSE_CHAIN
+	tok := p.shift()
+
+	// helperName param* hash? blockParams?
+	result, blockParams := p.parseOpenBlockExpression(tok)
+
+	// CLOSE
+	tokClose := p.shift()
+	if tokClose.Kind != lexer.TokenClose {
+		errExpected(lexer.TokenClose, tokClose)
+	}
+
+	result.OpenStrip = ast.NewStrip(tok.Val, tokClose.Val)
+
+	// named returned values
+	return result, blockParams
+}
+
+// closeBlock : OPEN_ENDBLOCK helperName CLOSE
+func (p *parser) parseCloseBlock(block *ast.BlockStatement) {
+	// OPEN_ENDBLOCK
+	tok := p.shift()
+	if tok.Kind != lexer.TokenOpenEndBlock {
+		errExpected(lexer.TokenOpenEndBlock, tok)
+	}
+
+	// helperName
+	endID := p.parseHelperName()
+
+	closeName, ok := ast.HelperNameStr(endID)
+	if !ok {
+		errNode(endID, "Erroneous closing expression")
+	}
+
+	openName := block.Expression.Canonical()
+	if openName != closeName {
+		errNode(endID, fmt.Sprintf("%s doesn't match %s", openName, closeName))
+	}
+
+	// CLOSE
+	tokClose := p.shift()
+	if tokClose.Kind != lexer.TokenClose {
+		errExpected(lexer.TokenClose, tokClose)
+	}
+
+	block.CloseStrip = ast.NewStrip(tok.Val, tokClose.Val)
+}
+
+// mustache : OPEN helperName param* hash? CLOSE
+//          | OPEN_UNESCAPED helperName param* hash? CLOSE_UNESCAPED
+func (p *parser) parseMustache() *ast.MustacheStatement {
+	// OPEN | OPEN_UNESCAPED
+	tok := p.shift()
+
+	closeToken := lexer.TokenClose
+	if tok.Kind == lexer.TokenOpenUnescaped {
+		closeToken = lexer.TokenCloseUnescaped
+	}
+
+	unescaped := false
+	if (tok.Kind == lexer.TokenOpenUnescaped) || (rOpenAmp.MatchString(tok.Val)) {
+		unescaped = true
+	}
+
+	result := ast.NewMustacheStatement(tok.Pos, tok.Line, unescaped)
+
+	// helperName param* hash?
+	result.Expression = p.parseExpression(tok)
+
+	// CLOSE | CLOSE_UNESCAPED
+	tokClose := p.shift()
+	if tokClose.Kind != closeToken {
+		errExpected(closeToken, tokClose)
+	}
+
+	result.Strip = ast.NewStrip(tok.Val, tokClose.Val)
+
+	return result
+}
+
+// partial : OPEN_PARTIAL partialName param* hash? CLOSE
+func (p *parser) parsePartial() *ast.PartialStatement {
+	// OPEN_PARTIAL
+	tok := p.shift()
+
+	result := ast.NewPartialStatement(tok.Pos, tok.Line)
+
+	// partialName
+	result.Name = p.parsePartialName()
+
+	// param* hash?
+	result.Params, result.Hash = p.parseExpressionParamsHash()
+
+	// CLOSE
+	tokClose := p.shift()
+	if tokClose.Kind != lexer.TokenClose {
+		errExpected(lexer.TokenClose, tokClose)
+	}
+
+	result.Strip = ast.NewStrip(tok.Val, tokClose.Val)
+
+	return result
+}
+
+// helperName | sexpr
+func (p *parser) parseHelperNameOrSexpr() ast.Node {
+	if p.isSexpr() {
+		// sexpr
+		return p.parseSexpr()
+	}
+
+	// helperName
+	return p.parseHelperName()
+}
+
+// param : helperName | sexpr
+func (p *parser) parseParam() ast.Node {
+	return p.parseHelperNameOrSexpr()
+}
+
+// Returns true if next tokens represent a `param`
+func (p *parser) isParam() bool {
+	return (p.isSexpr() || p.isHelperName()) && !p.isHashSegment()
+}
+
+// param*
+func (p *parser) parseParams() []ast.Node {
+	var result []ast.Node
+
+	for p.isParam() {
+		result = append(result, p.parseParam())
+	}
+
+	return result
+}
+
+// sexpr : OPEN_SEXPR helperName param* hash? CLOSE_SEXPR
+func (p *parser) parseSexpr() *ast.SubExpression {
+	// OPEN_SEXPR
+	tok := p.shift()
+
+	result := ast.NewSubExpression(tok.Pos, tok.Line)
+
+	// helperName param* hash?
+	result.Expression = p.parseExpression(tok)
+
+	// CLOSE_SEXPR
+	tok = p.shift()
+	if tok.Kind != lexer.TokenCloseSexpr {
+		errExpected(lexer.TokenCloseSexpr, tok)
+	}
+
+	return result
+}
+
+// hash : hashSegment+
+func (p *parser) parseHash() *ast.Hash {
+	var pairs []*ast.HashPair
+
+	for p.isHashSegment() {
+		pairs = append(pairs, p.parseHashSegment())
+	}
+
+	firstLoc := pairs[0].Location()
+
+	result := ast.NewHash(firstLoc.Pos, firstLoc.Line)
+	result.Pairs = pairs
+
+	return result
+}
+
+// returns true if next tokens represents a `hashSegment`
+func (p *parser) isHashSegment() bool {
+	return p.have(2) && (p.next().Kind == lexer.TokenID) && (p.nextAt(1).Kind == lexer.TokenEquals)
+}
+
+// hashSegment : ID EQUALS param
+func (p *parser) parseHashSegment() *ast.HashPair {
+	// ID
+	tok := p.shift()
+
+	// EQUALS
+	p.shift()
+
+	// param
+	param := p.parseParam()
+
+	result := ast.NewHashPair(tok.Pos, tok.Line)
+	result.Key = tok.Val
+	result.Val = param
+
+	return result
+}
+
+// blockParams : OPEN_BLOCK_PARAMS ID+ CLOSE_BLOCK_PARAMS
+func (p *parser) parseBlockParams() []string {
+	var result []string
+
+	// OPEN_BLOCK_PARAMS
+	tok := p.shift()
+
+	// ID+
+	for p.isID() {
+		result = append(result, p.shift().Val)
+	}
+
+	if len(result) == 0 {
+		errExpected(lexer.TokenID, p.next())
+	}
+
+	// CLOSE_BLOCK_PARAMS
+	tok = p.shift()
+	if tok.Kind != lexer.TokenCloseBlockParams {
+		errExpected(lexer.TokenCloseBlockParams, tok)
+	}
+
+	return result
+}
+
+// helperName : path | dataName | STRING | NUMBER | BOOLEAN | UNDEFINED | NULL
+func (p *parser) parseHelperName() ast.Node {
+	var result ast.Node
+
+	tok := p.next()
+
+	switch tok.Kind {
+	case lexer.TokenBoolean:
+		// BOOLEAN
+		p.shift()
+		result = ast.NewBooleanLiteral(tok.Pos, tok.Line, (tok.Val == "true"), tok.Val)
+	case lexer.TokenNumber:
+		// NUMBER
+		p.shift()
+
+		val, isInt := parseNumber(tok)
+		result = ast.NewNumberLiteral(tok.Pos, tok.Line, val, isInt, tok.Val)
+	case lexer.TokenString:
+		// STRING
+		p.shift()
+		result = ast.NewStringLiteral(tok.Pos, tok.Line, tok.Val)
+	case lexer.TokenData:
+		// dataName
+		result = p.parseDataName()
+	default:
+		// path
+		result = p.parsePath(false)
+	}
+
+	return result
+}
+
+// parseNumber parses a number
+func parseNumber(tok *lexer.Token) (result float64, isInt bool) {
+	var valInt int
+	var err error
+
+	valInt, err = strconv.Atoi(tok.Val)
+	if err == nil {
+		isInt = true
+
+		result = float64(valInt)
+	} else {
+		isInt = false
+
+		result, err = strconv.ParseFloat(tok.Val, 64)
+		if err != nil {
+			errToken(tok, fmt.Sprintf("Failed to parse number: %s", tok.Val))
+		}
+	}
+
+	// named returned values
+	return
+}
+
+// Returns true if next tokens represent a `helperName`
+func (p *parser) isHelperName() bool {
+	switch p.next().Kind {
+	case lexer.TokenBoolean, lexer.TokenNumber, lexer.TokenString, lexer.TokenData, lexer.TokenID:
+		return true
+	}
+
+	return false
+}
+
+// partialName : helperName | sexpr
+func (p *parser) parsePartialName() ast.Node {
+	return p.parseHelperNameOrSexpr()
+}
+
+// dataName : DATA pathSegments
+func (p *parser) parseDataName() *ast.PathExpression {
+	// DATA
+	p.shift()
+
+	// pathSegments
+	return p.parsePath(true)
+}
+
+// path : pathSegments
+// pathSegments : pathSegments SEP ID
+//              | ID
+func (p *parser) parsePath(data bool) *ast.PathExpression {
+	var tok *lexer.Token
+
+	// ID
+	tok = p.shift()
+	if tok.Kind != lexer.TokenID {
+		errExpected(lexer.TokenID, tok)
+	}
+
+	result := ast.NewPathExpression(tok.Pos, tok.Line, data)
+	result.Part(tok.Val)
+
+	for p.isPathSep() {
+		// SEP
+		tok = p.shift()
+		result.Sep(tok.Val)
+
+		// ID
+		tok = p.shift()
+		if tok.Kind != lexer.TokenID {
+			errExpected(lexer.TokenID, tok)
+		}
+
+		result.Part(tok.Val)
+
+		if len(result.Parts) > 0 {
+			switch tok.Val {
+			case "..", ".", "this":
+				errToken(tok, "Invalid path: "+result.Original)
+			}
+		}
+	}
+
+	return result
+}
+
+// Ensures there is token to parse at given index
+func (p *parser) ensure(index int) {
+	if p.lexOver {
+		// nothing more to grab
+		return
+	}
+
+	nb := index + 1
+
+	for len(p.tokens) < nb {
+		// fetch next token
+		tok := p.lex.NextToken()
+
+		// queue it
+		p.tokens = append(p.tokens, &tok)
+
+		if (tok.Kind == lexer.TokenEOF) || (tok.Kind == lexer.TokenError) {
+			p.lexOver = true
+			break
+		}
+	}
+}
+
+// have returns true is there are a list given number of tokens to consume left
+func (p *parser) have(nb int) bool {
+	p.ensure(nb - 1)
+
+	return len(p.tokens) >= nb
+}
+
+// nextAt returns next token at given index, without consuming it
+func (p *parser) nextAt(index int) *lexer.Token {
+	p.ensure(index)
+
+	return p.tokens[index]
+}
+
+// next returns next token without consuming it
+func (p *parser) next() *lexer.Token {
+	return p.nextAt(0)
+}
+
+// shift returns next token and remove it from the tokens buffer
+//
+// Panics if next token is `TokenError`
+func (p *parser) shift() *lexer.Token {
+	var result *lexer.Token
+
+	p.ensure(0)
+
+	result, p.tokens = p.tokens[0], p.tokens[1:]
+
+	// check error token
+	if result.Kind == lexer.TokenError {
+		errToken(result, "Lexer error")
+	}
+
+	return result
+}
+
+// isToken returns true if next token is of given type
+func (p *parser) isToken(kind lexer.TokenKind) bool {
+	return p.have(1) && p.next().Kind == kind
+}
+
+// isSexpr returns true if next token starts a sexpr
+func (p *parser) isSexpr() bool {
+	return p.isToken(lexer.TokenOpenSexpr)
+}
+
+// isPathSep returns true if next token is a path separator
+func (p *parser) isPathSep() bool {
+	return p.isToken(lexer.TokenSep)
+}
+
+// isID returns true if next token is an ID
+func (p *parser) isID() bool {
+	return p.isToken(lexer.TokenID)
+}
+
+// isBlockParams returns true if next token starts a block params
+func (p *parser) isBlockParams() bool {
+	return p.isToken(lexer.TokenOpenBlockParams)
+}
+
+// isInverse returns true if next token starts an INVERSE sequence
+func (p *parser) isInverse() bool {
+	return p.isToken(lexer.TokenInverse)
+}
+
+// isOpenInverseChain returns true if next token is OPEN_INVERSE_CHAIN
+func (p *parser) isOpenInverseChain() bool {
+	return p.isToken(lexer.TokenOpenInverseChain)
+}

+ 360 - 0
vendor/github.com/aymerick/raymond/parser/whitespace.go

@@ -0,0 +1,360 @@
+package parser
+
+import (
+	"regexp"
+
+	"github.com/aymerick/raymond/ast"
+)
+
+// whitespaceVisitor walks through the AST to perform whitespace control
+//
+// The logic was shamelessly borrowed from:
+//   https://github.com/wycats/handlebars.js/blob/master/lib/handlebars/compiler/whitespace-control.js
+type whitespaceVisitor struct {
+	isRootSeen bool
+}
+
+var (
+	rTrimLeft         = regexp.MustCompile(`^[ \t]*\r?\n?`)
+	rTrimLeftMultiple = regexp.MustCompile(`^\s+`)
+
+	rTrimRight         = regexp.MustCompile(`[ \t]+$`)
+	rTrimRightMultiple = regexp.MustCompile(`\s+$`)
+
+	rPrevWhitespace      = regexp.MustCompile(`\r?\n\s*?$`)
+	rPrevWhitespaceStart = regexp.MustCompile(`(^|\r?\n)\s*?$`)
+
+	rNextWhitespace    = regexp.MustCompile(`^\s*?\r?\n`)
+	rNextWhitespaceEnd = regexp.MustCompile(`^\s*?(\r?\n|$)`)
+
+	rPartialIndent = regexp.MustCompile(`([ \t]+$)`)
+)
+
+// newWhitespaceVisitor instanciates a new whitespaceVisitor
+func newWhitespaceVisitor() *whitespaceVisitor {
+	return &whitespaceVisitor{}
+}
+
+// processWhitespaces performs whitespace control on given AST
+//
+// WARNING: It must be called only once on AST.
+func processWhitespaces(node ast.Node) {
+	node.Accept(newWhitespaceVisitor())
+}
+
+func omitRightFirst(body []ast.Node, multiple bool) {
+	omitRight(body, -1, multiple)
+}
+
+func omitRight(body []ast.Node, i int, multiple bool) {
+	if i+1 >= len(body) {
+		return
+	}
+
+	current := body[i+1]
+
+	node, ok := current.(*ast.ContentStatement)
+	if !ok {
+		return
+	}
+
+	if !multiple && node.RightStripped {
+		return
+	}
+
+	original := node.Value
+
+	r := rTrimLeft
+	if multiple {
+		r = rTrimLeftMultiple
+	}
+
+	node.Value = r.ReplaceAllString(node.Value, "")
+
+	node.RightStripped = (original != node.Value)
+}
+
+func omitLeftLast(body []ast.Node, multiple bool) {
+	omitLeft(body, len(body), multiple)
+}
+
+func omitLeft(body []ast.Node, i int, multiple bool) bool {
+	if i-1 < 0 {
+		return false
+	}
+
+	current := body[i-1]
+
+	node, ok := current.(*ast.ContentStatement)
+	if !ok {
+		return false
+	}
+
+	if !multiple && node.LeftStripped {
+		return false
+	}
+
+	original := node.Value
+
+	r := rTrimRight
+	if multiple {
+		r = rTrimRightMultiple
+	}
+
+	node.Value = r.ReplaceAllString(node.Value, "")
+
+	node.LeftStripped = (original != node.Value)
+
+	return node.LeftStripped
+}
+
+func isPrevWhitespace(body []ast.Node) bool {
+	return isPrevWhitespaceProgram(body, len(body), false)
+}
+
+func isPrevWhitespaceProgram(body []ast.Node, i int, isRoot bool) bool {
+	if i < 1 {
+		return isRoot
+	}
+
+	prev := body[i-1]
+
+	if node, ok := prev.(*ast.ContentStatement); ok {
+		if (node.Value == "") && node.RightStripped {
+			// already stripped, so it may be an empty string not catched by regexp
+			return true
+		}
+
+		r := rPrevWhitespaceStart
+		if (i > 1) || !isRoot {
+			r = rPrevWhitespace
+		}
+
+		return r.MatchString(node.Value)
+	}
+
+	return false
+}
+
+func isNextWhitespace(body []ast.Node) bool {
+	return isNextWhitespaceProgram(body, -1, false)
+}
+
+func isNextWhitespaceProgram(body []ast.Node, i int, isRoot bool) bool {
+	if i+1 >= len(body) {
+		return isRoot
+	}
+
+	next := body[i+1]
+
+	if node, ok := next.(*ast.ContentStatement); ok {
+		if (node.Value == "") && node.LeftStripped {
+			// already stripped, so it may be an empty string not catched by regexp
+			return true
+		}
+
+		r := rNextWhitespaceEnd
+		if (i+2 > len(body)) || !isRoot {
+			r = rNextWhitespace
+		}
+
+		return r.MatchString(node.Value)
+	}
+
+	return false
+}
+
+//
+// Visitor interface
+//
+
+func (v *whitespaceVisitor) VisitProgram(program *ast.Program) interface{} {
+	isRoot := !v.isRootSeen
+	v.isRootSeen = true
+
+	body := program.Body
+	for i, current := range body {
+		strip, _ := current.Accept(v).(*ast.Strip)
+		if strip == nil {
+			continue
+		}
+
+		_isPrevWhitespace := isPrevWhitespaceProgram(body, i, isRoot)
+		_isNextWhitespace := isNextWhitespaceProgram(body, i, isRoot)
+
+		openStandalone := strip.OpenStandalone && _isPrevWhitespace
+		closeStandalone := strip.CloseStandalone && _isNextWhitespace
+		inlineStandalone := strip.InlineStandalone && _isPrevWhitespace && _isNextWhitespace
+
+		if strip.Close {
+			omitRight(body, i, true)
+		}
+
+		if strip.Open && (i > 0) {
+			omitLeft(body, i, true)
+		}
+
+		if inlineStandalone {
+			omitRight(body, i, false)
+
+			if omitLeft(body, i, false) {
+				// If we are on a standalone node, save the indent info for partials
+				if partial, ok := current.(*ast.PartialStatement); ok {
+					// Pull out the whitespace from the final line
+					if i > 0 {
+						if prevContent, ok := body[i-1].(*ast.ContentStatement); ok {
+							partial.Indent = rPartialIndent.FindString(prevContent.Original)
+						}
+					}
+				}
+			}
+		}
+
+		if b, ok := current.(*ast.BlockStatement); ok {
+			if openStandalone {
+				prog := b.Program
+				if prog == nil {
+					prog = b.Inverse
+				}
+
+				omitRightFirst(prog.Body, false)
+
+				// Strip out the previous content node if it's whitespace only
+				omitLeft(body, i, false)
+			}
+
+			if closeStandalone {
+				prog := b.Inverse
+				if prog == nil {
+					prog = b.Program
+				}
+
+				// Always strip the next node
+				omitRight(body, i, false)
+
+				omitLeftLast(prog.Body, false)
+			}
+
+		}
+	}
+
+	return nil
+}
+
+func (v *whitespaceVisitor) VisitBlock(block *ast.BlockStatement) interface{} {
+	if block.Program != nil {
+		block.Program.Accept(v)
+	}
+
+	if block.Inverse != nil {
+		block.Inverse.Accept(v)
+	}
+
+	program := block.Program
+	inverse := block.Inverse
+
+	if program == nil {
+		program = inverse
+		inverse = nil
+	}
+
+	firstInverse := inverse
+	lastInverse := inverse
+
+	if (inverse != nil) && inverse.Chained {
+		b, _ := inverse.Body[0].(*ast.BlockStatement)
+		firstInverse = b.Program
+
+		for lastInverse.Chained {
+			b, _ := lastInverse.Body[len(lastInverse.Body)-1].(*ast.BlockStatement)
+			lastInverse = b.Program
+		}
+	}
+
+	closeProg := firstInverse
+	if closeProg == nil {
+		closeProg = program
+	}
+
+	strip := &ast.Strip{
+		Open:  (block.OpenStrip != nil) && block.OpenStrip.Open,
+		Close: (block.CloseStrip != nil) && block.CloseStrip.Close,
+
+		OpenStandalone:  isNextWhitespace(program.Body),
+		CloseStandalone: isPrevWhitespace(closeProg.Body),
+	}
+
+	if (block.OpenStrip != nil) && block.OpenStrip.Close {
+		omitRightFirst(program.Body, true)
+	}
+
+	if inverse != nil {
+		if block.InverseStrip != nil {
+			inverseStrip := block.InverseStrip
+
+			if inverseStrip.Open {
+				omitLeftLast(program.Body, true)
+			}
+
+			if inverseStrip.Close {
+				omitRightFirst(firstInverse.Body, true)
+			}
+		}
+
+		if (block.CloseStrip != nil) && block.CloseStrip.Open {
+			omitLeftLast(lastInverse.Body, true)
+		}
+
+		// Find standalone else statements
+		if isPrevWhitespace(program.Body) && isNextWhitespace(firstInverse.Body) {
+			omitLeftLast(program.Body, false)
+
+			omitRightFirst(firstInverse.Body, false)
+		}
+	} else if (block.CloseStrip != nil) && block.CloseStrip.Open {
+		omitLeftLast(program.Body, true)
+	}
+
+	return strip
+}
+
+func (v *whitespaceVisitor) VisitMustache(mustache *ast.MustacheStatement) interface{} {
+	return mustache.Strip
+}
+
+func _inlineStandalone(strip *ast.Strip) interface{} {
+	return &ast.Strip{
+		Open:             strip.Open,
+		Close:            strip.Close,
+		InlineStandalone: true,
+	}
+}
+
+func (v *whitespaceVisitor) VisitPartial(node *ast.PartialStatement) interface{} {
+	strip := node.Strip
+	if strip == nil {
+		strip = &ast.Strip{}
+	}
+
+	return _inlineStandalone(strip)
+}
+
+func (v *whitespaceVisitor) VisitComment(node *ast.CommentStatement) interface{} {
+	strip := node.Strip
+	if strip == nil {
+		strip = &ast.Strip{}
+	}
+
+	return _inlineStandalone(strip)
+}
+
+// NOOP
+func (v *whitespaceVisitor) VisitContent(node *ast.ContentStatement) interface{}    { return nil }
+func (v *whitespaceVisitor) VisitExpression(node *ast.Expression) interface{}       { return nil }
+func (v *whitespaceVisitor) VisitSubExpression(node *ast.SubExpression) interface{} { return nil }
+func (v *whitespaceVisitor) VisitPath(node *ast.PathExpression) interface{}         { return nil }
+func (v *whitespaceVisitor) VisitString(node *ast.StringLiteral) interface{}        { return nil }
+func (v *whitespaceVisitor) VisitBoolean(node *ast.BooleanLiteral) interface{}      { return nil }
+func (v *whitespaceVisitor) VisitNumber(node *ast.NumberLiteral) interface{}        { return nil }
+func (v *whitespaceVisitor) VisitHash(node *ast.Hash) interface{}                   { return nil }
+func (v *whitespaceVisitor) VisitHashPair(node *ast.HashPair) interface{}           { return nil }

+ 85 - 0
vendor/github.com/aymerick/raymond/partial.go

@@ -0,0 +1,85 @@
+package raymond
+
+import (
+	"fmt"
+	"sync"
+)
+
+// partial represents a partial template
+type partial struct {
+	name   string
+	source string
+	tpl    *Template
+}
+
+// partials stores all global partials
+var partials map[string]*partial
+
+// protects global partials
+var partialsMutex sync.RWMutex
+
+func init() {
+	partials = make(map[string]*partial)
+}
+
+// newPartial instanciates a new partial
+func newPartial(name string, source string, tpl *Template) *partial {
+	return &partial{
+		name:   name,
+		source: source,
+		tpl:    tpl,
+	}
+}
+
+// RegisterPartial registers a global partial. That partial will be available to all templates.
+func RegisterPartial(name string, source string) {
+	partialsMutex.Lock()
+	defer partialsMutex.Unlock()
+
+	if partials[name] != nil {
+		panic(fmt.Errorf("Partial already registered: %s", name))
+	}
+
+	partials[name] = newPartial(name, source, nil)
+}
+
+// RegisterPartials registers several global partials. Those partials will be available to all templates.
+func RegisterPartials(partials map[string]string) {
+	for name, p := range partials {
+		RegisterPartial(name, p)
+	}
+}
+
+// RegisterPartialTemplate registers a global partial with given parsed template. That partial will be available to all templates.
+func RegisterPartialTemplate(name string, tpl *Template) {
+	partialsMutex.Lock()
+	defer partialsMutex.Unlock()
+
+	if partials[name] != nil {
+		panic(fmt.Errorf("Partial already registered: %s", name))
+	}
+
+	partials[name] = newPartial(name, "", tpl)
+}
+
+// findPartial finds a registered global partial
+func findPartial(name string) *partial {
+	partialsMutex.RLock()
+	defer partialsMutex.RUnlock()
+
+	return partials[name]
+}
+
+// template returns parsed partial template
+func (p *partial) template() (*Template, error) {
+	if p.tpl == nil {
+		var err error
+
+		p.tpl, err = Parse(p.source)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return p.tpl, nil
+}

+ 28 - 0
vendor/github.com/aymerick/raymond/raymond.go

@@ -0,0 +1,28 @@
+// Package raymond provides handlebars evaluation
+package raymond
+
+// Render parses a template and evaluates it with given context
+//
+// Note that this function call is not optimal as your template is parsed everytime you call it. You should use Parse() function instead.
+func Render(source string, ctx interface{}) (string, error) {
+	// parse template
+	tpl, err := Parse(source)
+	if err != nil {
+		return "", err
+	}
+
+	// renders template
+	str, err := tpl.Exec(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	return str, nil
+}
+
+// MustRender parses a template and evaluates it with given context. It panics on error.
+//
+// Note that this function call is not optimal as your template is parsed everytime you call it. You should use Parse() function instead.
+func MustRender(source string, ctx interface{}) string {
+	return MustParse(source).MustExec(ctx)
+}

+ 84 - 0
vendor/github.com/aymerick/raymond/string.go

@@ -0,0 +1,84 @@
+package raymond
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// SafeString represents a string that must not be escaped.
+//
+// A SafeString can be returned by helpers to disable escaping.
+type SafeString string
+
+// isSafeString returns true if argument is a SafeString
+func isSafeString(value interface{}) bool {
+	if _, ok := value.(SafeString); ok {
+		return true
+	}
+	return false
+}
+
+// Str returns string representation of any basic type value.
+func Str(value interface{}) string {
+	return strValue(reflect.ValueOf(value))
+}
+
+// strValue returns string representation of a reflect.Value
+func strValue(value reflect.Value) string {
+	result := ""
+
+	ival, ok := printableValue(value)
+	if !ok {
+		panic(fmt.Errorf("Can't print value: %q", value))
+	}
+
+	val := reflect.ValueOf(ival)
+
+	switch val.Kind() {
+	case reflect.Array, reflect.Slice:
+		for i := 0; i < val.Len(); i++ {
+			result += strValue(val.Index(i))
+		}
+	case reflect.Bool:
+		result = "false"
+		if val.Bool() {
+			result = "true"
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		result = fmt.Sprintf("%d", ival)
+	case reflect.Float32, reflect.Float64:
+		result = strconv.FormatFloat(val.Float(), 'f', -1, 64)
+	case reflect.Invalid:
+		result = ""
+	default:
+		result = fmt.Sprintf("%s", ival)
+	}
+
+	return result
+}
+
+// printableValue returns the, possibly indirected, interface value inside v that
+// is best for a call to formatted printer.
+//
+// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
+func printableValue(v reflect.Value) (interface{}, bool) {
+	if v.Kind() == reflect.Ptr {
+		v, _ = indirect(v) // fmt.Fprint handles nil.
+	}
+	if !v.IsValid() {
+		return "", true
+	}
+
+	if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
+		if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
+			v = v.Addr()
+		} else {
+			switch v.Kind() {
+			case reflect.Chan, reflect.Func:
+				return nil, false
+			}
+		}
+	}
+	return v.Interface(), true
+}

+ 248 - 0
vendor/github.com/aymerick/raymond/template.go

@@ -0,0 +1,248 @@
+package raymond
+
+import (
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"runtime"
+	"sync"
+
+	"github.com/aymerick/raymond/ast"
+	"github.com/aymerick/raymond/parser"
+)
+
+// Template represents a handlebars template.
+type Template struct {
+	source   string
+	program  *ast.Program
+	helpers  map[string]reflect.Value
+	partials map[string]*partial
+	mutex    sync.RWMutex // protects helpers and partials
+}
+
+// newTemplate instanciate a new template without parsing it
+func newTemplate(source string) *Template {
+	return &Template{
+		source:   source,
+		helpers:  make(map[string]reflect.Value),
+		partials: make(map[string]*partial),
+	}
+}
+
+// Parse instanciates a template by parsing given source.
+func Parse(source string) (*Template, error) {
+	tpl := newTemplate(source)
+
+	// parse template
+	if err := tpl.parse(); err != nil {
+		return nil, err
+	}
+
+	return tpl, nil
+}
+
+// MustParse instanciates a template by parsing given source. It panics on error.
+func MustParse(source string) *Template {
+	result, err := Parse(source)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// ParseFile reads given file and returns parsed template.
+func ParseFile(filePath string) (*Template, error) {
+	b, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	return Parse(string(b))
+}
+
+// parse parses the template
+//
+// It can be called several times, the parsing will be done only once.
+func (tpl *Template) parse() error {
+	if tpl.program == nil {
+		var err error
+
+		tpl.program, err = parser.Parse(tpl.source)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Clone returns a copy of that template.
+func (tpl *Template) Clone() *Template {
+	result := newTemplate(tpl.source)
+
+	result.program = tpl.program
+
+	tpl.mutex.RLock()
+	defer tpl.mutex.RUnlock()
+
+	for name, helper := range tpl.helpers {
+		result.RegisterHelper(name, helper.Interface())
+	}
+
+	for name, partial := range tpl.partials {
+		result.addPartial(name, partial.source, partial.tpl)
+	}
+
+	return result
+}
+
+func (tpl *Template) findHelper(name string) reflect.Value {
+	tpl.mutex.RLock()
+	defer tpl.mutex.RUnlock()
+
+	return tpl.helpers[name]
+}
+
+// RegisterHelper registers a helper for that template.
+func (tpl *Template) RegisterHelper(name string, helper interface{}) {
+	tpl.mutex.Lock()
+	defer tpl.mutex.Unlock()
+
+	if tpl.helpers[name] != zero {
+		panic(fmt.Sprintf("Helper %s already registered", name))
+	}
+
+	val := reflect.ValueOf(helper)
+	ensureValidHelper(name, val)
+
+	tpl.helpers[name] = val
+}
+
+// RegisterHelpers registers several helpers for that template.
+func (tpl *Template) RegisterHelpers(helpers map[string]interface{}) {
+	for name, helper := range helpers {
+		tpl.RegisterHelper(name, helper)
+	}
+}
+
+func (tpl *Template) addPartial(name string, source string, template *Template) {
+	tpl.mutex.Lock()
+	defer tpl.mutex.Unlock()
+
+	if tpl.partials[name] != nil {
+		panic(fmt.Sprintf("Partial %s already registered", name))
+	}
+
+	tpl.partials[name] = newPartial(name, source, template)
+}
+
+func (tpl *Template) findPartial(name string) *partial {
+	tpl.mutex.RLock()
+	defer tpl.mutex.RUnlock()
+
+	return tpl.partials[name]
+}
+
+// RegisterPartial registers a partial for that template.
+func (tpl *Template) RegisterPartial(name string, source string) {
+	tpl.addPartial(name, source, nil)
+}
+
+// RegisterPartials registers several partials for that template.
+func (tpl *Template) RegisterPartials(partials map[string]string) {
+	for name, partial := range partials {
+		tpl.RegisterPartial(name, partial)
+	}
+}
+
+// RegisterPartialFile reads given file and registers its content as a partial with given name.
+func (tpl *Template) RegisterPartialFile(filePath string, name string) error {
+	b, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	tpl.RegisterPartial(name, string(b))
+
+	return nil
+}
+
+// RegisterPartialFiles reads several files and registers them as partials, the filename base is used as the partial name.
+func (tpl *Template) RegisterPartialFiles(filePaths ...string) error {
+	if len(filePaths) == 0 {
+		return nil
+	}
+
+	for _, filePath := range filePaths {
+		name := fileBase(filePath)
+
+		if err := tpl.RegisterPartialFile(filePath, name); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RegisterPartialTemplate registers an already parsed partial for that template.
+func (tpl *Template) RegisterPartialTemplate(name string, template *Template) {
+	tpl.addPartial(name, "", template)
+}
+
+// Exec evaluates template with given context.
+func (tpl *Template) Exec(ctx interface{}) (result string, err error) {
+	return tpl.ExecWith(ctx, nil)
+}
+
+// MustExec evaluates template with given context. It panics on error.
+func (tpl *Template) MustExec(ctx interface{}) string {
+	result, err := tpl.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// ExecWith evaluates template with given context and private data frame.
+func (tpl *Template) ExecWith(ctx interface{}, privData *DataFrame) (result string, err error) {
+	defer errRecover(&err)
+
+	// parses template if necessary
+	err = tpl.parse()
+	if err != nil {
+		return
+	}
+
+	// setup visitor
+	v := newEvalVisitor(tpl, ctx, privData)
+
+	// visit AST
+	result, _ = tpl.program.Accept(v).(string)
+
+	// named return values
+	return
+}
+
+// errRecover recovers evaluation panic
+func errRecover(errp *error) {
+	e := recover()
+	if e != nil {
+		switch err := e.(type) {
+		case runtime.Error:
+			panic(e)
+		case error:
+			*errp = err
+		default:
+			panic(e)
+		}
+	}
+}
+
+// PrintAST returns string representation of parsed template.
+func (tpl *Template) PrintAST() string {
+	if err := tpl.parse(); err != nil {
+		return fmt.Sprintf("PARSER ERROR: %s", err)
+	}
+
+	return ast.Print(tpl.program)
+}

+ 85 - 0
vendor/github.com/aymerick/raymond/utils.go

@@ -0,0 +1,85 @@
+package raymond
+
+import (
+	"path"
+	"reflect"
+)
+
+// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
+// We indirect through pointers and empty interfaces (only) because
+// non-empty interfaces have methods we might need.
+//
+// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
+	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
+		if v.IsNil() {
+			return v, true
+		}
+		if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
+			break
+		}
+	}
+	return v, false
+}
+
+// IsTrue returns true if obj is a truthy value.
+func IsTrue(obj interface{}) bool {
+	thruth, ok := isTrueValue(reflect.ValueOf(obj))
+	if !ok {
+		return false
+	}
+	return thruth
+}
+
+// isTrueValue reports whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value
+//
+// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
+func isTrueValue(val reflect.Value) (truth, ok bool) {
+	if !val.IsValid() {
+		// Something like var x interface{}, never set. It's a form of nil.
+		return false, true
+	}
+	switch val.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		truth = val.Len() > 0
+	case reflect.Bool:
+		truth = val.Bool()
+	case reflect.Complex64, reflect.Complex128:
+		truth = val.Complex() != 0
+	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
+		truth = !val.IsNil()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		truth = val.Int() != 0
+	case reflect.Float32, reflect.Float64:
+		truth = val.Float() != 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		truth = val.Uint() != 0
+	case reflect.Struct:
+		truth = true // Struct values are always true.
+	default:
+		return
+	}
+	return truth, true
+}
+
+// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
+//
+// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
+func canBeNil(typ reflect.Type) bool {
+	switch typ.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return true
+	}
+	return false
+}
+
+// fileBase returns base file name
+//
+// example: /foo/bar/baz.png => baz
+func fileBase(filePath string) string {
+	fileName := path.Base(filePath)
+	fileExt := path.Ext(filePath)
+
+	return fileName[:len(fileName)-len(fileExt)]
+}

+ 9 - 0
vendor/github.com/eknkc/amber/LICENSE

@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2012 Ekin Koc ekin@eknkc.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 817 - 0
vendor/github.com/eknkc/amber/compiler.go

@@ -0,0 +1,817 @@
+package amber
+
+import (
+	"bytes"
+	"container/list"
+	"errors"
+	"fmt"
+	"go/ast"
+	gp "go/parser"
+	gt "go/token"
+	"html/template"
+	"io"
+	"os"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/eknkc/amber/parser"
+)
+
+var builtinFunctions = [...]string{
+	"len",
+	"print",
+	"printf",
+	"println",
+	"urlquery",
+	"js",
+	"json",
+	"index",
+	"html",
+	"unescaped",
+}
+
+// Compiler is the main interface of Amber Template Engine.
+// In order to use an Amber template, it is required to create a Compiler and
+// compile an Amber source to native Go template.
+//	compiler := amber.New()
+// 	// Parse the input file
+//	err := compiler.ParseFile("./input.amber")
+//	if err == nil {
+//		// Compile input file to Go template
+//		tpl, err := compiler.Compile()
+//		if err == nil {
+//			// Check built in html/template documentation for further details
+//			tpl.Execute(os.Stdout, somedata)
+//		}
+//	}
+type Compiler struct {
+	// Compiler options
+	Options
+	filename     string
+	node         parser.Node
+	indentLevel  int
+	newline      bool
+	buffer       *bytes.Buffer
+	tempvarIndex int
+	mixins       map[string]*parser.Mixin
+}
+
+// New creates and initialize a new Compiler.
+func New() *Compiler {
+	compiler := new(Compiler)
+	compiler.filename = ""
+	compiler.tempvarIndex = 0
+	compiler.PrettyPrint = true
+	compiler.Options = DefaultOptions
+	compiler.mixins = make(map[string]*parser.Mixin)
+
+	return compiler
+}
+
+// Options defines template output behavior.
+type Options struct {
+	// Setting if pretty printing is enabled.
+	// Pretty printing ensures that the output html is properly indented and in human readable form.
+	// If disabled, produced HTML is compact. This might be more suitable in production environments.
+	// Default: true
+	PrettyPrint bool
+	// Setting if line number emitting is enabled
+	// In this form, Amber emits line number comments in the output template. It is usable in debugging environments.
+	// Default: false
+	LineNumbers bool
+}
+
+// DirOptions is used to provide options to directory compilation.
+type DirOptions struct {
+	// File extension to match for compilation
+	Ext string
+	// Whether or not to walk subdirectories
+	Recursive bool
+}
+
+// DefaultOptions sets pretty-printing to true and line numbering to false.
+var DefaultOptions = Options{true, false}
+
+// DefaultDirOptions sets expected file extension to ".amber" and recursive search for templates within a directory to true.
+var DefaultDirOptions = DirOptions{".amber", true}
+
+// Compile parses and compiles the supplied amber template string. Returns corresponding Go Template (html/templates) instance.
+// Necessary runtime functions will be injected and the template will be ready to be executed.
+func Compile(input string, options Options) (*template.Template, error) {
+	comp := New()
+	comp.Options = options
+
+	err := comp.Parse(input)
+	if err != nil {
+		return nil, err
+	}
+
+	return comp.Compile()
+}
+
+// Compile parses and compiles the supplied amber template []byte.
+// Returns corresponding Go Template (html/templates) instance.
+// Necessary runtime functions will be injected and the template will be ready to be executed.
+func CompileData(input []byte, filename string, options Options) (*template.Template, error) {
+	comp := New()
+	comp.Options = options
+
+	err := comp.ParseData(input, filename)
+	if err != nil {
+		return nil, err
+	}
+
+	return comp.Compile()
+}
+
+// MustCompile is the same as Compile, except the input is assumed error free. If else, panic.
+func MustCompile(input string, options Options) *template.Template {
+	t, err := Compile(input, options)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// CompileFile parses and compiles the contents of supplied filename. Returns corresponding Go Template (html/templates) instance.
+// Necessary runtime functions will be injected and the template will be ready to be executed.
+func CompileFile(filename string, options Options) (*template.Template, error) {
+	comp := New()
+	comp.Options = options
+
+	err := comp.ParseFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	return comp.Compile()
+}
+
+// MustCompileFile is the same as CompileFile, except the input is assumed error free. If else, panic.
+func MustCompileFile(filename string, options Options) *template.Template {
+	t, err := CompileFile(filename, options)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// CompileDir parses and compiles the contents of a supplied directory path, with options.
+// Returns a map of a template identifier (key) to a Go Template instance.
+// Ex: if the dirname="templates/" had a file "index.amber" the key would be "index"
+// If option for recursive is True, this parses every file of relevant extension
+// in all subdirectories. The key then is the path e.g: "layouts/layout"
+func CompileDir(dirname string, dopt DirOptions, opt Options) (map[string]*template.Template, error) {
+	dir, err := os.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+
+	files, err := dir.Readdir(0)
+	if err != nil {
+		return nil, err
+	}
+
+	compiled := make(map[string]*template.Template)
+	for _, file := range files {
+		// filename is for example "index.amber"
+		filename := file.Name()
+		fileext := filepath.Ext(filename)
+
+		// If recursive is true and there's a subdirectory, recurse
+		if dopt.Recursive && file.IsDir() {
+			dirpath := filepath.Join(dirname, filename)
+			subcompiled, err := CompileDir(dirpath, dopt, opt)
+			if err != nil {
+				return nil, err
+			}
+			// Copy templates from subdirectory into parent template mapping
+			for k, v := range subcompiled {
+				// Concat with parent directory name for unique paths
+				key := filepath.Join(filename, k)
+				compiled[key] = v
+			}
+		} else if fileext == dopt.Ext {
+			// Otherwise compile the file and add to mapping
+			fullpath := filepath.Join(dirname, filename)
+			tmpl, err := CompileFile(fullpath, opt)
+			if err != nil {
+				return nil, err
+			}
+			// Strip extension
+			key := filename[0 : len(filename)-len(fileext)]
+			compiled[key] = tmpl
+		}
+	}
+
+	return compiled, nil
+}
+
+// MustCompileDir is the same as CompileDir, except input is assumed error free. If else, panic.
+func MustCompileDir(dirname string, dopt DirOptions, opt Options) map[string]*template.Template {
+	m, err := CompileDir(dirname, dopt, opt)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+// Parse given raw amber template string.
+func (c *Compiler) Parse(input string) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			err = errors.New(r.(string))
+		}
+	}()
+
+	parser, err := parser.StringParser(input)
+
+	if err != nil {
+		return
+	}
+
+	c.node = parser.Parse()
+	return
+}
+
+// Parse given raw amber template bytes, and the filename that belongs with it
+func (c *Compiler) ParseData(input []byte, filename string) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			err = errors.New(r.(string))
+		}
+	}()
+
+	parser, err := parser.ByteParser(input)
+	parser.SetFilename(filename)
+
+	if err != nil {
+		return
+	}
+
+	c.node = parser.Parse()
+	return
+}
+
+// ParseFile parses the amber template file in given path.
+func (c *Compiler) ParseFile(filename string) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			err = errors.New(r.(string))
+		}
+	}()
+
+	parser, err := parser.FileParser(filename)
+
+	if err != nil {
+		return
+	}
+
+	c.node = parser.Parse()
+	c.filename = filename
+	return
+}
+
+// Compile amber and create a Go Template (html/templates) instance.
+// Necessary runtime functions will be injected and the template will be ready to be executed.
+func (c *Compiler) Compile() (*template.Template, error) {
+	return c.CompileWithName(filepath.Base(c.filename))
+}
+
+// CompileWithName is the same as Compile, but allows to specify a name for the template.
+func (c *Compiler) CompileWithName(name string) (*template.Template, error) {
+	return c.CompileWithTemplate(template.New(name))
+}
+
+// CompileWithTemplate is the same as Compile but allows to specify a template.
+func (c *Compiler) CompileWithTemplate(t *template.Template) (*template.Template, error) {
+	data, err := c.CompileString()
+
+	if err != nil {
+		return nil, err
+	}
+
+	tpl, err := t.Funcs(FuncMap).Parse(data)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return tpl, nil
+}
+
+// CompileWriter compiles amber and writes the Go Template source into given io.Writer instance.
+// You would not be using this unless debugging / checking the output. Please use Compile
+// method to obtain a template instance directly.
+func (c *Compiler) CompileWriter(out io.Writer) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			err = errors.New(r.(string))
+		}
+	}()
+
+	c.buffer = new(bytes.Buffer)
+	c.visit(c.node)
+
+	if c.buffer.Len() > 0 {
+		c.write("\n")
+	}
+
+	_, err = c.buffer.WriteTo(out)
+	return
+}
+
+// CompileString compiles the template and returns the Go Template source.
+// You would not be using this unless debugging / checking the output. Please use Compile
+// method to obtain a template instance directly.
+func (c *Compiler) CompileString() (string, error) {
+	var buf bytes.Buffer
+
+	if err := c.CompileWriter(&buf); err != nil {
+		return "", err
+	}
+
+	result := buf.String()
+
+	return result, nil
+}
+
+func (c *Compiler) visit(node parser.Node) {
+	defer func() {
+		if r := recover(); r != nil {
+			if rs, ok := r.(string); ok && rs[:len("Amber Error")] == "Amber Error" {
+				panic(r)
+			}
+
+			pos := node.Pos()
+
+			if len(pos.Filename) > 0 {
+				panic(fmt.Sprintf("Amber Error in <%s>: %v - Line: %d, Column: %d, Length: %d", pos.Filename, r, pos.LineNum, pos.ColNum, pos.TokenLength))
+			} else {
+				panic(fmt.Sprintf("Amber Error: %v - Line: %d, Column: %d, Length: %d", r, pos.LineNum, pos.ColNum, pos.TokenLength))
+			}
+		}
+	}()
+
+	switch node.(type) {
+	case *parser.Block:
+		c.visitBlock(node.(*parser.Block))
+	case *parser.Doctype:
+		c.visitDoctype(node.(*parser.Doctype))
+	case *parser.Comment:
+		c.visitComment(node.(*parser.Comment))
+	case *parser.Tag:
+		c.visitTag(node.(*parser.Tag))
+	case *parser.Text:
+		c.visitText(node.(*parser.Text))
+	case *parser.Condition:
+		c.visitCondition(node.(*parser.Condition))
+	case *parser.Each:
+		c.visitEach(node.(*parser.Each))
+	case *parser.Assignment:
+		c.visitAssignment(node.(*parser.Assignment))
+	case *parser.Mixin:
+		c.visitMixin(node.(*parser.Mixin))
+	case *parser.MixinCall:
+		c.visitMixinCall(node.(*parser.MixinCall))
+	}
+}
+
+func (c *Compiler) write(value string) {
+	c.buffer.WriteString(value)
+}
+
+func (c *Compiler) indent(offset int, newline bool) {
+	if !c.PrettyPrint {
+		return
+	}
+
+	if newline && c.buffer.Len() > 0 {
+		c.write("\n")
+	}
+
+	for i := 0; i < c.indentLevel+offset; i++ {
+		c.write("\t")
+	}
+}
+
+func (c *Compiler) tempvar() string {
+	c.tempvarIndex++
+	return "$__amber_" + strconv.Itoa(c.tempvarIndex)
+}
+
+func (c *Compiler) escape(input string) string {
+	return strings.Replace(strings.Replace(input, `\`, `\\`, -1), `"`, `\"`, -1)
+}
+
+func (c *Compiler) visitBlock(block *parser.Block) {
+	for _, node := range block.Children {
+		if _, ok := node.(*parser.Text); !block.CanInline() && ok {
+			c.indent(0, true)
+		}
+
+		c.visit(node)
+	}
+}
+
+func (c *Compiler) visitDoctype(doctype *parser.Doctype) {
+	c.write(doctype.String())
+}
+
+func (c *Compiler) visitComment(comment *parser.Comment) {
+	if comment.Silent {
+		return
+	}
+
+	c.indent(0, false)
+
+	if comment.Block == nil {
+		c.write(`{{unescaped "<!-- ` + c.escape(comment.Value) + ` -->"}}`)
+	} else {
+		c.write(`<!-- ` + comment.Value)
+		c.visitBlock(comment.Block)
+		c.write(` -->`)
+	}
+}
+
+func (c *Compiler) visitCondition(condition *parser.Condition) {
+	c.write(`{{if ` + c.visitRawInterpolation(condition.Expression) + `}}`)
+	c.visitBlock(condition.Positive)
+	if condition.Negative != nil {
+		c.write(`{{else}}`)
+		c.visitBlock(condition.Negative)
+	}
+	c.write(`{{end}}`)
+}
+
+func (c *Compiler) visitEach(each *parser.Each) {
+	if each.Block == nil {
+		return
+	}
+
+	if len(each.Y) == 0 {
+		c.write(`{{range ` + each.X + ` := ` + c.visitRawInterpolation(each.Expression) + `}}`)
+	} else {
+		c.write(`{{range ` + each.X + `, ` + each.Y + ` := ` + c.visitRawInterpolation(each.Expression) + `}}`)
+	}
+	c.visitBlock(each.Block)
+	c.write(`{{end}}`)
+}
+
+func (c *Compiler) visitAssignment(assgn *parser.Assignment) {
+	c.write(`{{` + assgn.X + ` := ` + c.visitRawInterpolation(assgn.Expression) + `}}`)
+}
+
+func (c *Compiler) visitTag(tag *parser.Tag) {
+	type attrib struct {
+		name      string
+		value     string
+		condition string
+	}
+
+	attribs := make(map[string]*attrib)
+
+	for _, item := range tag.Attributes {
+		attr := new(attrib)
+		attr.name = item.Name
+
+		if !item.IsRaw {
+			attr.value = c.visitInterpolation(item.Value)
+		} else if item.Value == "" {
+			attr.value = ""
+		} else {
+			attr.value = item.Value
+		}
+
+		if len(item.Condition) != 0 {
+			attr.condition = c.visitRawInterpolation(item.Condition)
+		}
+
+		if attr.name == "class" && attribs["class"] != nil {
+			prevclass := attribs["class"]
+			attr.value = ` ` + attr.value
+
+			if len(attr.condition) > 0 {
+				attr.value = `{{if ` + attr.condition + `}}` + attr.value + `{{end}}`
+				attr.condition = ""
+			}
+
+			if len(prevclass.condition) > 0 {
+				prevclass.value = `{{if ` + prevclass.condition + `}}` + prevclass.value + `{{end}}`
+				prevclass.condition = ""
+			}
+
+			prevclass.value = prevclass.value + attr.value
+		} else {
+			attribs[item.Name] = attr
+		}
+	}
+
+	keys := make([]string, 0, len(attribs))
+	for key := range attribs {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	c.indent(0, true)
+	c.write("<" + tag.Name)
+
+	for _, name := range keys {
+		value := attribs[name]
+
+		if len(value.condition) > 0 {
+			c.write(`{{if ` + value.condition + `}}`)
+		}
+
+		if value.value == "" {
+			c.write(` ` + name)
+		} else {
+			c.write(` ` + name + `="` + value.value + `"`)
+		}
+
+		if len(value.condition) > 0 {
+			c.write(`{{end}}`)
+		}
+	}
+
+	if tag.IsSelfClosing() {
+		c.write(` />`)
+	} else {
+		c.write(`>`)
+
+		if tag.Block != nil {
+			if !tag.Block.CanInline() {
+				c.indentLevel++
+			}
+
+			c.visitBlock(tag.Block)
+
+			if !tag.Block.CanInline() {
+				c.indentLevel--
+				c.indent(0, true)
+			}
+		}
+
+		c.write(`</` + tag.Name + `>`)
+	}
+}
+
+var textInterpolateRegexp = regexp.MustCompile(`#\{(.*?)\}`)
+var textEscapeRegexp = regexp.MustCompile(`\{\{(.*?)\}\}`)
+
+func (c *Compiler) visitText(txt *parser.Text) {
+	value := textEscapeRegexp.ReplaceAllStringFunc(txt.Value, func(value string) string {
+		return `{{"{{"}}` + value[2:len(value)-2] + `{{"}}"}}`
+	})
+
+	value = textInterpolateRegexp.ReplaceAllStringFunc(value, func(value string) string {
+		return c.visitInterpolation(value[2 : len(value)-1])
+	})
+
+	lines := strings.Split(value, "\n")
+	for i := 0; i < len(lines); i++ {
+		c.write(lines[i])
+
+		if i < len(lines)-1 {
+			c.write("\n")
+			c.indent(0, false)
+		}
+	}
+}
+
+func (c *Compiler) visitInterpolation(value string) string {
+	return `{{` + c.visitRawInterpolation(value) + `}}`
+}
+
+func (c *Compiler) visitRawInterpolation(value string) string {
+	if value == "" {
+		value = "\"\""
+	}
+
+	value = strings.Replace(value, "$", "__DOLLAR__", -1)
+	expr, err := gp.ParseExpr(value)
+	if err != nil {
+		panic("Unable to parse expression.")
+	}
+	value = strings.Replace(c.visitExpression(expr), "__DOLLAR__", "$", -1)
+	return value
+}
+
+func (c *Compiler) visitExpression(outerexpr ast.Expr) string {
+	stack := list.New()
+
+	pop := func() string {
+		if stack.Front() == nil {
+			return ""
+		}
+
+		val := stack.Front().Value.(string)
+		stack.Remove(stack.Front())
+		return val
+	}
+
+	var exec func(ast.Expr)
+
+	exec = func(expr ast.Expr) {
+		switch expr.(type) {
+		case *ast.BinaryExpr:
+			{
+				be := expr.(*ast.BinaryExpr)
+
+				exec(be.Y)
+				exec(be.X)
+
+				negate := false
+				name := c.tempvar()
+				c.write(`{{` + name + ` := `)
+
+				switch be.Op {
+				case gt.ADD:
+					c.write("__amber_add ")
+				case gt.SUB:
+					c.write("__amber_sub ")
+				case gt.MUL:
+					c.write("__amber_mul ")
+				case gt.QUO:
+					c.write("__amber_quo ")
+				case gt.REM:
+					c.write("__amber_rem ")
+				case gt.LAND:
+					c.write("and ")
+				case gt.LOR:
+					c.write("or ")
+				case gt.EQL:
+					c.write("__amber_eql ")
+				case gt.NEQ:
+					c.write("__amber_eql ")
+					negate = true
+				case gt.LSS:
+					c.write("__amber_lss ")
+				case gt.GTR:
+					c.write("__amber_gtr ")
+				case gt.LEQ:
+					c.write("__amber_gtr ")
+					negate = true
+				case gt.GEQ:
+					c.write("__amber_lss ")
+					negate = true
+				default:
+					panic("Unexpected operator!")
+				}
+
+				c.write(pop() + ` ` + pop() + `}}`)
+
+				if !negate {
+					stack.PushFront(name)
+				} else {
+					negname := c.tempvar()
+					c.write(`{{` + negname + ` := not ` + name + `}}`)
+					stack.PushFront(negname)
+				}
+			}
+		case *ast.UnaryExpr:
+			{
+				ue := expr.(*ast.UnaryExpr)
+
+				exec(ue.X)
+
+				name := c.tempvar()
+				c.write(`{{` + name + ` := `)
+
+				switch ue.Op {
+				case gt.SUB:
+					c.write("__amber_minus ")
+				case gt.ADD:
+					c.write("__amber_plus ")
+				case gt.NOT:
+					c.write("not ")
+				default:
+					panic("Unexpected operator!")
+				}
+
+				c.write(pop() + `}}`)
+				stack.PushFront(name)
+			}
+		case *ast.ParenExpr:
+			exec(expr.(*ast.ParenExpr).X)
+		case *ast.BasicLit:
+			stack.PushFront(expr.(*ast.BasicLit).Value)
+		case *ast.Ident:
+			name := expr.(*ast.Ident).Name
+			if len(name) >= len("__DOLLAR__") && name[:len("__DOLLAR__")] == "__DOLLAR__" {
+				if name == "__DOLLAR__" {
+					stack.PushFront(`.`)
+				} else {
+					stack.PushFront(`$` + expr.(*ast.Ident).Name[len("__DOLLAR__"):])
+				}
+			} else {
+				stack.PushFront(`.` + expr.(*ast.Ident).Name)
+			}
+		case *ast.SelectorExpr:
+			se := expr.(*ast.SelectorExpr)
+			exec(se.X)
+			x := pop()
+
+			if x == "." {
+				x = ""
+			}
+
+			name := c.tempvar()
+			c.write(`{{` + name + ` := ` + x + `.` + se.Sel.Name + `}}`)
+			stack.PushFront(name)
+		case *ast.CallExpr:
+			ce := expr.(*ast.CallExpr)
+
+			for i := len(ce.Args) - 1; i >= 0; i-- {
+				exec(ce.Args[i])
+			}
+
+			name := c.tempvar()
+			builtin := false
+
+			if ident, ok := ce.Fun.(*ast.Ident); ok {
+				for _, fname := range builtinFunctions {
+					if fname == ident.Name {
+						builtin = true
+						break
+					}
+				}
+				for fname := range FuncMap {
+					if fname == ident.Name {
+						builtin = true
+						break
+					}
+				}
+			}
+
+			if builtin {
+				stack.PushFront(ce.Fun.(*ast.Ident).Name)
+				c.write(`{{` + name + ` := ` + pop())
+			} else if se, ok := ce.Fun.(*ast.SelectorExpr); ok {
+				exec(se.X)
+				x := pop()
+
+				if x == "." {
+					x = ""
+				}
+				stack.PushFront(se.Sel.Name)
+				c.write(`{{` + name + ` := ` + x + `.` + pop())
+			} else {
+				exec(ce.Fun)
+				c.write(`{{` + name + ` := call ` + pop())
+			}
+
+			for i := 0; i < len(ce.Args); i++ {
+				c.write(` `)
+				c.write(pop())
+			}
+
+			c.write(`}}`)
+
+			stack.PushFront(name)
+		default:
+			panic("Unable to parse expression. Unsupported: " + reflect.TypeOf(expr).String())
+		}
+	}
+
+	exec(outerexpr)
+	return pop()
+}
+
+func (c *Compiler) visitMixin(mixin *parser.Mixin) {
+	c.mixins[mixin.Name] = mixin
+}
+
+func (c *Compiler) visitMixinCall(mixinCall *parser.MixinCall) {
+	mixin := c.mixins[mixinCall.Name]
+
+	switch {
+	case mixin == nil:
+		panic(fmt.Sprintf("unknown mixin %q", mixinCall.Name))
+
+	case len(mixinCall.Args) < len(mixin.Args):
+		panic(fmt.Sprintf(
+			"not enough arguments in call to mixin %q (have: %d, want: %d)",
+			mixinCall.Name,
+			len(mixinCall.Args),
+			len(mixin.Args),
+		))
+	case len(mixinCall.Args) > len(mixin.Args):
+		panic(fmt.Sprintf(
+			"too many arguments in call to mixin %q (have: %d, want: %d)",
+			mixinCall.Name,
+			len(mixinCall.Args),
+			len(mixin.Args),
+		))
+	}
+
+	for i, arg := range mixin.Args {
+		c.write(fmt.Sprintf(`{{%s := %s}}`, arg, c.visitRawInterpolation(mixinCall.Args[i])))
+	}
+	c.visitBlock(mixin.Block)
+}

+ 257 - 0
vendor/github.com/eknkc/amber/doc.go

@@ -0,0 +1,257 @@
+/*
+Package amber is an elegant templating engine for Go Programming Language.
+It is inspired from HAML and Jade.
+
+Tags
+
+A tag is simply a word:
+
+    html
+
+is converted to
+
+    <html></html>
+
+It is possible to add ID and CLASS attributes to tags:
+
+    div#main
+    span.time
+
+are converted to
+
+    <div id="main"></div>
+    <span class="time"></span>
+
+Any arbitrary attribute name / value pair can be added this way:
+
+    a[href="http://www.google.com"]
+
+You can mix multiple attributes together
+
+    a#someid[href="/"][title="Main Page"].main.link Click Link
+
+gets converted to
+
+    <a id="someid" class="main link" href="/" title="Main Page">Click Link</a>
+
+It is also possible to define these attributes within the block of a tag
+
+    a
+        #someid
+        [href="/"]
+        [title="Main Page"]
+        .main
+        .link
+        | Click Link
+
+Doctypes
+
+To add a doctype, use `!!!` or `doctype` keywords:
+
+    !!! transitional
+    // <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+or use `doctype`
+
+    doctype 5
+    // <!DOCTYPE html>
+
+Available options: `5`, `default`, `xml`, `transitional`, `strict`, `frameset`, `1.1`, `basic`, `mobile`
+
+Tag Content
+
+For single line tag text, you can just append the text after tag name:
+
+    p Testing!
+
+would yield
+
+    <p>Testing!</p>
+
+For multi line tag text, or nested tags, use indentation:
+
+    html
+        head
+            title Page Title
+        body
+            div#content
+                p
+                    | This is a long page content
+                    | These lines are all part of the parent p
+
+                    a[href="/"] Go To Main Page
+
+Data
+
+Input template data can be reached by key names directly. For example, assuming the template has been
+executed with following JSON data:
+
+    {
+        "Name": "Ekin",
+        "LastName": "Koc",
+        "Repositories": [
+            "amber",
+            "dateformat"
+        ],
+        "Avatar": "/images/ekin.jpg",
+        "Friends": 17
+    }
+
+It is possible to interpolate fields using `#{}`
+
+    p Welcome #{Name}!
+
+would print
+
+    <p>Welcome Ekin!</p>
+
+Attributes can have field names as well
+
+    a[title=Name][href="/ekin.koc"]
+
+would print
+
+    <a title="Ekin" href="/ekin.koc"></a>
+
+Expressions
+
+Amber can expand basic expressions. For example, it is possible to concatenate strings with + operator:
+
+    p Welcome #{Name + " " + LastName}
+
+Arithmetic expressions are also supported:
+
+    p You need #{50 - Friends} more friends to reach 50!
+
+Expressions can be used within attributes
+
+    img[alt=Name + " " + LastName][src=Avatar]
+
+Variables
+
+It is possible to define dynamic variables within templates,
+all variables must start with a $ character and can be assigned as in the following example:
+
+    div
+        $fullname = Name + " " + LastName
+        p Welcome #{$fullname}
+
+If you need to access the supplied data itself (i.e. the object containing Name, LastName etc fields.) you can use `$` variable
+
+    p $.Name
+
+Conditions
+
+For conditional blocks, it is possible to use `if <expression>`
+
+    div
+        if Friends > 10
+            p You have more than 10 friends
+        else if Friends > 5
+            p You have more than 5 friends
+        else
+            p You need more friends
+
+Again, it is possible to use arithmetic and boolean operators
+
+    div
+        if Name == "Ekin" && LastName == "Koc"
+            p Hey! I know you..
+
+There is a special syntax for conditional attributes. Only block attributes can have conditions;
+
+    div
+        .hasfriends ? Friends > 0
+
+This would yield a div with `hasfriends` class only if the `Friends > 0` condition holds. It is
+perfectly fine to use the same method for other types of attributes:
+
+    div
+        #foo ? Name == "Ekin"
+        [bar=baz] ? len(Repositories) > 0
+
+Iterations
+
+It is possible to iterate over arrays and maps using `each`:
+
+    each $repo in Repositories
+        p #{$repo}
+
+would print
+
+    p amber
+    p dateformat
+
+It is also possible to iterate over values and indexes at the same time
+
+    each $i, $repo in Repositories
+        p
+            .even ? $i % 2 == 0
+            .odd ? $i % 2 == 1
+
+Includes
+
+A template can include other templates using `include`:
+
+    a.amber
+        p this is template a
+
+    b.amber
+        p this is template b
+
+    c.amber
+        div
+            include a
+            include b
+
+gets compiled to
+
+    div
+        p this is template a
+        p this is template b
+
+Inheritance
+
+A template can inherit other templates. In order to inherit another template, an `extends` keyword should be used.
+Parent template can define several named blocks and child template can modify the blocks.
+
+    master.amber
+        !!! 5
+        html
+            head
+                block meta
+                    meta[name="description"][content="This is a great website"]
+
+                title
+                    block title
+                        | Default title
+            body
+                block content
+
+    subpage.amber
+        extends master
+
+        block title
+            | Some sub page!
+
+        block append meta
+            // This will be added after the description meta tag. It is also possible
+            // to prepend something to an existing block
+            meta[name="keywords"][content="foo bar"]
+
+        block content
+            div#main
+                p Some content here
+
+License
+(The MIT License)
+
+Copyright (c) 2012 Ekin Koc <ekin@eknkc.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+package amber

+ 285 - 0
vendor/github.com/eknkc/amber/parser/nodes.go

@@ -0,0 +1,285 @@
+package parser
+
+import (
+	"regexp"
+	"strings"
+)
+
+var selfClosingTags = [...]string{
+	"meta",
+	"img",
+	"link",
+	"input",
+	"source",
+	"area",
+	"base",
+	"col",
+	"br",
+	"hr",
+}
+
+var doctypes = map[string]string{
+	"5":            `<!DOCTYPE html>`,
+	"default":      `<!DOCTYPE html>`,
+	"xml":          `<?xml version="1.0" encoding="utf-8" ?>`,
+	"transitional": `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">`,
+	"strict":       `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">`,
+	"frameset":     `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">`,
+	"1.1":          `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">`,
+	"basic":        `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">`,
+	"mobile":       `<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">`,
+}
+
+type Node interface {
+	Pos() SourcePosition
+}
+
+type SourcePosition struct {
+	LineNum     int
+	ColNum      int
+	TokenLength int
+	Filename    string
+}
+
+func (s *SourcePosition) Pos() SourcePosition {
+	return *s
+}
+
+type Doctype struct {
+	SourcePosition
+	Value string
+}
+
+func newDoctype(value string) *Doctype {
+	dt := new(Doctype)
+	dt.Value = value
+	return dt
+}
+
+func (d *Doctype) String() string {
+	if defined := doctypes[d.Value]; len(defined) != 0 {
+		return defined
+	}
+
+	return `<!DOCTYPE ` + d.Value + `>`
+}
+
+type Comment struct {
+	SourcePosition
+	Value  string
+	Block  *Block
+	Silent bool
+}
+
+func newComment(value string) *Comment {
+	dt := new(Comment)
+	dt.Value = value
+	dt.Block = nil
+	dt.Silent = false
+	return dt
+}
+
+type Text struct {
+	SourcePosition
+	Value string
+	Raw   bool
+}
+
+func newText(value string, raw bool) *Text {
+	dt := new(Text)
+	dt.Value = value
+	dt.Raw = raw
+	return dt
+}
+
+type Block struct {
+	SourcePosition
+	Children []Node
+}
+
+func newBlock() *Block {
+	block := new(Block)
+	block.Children = make([]Node, 0)
+	return block
+}
+
+func (b *Block) push(node Node) {
+	b.Children = append(b.Children, node)
+}
+
+func (b *Block) pushFront(node Node) {
+	b.Children = append([]Node{node}, b.Children...)
+}
+
+func (b *Block) CanInline() bool {
+	if len(b.Children) == 0 {
+		return true
+	}
+
+	allText := true
+
+	for _, child := range b.Children {
+		if txt, ok := child.(*Text); !ok || txt.Raw {
+			allText = false
+			break
+		}
+	}
+
+	return allText
+}
+
+const (
+	NamedBlockDefault = iota
+	NamedBlockAppend
+	NamedBlockPrepend
+)
+
+type NamedBlock struct {
+	Block
+	Name     string
+	Modifier int
+}
+
+func newNamedBlock(name string) *NamedBlock {
+	bb := new(NamedBlock)
+	bb.Name = name
+	bb.Block.Children = make([]Node, 0)
+	bb.Modifier = NamedBlockDefault
+	return bb
+}
+
+type Attribute struct {
+	SourcePosition
+	Name      string
+	Value     string
+	IsRaw     bool
+	Condition string
+}
+
+type Tag struct {
+	SourcePosition
+	Block          *Block
+	Name           string
+	IsInterpolated bool
+	Attributes     []Attribute
+}
+
+func newTag(name string) *Tag {
+	tag := new(Tag)
+	tag.Block = nil
+	tag.Name = name
+	tag.Attributes = make([]Attribute, 0)
+	tag.IsInterpolated = false
+	return tag
+
+}
+
+func (t *Tag) IsSelfClosing() bool {
+	for _, tag := range selfClosingTags {
+		if tag == t.Name {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (t *Tag) IsRawText() bool {
+	return t.Name == "style" || t.Name == "script"
+}
+
+type Condition struct {
+	SourcePosition
+	Positive   *Block
+	Negative   *Block
+	Expression string
+}
+
+func newCondition(exp string) *Condition {
+	cond := new(Condition)
+	cond.Expression = exp
+	return cond
+}
+
+type Each struct {
+	SourcePosition
+	X          string
+	Y          string
+	Expression string
+	Block      *Block
+}
+
+func newEach(exp string) *Each {
+	each := new(Each)
+	each.Expression = exp
+	return each
+}
+
+type Assignment struct {
+	SourcePosition
+	X          string
+	Expression string
+}
+
+func newAssignment(x, expression string) *Assignment {
+	assgn := new(Assignment)
+	assgn.X = x
+	assgn.Expression = expression
+	return assgn
+}
+
+type Mixin struct {
+	SourcePosition
+	Block *Block
+	Name  string
+	Args  []string
+}
+
+func newMixin(name, args string) *Mixin {
+	mixin := new(Mixin)
+	mixin.Name = name
+
+	delExp := regexp.MustCompile(`,\s`)
+	mixin.Args = delExp.Split(args, -1)
+
+	for i := 0; i < len(mixin.Args); i++ {
+		mixin.Args[i] = strings.TrimSpace(mixin.Args[i])
+		if mixin.Args[i] == "" {
+			mixin.Args = append(mixin.Args[:i], mixin.Args[i+1:]...)
+			i--
+		}
+	}
+
+	return mixin
+}
+
+type MixinCall struct {
+	SourcePosition
+	Name string
+	Args []string
+}
+
+func newMixinCall(name, args string) *MixinCall {
+	mixinCall := new(MixinCall)
+	mixinCall.Name = name
+
+	if args != "" {
+		const t = "%s"
+		quoteExp := regexp.MustCompile(`"(.*?)"`)
+		delExp := regexp.MustCompile(`,\s`)
+
+		quotes := quoteExp.FindAllString(args, -1)
+		replaced := quoteExp.ReplaceAllString(args, t)
+		mixinCall.Args = delExp.Split(replaced, -1)
+
+		qi := 0
+		for i, arg := range mixinCall.Args {
+			if arg == t {
+				mixinCall.Args[i] = quotes[qi]
+				qi++
+			}
+		}
+	}
+
+	return mixinCall
+}

+ 456 - 0
vendor/github.com/eknkc/amber/parser/parser.go

@@ -0,0 +1,456 @@
+package parser
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+)
+
+type Parser struct {
+	scanner      *scanner
+	filename     string
+	currenttoken *token
+	namedBlocks  map[string]*NamedBlock
+	parent       *Parser
+	result       *Block
+}
+
+func newParser(rdr io.Reader) *Parser {
+	p := new(Parser)
+	p.scanner = newScanner(rdr)
+	p.namedBlocks = make(map[string]*NamedBlock)
+	return p
+}
+
+func StringParser(input string) (*Parser, error) {
+	return newParser(bytes.NewReader([]byte(input))), nil
+}
+
+func ByteParser(input []byte) (*Parser, error) {
+	return newParser(bytes.NewReader(input)), nil
+}
+
+func (p *Parser) SetFilename(filename string) {
+	p.filename = filename
+}
+
+func FileParser(filename string) (*Parser, error) {
+	data, err := ioutil.ReadFile(filename)
+
+	if err != nil {
+		return nil, err
+	}
+
+	parser := newParser(bytes.NewReader(data))
+	parser.filename = filename
+	return parser, nil
+}
+
+func (p *Parser) Parse() *Block {
+	if p.result != nil {
+		return p.result
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			if rs, ok := r.(string); ok && rs[:len("Amber Error")] == "Amber Error" {
+				panic(r)
+			}
+
+			pos := p.pos()
+
+			if len(pos.Filename) > 0 {
+				panic(fmt.Sprintf("Amber Error in <%s>: %v - Line: %d, Column: %d, Length: %d", pos.Filename, r, pos.LineNum, pos.ColNum, pos.TokenLength))
+			} else {
+				panic(fmt.Sprintf("Amber Error: %v - Line: %d, Column: %d, Length: %d", r, pos.LineNum, pos.ColNum, pos.TokenLength))
+			}
+		}
+	}()
+
+	block := newBlock()
+	p.advance()
+
+	for {
+		if p.currenttoken == nil || p.currenttoken.Kind == tokEOF {
+			break
+		}
+
+		if p.currenttoken.Kind == tokBlank {
+			p.advance()
+			continue
+		}
+
+		block.push(p.parse())
+	}
+
+	if p.parent != nil {
+		p.parent.Parse()
+
+		for _, prev := range p.parent.namedBlocks {
+			ours := p.namedBlocks[prev.Name]
+
+			if ours == nil {
+				// Put a copy of the named block into current context, so that sub-templates can use the block
+				p.namedBlocks[prev.Name] = prev
+				continue
+			}
+
+			top := findTopmostParentWithNamedBlock(p, prev.Name)
+			nb := top.namedBlocks[prev.Name]
+			switch ours.Modifier {
+			case NamedBlockAppend:
+				for i := 0; i < len(ours.Children); i++ {
+					nb.push(ours.Children[i])
+				}
+			case NamedBlockPrepend:
+				for i := len(ours.Children) - 1; i >= 0; i-- {
+					nb.pushFront(ours.Children[i])
+				}
+			default:
+				nb.Children = ours.Children
+			}
+		}
+
+		block = p.parent.result
+	}
+
+	p.result = block
+	return block
+}
+
+func (p *Parser) pos() SourcePosition {
+	pos := p.scanner.Pos()
+	pos.Filename = p.filename
+	return pos
+}
+
+func (p *Parser) parseRelativeFile(filename string) *Parser {
+	if len(p.filename) == 0 {
+		panic("Unable to import or extend " + filename + " in a non filesystem based parser.")
+	}
+
+	filename = filepath.Join(filepath.Dir(p.filename), filename)
+
+	if strings.IndexRune(filepath.Base(filename), '.') < 0 {
+		filename = filename + ".amber"
+	}
+
+	parser, err := FileParser(filename)
+	if err != nil {
+		panic("Unable to read " + filename + ", Error: " + string(err.Error()))
+	}
+
+	return parser
+}
+
+func (p *Parser) parse() Node {
+	switch p.currenttoken.Kind {
+	case tokDoctype:
+		return p.parseDoctype()
+	case tokComment:
+		return p.parseComment()
+	case tokText:
+		return p.parseText()
+	case tokIf:
+		return p.parseIf()
+	case tokEach:
+		return p.parseEach()
+	case tokImport:
+		return p.parseImport()
+	case tokTag:
+		return p.parseTag()
+	case tokAssignment:
+		return p.parseAssignment()
+	case tokNamedBlock:
+		return p.parseNamedBlock()
+	case tokExtends:
+		return p.parseExtends()
+	case tokIndent:
+		return p.parseBlock(nil)
+	case tokMixin:
+		return p.parseMixin()
+	case tokMixinCall:
+		return p.parseMixinCall()
+	}
+
+	panic(fmt.Sprintf("Unexpected token: %d", p.currenttoken.Kind))
+}
+
+func (p *Parser) expect(typ rune) *token {
+	if p.currenttoken.Kind != typ {
+		panic("Unexpected token!")
+	}
+	curtok := p.currenttoken
+	p.advance()
+	return curtok
+}
+
+func (p *Parser) advance() {
+	p.currenttoken = p.scanner.Next()
+}
+
+func (p *Parser) parseExtends() *Block {
+	if p.parent != nil {
+		panic("Unable to extend multiple parent templates.")
+	}
+
+	tok := p.expect(tokExtends)
+	parser := p.parseRelativeFile(tok.Value)
+	parser.Parse()
+	p.parent = parser
+	return newBlock()
+}
+
+func (p *Parser) parseBlock(parent Node) *Block {
+	p.expect(tokIndent)
+	block := newBlock()
+	block.SourcePosition = p.pos()
+
+	for {
+		if p.currenttoken == nil || p.currenttoken.Kind == tokEOF || p.currenttoken.Kind == tokOutdent {
+			break
+		}
+
+		if p.currenttoken.Kind == tokBlank {
+			p.advance()
+			continue
+		}
+
+		if p.currenttoken.Kind == tokId ||
+			p.currenttoken.Kind == tokClassName ||
+			p.currenttoken.Kind == tokAttribute {
+
+			if tag, ok := parent.(*Tag); ok {
+				attr := p.expect(p.currenttoken.Kind)
+				cond := attr.Data["Condition"]
+
+				switch attr.Kind {
+				case tokId:
+					tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "id", attr.Value, true, cond})
+				case tokClassName:
+					tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "class", attr.Value, true, cond})
+				case tokAttribute:
+					tag.Attributes = append(tag.Attributes, Attribute{p.pos(), attr.Value, attr.Data["Content"], attr.Data["Mode"] == "raw", cond})
+				}
+
+				continue
+			} else {
+				panic("Conditional attributes must be placed immediately within a parent tag.")
+			}
+		}
+
+		block.push(p.parse())
+	}
+
+	p.expect(tokOutdent)
+
+	return block
+}
+
+func (p *Parser) parseIf() *Condition {
+	tok := p.expect(tokIf)
+	cnd := newCondition(tok.Value)
+	cnd.SourcePosition = p.pos()
+
+readmore:
+	switch p.currenttoken.Kind {
+	case tokIndent:
+		cnd.Positive = p.parseBlock(cnd)
+		goto readmore
+	case tokElse:
+		p.expect(tokElse)
+		if p.currenttoken.Kind == tokIf {
+			cnd.Negative = newBlock()
+			cnd.Negative.push(p.parseIf())
+		} else if p.currenttoken.Kind == tokIndent {
+			cnd.Negative = p.parseBlock(cnd)
+		} else {
+			panic("Unexpected token!")
+		}
+		goto readmore
+	}
+
+	return cnd
+}
+
+func (p *Parser) parseEach() *Each {
+	tok := p.expect(tokEach)
+	ech := newEach(tok.Value)
+	ech.SourcePosition = p.pos()
+	ech.X = tok.Data["X"]
+	ech.Y = tok.Data["Y"]
+
+	if p.currenttoken.Kind == tokIndent {
+		ech.Block = p.parseBlock(ech)
+	}
+
+	return ech
+}
+
+func (p *Parser) parseImport() *Block {
+	tok := p.expect(tokImport)
+	node := p.parseRelativeFile(tok.Value).Parse()
+	node.SourcePosition = p.pos()
+	return node
+}
+
+func (p *Parser) parseNamedBlock() *Block {
+	tok := p.expect(tokNamedBlock)
+
+	if p.namedBlocks[tok.Value] != nil {
+		panic("Multiple definitions of named blocks are not permitted. Block " + tok.Value + " has been re defined.")
+	}
+
+	block := newNamedBlock(tok.Value)
+	block.SourcePosition = p.pos()
+
+	if tok.Data["Modifier"] == "append" {
+		block.Modifier = NamedBlockAppend
+	} else if tok.Data["Modifier"] == "prepend" {
+		block.Modifier = NamedBlockPrepend
+	}
+
+	if p.currenttoken.Kind == tokIndent {
+		block.Block = *(p.parseBlock(nil))
+	}
+
+	p.namedBlocks[block.Name] = block
+
+	if block.Modifier == NamedBlockDefault {
+		return &block.Block
+	}
+
+	return newBlock()
+}
+
+func (p *Parser) parseDoctype() *Doctype {
+	tok := p.expect(tokDoctype)
+	node := newDoctype(tok.Value)
+	node.SourcePosition = p.pos()
+	return node
+}
+
+func (p *Parser) parseComment() *Comment {
+	tok := p.expect(tokComment)
+	cmnt := newComment(tok.Value)
+	cmnt.SourcePosition = p.pos()
+	cmnt.Silent = tok.Data["Mode"] == "silent"
+
+	if p.currenttoken.Kind == tokIndent {
+		cmnt.Block = p.parseBlock(cmnt)
+	}
+
+	return cmnt
+}
+
+func (p *Parser) parseText() *Text {
+	tok := p.expect(tokText)
+	node := newText(tok.Value, tok.Data["Mode"] == "raw")
+	node.SourcePosition = p.pos()
+	return node
+}
+
+func (p *Parser) parseAssignment() *Assignment {
+	tok := p.expect(tokAssignment)
+	node := newAssignment(tok.Data["X"], tok.Value)
+	node.SourcePosition = p.pos()
+	return node
+}
+
+func (p *Parser) parseTag() *Tag {
+	tok := p.expect(tokTag)
+	tag := newTag(tok.Value)
+	tag.SourcePosition = p.pos()
+
+	ensureBlock := func() {
+		if tag.Block == nil {
+			tag.Block = newBlock()
+		}
+	}
+
+readmore:
+	switch p.currenttoken.Kind {
+	case tokIndent:
+		if tag.IsRawText() {
+			p.scanner.readRaw = true
+		}
+
+		block := p.parseBlock(tag)
+		if tag.Block == nil {
+			tag.Block = block
+		} else {
+			for _, c := range block.Children {
+				tag.Block.push(c)
+			}
+		}
+	case tokId:
+		id := p.expect(tokId)
+		if len(id.Data["Condition"]) > 0 {
+			panic("Conditional attributes must be placed in a block within a tag.")
+		}
+		tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "id", id.Value, true, ""})
+		goto readmore
+	case tokClassName:
+		cls := p.expect(tokClassName)
+		if len(cls.Data["Condition"]) > 0 {
+			panic("Conditional attributes must be placed in a block within a tag.")
+		}
+		tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "class", cls.Value, true, ""})
+		goto readmore
+	case tokAttribute:
+		attr := p.expect(tokAttribute)
+		if len(attr.Data["Condition"]) > 0 {
+			panic("Conditional attributes must be placed in a block within a tag.")
+		}
+		tag.Attributes = append(tag.Attributes, Attribute{p.pos(), attr.Value, attr.Data["Content"], attr.Data["Mode"] == "raw", ""})
+		goto readmore
+	case tokText:
+		if p.currenttoken.Data["Mode"] != "piped" {
+			ensureBlock()
+			tag.Block.pushFront(p.parseText())
+			goto readmore
+		}
+	}
+
+	return tag
+}
+
+func (p *Parser) parseMixin() *Mixin {
+	tok := p.expect(tokMixin)
+	mixin := newMixin(tok.Value, tok.Data["Args"])
+	mixin.SourcePosition = p.pos()
+
+	if p.currenttoken.Kind == tokIndent {
+		mixin.Block = p.parseBlock(mixin)
+	}
+
+	return mixin
+}
+
+func (p *Parser) parseMixinCall() *MixinCall {
+	tok := p.expect(tokMixinCall)
+	mixinCall := newMixinCall(tok.Value, tok.Data["Args"])
+	mixinCall.SourcePosition = p.pos()
+	return mixinCall
+}
+
+func findTopmostParentWithNamedBlock(p *Parser, name string) *Parser {
+	top := p
+
+	for {
+		if top.namedBlocks[name] == nil {
+			return nil
+		}
+		if top.parent == nil {
+			return top
+		}
+		if top.parent.namedBlocks[name] != nil {
+			top = top.parent
+		} else {
+			return top
+		}
+	}
+}

+ 501 - 0
vendor/github.com/eknkc/amber/parser/scanner.go

@@ -0,0 +1,501 @@
+package parser
+
+import (
+	"bufio"
+	"container/list"
+	"fmt"
+	"io"
+	"regexp"
+)
+
+const (
+	tokEOF = -(iota + 1)
+	tokDoctype
+	tokComment
+	tokIndent
+	tokOutdent
+	tokBlank
+	tokId
+	tokClassName
+	tokTag
+	tokText
+	tokAttribute
+	tokIf
+	tokElse
+	tokEach
+	tokAssignment
+	tokImport
+	tokNamedBlock
+	tokExtends
+	tokMixin
+	tokMixinCall
+)
+
+const (
+	scnNewLine = iota
+	scnLine
+	scnEOF
+)
+
+type scanner struct {
+	reader      *bufio.Reader
+	indentStack *list.List
+	stash       *list.List
+
+	state  int32
+	buffer string
+
+	line          int
+	col           int
+	lastTokenLine int
+	lastTokenCol  int
+	lastTokenSize int
+
+	readRaw bool
+}
+
+type token struct {
+	Kind  rune
+	Value string
+	Data  map[string]string
+}
+
+func newScanner(r io.Reader) *scanner {
+	s := new(scanner)
+	s.reader = bufio.NewReader(r)
+	s.indentStack = list.New()
+	s.stash = list.New()
+	s.state = scnNewLine
+	s.line = -1
+	s.col = 0
+
+	return s
+}
+
+func (s *scanner) Pos() SourcePosition {
+	return SourcePosition{s.lastTokenLine + 1, s.lastTokenCol + 1, s.lastTokenSize, ""}
+}
+
+// Returns next token found in buffer
+func (s *scanner) Next() *token {
+	if s.readRaw {
+		s.readRaw = false
+		return s.NextRaw()
+	}
+
+	s.ensureBuffer()
+
+	if stashed := s.stash.Front(); stashed != nil {
+		tok := stashed.Value.(*token)
+		s.stash.Remove(stashed)
+		return tok
+	}
+
+	switch s.state {
+	case scnEOF:
+		if outdent := s.indentStack.Back(); outdent != nil {
+			s.indentStack.Remove(outdent)
+			return &token{tokOutdent, "", nil}
+		}
+
+		return &token{tokEOF, "", nil}
+	case scnNewLine:
+		s.state = scnLine
+
+		if tok := s.scanIndent(); tok != nil {
+			return tok
+		}
+
+		return s.Next()
+	case scnLine:
+		if tok := s.scanMixin(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanMixinCall(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanDoctype(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanCondition(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanEach(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanImport(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanExtends(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanBlock(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanAssignment(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanTag(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanId(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanClassName(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanAttribute(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanComment(); tok != nil {
+			return tok
+		}
+
+		if tok := s.scanText(); tok != nil {
+			return tok
+		}
+	}
+
+	return nil
+}
+
+func (s *scanner) NextRaw() *token {
+	result := ""
+	level := 0
+
+	for {
+		s.ensureBuffer()
+
+		switch s.state {
+		case scnEOF:
+			return &token{tokText, result, map[string]string{"Mode": "raw"}}
+		case scnNewLine:
+			s.state = scnLine
+
+			if tok := s.scanIndent(); tok != nil {
+				if tok.Kind == tokIndent {
+					level++
+				} else if tok.Kind == tokOutdent {
+					level--
+				} else {
+					result = result + "\n"
+					continue
+				}
+
+				if level < 0 {
+					s.stash.PushBack(&token{tokOutdent, "", nil})
+
+					if len(result) > 0 && result[len(result)-1] == '\n' {
+						result = result[:len(result)-1]
+					}
+
+					return &token{tokText, result, map[string]string{"Mode": "raw"}}
+				}
+			}
+		case scnLine:
+			if len(result) > 0 {
+				result = result + "\n"
+			}
+			for i := 0; i < level; i++ {
+				result += "\t"
+			}
+			result = result + s.buffer
+			s.consume(len(s.buffer))
+		}
+	}
+
+	return nil
+}
+
+var rgxIndent = regexp.MustCompile(`^(\s+)`)
+
+func (s *scanner) scanIndent() *token {
+	if len(s.buffer) == 0 {
+		return &token{tokBlank, "", nil}
+	}
+
+	var head *list.Element
+	for head = s.indentStack.Front(); head != nil; head = head.Next() {
+		value := head.Value.(*regexp.Regexp)
+
+		if match := value.FindString(s.buffer); len(match) != 0 {
+			s.consume(len(match))
+		} else {
+			break
+		}
+	}
+
+	newIndent := rgxIndent.FindString(s.buffer)
+
+	if len(newIndent) != 0 && head == nil {
+		s.indentStack.PushBack(regexp.MustCompile(regexp.QuoteMeta(newIndent)))
+		s.consume(len(newIndent))
+		return &token{tokIndent, newIndent, nil}
+	}
+
+	if len(newIndent) == 0 && head != nil {
+		for head != nil {
+			next := head.Next()
+			s.indentStack.Remove(head)
+			if next == nil {
+				return &token{tokOutdent, "", nil}
+			} else {
+				s.stash.PushBack(&token{tokOutdent, "", nil})
+			}
+			head = next
+		}
+	}
+
+	if len(newIndent) != 0 && head != nil {
+		panic("Mismatching indentation. Please use a coherent indent schema.")
+	}
+
+	return nil
+}
+
+var rgxDoctype = regexp.MustCompile(`^(!!!|doctype)\s*(.*)`)
+
+func (s *scanner) scanDoctype() *token {
+	if sm := rgxDoctype.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		if len(sm[2]) == 0 {
+			sm[2] = "html"
+		}
+
+		s.consume(len(sm[0]))
+		return &token{tokDoctype, sm[2], nil}
+	}
+
+	return nil
+}
+
+var rgxIf = regexp.MustCompile(`^if\s+(.+)$`)
+var rgxElse = regexp.MustCompile(`^else\s*`)
+
+func (s *scanner) scanCondition() *token {
+	if sm := rgxIf.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokIf, sm[1], nil}
+	}
+
+	if sm := rgxElse.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokElse, "", nil}
+	}
+
+	return nil
+}
+
+var rgxEach = regexp.MustCompile(`^each\s+(\$[\w0-9\-_]*)(?:\s*,\s*(\$[\w0-9\-_]*))?\s+in\s+(.+)$`)
+
+func (s *scanner) scanEach() *token {
+	if sm := rgxEach.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokEach, sm[3], map[string]string{"X": sm[1], "Y": sm[2]}}
+	}
+
+	return nil
+}
+
+var rgxAssignment = regexp.MustCompile(`^(\$[\w0-9\-_]*)?\s*=\s*(.+)$`)
+
+func (s *scanner) scanAssignment() *token {
+	if sm := rgxAssignment.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokAssignment, sm[2], map[string]string{"X": sm[1]}}
+	}
+
+	return nil
+}
+
+var rgxComment = regexp.MustCompile(`^\/\/(-)?\s*(.*)$`)
+
+func (s *scanner) scanComment() *token {
+	if sm := rgxComment.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		mode := "embed"
+		if len(sm[1]) != 0 {
+			mode = "silent"
+		}
+
+		s.consume(len(sm[0]))
+		return &token{tokComment, sm[2], map[string]string{"Mode": mode}}
+	}
+
+	return nil
+}
+
+var rgxId = regexp.MustCompile(`^#([\w-]+)(?:\s*\?\s*(.*)$)?`)
+
+func (s *scanner) scanId() *token {
+	if sm := rgxId.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokId, sm[1], map[string]string{"Condition": sm[2]}}
+	}
+
+	return nil
+}
+
+var rgxClassName = regexp.MustCompile(`^\.([\w-]+)(?:\s*\?\s*(.*)$)?`)
+
+func (s *scanner) scanClassName() *token {
+	if sm := rgxClassName.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokClassName, sm[1], map[string]string{"Condition": sm[2]}}
+	}
+
+	return nil
+}
+
+var rgxAttribute = regexp.MustCompile(`^\[([\w\-:@\.]+)\s*(?:=\s*(\"([^\"\\]*)\"|([^\]]+)))?\](?:\s*\?\s*(.*)$)?`)
+
+func (s *scanner) scanAttribute() *token {
+	if sm := rgxAttribute.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+
+		if len(sm[3]) != 0 || sm[2] == "" {
+			return &token{tokAttribute, sm[1], map[string]string{"Content": sm[3], "Mode": "raw", "Condition": sm[5]}}
+		}
+
+		return &token{tokAttribute, sm[1], map[string]string{"Content": sm[4], "Mode": "expression", "Condition": sm[5]}}
+	}
+
+	return nil
+}
+
+var rgxImport = regexp.MustCompile(`^import\s+([0-9a-zA-Z_\-\. \/]*)$`)
+
+func (s *scanner) scanImport() *token {
+	if sm := rgxImport.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokImport, sm[1], nil}
+	}
+
+	return nil
+}
+
+var rgxExtends = regexp.MustCompile(`^extends\s+([0-9a-zA-Z_\-\. \/]*)$`)
+
+func (s *scanner) scanExtends() *token {
+	if sm := rgxExtends.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokExtends, sm[1], nil}
+	}
+
+	return nil
+}
+
+var rgxBlock = regexp.MustCompile(`^block\s+(?:(append|prepend)\s+)?([0-9a-zA-Z_\-\. \/]*)$`)
+
+func (s *scanner) scanBlock() *token {
+	if sm := rgxBlock.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokNamedBlock, sm[2], map[string]string{"Modifier": sm[1]}}
+	}
+
+	return nil
+}
+
+var rgxTag = regexp.MustCompile(`^(\w[-:\w]*)`)
+
+func (s *scanner) scanTag() *token {
+	if sm := rgxTag.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokTag, sm[1], nil}
+	}
+
+	return nil
+}
+
+var rgxMixin = regexp.MustCompile(`^mixin ([a-zA-Z_-]+\w*)(\(((\$\w*(,\s)?)*)\))?$`)
+
+func (s *scanner) scanMixin() *token {
+	if sm := rgxMixin.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokMixin, sm[1], map[string]string{"Args": sm[3]}}
+	}
+
+	return nil
+}
+
+var rgxMixinCall = regexp.MustCompile(`^\+([A-Za-z_-]+\w*)(\((.+(,\s)?)*\))?$`)
+
+func (s *scanner) scanMixinCall() *token {
+	if sm := rgxMixinCall.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+		return &token{tokMixinCall, sm[1], map[string]string{"Args": sm[3]}}
+	}
+
+	return nil
+}
+
+var rgxText = regexp.MustCompile(`^(\|)? ?(.*)$`)
+
+func (s *scanner) scanText() *token {
+	if sm := rgxText.FindStringSubmatch(s.buffer); len(sm) != 0 {
+		s.consume(len(sm[0]))
+
+		mode := "inline"
+		if sm[1] == "|" {
+			mode = "piped"
+		}
+
+		return &token{tokText, sm[2], map[string]string{"Mode": mode}}
+	}
+
+	return nil
+}
+
+// Moves position forward, and removes beginning of s.buffer (len bytes)
+func (s *scanner) consume(runes int) {
+	if len(s.buffer) < runes {
+		panic(fmt.Sprintf("Unable to consume %d runes from buffer.", runes))
+	}
+
+	s.lastTokenLine = s.line
+	s.lastTokenCol = s.col
+	s.lastTokenSize = runes
+
+	s.buffer = s.buffer[runes:]
+	s.col += runes
+}
+
+// Reads string into s.buffer
+func (s *scanner) ensureBuffer() {
+	if len(s.buffer) > 0 {
+		return
+	}
+
+	buf, err := s.reader.ReadString('\n')
+
+	if err != nil && err != io.EOF {
+		panic(err)
+	} else if err != nil && len(buf) == 0 {
+		s.state = scnEOF
+	} else {
+		// endline "LF only" or "\n" use Unix, Linux, modern MacOS X, FreeBSD, BeOS, RISC OS
+		if buf[len(buf)-1] == '\n' {
+			buf = buf[:len(buf)-1]
+		}
+		// endline "CR+LF" or "\r\n" use internet protocols, DEC RT-11, Windows, CP/M, MS-DOS, OS/2, Symbian OS
+		if len(buf) > 0 && buf[len(buf)-1] == '\r' {
+			buf = buf[:len(buf)-1]
+		}
+
+		s.state = scnNewLine
+		s.buffer = buf
+		s.line += 1
+		s.col = 0
+	}
+}

+ 287 - 0
vendor/github.com/eknkc/amber/runtime.go

@@ -0,0 +1,287 @@
+package amber
+
+import (
+	"encoding/json"
+	"fmt"
+	"html/template"
+	"reflect"
+)
+
+var FuncMap = template.FuncMap{
+	"__amber_add":   runtime_add,
+	"__amber_sub":   runtime_sub,
+	"__amber_mul":   runtime_mul,
+	"__amber_quo":   runtime_quo,
+	"__amber_rem":   runtime_rem,
+	"__amber_minus": runtime_minus,
+	"__amber_plus":  runtime_plus,
+	"__amber_eql":   runtime_eql,
+	"__amber_gtr":   runtime_gtr,
+	"__amber_lss":   runtime_lss,
+
+	"json":      runtime_json,
+	"unescaped": runtime_unescaped,
+}
+
+func runtime_add(x, y interface{}) interface{} {
+	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Int() + vy.Int()
+			case reflect.Float32, reflect.Float64:
+				return float64(vx.Int()) + vy.Float()
+			case reflect.String:
+				return fmt.Sprintf("%d%s", vx.Int(), vy.String())
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Float() + float64(vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return vx.Float() + vy.Float()
+			case reflect.String:
+				return fmt.Sprintf("%f%s", vx.Float(), vy.String())
+			}
+		}
+	case reflect.String:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return fmt.Sprintf("%s%d", vx.String(), vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return fmt.Sprintf("%s%f", vx.String(), vy.Float())
+			case reflect.String:
+				return fmt.Sprintf("%s%s", vx.String(), vy.String())
+			}
+		}
+	}
+
+	return "<nil>"
+}
+
+func runtime_sub(x, y interface{}) interface{} {
+	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Int() - vy.Int()
+			case reflect.Float32, reflect.Float64:
+				return float64(vx.Int()) - vy.Float()
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Float() - float64(vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return vx.Float() - vy.Float()
+			}
+		}
+	}
+
+	return "<nil>"
+}
+
+func runtime_mul(x, y interface{}) interface{} {
+	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Int() * vy.Int()
+			case reflect.Float32, reflect.Float64:
+				return float64(vx.Int()) * vy.Float()
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Float() * float64(vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return vx.Float() * vy.Float()
+			}
+		}
+	}
+
+	return "<nil>"
+}
+
+func runtime_quo(x, y interface{}) interface{} {
+	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Int() / vy.Int()
+			case reflect.Float32, reflect.Float64:
+				return float64(vx.Int()) / vy.Float()
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Float() / float64(vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return vx.Float() / vy.Float()
+			}
+		}
+	}
+
+	return "<nil>"
+}
+
+func runtime_rem(x, y interface{}) interface{} {
+	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Int() % vy.Int()
+			}
+		}
+	}
+
+	return "<nil>"
+}
+
+func runtime_minus(x interface{}) interface{} {
+	vx := reflect.ValueOf(x)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		return -vx.Int()
+	case reflect.Float32, reflect.Float64:
+		return -vx.Float()
+	}
+
+	return "<nil>"
+}
+
+func runtime_plus(x interface{}) interface{} {
+	vx := reflect.ValueOf(x)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		return +vx.Int()
+	case reflect.Float32, reflect.Float64:
+		return +vx.Float()
+	}
+
+	return "<nil>"
+}
+
+func runtime_eql(x, y interface{}) bool {
+	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Int() == vy.Int()
+			case reflect.Float32, reflect.Float64:
+				return float64(vx.Int()) == vy.Float()
+			case reflect.String:
+				return fmt.Sprintf("%d", vx.Int()) == vy.String()
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Float() == float64(vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return vx.Float() == vy.Float()
+			case reflect.String:
+				return fmt.Sprintf("%f", vx.Float()) == vy.String()
+			}
+		}
+	case reflect.String:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.String() == fmt.Sprintf("%d", vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return vx.String() == fmt.Sprintf("%f", vy.Float())
+			case reflect.String:
+				return vx.String() == fmt.Sprintf("%s", vy.String())
+			}
+		}
+	case reflect.Bool:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Bool() && vy.Int() != 0
+			case reflect.Bool:
+				return vx.Bool() == vy.Bool()
+			}
+		}
+	}
+
+	return false
+}
+
+func runtime_lss(x, y interface{}) bool {
+	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+	switch vx.Kind() {
+	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Int() < vy.Int()
+			case reflect.Float32, reflect.Float64:
+				return float64(vx.Int()) < vy.Float()
+			case reflect.String:
+				return fmt.Sprintf("%d", vx.Int()) < vy.String()
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.Float() < float64(vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return vx.Float() < vy.Float()
+			case reflect.String:
+				return fmt.Sprintf("%f", vx.Float()) < vy.String()
+			}
+		}
+	case reflect.String:
+		{
+			switch vy.Kind() {
+			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
+				return vx.String() < fmt.Sprintf("%d", vy.Int())
+			case reflect.Float32, reflect.Float64:
+				return vx.String() < fmt.Sprintf("%f", vy.Float())
+			case reflect.String:
+				return vx.String() < vy.String()
+			}
+		}
+	}
+
+	return false
+}
+
+func runtime_gtr(x, y interface{}) bool {
+	return !runtime_lss(x, y) && !runtime_eql(x, y)
+}
+
+func runtime_json(x interface{}) (res string, err error) {
+	bres, err := json.Marshal(x)
+	res = string(bres)
+	return
+}
+
+func runtime_unescaped(x string) interface{} {
+	return template.HTML(x)
+}

+ 21 - 0
vendor/github.com/fatih/structs/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 141 - 0
vendor/github.com/fatih/structs/field.go

@@ -0,0 +1,141 @@
+package structs
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+var (
+	errNotExported = errors.New("field is not exported")
+	errNotSettable = errors.New("field is not settable")
+)
+
+// Field represents a single struct field that encapsulates high level
+// functions around the field.
+type Field struct {
+	value      reflect.Value
+	field      reflect.StructField
+	defaultTag string
+}
+
+// Tag returns the value associated with key in the tag string. If there is no
+// such key in the tag, Tag returns the empty string.
+func (f *Field) Tag(key string) string {
+	return f.field.Tag.Get(key)
+}
+
+// Value returns the underlying value of the field. It panics if the field
+// is not exported.
+func (f *Field) Value() interface{} {
+	return f.value.Interface()
+}
+
+// IsEmbedded returns true if the given field is an anonymous field (embedded)
+func (f *Field) IsEmbedded() bool {
+	return f.field.Anonymous
+}
+
+// IsExported returns true if the given field is exported.
+func (f *Field) IsExported() bool {
+	return f.field.PkgPath == ""
+}
+
+// IsZero returns true if the given field is not initialized (has a zero value).
+// It panics if the field is not exported.
+func (f *Field) IsZero() bool {
+	zero := reflect.Zero(f.value.Type()).Interface()
+	current := f.Value()
+
+	return reflect.DeepEqual(current, zero)
+}
+
+// Name returns the name of the given field
+func (f *Field) Name() string {
+	return f.field.Name
+}
+
+// Kind returns the fields kind, such as "string", "map", "bool", etc ..
+func (f *Field) Kind() reflect.Kind {
+	return f.value.Kind()
+}
+
+// Set sets the field to given value v. It returns an error if the field is not
+// settable (not addressable or not exported) or if the given value's type
+// doesn't match the fields type.
+func (f *Field) Set(val interface{}) error {
+	// we can't set unexported fields, so be sure this field is exported
+	if !f.IsExported() {
+		return errNotExported
+	}
+
+	// do we get here? not sure...
+	if !f.value.CanSet() {
+		return errNotSettable
+	}
+
+	given := reflect.ValueOf(val)
+
+	if f.value.Kind() != given.Kind() {
+		return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
+	}
+
+	f.value.Set(given)
+	return nil
+}
+
+// Zero sets the field to its zero value. It returns an error if the field is not
+// settable (not addressable or not exported).
+func (f *Field) Zero() error {
+	zero := reflect.Zero(f.value.Type()).Interface()
+	return f.Set(zero)
+}
+
+// Fields returns a slice of Fields. This is particular handy to get the fields
+// of a nested struct . A struct tag with the content of "-" ignores the
+// checking of that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field *http.Request `structs:"-"`
+//
+// It panics if field is not exported or if field's kind is not struct
+func (f *Field) Fields() []*Field {
+	return getFields(f.value, f.defaultTag)
+}
+
+// Field returns the field from a nested struct. It panics if the nested struct
+// is not exported or if the field was not found.
+func (f *Field) Field(name string) *Field {
+	field, ok := f.FieldOk(name)
+	if !ok {
+		panic("field not found")
+	}
+
+	return field
+}
+
+// FieldOk returns the field from a nested struct. The boolean returns whether
+// the field was found (true) or not (false).
+func (f *Field) FieldOk(name string) (*Field, bool) {
+	value := &f.value
+	// value must be settable so we need to make sure it holds the address of the
+	// variable and not a copy, so we can pass the pointer to strctVal instead of a
+	// copy (which is not assigned to any variable, hence not settable).
+	// see "https://blog.golang.org/laws-of-reflection#TOC_8."
+	if f.value.Kind() != reflect.Ptr {
+		a := f.value.Addr()
+		value = &a
+	}
+	v := strctVal(value.Interface())
+	t := v.Type()
+
+	field, ok := t.FieldByName(name)
+	if !ok {
+		return nil, false
+	}
+
+	return &Field{
+		field: field,
+		value: v.FieldByName(name),
+	}, true
+}

+ 586 - 0
vendor/github.com/fatih/structs/structs.go

@@ -0,0 +1,586 @@
+// Package structs contains various utilities functions to work with structs.
+package structs
+
+import (
+	"fmt"
+
+	"reflect"
+)
+
+var (
+	// DefaultTagName is the default tag name for struct fields which provides
+	// a more granular way to tweak certain structs. Lookup the necessary functions
+	// for more info.
+	DefaultTagName = "structs" // struct's field default tag name
+)
+
+// Struct encapsulates a struct type to provide several high level functions
+// around the struct.
+type Struct struct {
+	raw     interface{}
+	value   reflect.Value
+	TagName string
+}
+
+// New returns a new *Struct with the struct s. It panics if the s's kind is
+// not struct.
+func New(s interface{}) *Struct {
+	return &Struct{
+		raw:     s,
+		value:   strctVal(s),
+		TagName: DefaultTagName,
+	}
+}
+
+// Map converts the given struct to a map[string]interface{}, where the keys
+// of the map are the field names and the values of the map the associated
+// values of the fields. The default key string is the struct field name but
+// can be changed in the struct field's tag value. The "structs" key in the
+// struct's field tag value is the key name. Example:
+//
+//   // Field appears in map as key "myName".
+//   Name string `structs:"myName"`
+//
+// A tag value with the content of "-" ignores that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// A tag value with the content of "string" uses the stringer to get the value. Example:
+//
+//   // The value will be output of Animal's String() func.
+//   // Map will panic if Animal does not implement String().
+//   Field *Animal `structs:"field,string"`
+//
+// A tag value with the option of "flatten" used in a struct field is to flatten its fields
+// in the output map. Example:
+//
+//   // The FieldStruct's fields will be flattened into the output map.
+//   FieldStruct time.Time `structs:",flatten"`
+//
+// A tag value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+//   // Field is not processed further by this package.
+//   Field time.Time     `structs:"myName,omitnested"`
+//   Field *http.Request `structs:",omitnested"`
+//
+// A tag value with the option of "omitempty" ignores that particular field if
+// the field value is empty. Example:
+//
+//   // Field appears in map as key "myName", but the field is
+//   // skipped if empty.
+//   Field string `structs:"myName,omitempty"`
+//
+//   // Field appears in map as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   Field string `structs:",omitempty"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields will be neglected.
+func (s *Struct) Map() map[string]interface{} {
+	out := make(map[string]interface{})
+	s.FillMap(out)
+	return out
+}
+
+// FillMap is the same as Map. Instead of returning the output, it fills the
+// given map.
+func (s *Struct) FillMap(out map[string]interface{}) {
+	if out == nil {
+		return
+	}
+
+	fields := s.structFields()
+
+	for _, field := range fields {
+		name := field.Name
+		val := s.value.FieldByName(name)
+		isSubStruct := false
+		var finalVal interface{}
+
+		tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
+		if tagName != "" {
+			name = tagName
+		}
+
+		// if the value is a zero value and the field is marked as omitempty do
+		// not include
+		if tagOpts.Has("omitempty") {
+			zero := reflect.Zero(val.Type()).Interface()
+			current := val.Interface()
+
+			if reflect.DeepEqual(current, zero) {
+				continue
+			}
+		}
+
+		if !tagOpts.Has("omitnested") {
+			finalVal = s.nested(val)
+
+			v := reflect.ValueOf(val.Interface())
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+
+			switch v.Kind() {
+			case reflect.Map, reflect.Struct:
+				isSubStruct = true
+			}
+		} else {
+			finalVal = val.Interface()
+		}
+
+		if tagOpts.Has("string") {
+			s, ok := val.Interface().(fmt.Stringer)
+			if ok {
+				out[name] = s.String()
+			}
+			continue
+		}
+
+		if isSubStruct && (tagOpts.Has("flatten")) {
+			for k := range finalVal.(map[string]interface{}) {
+				out[k] = finalVal.(map[string]interface{})[k]
+			}
+		} else {
+			out[name] = finalVal
+		}
+	}
+}
+
+// Values converts the given s struct's field values to a []interface{}.  A
+// struct tag with the content of "-" ignores the that particular field.
+// Example:
+//
+//   // Field is ignored by this package.
+//   Field int `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+//   // Fields is not processed further by this package.
+//   Field time.Time     `structs:",omitnested"`
+//   Field *http.Request `structs:",omitnested"`
+//
+// A tag value with the option of "omitempty" ignores that particular field and
+// is not added to the values if the field value is empty. Example:
+//
+//   // Field is skipped if empty
+//   Field string `structs:",omitempty"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields  will be neglected.
+func (s *Struct) Values() []interface{} {
+	fields := s.structFields()
+
+	var t []interface{}
+
+	for _, field := range fields {
+		val := s.value.FieldByName(field.Name)
+
+		_, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+		// if the value is a zero value and the field is marked as omitempty do
+		// not include
+		if tagOpts.Has("omitempty") {
+			zero := reflect.Zero(val.Type()).Interface()
+			current := val.Interface()
+
+			if reflect.DeepEqual(current, zero) {
+				continue
+			}
+		}
+
+		if tagOpts.Has("string") {
+			s, ok := val.Interface().(fmt.Stringer)
+			if ok {
+				t = append(t, s.String())
+			}
+			continue
+		}
+
+		if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+			// look out for embedded structs, and convert them to a
+			// []interface{} to be added to the final values slice
+			for _, embeddedVal := range Values(val.Interface()) {
+				t = append(t, embeddedVal)
+			}
+		} else {
+			t = append(t, val.Interface())
+		}
+	}
+
+	return t
+}
+
+// Fields returns a slice of Fields. A struct tag with the content of "-"
+// ignores the checking of that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// It panics if s's kind is not struct.
+func (s *Struct) Fields() []*Field {
+	return getFields(s.value, s.TagName)
+}
+
+// Names returns a slice of field names. A struct tag with the content of "-"
+// ignores the checking of that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// It panics if s's kind is not struct.
+func (s *Struct) Names() []string {
+	fields := getFields(s.value, s.TagName)
+
+	names := make([]string, len(fields))
+
+	for i, field := range fields {
+		names[i] = field.Name()
+	}
+
+	return names
+}
+
+func getFields(v reflect.Value, tagName string) []*Field {
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	t := v.Type()
+
+	var fields []*Field
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+
+		if tag := field.Tag.Get(tagName); tag == "-" {
+			continue
+		}
+
+		f := &Field{
+			field: field,
+			value: v.FieldByName(field.Name),
+		}
+
+		fields = append(fields, f)
+
+	}
+
+	return fields
+}
+
+// Field returns a new Field struct that provides several high level functions
+// around a single struct field entity. It panics if the field is not found.
+func (s *Struct) Field(name string) *Field {
+	f, ok := s.FieldOk(name)
+	if !ok {
+		panic("field not found")
+	}
+
+	return f
+}
+
+// FieldOk returns a new Field struct that provides several high level functions
+// around a single struct field entity. The boolean returns true if the field
+// was found.
+func (s *Struct) FieldOk(name string) (*Field, bool) {
+	t := s.value.Type()
+
+	field, ok := t.FieldByName(name)
+	if !ok {
+		return nil, false
+	}
+
+	return &Field{
+		field:      field,
+		value:      s.value.FieldByName(name),
+		defaultTag: s.TagName,
+	}, true
+}
+
+// IsZero returns true if all fields in a struct is a zero value (not
+// initialized) A struct tag with the content of "-" ignores the checking of
+// that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+//   // Field is not processed further by this package.
+//   Field time.Time     `structs:"myName,omitnested"`
+//   Field *http.Request `structs:",omitnested"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields  will be neglected. It panics if s's kind is not struct.
+func (s *Struct) IsZero() bool {
+	fields := s.structFields()
+
+	for _, field := range fields {
+		val := s.value.FieldByName(field.Name)
+
+		_, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+		if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+			ok := IsZero(val.Interface())
+			if !ok {
+				return false
+			}
+
+			continue
+		}
+
+		// zero value of the given field, such as "" for string, 0 for int
+		zero := reflect.Zero(val.Type()).Interface()
+
+		//  current value of the given field
+		current := val.Interface()
+
+		if !reflect.DeepEqual(current, zero) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// HasZero returns true if a field in a struct is not initialized (zero value).
+// A struct tag with the content of "-" ignores the checking of that particular
+// field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+//   // Field is not processed further by this package.
+//   Field time.Time     `structs:"myName,omitnested"`
+//   Field *http.Request `structs:",omitnested"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields  will be neglected. It panics if s's kind is not struct.
+func (s *Struct) HasZero() bool {
+	fields := s.structFields()
+
+	for _, field := range fields {
+		val := s.value.FieldByName(field.Name)
+
+		_, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+		if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+			ok := HasZero(val.Interface())
+			if ok {
+				return true
+			}
+
+			continue
+		}
+
+		// zero value of the given field, such as "" for string, 0 for int
+		zero := reflect.Zero(val.Type()).Interface()
+
+		//  current value of the given field
+		current := val.Interface()
+
+		if reflect.DeepEqual(current, zero) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Name returns the structs's type name within its package. For more info refer
+// to Name() function.
+func (s *Struct) Name() string {
+	return s.value.Type().Name()
+}
+
+// structFields returns the exported struct fields for a given s struct. This
+// is a convenient helper method to avoid duplicate code in some of the
+// functions.
+func (s *Struct) structFields() []reflect.StructField {
+	t := s.value.Type()
+
+	var f []reflect.StructField
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		// we can't access the value of unexported fields
+		if field.PkgPath != "" {
+			continue
+		}
+
+		// don't check if it's omitted
+		if tag := field.Tag.Get(s.TagName); tag == "-" {
+			continue
+		}
+
+		f = append(f, field)
+	}
+
+	return f
+}
+
+func strctVal(s interface{}) reflect.Value {
+	v := reflect.ValueOf(s)
+
+	// if pointer get the underlying element
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	if v.Kind() != reflect.Struct {
+		panic("not struct")
+	}
+
+	return v
+}
+
+// Map converts the given struct to a map[string]interface{}. For more info
+// refer to Struct types Map() method. It panics if s's kind is not struct.
+func Map(s interface{}) map[string]interface{} {
+	return New(s).Map()
+}
+
+// FillMap is the same as Map. Instead of returning the output, it fills the
+// given map.
+func FillMap(s interface{}, out map[string]interface{}) {
+	New(s).FillMap(out)
+}
+
+// Values converts the given struct to a []interface{}. For more info refer to
+// Struct types Values() method.  It panics if s's kind is not struct.
+func Values(s interface{}) []interface{} {
+	return New(s).Values()
+}
+
+// Fields returns a slice of *Field. For more info refer to Struct types
+// Fields() method.  It panics if s's kind is not struct.
+func Fields(s interface{}) []*Field {
+	return New(s).Fields()
+}
+
+// Names returns a slice of field names. For more info refer to Struct types
+// Names() method.  It panics if s's kind is not struct.
+func Names(s interface{}) []string {
+	return New(s).Names()
+}
+
+// IsZero returns true if all fields is equal to a zero value. For more info
+// refer to Struct types IsZero() method.  It panics if s's kind is not struct.
+func IsZero(s interface{}) bool {
+	return New(s).IsZero()
+}
+
+// HasZero returns true if any field is equal to a zero value. For more info
+// refer to Struct types HasZero() method.  It panics if s's kind is not struct.
+func HasZero(s interface{}) bool {
+	return New(s).HasZero()
+}
+
+// IsStruct returns true if the given variable is a struct or a pointer to
+// struct.
+func IsStruct(s interface{}) bool {
+	v := reflect.ValueOf(s)
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	// uninitialized zero value of a struct
+	if v.Kind() == reflect.Invalid {
+		return false
+	}
+
+	return v.Kind() == reflect.Struct
+}
+
+// Name returns the structs's type name within its package. It returns an
+// empty string for unnamed types. It panics if s's kind is not struct.
+func Name(s interface{}) string {
+	return New(s).Name()
+}
+
+// nested retrieves recursively all types for the given value and returns the
+// nested value.
+func (s *Struct) nested(val reflect.Value) interface{} {
+	var finalVal interface{}
+
+	v := reflect.ValueOf(val.Interface())
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		n := New(val.Interface())
+		n.TagName = s.TagName
+		m := n.Map()
+
+		// do not add the converted value if there are no exported fields, ie:
+		// time.Time
+		if len(m) == 0 {
+			finalVal = val.Interface()
+		} else {
+			finalVal = m
+		}
+	case reflect.Map:
+		// get the element type of the map
+		mapElem := val.Type()
+		switch val.Type().Kind() {
+		case reflect.Ptr, reflect.Array, reflect.Map,
+			reflect.Slice, reflect.Chan:
+			mapElem = val.Type().Elem()
+			if mapElem.Kind() == reflect.Ptr {
+				mapElem = mapElem.Elem()
+			}
+		}
+
+		// only iterate over struct types, ie: map[string]StructType,
+		// map[string][]StructType,
+		if mapElem.Kind() == reflect.Struct ||
+			(mapElem.Kind() == reflect.Slice &&
+				mapElem.Elem().Kind() == reflect.Struct) {
+			m := make(map[string]interface{}, val.Len())
+			for _, k := range val.MapKeys() {
+				m[k.String()] = s.nested(val.MapIndex(k))
+			}
+			finalVal = m
+			break
+		}
+
+		// TODO(arslan): should this be optional?
+		finalVal = val.Interface()
+	case reflect.Slice, reflect.Array:
+		if val.Type().Kind() == reflect.Interface {
+			finalVal = val.Interface()
+			break
+		}
+
+		// TODO(arslan): should this be optional?
+		// do not iterate of non struct types, just pass the value. Ie: []int,
+		// []string, co... We only iterate further if it's a struct.
+		// i.e []foo or []*foo
+		if val.Type().Elem().Kind() != reflect.Struct &&
+			!(val.Type().Elem().Kind() == reflect.Ptr &&
+				val.Type().Elem().Elem().Kind() == reflect.Struct) {
+			finalVal = val.Interface()
+			break
+		}
+
+		slices := make([]interface{}, val.Len(), val.Len())
+		for x := 0; x < val.Len(); x++ {
+			slices[x] = s.nested(val.Index(x))
+		}
+		finalVal = slices
+	default:
+		finalVal = val.Interface()
+	}
+
+	return finalVal
+}

+ 32 - 0
vendor/github.com/fatih/structs/tags.go

@@ -0,0 +1,32 @@
+package structs
+
+import "strings"
+
+// tagOptions contains a slice of tag options
+type tagOptions []string
+
+// Has returns true if the given option is available in tagOptions
+func (t tagOptions) Has(opt string) bool {
+	for _, tagOpt := range t {
+		if tagOpt == opt {
+			return true
+		}
+	}
+
+	return false
+}
+
+// parseTag splits a struct field's tag into its name and a list of options
+// which comes after a name. A tag is in the form of: "name,option1,option2".
+// The name can be neglected.
+func parseTag(tag string) (string, tagOptions) {
+	// tag is one of followings:
+	// ""
+	// "name"
+	// "name,opt"
+	// "name,opt,opt2"
+	// ",opt"
+
+	res := strings.Split(tag, ",")
+	return res[0], res[1:]
+}

+ 10 - 0
vendor/github.com/flosch/pongo2/AUTHORS

@@ -0,0 +1,10 @@
+Main author and maintainer of pongo2:
+
+* Florian Schlachter <flori@n-schlachter.de>
+
+Contributors (in no specific order):
+
+* @romanoaugusto88
+* @vitalbh
+
+Feel free to add yourself to the list or to modify your entry if you did a contribution.

+ 20 - 0
vendor/github.com/flosch/pongo2/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2014 Florian Schlachter
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 130 - 0
vendor/github.com/flosch/pongo2/context.go

@@ -0,0 +1,130 @@
+package pongo2
+
+import (
+	"regexp"
+
+	"github.com/juju/errors"
+)
+
+var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
+
+// A Context type provides constants, variables, instances or functions to a template.
+//
+// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
+// Currently, context["pongo2"] contains the following keys:
+//  1. version: returns the version string
+//
+// Template examples for accessing items from your context:
+//     {{ myconstant }}
+//     {{ myfunc("test", 42) }}
+//     {{ user.name }}
+//     {{ pongo2.version }}
+type Context map[string]interface{}
+
+func (c Context) checkForValidIdentifiers() *Error {
+	for k, v := range c {
+		if !reIdentifiers.MatchString(k) {
+			return &Error{
+				Sender:    "checkForValidIdentifiers",
+				OrigError: errors.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
+			}
+		}
+	}
+	return nil
+}
+
+// Update updates this context with the key/value-pairs from another context.
+func (c Context) Update(other Context) Context {
+	for k, v := range other {
+		c[k] = v
+	}
+	return c
+}
+
+// ExecutionContext contains all data important for the current rendering state.
+//
+// If you're writing a custom tag, your tag's Execute()-function will
+// have access to the ExecutionContext. This struct stores anything
+// about the current rendering process's Context including
+// the Context provided by the user (field Public).
+// You can safely use the Private context to provide data to the user's
+// template (like a 'forloop'-information). The Shared-context is used
+// to share data between tags. All ExecutionContexts share this context.
+//
+// Please be careful when accessing the Public data.
+// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
+//
+// To create your own execution context within tags, use the
+// NewChildExecutionContext(parent) function.
+type ExecutionContext struct {
+	template *Template
+
+	Autoescape bool
+	Public     Context
+	Private    Context
+	Shared     Context
+}
+
+var pongo2MetaContext = Context{
+	"version": Version,
+}
+
+func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
+	privateCtx := make(Context)
+
+	// Make the pongo2-related funcs/vars available to the context
+	privateCtx["pongo2"] = pongo2MetaContext
+
+	return &ExecutionContext{
+		template: tpl,
+
+		Public:     ctx,
+		Private:    privateCtx,
+		Autoescape: true,
+	}
+}
+
+func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
+	newctx := &ExecutionContext{
+		template: parent.template,
+
+		Public:     parent.Public,
+		Private:    make(Context),
+		Autoescape: parent.Autoescape,
+	}
+	newctx.Shared = parent.Shared
+
+	// Copy all existing private items
+	newctx.Private.Update(parent.Private)
+
+	return newctx
+}
+
+func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
+	return ctx.OrigError(errors.New(msg), token)
+}
+
+func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
+	filename := ctx.template.name
+	var line, col int
+	if token != nil {
+		// No tokens available
+		// TODO: Add location (from where?)
+		filename = token.Filename
+		line = token.Line
+		col = token.Col
+	}
+	return &Error{
+		Template:  ctx.template,
+		Filename:  filename,
+		Line:      line,
+		Column:    col,
+		Token:     token,
+		Sender:    "execution",
+		OrigError: err,
+	}
+}
+
+func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
+	ctx.template.set.logf(format, args...)
+}

+ 31 - 0
vendor/github.com/flosch/pongo2/doc.go

@@ -0,0 +1,31 @@
+// A Django-syntax like template-engine
+//
+// Blog posts about pongo2 (including introduction and migration):
+// https://www.florian-schlachter.de/?tag=pongo2
+//
+// Complete documentation on the template language:
+// https://docs.djangoproject.com/en/dev/topics/templates/
+//
+// Try out pongo2 live in the pongo2 playground:
+// https://www.florian-schlachter.de/pongo2/
+//
+// Make sure to read README.md in the repository as well.
+//
+// A tiny example with template strings:
+//
+// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
+//
+//     // Compile the template first (i. e. creating the AST)
+//     tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+//     if err != nil {
+//         panic(err)
+//     }
+//     // Now you can render the template with the given
+//     // pongo2.Context how often you want to.
+//     out, err := tpl.Execute(pongo2.Context{"name": "fred"})
+//     if err != nil {
+//         panic(err)
+//     }
+//     fmt.Println(out) // Output: Hello Fred!
+//
+package pongo2

+ 91 - 0
vendor/github.com/flosch/pongo2/error.go

@@ -0,0 +1,91 @@
+package pongo2
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+)
+
+// The Error type is being used to address an error during lexing, parsing or
+// execution. If you want to return an error object (for example in your own
+// tag or filter) fill this object with as much information as you have.
+// Make sure "Sender" is always given (if you're returning an error within
+// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag').
+// It's okay if you only fill in ErrorMsg if you don't have any other details at hand.
+type Error struct {
+	Template  *Template
+	Filename  string
+	Line      int
+	Column    int
+	Token     *Token
+	Sender    string
+	OrigError error
+}
+
+func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
+	if e.Template == nil {
+		e.Template = template
+	}
+
+	if e.Token == nil {
+		e.Token = t
+		if e.Line <= 0 {
+			e.Line = t.Line
+			e.Column = t.Col
+		}
+	}
+
+	return e
+}
+
+// Returns a nice formatted error string.
+func (e *Error) Error() string {
+	s := "[Error"
+	if e.Sender != "" {
+		s += " (where: " + e.Sender + ")"
+	}
+	if e.Filename != "" {
+		s += " in " + e.Filename
+	}
+	if e.Line > 0 {
+		s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
+		if e.Token != nil {
+			s += fmt.Sprintf(" near '%s'", e.Token.Val)
+		}
+	}
+	s += "] "
+	s += e.OrigError.Error()
+	return s
+}
+
+// RawLine returns the affected line from the original template, if available.
+func (e *Error) RawLine() (line string, available bool, outErr error) {
+	if e.Line <= 0 || e.Filename == "<string>" {
+		return "", false, nil
+	}
+
+	filename := e.Filename
+	if e.Template != nil {
+		filename = e.Template.set.resolveFilename(e.Template, e.Filename)
+	}
+	file, err := os.Open(filename)
+	if err != nil {
+		return "", false, err
+	}
+	defer func() {
+		err := file.Close()
+		if err != nil && outErr == nil {
+			outErr = err
+		}
+	}()
+
+	scanner := bufio.NewScanner(file)
+	l := 0
+	for scanner.Scan() {
+		l++
+		if l == e.Line {
+			return scanner.Text(), true, nil
+		}
+	}
+	return "", false, nil
+}

+ 139 - 0
vendor/github.com/flosch/pongo2/filters.go

@@ -0,0 +1,139 @@
+package pongo2
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+)
+
+// FilterFunction is the type filter functions must fulfil
+type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
+
+var filters map[string]FilterFunction
+
+func init() {
+	filters = make(map[string]FilterFunction)
+}
+
+// RegisterFilter registers a new filter. If there's already a filter with the same
+// name, RegisterFilter will panic. You usually want to call this
+// function in the filter's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+func RegisterFilter(name string, fn FilterFunction) error {
+	_, existing := filters[name]
+	if existing {
+		return errors.Errorf("filter with name '%s' is already registered", name)
+	}
+	filters[name] = fn
+	return nil
+}
+
+// ReplaceFilter replaces an already registered filter with a new implementation. Use this
+// function with caution since it allows you to change existing filter behaviour.
+func ReplaceFilter(name string, fn FilterFunction) error {
+	_, existing := filters[name]
+	if !existing {
+		return errors.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
+	}
+	filters[name] = fn
+	return nil
+}
+
+// MustApplyFilter behaves like ApplyFilter, but panics on an error.
+func MustApplyFilter(name string, value *Value, param *Value) *Value {
+	val, err := ApplyFilter(name, value, param)
+	if err != nil {
+		panic(err)
+	}
+	return val
+}
+
+// ApplyFilter applies a filter to a given value using the given parameters.
+// Returns a *pongo2.Value or an error.
+func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
+	fn, existing := filters[name]
+	if !existing {
+		return nil, &Error{
+			Sender:    "applyfilter",
+			OrigError: errors.Errorf("Filter with name '%s' not found.", name),
+		}
+	}
+
+	// Make sure param is a *Value
+	if param == nil {
+		param = AsValue(nil)
+	}
+
+	return fn(value, param)
+}
+
+type filterCall struct {
+	token *Token
+
+	name      string
+	parameter IEvaluator
+
+	filterFunc FilterFunction
+}
+
+func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
+	var param *Value
+	var err *Error
+
+	if fc.parameter != nil {
+		param, err = fc.parameter.Evaluate(ctx)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		param = AsValue(nil)
+	}
+
+	filteredValue, err := fc.filterFunc(v, param)
+	if err != nil {
+		return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
+	}
+	return filteredValue, nil
+}
+
+// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
+func (p *Parser) parseFilter() (*filterCall, *Error) {
+	identToken := p.MatchType(TokenIdentifier)
+
+	// Check filter ident
+	if identToken == nil {
+		return nil, p.Error("Filter name must be an identifier.", nil)
+	}
+
+	filter := &filterCall{
+		token: identToken,
+		name:  identToken.Val,
+	}
+
+	// Get the appropriate filter function and bind it
+	filterFn, exists := filters[identToken.Val]
+	if !exists {
+		return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
+	}
+
+	filter.filterFunc = filterFn
+
+	// Check for filter-argument (2 tokens needed: ':' ARG)
+	if p.Match(TokenSymbol, ":") != nil {
+		if p.Peek(TokenSymbol, "}}") != nil {
+			return nil, p.Error("Filter parameter required after ':'.", nil)
+		}
+
+		// Get filter argument expression
+		v, err := p.parseVariableOrLiteral()
+		if err != nil {
+			return nil, err
+		}
+		filter.parameter = v
+	}
+
+	return filter, nil
+}

+ 927 - 0
vendor/github.com/flosch/pongo2/filters_builtin.go

@@ -0,0 +1,927 @@
+package pongo2
+
+/* Filters that are provided through github.com/flosch/pongo2-addons:
+   ------------------------------------------------------------------
+
+   filesizeformat
+   slugify
+   timesince
+   timeuntil
+
+   Filters that won't be added:
+   ----------------------------
+
+   get_static_prefix (reason: web-framework specific)
+   pprint (reason: python-specific)
+   static (reason: web-framework specific)
+
+   Reconsideration (not implemented yet):
+   --------------------------------------
+
+   force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
+   safeseq (reason: same reason as `force_escape`)
+   unordered_list (python-specific; not sure whether needed or not)
+   dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
+   dictsortreversed (see dictsort)
+*/
+
+import (
+	"bytes"
+	"fmt"
+	"math/rand"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/juju/errors"
+)
+
// init seeds the RNG used by the 'random' filter and registers all
// built-in filters under their Django-style names.
func init() {
	rand.Seed(time.Now().Unix())

	RegisterFilter("escape", filterEscape)
	RegisterFilter("safe", filterSafe)
	RegisterFilter("escapejs", filterEscapejs)

	RegisterFilter("add", filterAdd)
	RegisterFilter("addslashes", filterAddslashes)
	RegisterFilter("capfirst", filterCapfirst)
	RegisterFilter("center", filterCenter)
	RegisterFilter("cut", filterCut)
	RegisterFilter("date", filterDate)
	RegisterFilter("default", filterDefault)
	RegisterFilter("default_if_none", filterDefaultIfNone)
	RegisterFilter("divisibleby", filterDivisibleby)
	RegisterFilter("first", filterFirst)
	RegisterFilter("floatformat", filterFloatformat)
	RegisterFilter("get_digit", filterGetdigit)
	RegisterFilter("iriencode", filterIriencode)
	RegisterFilter("join", filterJoin)
	RegisterFilter("last", filterLast)
	RegisterFilter("length", filterLength)
	RegisterFilter("length_is", filterLengthis)
	RegisterFilter("linebreaks", filterLinebreaks)
	RegisterFilter("linebreaksbr", filterLinebreaksbr)
	RegisterFilter("linenumbers", filterLinenumbers)
	RegisterFilter("ljust", filterLjust)
	RegisterFilter("lower", filterLower)
	RegisterFilter("make_list", filterMakelist)
	RegisterFilter("phone2numeric", filterPhone2numeric)
	RegisterFilter("pluralize", filterPluralize)
	RegisterFilter("random", filterRandom)
	RegisterFilter("removetags", filterRemovetags)
	RegisterFilter("rjust", filterRjust)
	RegisterFilter("slice", filterSlice)
	RegisterFilter("split", filterSplit)
	RegisterFilter("stringformat", filterStringformat)
	RegisterFilter("striptags", filterStriptags)
	RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
	RegisterFilter("title", filterTitle)
	RegisterFilter("truncatechars", filterTruncatechars)
	RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
	RegisterFilter("truncatewords", filterTruncatewords)
	RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
	RegisterFilter("upper", filterUpper)
	RegisterFilter("urlencode", filterUrlencode)
	RegisterFilter("urlize", filterUrlize)
	RegisterFilter("urlizetrunc", filterUrlizetrunc)
	RegisterFilter("wordcount", filterWordcount)
	RegisterFilter("wordwrap", filterWordwrap)
	RegisterFilter("yesno", filterYesno)

	RegisterFilter("float", filterFloat)     // pongo-specific
	RegisterFilter("integer", filterInteger) // pongo-specific
}
+
// filterTruncatecharsHelper shortens s to at most newLen runes. When
// truncation happens and there is room (newLen >= 3), the last three
// kept runes are replaced with "...". It operates on runes, not bytes,
// so multi-byte characters are never cut in half.
func filterTruncatecharsHelper(s string, newLen int) string {
	// Bug fix: a negative newLen used to panic on runes[:newLen];
	// clamp it so the result is simply the empty string.
	if newLen < 0 {
		newLen = 0
	}
	runes := []rune(s)
	if newLen < len(runes) {
		if newLen >= 3 {
			return fmt.Sprintf("%s...", string(runes[:newLen-3]))
		}
		// Not enough space for the ellipsis
		return string(runes[:newLen])
	}
	return string(runes)
}
+
// filterTruncateHTMLHelper walks value as HTML: markup is copied to
// newOutput verbatim (opening tags are tracked on a stack), while text
// runes are handed to fn, which consumes them and returns the advanced
// byte index. cond() is consulted before every step and stops the walk
// once the caller's limit is reached; finalize() runs afterwards (e.g.
// to append "..."). Any tags still open when the walk ends are closed
// so the truncated output remains balanced.
func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
	vLen := len(value)
	var tagStack []string
	idx := 0

	for idx < vLen && !cond() {
		c, s := utf8.DecodeRuneInString(value[idx:])
		if c == utf8.RuneError {
			// Skip invalid UTF-8 bytes.
			idx += s
			continue
		}

		if c == '<' {
			newOutput.WriteRune(c)
			idx += s // consume "<"

			if idx+1 < vLen {
				if value[idx] == '/' {
					// Close tag

					newOutput.WriteString("/")

					tag := ""
					idx++ // consume "/"

					// Collect the tag name up to the closing '>'.
					for idx < vLen {
						c2, size2 := utf8.DecodeRuneInString(value[idx:])
						if c2 == utf8.RuneError {
							idx += size2
							continue
						}

						// End of tag found
						if c2 == '>' {
							idx++ // consume ">"
							break
						}
						tag += string(c2)
						idx += size2
					}

					if len(tagStack) > 0 {
						// Ideally, the close tag is TOP of tag stack
						// In malformed HTML, it must not be, so iterate through the stack and remove the tag
						for i := len(tagStack) - 1; i >= 0; i-- {
							if tagStack[i] == tag {
								// Found the tag
								tagStack[i] = tagStack[len(tagStack)-1]
								tagStack = tagStack[:len(tagStack)-1]
								break
							}
						}
					}

					newOutput.WriteString(tag)
					newOutput.WriteString(">")
				} else {
					// Open tag

					tag := ""

					params := false
					for idx < vLen {
						c2, size2 := utf8.DecodeRuneInString(value[idx:])
						if c2 == utf8.RuneError {
							idx += size2
							continue
						}

						newOutput.WriteRune(c2)

						// End of tag found
						if c2 == '>' {
							idx++ // consume ">"
							break
						}

						// Only the token before the first space is the tag name;
						// everything after that is attributes.
						if !params {
							if c2 == ' ' {
								params = true
							} else {
								tag += string(c2)
							}
						}

						idx += size2
					}

					// Add tag to stack
					tagStack = append(tagStack, tag)
				}
			}
		} else {
			// Plain text: let the caller consume it and advance the index.
			idx = fn(c, s, idx)
		}
	}

	finalize()

	for i := len(tagStack) - 1; i >= 0; i-- {
		tag := tagStack[i]
		// Close everything from the regular tag stack
		newOutput.WriteString(fmt.Sprintf("</%s>", tag))
	}
}
+
// filterTruncatechars implements the 'truncatechars' filter: the input
// is truncated to param runes, with an ellipsis when there is room
// (see filterTruncatecharsHelper).
func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
	s := in.String()
	newLen := param.Integer()
	return AsValue(filterTruncatecharsHelper(s, newLen)), nil
}
+
// filterTruncatecharsHTML truncates HTML-aware: at most param-3 text
// runes are kept (markup is not counted), "..." is appended when text
// was dropped, and any open tags are re-closed. The result is marked
// safe so it is not escaped again.
func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
	value := in.String()
	newLen := max(param.Integer()-3, 0)

	newOutput := bytes.NewBuffer(nil)

	textcounter := 0

	filterTruncateHTMLHelper(value, newOutput, func() bool {
		return textcounter >= newLen
	}, func(c rune, s int, idx int) int {
		// Count and copy one text rune.
		textcounter++
		newOutput.WriteRune(c)

		return idx + s
	}, func() {
		if textcounter >= newLen && textcounter < len(value) {
			newOutput.WriteString("...")
		}
	})

	return AsSafeValue(newOutput.String()), nil
}
+
+func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
+	words := strings.Fields(in.String())
+	n := param.Integer()
+	if n <= 0 {
+		return AsValue(""), nil
+	}
+	nlen := min(len(words), n)
+	out := make([]string, 0, nlen)
+	for i := 0; i < nlen; i++ {
+		out = append(out, words[i])
+	}
+
+	if n < len(words) {
+		out = append(out, "...")
+	}
+
+	return AsValue(strings.Join(out, " ")), nil
+}
+
// filterTruncatewordsHTML truncates the input to param words while
// keeping HTML markup intact: tags are not counted as words, "..." is
// appended when the limit was hit, and open tags are re-closed. The
// result is marked safe so it is not escaped again.
func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
	value := in.String()
	newLen := max(param.Integer(), 0)

	newOutput := bytes.NewBuffer(nil)

	wordcounter := 0

	filterTruncateHTMLHelper(value, newOutput, func() bool {
		return wordcounter >= newLen
	}, func(_ rune, _ int, idx int) int {
		// Get next word
		wordFound := false

		for idx < len(value) {
			c2, size2 := utf8.DecodeRuneInString(value[idx:])
			if c2 == utf8.RuneError {
				idx += size2
				continue
			}

			if c2 == '<' {
				// HTML tag start, don't consume it
				return idx
			}

			newOutput.WriteRune(c2)
			idx += size2

			if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
				// Word ends here, stop capturing it now
				break
			} else {
				wordFound = true
			}
		}

		// Only count non-empty word runs (consecutive separators don't count).
		if wordFound {
			wordcounter++
		}

		return idx
	}, func() {
		if wordcounter >= newLen {
			newOutput.WriteString("...")
		}
	})

	return AsSafeValue(newOutput.String()), nil
}
+
+func filterEscape(in *Value, param *Value) (*Value, *Error) {
+	output := strings.Replace(in.String(), "&", "&amp;", -1)
+	output = strings.Replace(output, ">", "&gt;", -1)
+	output = strings.Replace(output, "<", "&lt;", -1)
+	output = strings.Replace(output, "\"", "&quot;", -1)
+	output = strings.Replace(output, "'", "&#39;", -1)
+	return AsValue(output), nil
+}
+
// filterSafe returns the input untouched; the filter exists so the
// template engine can keep track of the 'safe' application (skipping
// autoescaping for this value).
func filterSafe(in *Value, param *Value) (*Value, *Error) {
	return in, nil // nothing to do here, just to keep track of the safe application
}
+
// filterEscapejs makes a string safe for embedding in JavaScript:
// every rune except ASCII letters, space and '/' is \uXXXX-escaped.
// Literal two-character "\r" / "\n" escape sequences in the input are
// rewritten as the \u-escape of CR / LF.
func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
	sin := in.String()

	var b bytes.Buffer

	idx := 0
	for idx < len(sin) {
		c, size := utf8.DecodeRuneInString(sin[idx:])
		if c == utf8.RuneError {
			// Skip invalid UTF-8 bytes.
			idx += size
			continue
		}

		if c == '\\' {
			// Escape seq?
			if idx+1 < len(sin) {
				switch sin[idx+1] {
				case 'r':
					b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
					idx += 2
					continue
				case 'n':
					b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
					idx += 2
					continue
					/*case '\'':
						b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
						idx += 2
						continue
					case '"':
						b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
						idx += 2
						continue*/
				}
			}
		}

		// Letters, space and slash pass through; everything else is \u-escaped.
		if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
			b.WriteRune(c)
		} else {
			b.WriteString(fmt.Sprintf(`\u%04X`, c))
		}

		idx += size
	}

	return AsValue(b.String()), nil
}
+
+func filterAdd(in *Value, param *Value) (*Value, *Error) {
+	if in.IsNumber() && param.IsNumber() {
+		if in.IsFloat() || param.IsFloat() {
+			return AsValue(in.Float() + param.Float()), nil
+		}
+		return AsValue(in.Integer() + param.Integer()), nil
+	}
+	// If in/param is not a number, we're relying on the
+	// Value's String() conversion and just add them both together
+	return AsValue(in.String() + param.String()), nil
+}
+
+func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
+	output := strings.Replace(in.String(), "\\", "\\\\", -1)
+	output = strings.Replace(output, "\"", "\\\"", -1)
+	output = strings.Replace(output, "'", "\\'", -1)
+	return AsValue(output), nil
+}
+
// filterCut removes every occurrence of the parameter string from the input.
func filterCut(in *Value, param *Value) (*Value, *Error) {
	return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
}
+
// filterLength returns the input's length (per Value.Len's semantics
// for the underlying type).
func filterLength(in *Value, param *Value) (*Value, *Error) {
	return AsValue(in.Len()), nil
}
+
// filterLengthis reports whether the input's length equals the parameter.
func filterLengthis(in *Value, param *Value) (*Value, *Error) {
	return AsValue(in.Len() == param.Integer()), nil
}
+
+func filterDefault(in *Value, param *Value) (*Value, *Error) {
+	if !in.IsTrue() {
+		return param, nil
+	}
+	return in, nil
+}
+
+func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
+	if in.IsNil() {
+		return param, nil
+	}
+	return in, nil
+}
+
// filterDivisibleby reports whether the input is evenly divisible by
// the parameter; a zero divisor yields false instead of panicking.
func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
	if param.Integer() == 0 {
		return AsValue(false), nil
	}
	return AsValue(in.Integer()%param.Integer() == 0), nil
}
+
+func filterFirst(in *Value, param *Value) (*Value, *Error) {
+	if in.CanSlice() && in.Len() > 0 {
+		return in.Index(0), nil
+	}
+	return AsValue(""), nil
+}
+
// filterFloatformat formats the input as a floating point number with
// param decimals. Without a numeric parameter the result is "trimmed":
// whole values are emitted as integers. A non-positive parameter also
// trims, using its absolute value as the decimal count.
func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
	val := in.Float()

	decimals := -1
	if !param.IsNil() {
		// Any argument provided?
		decimals = param.Integer()
	}

	// if the argument is not a number (e. g. empty), the default
	// behaviour is trim the result
	trim := !param.IsNumber()

	if decimals <= 0 {
		// argument is negative or zero, so we
		// want the output being trimmed
		decimals = -decimals
		trim = true
	}

	if trim {
		// Remove zeroes
		if float64(int(val)) == val {
			return AsValue(in.Integer()), nil
		}
	}

	return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
}
+
// filterGetdigit returns the i-th digit of the input counted from the
// right (1 = last digit); out-of-range positions return the input
// unchanged.
func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
	i := param.Integer()
	l := len(in.String()) // do NOT use in.Len() here!
	if i <= 0 || i > l {
		return in, nil
	}
	// Byte value minus ASCII '0'. NOTE(review): assumes the input is a
	// plain ASCII digit string — non-digit bytes yield meaningless values.
	return AsValue(in.String()[l-i] - 48), nil
}
+
+const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
+
+func filterIriencode(in *Value, param *Value) (*Value, *Error) {
+	var b bytes.Buffer
+
+	sin := in.String()
+	for _, r := range sin {
+		if strings.IndexRune(filterIRIChars, r) >= 0 {
+			b.WriteRune(r)
+		} else {
+			b.WriteString(url.QueryEscape(string(r)))
+		}
+	}
+
+	return AsValue(b.String()), nil
+}
+
+func filterJoin(in *Value, param *Value) (*Value, *Error) {
+	if !in.CanSlice() {
+		return in, nil
+	}
+	sep := param.String()
+	sl := make([]string, 0, in.Len())
+	for i := 0; i < in.Len(); i++ {
+		sl = append(sl, in.Index(i).String())
+	}
+	return AsValue(strings.Join(sl, sep)), nil
+}
+
+func filterLast(in *Value, param *Value) (*Value, *Error) {
+	if in.CanSlice() && in.Len() > 0 {
+		return in.Index(in.Len() - 1), nil
+	}
+	return AsValue(""), nil
+}
+
// filterUpper upper-cases the whole input string.
func filterUpper(in *Value, param *Value) (*Value, *Error) {
	return AsValue(strings.ToUpper(in.String())), nil
}
+
// filterLower lower-cases the whole input string.
func filterLower(in *Value, param *Value) (*Value, *Error) {
	return AsValue(strings.ToLower(in.String())), nil
}
+
+func filterMakelist(in *Value, param *Value) (*Value, *Error) {
+	s := in.String()
+	result := make([]string, 0, len(s))
+	for _, c := range s {
+		result = append(result, string(c))
+	}
+	return AsValue(result), nil
+}
+
// filterCapfirst upper-cases the first rune of the input and leaves
// the remainder untouched; empty inputs yield an empty string.
func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
	if in.Len() <= 0 {
		return AsValue(""), nil
	}
	t := in.String()
	// Decode the first rune so multi-byte characters are handled correctly.
	r, size := utf8.DecodeRuneInString(t)
	return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
}
+
+func filterCenter(in *Value, param *Value) (*Value, *Error) {
+	width := param.Integer()
+	slen := in.Len()
+	if width <= slen {
+		return in, nil
+	}
+
+	spaces := width - slen
+	left := spaces/2 + spaces%2
+	right := spaces / 2
+
+	return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
+		in.String(), strings.Repeat(" ", right))), nil
+}
+
// filterDate formats a time.Time input using the parameter as a Go
// time layout string (reference time "Mon Jan 2 15:04:05 MST 2006").
// Non-time inputs produce an error. Also registered as the 'time' filter.
func filterDate(in *Value, param *Value) (*Value, *Error) {
	t, isTime := in.Interface().(time.Time)
	if !isTime {
		return nil, &Error{
			Sender:    "filter:date",
			OrigError: errors.New("filter input argument must be of type 'time.Time'"),
		}
	}
	return AsValue(t.Format(param.String())), nil
}
+
// filterFloat converts the input to a float (pongo-specific filter).
func filterFloat(in *Value, param *Value) (*Value, *Error) {
	return AsValue(in.Float()), nil
}
+
// filterInteger converts the input to an integer (pongo-specific filter).
func filterInteger(in *Value, param *Value) (*Value, *Error) {
	return AsValue(in.Integer()), nil
}
+
// filterLinebreaks converts plain-text line breaks into HTML: runs of
// text separated by blank lines become <p>…</p> paragraphs, and single
// newlines within a paragraph become <br />.
func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
	if in.Len() == 0 {
		return in, nil
	}

	var b bytes.Buffer

	// Newline = <br />
	// Double newline = <p>...</p>
	lines := strings.Split(in.String(), "\n")
	lenlines := len(lines)

	// Tracks whether a <p> is currently open.
	opened := false

	for idx, line := range lines {

		if !opened {
			b.WriteString("<p>")
			opened = true
		}

		b.WriteString(line)

		if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
			// We've not reached the end
			if strings.TrimSpace(lines[idx+1]) == "" {
				// Next line is empty
				if opened {
					b.WriteString("</p>")
					opened = false
				}
			} else {
				b.WriteString("<br />")
			}
		}
	}

	// Close the final paragraph if still open.
	if opened {
		b.WriteString("</p>")
	}

	return AsValue(b.String()), nil
}
+
+func filterSplit(in *Value, param *Value) (*Value, *Error) {
+	chunks := strings.Split(in.String(), param.String())
+
+	return AsValue(chunks), nil
+}
+
// filterLinebreaksbr replaces every newline in the input with "<br />".
func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
	return AsValue(strings.Replace(in.String(), "\n", "<br />", -1)), nil
}
+
+func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
+	lines := strings.Split(in.String(), "\n")
+	output := make([]string, 0, len(lines))
+	for idx, line := range lines {
+		output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
+	}
+	return AsValue(strings.Join(output, "\n")), nil
+}
+
+func filterLjust(in *Value, param *Value) (*Value, *Error) {
+	times := param.Integer() - in.Len()
+	if times < 0 {
+		times = 0
+	}
+	return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
+}
+
// filterUrlencode percent-encodes the input via url.QueryEscape.
func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
	return AsValue(url.QueryEscape(in.String())), nil
}
+
// TODO: This regexp could do some work
var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)

// filterUrlizeHelper turns URLs and e-mail addresses found in input
// into HTML <a> links. When trunc > 3, the visible link text is
// shortened to trunc characters with a trailing "..."; autoescape runs
// the 'escape' filter over URL titles. The first filter error hit in
// the URL pass aborts and is returned.
func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
	var soutErr error
	sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
		// Preserve the surrounding spaces the regexp may have captured.
		var prefix string
		var suffix string
		if strings.HasPrefix(raw_url, " ") {
			prefix = " "
		}
		if strings.HasSuffix(raw_url, " ") {
			suffix = " "
		}

		raw_url = strings.TrimSpace(raw_url)

		t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
		if err != nil {
			soutErr = err
			return ""
		}
		// NOTE(review): this local shadows the net/url package inside the closure.
		url := t.String()

		if !strings.HasPrefix(url, "http") {
			url = fmt.Sprintf("http://%s", url)
		}

		title := raw_url

		if trunc > 3 && len(title) > trunc {
			title = fmt.Sprintf("%s...", title[:trunc-3])
		}

		if autoescape {
			t, err := ApplyFilter("escape", AsValue(title), nil)
			if err != nil {
				soutErr = err
				return ""
			}
			title = t.String()
		}

		return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
	})
	if soutErr != nil {
		return "", soutErr
	}

	// Second pass: link e-mail addresses in the already-linked text.
	sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
		title := mail

		if trunc > 3 && len(title) > trunc {
			title = fmt.Sprintf("%s...", title[:trunc-3])
		}

		return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
	})

	return sout, nil
}
+
+func filterUrlize(in *Value, param *Value) (*Value, *Error) {
+	autoescape := true
+	if param.IsBool() {
+		autoescape = param.Bool()
+	}
+
+	s, err := filterUrlizeHelper(in.String(), autoescape, -1)
+	if err != nil {
+
+	}
+
+	return AsValue(s), nil
+}
+
+func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
+	s, err := filterUrlizeHelper(in.String(), true, param.Integer())
+	if err != nil {
+		return nil, &Error{
+			Sender:    "filter:urlizetrunc",
+			OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
+		}
+	}
+	return AsValue(s), nil
+}
+
// filterStringformat formats the input using the parameter as an
// fmt-style format string (e.g. "%03d").
func filterStringformat(in *Value, param *Value) (*Value, *Error) {
	return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
}
+
// reStriptags matches any HTML tag, non-greedily up to the closing '>'.
var reStriptags = regexp.MustCompile("<[^>]*?>")

// filterStriptags removes all HTML tags from the input and trims
// surrounding whitespace.
func filterStriptags(in *Value, param *Value) (*Value, *Error) {
	s := in.String()

	// Strip all tags
	s = reStriptags.ReplaceAllString(s, "")

	return AsValue(strings.TrimSpace(s)), nil
}
+
// https://en.wikipedia.org/wiki/Phoneword
// filterPhone2numericMap maps phone-keypad letters to their digits.
var filterPhone2numericMap = map[string]string{
	"a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
	"l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
	"w": "9", "x": "9", "y": "9", "z": "9",
}

// filterPhone2numeric converts a phoneword (e.g. "1-800-PONGO") to its
// numeric form by replacing keypad letters (both cases) with digits;
// all other characters are preserved.
func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
	sin := in.String()
	for k, v := range filterPhone2numericMap {
		sin = strings.Replace(sin, k, v, -1)
		sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
	}
	return AsValue(sin), nil
}
+
+func filterPluralize(in *Value, param *Value) (*Value, *Error) {
+	if in.IsNumber() {
+		// Works only on numbers
+		if param.Len() > 0 {
+			endings := strings.Split(param.String(), ",")
+			if len(endings) > 2 {
+				return nil, &Error{
+					Sender:    "filter:pluralize",
+					OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
+				}
+			}
+			if len(endings) == 1 {
+				// 1 argument
+				if in.Integer() != 1 {
+					return AsValue(endings[0]), nil
+				}
+			} else {
+				if in.Integer() != 1 {
+					// 2 arguments
+					return AsValue(endings[1]), nil
+				}
+				return AsValue(endings[0]), nil
+			}
+		} else {
+			if in.Integer() != 1 {
+				// return default 's'
+				return AsValue("s"), nil
+			}
+		}
+
+		return AsValue(""), nil
+	}
+	return nil, &Error{
+		Sender:    "filter:pluralize",
+		OrigError: errors.New("filter 'pluralize' does only work on numbers"),
+	}
+}
+
// filterRandom returns a uniformly random element of a sliceable,
// non-empty input; other inputs pass through unchanged. Uses the
// package-level math/rand source seeded in init, so it is not
// deterministic.
func filterRandom(in *Value, param *Value) (*Value, *Error) {
	if !in.CanSlice() || in.Len() <= 0 {
		return in, nil
	}
	i := rand.Intn(in.Len())
	return in.Index(i), nil
}
+
+func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
+	s := in.String()
+	tags := strings.Split(param.String(), ",")
+
+	// Strip only specific tags
+	for _, tag := range tags {
+		re := regexp.MustCompile(fmt.Sprintf("</?%s/?>", tag))
+		s = re.ReplaceAllString(s, "")
+	}
+
+	return AsValue(strings.TrimSpace(s)), nil
+}
+
// filterRjust right-justifies the input in a field of param characters
// by building a dynamic "%Ns" format string and padding with spaces.
func filterRjust(in *Value, param *Value) (*Value, *Error) {
	return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
}
+
// filterSlice implements the 'slice' filter. The parameter must have
// the form "from:to"; non-sliceable inputs pass through unchanged.
// from is clamped to the input length, and to is only honored when
// from <= to <= len — otherwise the slice extends to the end.
func filterSlice(in *Value, param *Value) (*Value, *Error) {
	comp := strings.Split(param.String(), ":")
	if len(comp) != 2 {
		return nil, &Error{
			Sender:    "filter:slice",
			OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
		}
	}

	if !in.CanSlice() {
		return in, nil
	}

	// NOTE(review): omitted bounds ("" pieces) presumably convert to 0
	// via Value.Integer() — confirm against Value's conversion rules.
	from := AsValue(comp[0]).Integer()
	to := in.Len()

	if from > to {
		from = to
	}

	vto := AsValue(comp[1]).Integer()
	if vto >= from && vto <= in.Len() {
		to = vto
	}

	return in.Slice(from, to), nil
}
+
// filterTitle title-cases the input (first letter of each word upper,
// the rest lower); non-string inputs yield an empty string.
func filterTitle(in *Value, param *Value) (*Value, *Error) {
	if !in.IsString() {
		return AsValue(""), nil
	}
	return AsValue(strings.Title(strings.ToLower(in.String()))), nil
}
+
// filterWordcount counts the whitespace-separated words in the input.
func filterWordcount(in *Value, param *Value) (*Value, *Error) {
	return AsValue(len(strings.Fields(in.String()))), nil
}
+
+func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
+	words := strings.Fields(in.String())
+	wordsLen := len(words)
+	wrapAt := param.Integer()
+	if wrapAt <= 0 {
+		return in, nil
+	}
+
+	linecount := wordsLen/wrapAt + wordsLen%wrapAt
+	lines := make([]string, 0, linecount)
+	for i := 0; i < linecount; i++ {
+		lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
+	}
+	return AsValue(strings.Join(lines, "\n")), nil
+}
+
// filterYesno maps the truthiness of the input to one of three strings:
// true -> "yes", false -> "no", nil -> "maybe". An optional parameter
// "yes,no[,maybe]" overrides the defaults; it must contain 2 or 3
// comma-separated values, anything else is an error.
func filterYesno(in *Value, param *Value) (*Value, *Error) {
	// Default choices, indexed 0=yes, 1=no, 2=maybe.
	choices := map[int]string{
		0: "yes",
		1: "no",
		2: "maybe",
	}
	paramString := param.String()
	customChoices := strings.Split(paramString, ",")
	if len(paramString) > 0 {
		if len(customChoices) > 3 {
			return nil, &Error{
				Sender:    "filter:yesno",
				OrigError: errors.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
			}
		}
		if len(customChoices) < 2 {
			return nil, &Error{
				Sender:    "filter:yesno",
				OrigError: errors.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
			}
		}

		// Map to the options now
		choices[0] = customChoices[0]
		choices[1] = customChoices[1]
		if len(customChoices) == 3 {
			choices[2] = customChoices[2]
		}
	}

	// maybe
	if in.IsNil() {
		return AsValue(choices[2]), nil
	}

	// yes
	if in.IsTrue() {
		return AsValue(choices[0]), nil
	}

	// no
	return AsValue(choices[1]), nil
}

部分文件因文件數量過多而無法顯示