
update vendor

lijian 1 year ago
Parent
Commit
e2d248f8d5
100 changed files with 4 additions and 19545 deletions
  1. Binary
      .DS_Store
  2. Binary
      pkg/.DS_Store
  3. Binary
      pkg/ruleEngine/.DS_Store
  4. 0 24
      services/knowoapi/admin_test.go
  5. 0 25
      services/knowoapi/base_test.go
  6. 4 2
      services/knowoapi/router.go
  7. 0 2
      vendor/github.com/CloudyKit/jet/v4/.gitignore
  8. 0 8
      vendor/github.com/CloudyKit/jet/v4/.travis.yml
  9. 0 201
      vendor/github.com/CloudyKit/jet/v4/LICENSE
  10. 0 46
      vendor/github.com/CloudyKit/jet/v4/README.md
  11. 0 34
      vendor/github.com/CloudyKit/jet/v4/appveyor.yml
  12. 0 240
      vendor/github.com/CloudyKit/jet/v4/constructors.go
  13. 0 204
      vendor/github.com/CloudyKit/jet/v4/default.go
  14. 0 1581
      vendor/github.com/CloudyKit/jet/v4/eval.go
  15. 0 160
      vendor/github.com/CloudyKit/jet/v4/func.go
  16. 0 684
      vendor/github.com/CloudyKit/jet/v4/lex.go
  17. 0 60
      vendor/github.com/CloudyKit/jet/v4/loader.go
  18. 0 695
      vendor/github.com/CloudyKit/jet/v4/node.go
  19. 0 1004
      vendor/github.com/CloudyKit/jet/v4/parse.go
  20. 0 8
      vendor/github.com/CloudyKit/jet/v4/profile.sh
  21. 0 152
      vendor/github.com/CloudyKit/jet/v4/ranger.go
  22. 0 11
      vendor/github.com/CloudyKit/jet/v4/stress.bash
  23. 0 327
      vendor/github.com/CloudyKit/jet/v4/template.go
  24. 0 25
      vendor/github.com/ajg/form/.travis.yml
  25. 0 27
      vendor/github.com/ajg/form/LICENSE
  26. 0 247
      vendor/github.com/ajg/form/README.md
  27. 0 4
      vendor/github.com/ajg/form/TODO.md
  28. 0 370
      vendor/github.com/ajg/form/decode.go
  29. 0 388
      vendor/github.com/ajg/form/encode.go
  30. 0 14
      vendor/github.com/ajg/form/form.go
  31. 0 152
      vendor/github.com/ajg/form/node.go
  32. 0 18
      vendor/github.com/ajg/form/pre-commit.sh
  33. 0 3
      vendor/github.com/aymerick/raymond/.gitmodules
  34. 0 10
      vendor/github.com/aymerick/raymond/.travis.yml
  35. 0 46
      vendor/github.com/aymerick/raymond/BENCHMARKS.md
  36. 0 42
      vendor/github.com/aymerick/raymond/CHANGELOG.md
  37. 0 22
      vendor/github.com/aymerick/raymond/LICENSE
  38. 0 1427
      vendor/github.com/aymerick/raymond/README.md
  39. 0 1
      vendor/github.com/aymerick/raymond/VERSION
  40. 0 785
      vendor/github.com/aymerick/raymond/ast/node.go
  41. 0 279
      vendor/github.com/aymerick/raymond/ast/print.go
  42. 0 95
      vendor/github.com/aymerick/raymond/data_frame.go
  43. 0 65
      vendor/github.com/aymerick/raymond/escape.go
  44. 0 1005
      vendor/github.com/aymerick/raymond/eval.go
  45. 0 398
      vendor/github.com/aymerick/raymond/helper.go
  46. 0 639
      vendor/github.com/aymerick/raymond/lexer/lexer.go
  47. 0 183
      vendor/github.com/aymerick/raymond/lexer/token.go
  48. 0 846
      vendor/github.com/aymerick/raymond/parser/parser.go
  49. 0 360
      vendor/github.com/aymerick/raymond/parser/whitespace.go
  50. 0 101
      vendor/github.com/aymerick/raymond/partial.go
  51. 0 28
      vendor/github.com/aymerick/raymond/raymond.go
  52. Binary
      vendor/github.com/aymerick/raymond/raymond.png
  53. 0 84
      vendor/github.com/aymerick/raymond/string.go
  54. 0 248
      vendor/github.com/aymerick/raymond/template.go
  55. 0 85
      vendor/github.com/aymerick/raymond/utils.go
  56. 0 7
      vendor/github.com/eknkc/amber/.travis.yml
  57. 0 9
      vendor/github.com/eknkc/amber/LICENSE
  58. 0 442
      vendor/github.com/eknkc/amber/README.md
  59. 0 844
      vendor/github.com/eknkc/amber/compiler.go
  60. 0 257
      vendor/github.com/eknkc/amber/doc.go
  61. 0 285
      vendor/github.com/eknkc/amber/parser/nodes.go
  62. 0 482
      vendor/github.com/eknkc/amber/parser/parser.go
  63. 0 501
      vendor/github.com/eknkc/amber/parser/scanner.go
  64. 0 287
      vendor/github.com/eknkc/amber/runtime.go
  65. 0 8
      vendor/github.com/gobwas/glob/.gitignore
  66. 0 9
      vendor/github.com/gobwas/glob/.travis.yml
  67. 0 21
      vendor/github.com/gobwas/glob/LICENSE
  68. 0 26
      vendor/github.com/gobwas/glob/bench.sh
  69. 0 525
      vendor/github.com/gobwas/glob/compiler/compiler.go
  70. 0 80
      vendor/github.com/gobwas/glob/glob.go
  71. 0 45
      vendor/github.com/gobwas/glob/match/any.go
  72. 0 82
      vendor/github.com/gobwas/glob/match/any_of.go
  73. 0 146
      vendor/github.com/gobwas/glob/match/btree.go
  74. 0 58
      vendor/github.com/gobwas/glob/match/contains.go
  75. 0 99
      vendor/github.com/gobwas/glob/match/every_of.go
  76. 0 49
      vendor/github.com/gobwas/glob/match/list.go
  77. 0 81
      vendor/github.com/gobwas/glob/match/match.go
  78. 0 49
      vendor/github.com/gobwas/glob/match/max.go
  79. 0 57
      vendor/github.com/gobwas/glob/match/min.go
  80. 0 27
      vendor/github.com/gobwas/glob/match/nothing.go
  81. 0 50
      vendor/github.com/gobwas/glob/match/prefix.go
  82. 0 55
      vendor/github.com/gobwas/glob/match/prefix_any.go
  83. 0 62
      vendor/github.com/gobwas/glob/match/prefix_suffix.go
  84. 0 48
      vendor/github.com/gobwas/glob/match/range.go
  85. 0 77
      vendor/github.com/gobwas/glob/match/row.go
  86. 0 91
      vendor/github.com/gobwas/glob/match/segments.go
  87. 0 43
      vendor/github.com/gobwas/glob/match/single.go
  88. 0 35
      vendor/github.com/gobwas/glob/match/suffix.go
  89. 0 43
      vendor/github.com/gobwas/glob/match/suffix_any.go
  90. 0 33
      vendor/github.com/gobwas/glob/match/super.go
  91. 0 45
      vendor/github.com/gobwas/glob/match/text.go
  92. 0 148
      vendor/github.com/gobwas/glob/readme.md
  93. 0 122
      vendor/github.com/gobwas/glob/syntax/ast/ast.go
  94. 0 157
      vendor/github.com/gobwas/glob/syntax/ast/parser.go
  95. 0 273
      vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
  96. 0 88
      vendor/github.com/gobwas/glob/syntax/lexer/token.go
  97. 0 14
      vendor/github.com/gobwas/glob/syntax/syntax.go
  98. 0 154
      vendor/github.com/gobwas/glob/util/runes/runes.go
  99. 0 39
      vendor/github.com/gobwas/glob/util/strings/strings.go
  100. 0 27
      vendor/github.com/google/go-querystring/LICENSE

Binary
.DS_Store


Binary
pkg/.DS_Store


Binary
pkg/ruleEngine/.DS_Store


+ 0 - 24
services/knowoapi/admin_test.go

@@ -1,24 +0,0 @@
-package main
-
-import (
-	"net/http"
-	"sparrow/pkg/models"
-	"testing"
-
-	"github.com/kataras/iris/httptest"
-)
-
-func TestRoleCreate(t *testing.T) {
-	*confDBPass = "123456"
-	e := httptest.New(t, newApp())
-	// get token
-	tk := getToken()
-	e.POST("/api/v1/admin/role").WithHeader("Authorization", "Bearer "+tk).
-		WithJSON(models.Role{
-			RoleCode: 0,
-			RoleName: "普通用户",
-			MenuList: "home",
-		}).
-		Expect().Status(http.StatusOK).
-		JSON().Object().Values().Contains(0)
-}

+ 0 - 25
services/knowoapi/base_test.go

@@ -1,25 +0,0 @@
-package main
-
-import (
-	"sparrow/pkg/models"
-	"sparrow/services/knowoapi/controllers"
-	"testing"
-
-	"github.com/kataras/iris/httptest"
-)
-
-func TestController(t *testing.T) {
-	e := httptest.New(t, newApp())
-	e.POST("/").Expect().Status(httptest.StatusOK)
-}
-
-func getToken() string {
-	tk := new(controllers.Token)
-	user := models.User{
-		UserKey:  "f7f2d7e5a8954ff4ef07ce7f77898c3f1dd389038842788346514183b5eff8b53e",
-		UserName: "lijian",
-		VendorID: "1",
-	}
-	user.ID = 1
-	return tk.TokenMaker(&user)
-}

+ 4 - 2
services/knowoapi/router.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"github.com/golang-jwt/jwt/v4"
 	"github.com/kataras/iris/v12/mvc"
 	"sparrow/pkg/generator"
 	"sparrow/services/knowoapi/controllers"
@@ -9,7 +10,6 @@ import (
 
 	"github.com/opentracing/opentracing-go"
 
-	jwt "github.com/dgrijalva/jwt-go"
 	jwtmiddleware "github.com/iris-contrib/middleware/jwt"
 	"github.com/kataras/iris/v12"
 )
@@ -29,7 +29,9 @@ func handleErrors(ctx iris.Context) {
 // jwt middleware configuration
 func newJWThandle() func(ctx iris.Context) {
 	jwtHandler := jwtmiddleware.New(jwtmiddleware.Config{
-		ValidationKeyGetter: nil,
+		ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
+			return []byte(model.SignedString), nil
+		},
 		ErrorHandler: func(ctx iris.Context, err error) {
 			ctx.StatusCode(iris.StatusUnauthorized)
 			ctx.Values().Set("reason", err)

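The only non-vendor change in this commit is the JWT wiring in services/knowoapi/router.go: the archived github.com/dgrijalva/jwt-go import is replaced by the maintained fork github.com/golang-jwt/jwt/v4, and the previously nil ValidationKeyGetter now returns the server-side signing key, so incoming tokens are actually verified. Below is a minimal, self-contained sketch of that middleware setup; the SignedString constant stands in for model.SignedString from the diff, and the surrounding main() wiring and the jwtHandler.Serve return are illustrative assumptions, not code from this repository.

```go
package main

import (
	"github.com/golang-jwt/jwt/v4"
	jwtmiddleware "github.com/iris-contrib/middleware/jwt"
	"github.com/kataras/iris/v12"
)

// SignedString stands in for model.SignedString used in the diff (assumed value).
const SignedString = "replace-with-real-secret"

// newJWThandle mirrors the middleware configuration added in router.go:
// every request must carry a token signed with SignedString, otherwise the
// error handler responds with 401 and records the reason.
func newJWThandle() func(ctx iris.Context) {
	jwtHandler := jwtmiddleware.New(jwtmiddleware.Config{
		// Called for every parsed token; returning the shared secret lets
		// the middleware verify the token's HMAC signature.
		ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
			return []byte(SignedString), nil
		},
		ErrorHandler: func(ctx iris.Context, err error) {
			ctx.StatusCode(iris.StatusUnauthorized)
			ctx.Values().Set("reason", err)
		},
	})
	return jwtHandler.Serve // Serve is the iris handler exposed by the middleware
}

func main() {
	app := iris.New()
	app.Use(newJWThandle()) // protect everything registered after this point
	app.Get("/ping", func(ctx iris.Context) {
		ctx.WriteString("pong")
	})
	app.Run(iris.Addr(":8080"))
}
```

In the actual service the handler is registered on the API routes inside router.go; the standalone main() above exists only to make the sketch runnable.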
+ 0 - 2
vendor/github.com/CloudyKit/jet/v4/.gitignore

@@ -1,2 +0,0 @@
-.DS_Store
-.idea

+ 0 - 8
vendor/github.com/CloudyKit/jet/v4/.travis.yml

@@ -1,8 +0,0 @@
-language: go
-go:
- - "1.13.x"
- - "1.14.x"
- - "tip"
-
-script:
- - env GO111MODULE=on go test -v ./...

+ 0 - 201
vendor/github.com/CloudyKit/jet/v4/LICENSE

@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.

+ 0 - 46
vendor/github.com/CloudyKit/jet/v4/README.md

@@ -1,46 +0,0 @@
-# Jet Template Engine for Go
-
-[![Build Status](https://travis-ci.org/CloudyKit/jet.svg?branch=master)](https://travis-ci.org/CloudyKit/jet) [![Build status](https://ci.appveyor.com/api/projects/status/5g4whw3c6518vvku?svg=true)](https://ci.appveyor.com/project/CloudyKit/jet) [![Join the chat at https://gitter.im/CloudyKit/jet](https://badges.gitter.im/CloudyKit/jet.svg)](https://gitter.im/CloudyKit/jet)
-
-Jet is a template engine developed to be easy to use, powerful, dynamic, yet secure and very fast.
-
-* simple and familiar syntax
-* supports template inheritance (`extends`) and composition (`block`/`yield`, `import`, `include`)
-* descriptive error messages with filename and line number
-* auto-escaping
-* simple C-like expressions
-* very fast execution – Jet can execute templates faster than some pre-compiled template engines
-* very light in terms of allocations and memory footprint
-
-## v4
-
-Version 4 brings a lot of bug fixes and improvements as well as [updated documentation](./docs/index.md), but make sure to read through the [breaking changes](./docs/changes.md) before making the jump.
-
-## Docs
-
-- [Syntax Reference](./docs/syntax.md)
-- [Built-ins](./docs/builtins.md)
-- [Wiki](https://github.com/CloudyKit/jet/wiki) (some things are out of date)
-
-## Example application
-
-An example application is available in the repository. Use `go get -u github.com/CloudyKit/jet` or clone the repository into `$GOPATH/github.com/CloudyKit/jet`, then do:
-```
-  $ cd examples/todos; go run main.go
-```
-
-## IntelliJ Plugin
-
-If you use IntelliJ there is a plugin available at https://github.com/jhsx/GoJetPlugin.
-There is also a very good Go plugin for IntelliJ – see https://github.com/go-lang-plugin-org/go-lang-idea-plugin.
-GoJetPlugin + Go-lang-idea-plugin = happiness!
-
-## Contributing
-
-All contributions are welcome – if you find a bug please report it.
-
-## Contributors
-
-- José Santos (@jhsx)
-- Daniel Lohse (@annismckenzie)
-- Alexander Willing (@sauerbraten)

+ 0 - 34
vendor/github.com/CloudyKit/jet/v4/appveyor.yml

@@ -1,34 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-# scripts that are called at very beginning, before repo cloning
-init:
-  - git config --global core.autocrlf true
-
-clone_folder: c:\gopath\src\github.com\CloudyKit\jet
-
-environment:
-  GOPATH: c:\gopath
-  GO111MODULE: on
-  matrix:
-    - GOVERSION: 113
-
-install:
-  - set PATH=%GOPATH%\bin;c:\go%GOVERSION%\bin;%PATH%
-  - set GOROOT=c:\go%GOVERSION%
-  - echo %PATH%
-  - echo %GOPATH%
-  - go version
-  - go env
-
-build: off
-
-test_script:
-  - go test -v ./...
-  - cd examples/asset_packaging/
-  - go run main.go --run-and-exit
-  - go get -u github.com/shurcooL/vfsgen
-  - go generate
-  - go build -tags=deploy_build -o bin/app.exe main.go
-  - .\bin\app.exe --run-and-exit

+ 0 - 240
vendor/github.com/CloudyKit/jet/v4/constructors.go

@@ -1,240 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jet
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-func (t *Template) newSliceExpr(pos Pos, line int, base, index, endIndex Expression) *SliceExprNode {
-	return &SliceExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeSliceExpr, Pos: pos, Line: line}, Index: index, Base: base, EndIndex: endIndex}
-}
-
-func (t *Template) newIndexExpr(pos Pos, line int, base, index Expression) *IndexExprNode {
-	return &IndexExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeIndexExpr, Pos: pos, Line: line}, Index: index, Base: base}
-}
-
-func (t *Template) newTernaryExpr(pos Pos, line int, boolean, left, right Expression) *TernaryExprNode {
-	return &TernaryExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeTernaryExpr, Pos: pos, Line: line}, Boolean: boolean, Left: left, Right: right}
-}
-
-func (t *Template) newSet(pos Pos, line int, isLet, isIndexExprGetLookup bool, left, right []Expression) *SetNode {
-	return &SetNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeSet, Pos: pos, Line: line}, Let: isLet, IndexExprGetLookup: isIndexExprGetLookup, Left: left, Right: right}
-}
-
-func (t *Template) newCallExpr(pos Pos, line int, expr Expression) *CallExprNode {
-	return &CallExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeCallExpr, Pos: pos, Line: line}, BaseExpr: expr}
-}
-func (t *Template) newNotExpr(pos Pos, line int, expr Expression) *NotExprNode {
-	return &NotExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeNotExpr, Pos: pos, Line: line}, Expr: expr}
-}
-func (t *Template) newNumericComparativeExpr(pos Pos, line int, left, right Expression, item item) *NumericComparativeExprNode {
-	return &NumericComparativeExprNode{binaryExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeNumericComparativeExpr, Pos: pos, Line: line}, Operator: item, Left: left, Right: right}}
-}
-
-func (t *Template) newComparativeExpr(pos Pos, line int, left, right Expression, item item) *ComparativeExprNode {
-	return &ComparativeExprNode{binaryExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeComparativeExpr, Pos: pos, Line: line}, Operator: item, Left: left, Right: right}}
-}
-
-func (t *Template) newLogicalExpr(pos Pos, line int, left, right Expression, item item) *LogicalExprNode {
-	return &LogicalExprNode{binaryExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeLogicalExpr, Pos: pos, Line: line}, Operator: item, Left: left, Right: right}}
-}
-
-func (t *Template) newMultiplicativeExpr(pos Pos, line int, left, right Expression, item item) *MultiplicativeExprNode {
-	return &MultiplicativeExprNode{binaryExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeMultiplicativeExpr, Pos: pos, Line: line}, Operator: item, Left: left, Right: right}}
-}
-
-func (t *Template) newAdditiveExpr(pos Pos, line int, left, right Expression, item item) *AdditiveExprNode {
-	return &AdditiveExprNode{binaryExprNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeAdditiveExpr, Pos: pos, Line: line}, Operator: item, Left: left, Right: right}}
-}
-
-func (t *Template) newList(pos Pos) *ListNode {
-	return &ListNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeList, Pos: pos}}
-}
-
-func (t *Template) newText(pos Pos, text string) *TextNode {
-	return &TextNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeText, Pos: pos}, Text: []byte(text)}
-}
-
-func (t *Template) newPipeline(pos Pos, line int) *PipeNode {
-	return &PipeNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodePipe, Pos: pos, Line: line}}
-}
-
-func (t *Template) newAction(pos Pos, line int) *ActionNode {
-	return &ActionNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeAction, Pos: pos, Line: line}}
-}
-
-func (t *Template) newCommand(pos Pos) *CommandNode {
-	return &CommandNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeCommand, Pos: pos}}
-}
-
-func (t *Template) newNil(pos Pos) *NilNode {
-	return &NilNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeNil, Pos: pos}}
-}
-
-func (t *Template) newField(pos Pos, ident string) *FieldNode {
-	return &FieldNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeField, Pos: pos}, Ident: strings.Split(ident[1:], ".")} //[1:] to drop leading period
-}
-
-func (t *Template) newChain(pos Pos, node Node) *ChainNode {
-	return &ChainNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeChain, Pos: pos}, Node: node}
-}
-
-func (t *Template) newBool(pos Pos, true bool) *BoolNode {
-	return &BoolNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeBool, Pos: pos}, True: true}
-}
-
-func (t *Template) newString(pos Pos, orig, text string) *StringNode {
-	return &StringNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeString, Pos: pos}, Quoted: orig, Text: text}
-}
-
-func (t *Template) newEnd(pos Pos) *endNode {
-	return &endNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: nodeEnd, Pos: pos}}
-}
-
-func (t *Template) newContent(pos Pos) *contentNode {
-	return &contentNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: nodeContent, Pos: pos}}
-}
-
-func (t *Template) newElse(pos Pos, line int) *elseNode {
-	return &elseNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: nodeElse, Pos: pos, Line: line}}
-}
-
-func (t *Template) newIf(pos Pos, line int, set *SetNode, pipe Expression, list, elseList *ListNode) *IfNode {
-	return &IfNode{BranchNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeIf, Pos: pos, Line: line}, Set: set, Expression: pipe, List: list, ElseList: elseList}}
-}
-
-func (t *Template) newRange(pos Pos, line int, set *SetNode, pipe Expression, list, elseList *ListNode) *RangeNode {
-	return &RangeNode{BranchNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeRange, Pos: pos, Line: line}, Set: set, Expression: pipe, List: list, ElseList: elseList}}
-}
-
-func (t *Template) newBlock(pos Pos, line int, name string, parameters *BlockParameterList, pipe Expression, listNode, contentListNode *ListNode) *BlockNode {
-	return &BlockNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeBlock, Line: line, Pos: pos}, Name: name, Parameters: parameters, Expression: pipe, List: listNode, Content: contentListNode}
-}
-
-func (t *Template) newYield(pos Pos, line int, name string, bplist *BlockParameterList, pipe Expression, content *ListNode, isContent bool) *YieldNode {
-	return &YieldNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeYield, Pos: pos, Line: line}, Name: name, Parameters: bplist, Expression: pipe, Content: content, IsContent: isContent}
-}
-
-func (t *Template) newInclude(pos Pos, line int, name, context Expression) *IncludeNode {
-	return &IncludeNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeInclude, Pos: pos, Line: line}, Name: name, Context: context}
-}
-
-func (t *Template) newReturn(pos Pos, line int, pipe Expression) *ReturnNode {
-	return &ReturnNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeReturn, Pos: pos, Line: line}, Value: pipe}
-}
-
-func (t *Template) newTry(pos Pos, line int, list *ListNode, catch *catchNode) *TryNode {
-	return &TryNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeTry, Pos: pos, Line: line}, List: list, Catch: catch}
-}
-
-func (t *Template) newCatch(pos Pos, line int, errVar *IdentifierNode, list *ListNode) *catchNode {
-	return &catchNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: nodeCatch, Pos: pos, Line: line}, Err: errVar, List: list}
-}
-
-func (t *Template) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
-	n := &NumberNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeNumber, Pos: pos}, Text: text}
-	// todo: optimize
-	switch typ {
-	case itemCharConstant:
-		_rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
-		if err != nil {
-			return nil, err
-		}
-		if tail != "'" {
-			return nil, fmt.Errorf("malformed character constant: %s", text)
-		}
-		n.Int64 = int64(_rune)
-		n.IsInt = true
-		n.Uint64 = uint64(_rune)
-		n.IsUint = true
-		n.Float64 = float64(_rune) //odd but those are the rules.
-		n.IsFloat = true
-		return n, nil
-	case itemComplex:
-		//fmt.Sscan can parse the pair, so let it do the work.
-		if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
-			return nil, err
-		}
-		n.IsComplex = true
-		n.simplifyComplex()
-		return n, nil
-	}
-	//Imaginary constants can only be complex unless they are zero.
-	if len(text) > 0 && text[len(text)-1] == 'i' {
-		f, err := strconv.ParseFloat(text[:len(text)-1], 64)
-		if err == nil {
-			n.IsComplex = true
-			n.Complex128 = complex(0, f)
-			n.simplifyComplex()
-			return n, nil
-		}
-	}
-	// Do integer test first so we get 0x123 etc.
-	u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
-	if err == nil {
-		n.IsUint = true
-		n.Uint64 = u
-	}
-	i, err := strconv.ParseInt(text, 0, 64)
-	if err == nil {
-		n.IsInt = true
-		n.Int64 = i
-		if i == 0 {
-			n.IsUint = true // in case of -0.
-			n.Uint64 = u
-		}
-	}
-	// If an integer extraction succeeded, promote the float.
-	if n.IsInt {
-		n.IsFloat = true
-		n.Float64 = float64(n.Int64)
-	} else if n.IsUint {
-		n.IsFloat = true
-		n.Float64 = float64(n.Uint64)
-	} else {
-		f, err := strconv.ParseFloat(text, 64)
-		if err == nil {
-			// If we parsed it as a float but it looks like an integer,
-			// it's a huge number too large to fit in an int. Reject it.
-			if !strings.ContainsAny(text, ".eE") {
-				return nil, fmt.Errorf("integer overflow: %q", text)
-			}
-			n.IsFloat = true
-			n.Float64 = f
-			// If a floating-point extraction succeeded, extract the int if needed.
-			if !n.IsInt && float64(int64(f)) == f {
-				n.IsInt = true
-				n.Int64 = int64(f)
-			}
-			if !n.IsUint && float64(uint64(f)) == f {
-				n.IsUint = true
-				n.Uint64 = uint64(f)
-			}
-		}
-	}
-
-	if !n.IsInt && !n.IsUint && !n.IsFloat {
-		return nil, fmt.Errorf("illegal number syntax: %q", text)
-	}
-
-	return n, nil
-}
-
-func (t *Template) newIdentifier(ident string, pos Pos, line int) *IdentifierNode {
-	return &IdentifierNode{NodeBase: NodeBase{TemplatePath: t.Name, NodeType: NodeIdentifier, Pos: pos, Line: line}, Ident: ident}
-}

+ 0 - 204
vendor/github.com/CloudyKit/jet/v4/default.go

@@ -1,204 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jet
-
-import (
-	"encoding/json"
-	"errors"
-	"html"
-	"io"
-	"io/ioutil"
-	"net/url"
-	"reflect"
-	"strings"
-	"text/template"
-)
-
-var defaultVariables map[string]reflect.Value
-
-func init() {
-	defaultVariables = map[string]reflect.Value{
-		"lower":     reflect.ValueOf(strings.ToLower),
-		"upper":     reflect.ValueOf(strings.ToUpper),
-		"hasPrefix": reflect.ValueOf(strings.HasPrefix),
-		"hasSuffix": reflect.ValueOf(strings.HasSuffix),
-		"repeat":    reflect.ValueOf(strings.Repeat),
-		"replace":   reflect.ValueOf(strings.Replace),
-		"split":     reflect.ValueOf(strings.Split),
-		"trimSpace": reflect.ValueOf(strings.TrimSpace),
-		"html":      reflect.ValueOf(html.EscapeString),
-		"url":       reflect.ValueOf(url.QueryEscape),
-		"safeHtml":  reflect.ValueOf(SafeWriter(template.HTMLEscape)),
-		"safeJs":    reflect.ValueOf(SafeWriter(template.JSEscape)),
-		"raw":       reflect.ValueOf(SafeWriter(unsafePrinter)),
-		"unsafe":    reflect.ValueOf(SafeWriter(unsafePrinter)),
-		"writeJson": reflect.ValueOf(jsonRenderer),
-		"json":      reflect.ValueOf(json.Marshal),
-		"map":       reflect.ValueOf(newMap),
-		"slice":     reflect.ValueOf(newSlice),
-		"array":     reflect.ValueOf(newSlice),
-		"isset": reflect.ValueOf(Func(func(a Arguments) reflect.Value {
-			a.RequireNumOfArguments("isset", 1, -1)
-			for i := 0; i < len(a.argExpr); i++ {
-				if !a.runtime.isSet(a.argExpr[i]) {
-					return valueBoolFALSE
-				}
-			}
-			return valueBoolTRUE
-		})),
-		"len": reflect.ValueOf(Func(func(a Arguments) reflect.Value {
-			a.RequireNumOfArguments("len", 1, 1)
-
-			expression := a.Get(0)
-			if expression.Kind() == reflect.Ptr || expression.Kind() == reflect.Interface {
-				expression = expression.Elem()
-			}
-
-			switch expression.Kind() {
-			case reflect.Array, reflect.Chan, reflect.Slice, reflect.Map, reflect.String:
-				return reflect.ValueOf(expression.Len())
-			case reflect.Struct:
-				return reflect.ValueOf(expression.NumField())
-			}
-
-			a.Panicf("len(): invalid value type %s", expression.Type())
-			return reflect.Value{}
-		})),
-		"includeIfExists": reflect.ValueOf(Func(func(a Arguments) reflect.Value {
-			a.RequireNumOfArguments("includeIfExists", 1, 2)
-			t, err := a.runtime.set.GetTemplate(a.Get(0).String())
-			// If template exists but returns an error then panic instead of failing silently
-			if t != nil && err != nil {
-				panic(err)
-			}
-			if err != nil {
-				return hiddenFalse
-			}
-
-			a.runtime.newScope()
-			defer a.runtime.releaseScope()
-
-			a.runtime.blocks = t.processedBlocks
-			root := t.Root
-			if t.extends != nil {
-				root = t.extends.Root
-			}
-
-			if a.NumOfArguments() > 1 {
-				c := a.runtime.context
-				defer func() { a.runtime.context = c }()
-				a.runtime.context = a.Get(1)
-			}
-
-			a.runtime.executeList(root)
-
-			return hiddenTrue
-		})),
-		"exec": reflect.ValueOf(Func(func(a Arguments) (result reflect.Value) {
-			a.RequireNumOfArguments("exec", 1, 2)
-			t, err := a.runtime.set.GetTemplate(a.Get(0).String())
-			if err != nil {
-				panic(err)
-			}
-
-			a.runtime.newScope()
-			defer a.runtime.releaseScope()
-
-			w := a.runtime.Writer
-			defer func() { a.runtime.Writer = w }()
-			a.runtime.Writer = ioutil.Discard
-
-			a.runtime.blocks = t.processedBlocks
-			root := t.Root
-			if t.extends != nil {
-				root = t.extends.Root
-			}
-
-			if a.NumOfArguments() > 1 {
-				c := a.runtime.context
-				defer func() { a.runtime.context = c }()
-				a.runtime.context = a.Get(1)
-			}
-			result = a.runtime.executeList(root)
-
-			return result
-		})),
-		"ints": reflect.ValueOf(Func(func(a Arguments) (result reflect.Value) {
-			var from, to int
-			err := a.ParseInto(&from, &to)
-			if err != nil {
-				panic(err)
-			}
-			// check to > from
-			if to <= from {
-				panic(errors.New("invalid range for ints ranger: 'from' must be smaller than 'to'"))
-			}
-			return reflect.ValueOf(&intsRanger{from: from, to: to})
-		})),
-	}
-}
-
-type hiddenBool bool
-
-func (m hiddenBool) Render(r *Runtime) { /* render nothing -> hidden */ }
-
-var hiddenTrue = reflect.ValueOf(hiddenBool(true))
-var hiddenFalse = reflect.ValueOf(hiddenBool(false))
-
-func jsonRenderer(v interface{}) RendererFunc {
-	return func(r *Runtime) {
-		err := json.NewEncoder(r.Writer).Encode(v)
-		if err != nil {
-			panic(err)
-		}
-	}
-}
-
-func unsafePrinter(w io.Writer, b []byte) {
-	w.Write(b)
-}
-
-// SafeWriter is a function that writes bytes directly to the render output, without going through Jet's auto-escaping phase.
-// Use/implement this if content should be escaped differently or not at all (see raw/unsafe builtins).
-type SafeWriter func(io.Writer, []byte)
-
-var stringType = reflect.TypeOf("")
-
-var newMap = Func(func(a Arguments) reflect.Value {
-	if a.NumOfArguments()%2 > 0 {
-		panic("map(): incomplete key-value pair (even number of arguments required)")
-	}
-
-	m := reflect.ValueOf(make(map[string]interface{}, a.NumOfArguments()/2))
-
-	for i := 0; i < a.NumOfArguments(); i += 2 {
-		key := a.Get(i)
-		if !key.Type().ConvertibleTo(stringType) {
-			a.Panicf("map(): can't use %+v as string key: %s is not convertible to string", key, key.Type())
-		}
-		key = key.Convert(stringType)
-		m.SetMapIndex(a.Get(i), a.Get(i+1))
-	}
-
-	return m
-})
-
-var newSlice = Func(func(a Arguments) reflect.Value {
-	arr := make([]interface{}, a.NumOfArguments())
-	for i := 0; i < a.NumOfArguments(); i++ {
-		arr[i] = a.Get(i).Interface()
-	}
-	return reflect.ValueOf(arr)
-})

+ 0 - 1581
vendor/github.com/CloudyKit/jet/v4/eval.go

@@ -1,1581 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jet
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"reflect"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-
-	"github.com/CloudyKit/fastprinter"
-)
-
-var (
-	funcType       = reflect.TypeOf(Func(nil))
-	stringerType   = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-	rangerType     = reflect.TypeOf((*Ranger)(nil)).Elem()
-	rendererType   = reflect.TypeOf((*Renderer)(nil)).Elem()
-	safeWriterType = reflect.TypeOf(SafeWriter(nil))
-	pool_State     = sync.Pool{
-		New: func() interface{} {
-			return &Runtime{scope: &scope{}, escapeeWriter: new(escapeeWriter)}
-		},
-	}
-)
-
-// Renderer is used to detect if a value has its own rendering logic. If the value an action evaluates to implements this
-// interface, it will not be printed using github.com/CloudyKit/fastprinter, instead, its Render() method will be called
-// and is responsible for writing the value to the render output.
-type Renderer interface {
-	Render(*Runtime)
-}
-
-// RendererFunc func implementing interface Renderer
-type RendererFunc func(*Runtime)
-
-func (renderer RendererFunc) Render(r *Runtime) {
-	renderer(r)
-}
-
-type escapeeWriter struct {
-	Writer  io.Writer
-	escapee SafeWriter
-	set     *Set
-}
-
-func (w *escapeeWriter) Write(b []byte) (int, error) {
-	if w.set.escapee == nil {
-		w.Writer.Write(b)
-	} else {
-		w.set.escapee(w.Writer, b)
-	}
-	return 0, nil
-}
-
-// Runtime this type holds the state of the execution of an template
-type Runtime struct {
-	*escapeeWriter
-	*scope
-	content func(*Runtime, Expression)
-
-	translator Translator
-	context    reflect.Value
-}
-
-// Context returns the current context value
-func (r *Runtime) Context() reflect.Value {
-	return r.context
-}
-
-func (st *Runtime) newScope() {
-	st.scope = &scope{parent: st.scope, variables: make(VarMap), blocks: st.blocks}
-}
-
-func (st *Runtime) releaseScope() {
-	st.scope = st.scope.parent
-}
-
-type scope struct {
-	parent    *scope
-	variables VarMap
-	blocks    map[string]*BlockNode
-}
-
-// YieldBlock yields a block in the current context, will panic if the context is not available
-func (st *Runtime) YieldBlock(name string, context interface{}) {
-	block, has := st.getBlock(name)
-
-	if has == false {
-		panic(fmt.Errorf("Block %q was not found!!", name))
-	}
-
-	if context != nil {
-		current := st.context
-		st.context = reflect.ValueOf(context)
-		st.executeList(block.List)
-		st.context = current
-	}
-
-	st.executeList(block.List)
-}
-
-func (st *scope) getBlock(name string) (block *BlockNode, has bool) {
-	block, has = st.blocks[name]
-	for !has && st.parent != nil {
-		st = st.parent
-		block, has = st.blocks[name]
-	}
-	return
-}
-
-func (state *Runtime) setValue(name string, val reflect.Value) error {
-	// try changing existing variable in current or parent scope
-	sc := state.scope
-	for sc != nil {
-		if _, ok := sc.variables[name]; ok {
-			sc.variables[name] = val
-			return nil
-		}
-		sc = sc.parent
-	}
-
-	return fmt.Errorf("could not assign %q = %v because variable %q is uninitialised", name, val, name)
-}
-
-// LetGlobal sets or initialises a variable in the top-most template scope.
-func (state *Runtime) LetGlobal(name string, val interface{}) {
-	sc := state.scope
-
-	// walk up to top-most valid scope
-	for sc.parent != nil && sc.parent.variables != nil {
-		sc = sc.parent
-	}
-
-	sc.variables[name] = reflect.ValueOf(val)
-}
-
-// Set sets an existing variable in the template scope it lives in.
-func (state *Runtime) Set(name string, val interface{}) error {
-	return state.setValue(name, reflect.ValueOf(val))
-}
-
-// Let initialises a variable in the current template scope (possibly shadowing an existing variable of the same name in a parent scope).
-func (state *Runtime) Let(name string, val interface{}) {
-	state.scope.variables[name] = reflect.ValueOf(val)
-}
-
-// SetOrLet calls Set() (if a variable with the given name is visible from the current scope) or Let() (if there is no variable with the given name in the current or any parent scope).
-func (state *Runtime) SetOrLet(name string, val interface{}) {
-	_, err := state.resolve(name)
-	if err != nil {
-		state.Let(name, val)
-	} else {
-		state.Set(name, val)
-	}
-}
-
-// Resolve resolves a value from the execution context.
-func (state *Runtime) resolve(name string) (reflect.Value, error) {
-	if name == "." {
-		return state.context, nil
-	}
-
-	// try current, then parent variable scopes
-	sc := state.scope
-	for sc != nil {
-		v, ok := sc.variables[name]
-		if ok {
-			return indirectEface(v), nil
-		}
-		sc = sc.parent
-	}
-
-	// try globals
-	state.set.gmx.RLock()
-	v, ok := state.set.globals[name]
-	state.set.gmx.RUnlock()
-	if ok {
-		return indirectEface(v), nil
-	}
-
-	// try default variables
-	v, ok = defaultVariables[name]
-	if ok {
-		return indirectEface(v), nil
-	}
-
-	return reflect.Value{}, fmt.Errorf("identifier %q not available in current (%+v) or parent scope, global, or default variables", name, state.scope.variables)
-}
-
-// Resolve calls resolve() and ignores any errors, meaning it may return a zero reflect.Value.
-func (state *Runtime) Resolve(name string) reflect.Value {
-	v, _ := state.resolve(name)
-	return v
-}
-
-// Resolve calls resolve() and panics if there is an error.
-func (state *Runtime) MustResolve(name string) reflect.Value {
-	v, err := state.resolve(name)
-	if err != nil {
-		panic(err)
-	}
-	return v
-}
-
-func (st *Runtime) recover(err *error) {
-	// reset state scope and context just to be safe (they might not be cleared properly if there was a panic while using the state)
-	st.scope = &scope{}
-	st.context = reflect.Value{}
-	pool_State.Put(st)
-	if recovered := recover(); recovered != nil {
-		var ok bool
-		if _, ok = recovered.(runtime.Error); ok {
-			panic(recovered)
-		}
-		*err, ok = recovered.(error)
-		if !ok {
-			panic(recovered)
-		}
-	}
-}
-
-func (st *Runtime) executeSet(left Expression, right reflect.Value) {
-	typ := left.Type()
-	if typ == NodeIdentifier {
-		err := st.setValue(left.(*IdentifierNode).Ident, right)
-		if err != nil {
-			left.error(err)
-		}
-		return
-	}
-	var value reflect.Value
-	var fields []string
-	if typ == NodeChain {
-		chain := left.(*ChainNode)
-		value = st.evalPrimaryExpressionGroup(chain.Node)
-		fields = chain.Field
-	} else {
-		fields = left.(*FieldNode).Ident
-		value = st.context
-	}
-	lef := len(fields) - 1
-	for i := 0; i < lef; i++ {
-		var err error
-		value, err = resolveIndex(value, reflect.ValueOf(fields[i]))
-		if err != nil {
-			left.errorf("%v", err)
-		}
-	}
-
-RESTART:
-	switch value.Kind() {
-	case reflect.Ptr:
-		value = value.Elem()
-		goto RESTART
-	case reflect.Struct:
-		value = value.FieldByName(fields[lef])
-		if !value.IsValid() {
-			left.errorf("identifier %q is not available in the current scope", fields[lef])
-		}
-		value.Set(right)
-	case reflect.Map:
-		value.SetMapIndex(reflect.ValueOf(&fields[lef]).Elem(), right)
-	}
-}
-
-func (st *Runtime) executeSetList(set *SetNode) {
-	if set.IndexExprGetLookup {
-		value := st.evalPrimaryExpressionGroup(set.Right[0])
-		st.executeSet(set.Left[0], value)
-		if value.IsValid() {
-			st.executeSet(set.Left[1], valueBoolTRUE)
-		} else {
-			st.executeSet(set.Left[1], valueBoolFALSE)
-		}
-	} else {
-		for i := 0; i < len(set.Left); i++ {
-			st.executeSet(set.Left[i], st.evalPrimaryExpressionGroup(set.Right[i]))
-		}
-	}
-}
-
-func (st *Runtime) executeLetList(set *SetNode) {
-	if set.IndexExprGetLookup {
-		value := st.evalPrimaryExpressionGroup(set.Right[0])
-
-		st.variables[set.Left[0].(*IdentifierNode).Ident] = value
-
-		if value.IsValid() {
-			st.variables[set.Left[1].(*IdentifierNode).Ident] = valueBoolTRUE
-		} else {
-			st.variables[set.Left[1].(*IdentifierNode).Ident] = valueBoolFALSE
-		}
-
-	} else {
-		for i := 0; i < len(set.Left); i++ {
-			st.variables[set.Left[i].(*IdentifierNode).Ident] = st.evalPrimaryExpressionGroup(set.Right[i])
-		}
-	}
-}
-
-func (st *Runtime) executeYieldBlock(block *BlockNode, blockParam, yieldParam *BlockParameterList, expression Expression, content *ListNode) {
-
-	needNewScope := len(blockParam.List) > 0 || len(yieldParam.List) > 0
-	if needNewScope {
-		st.newScope()
-		for i := 0; i < len(yieldParam.List); i++ {
-			p := &yieldParam.List[i]
-
-			if p.Expression == nil {
-				block.errorf("missing name for block parameter '%s'", blockParam.List[i].Identifier)
-			}
-
-			st.variables[p.Identifier] = st.evalPrimaryExpressionGroup(p.Expression)
-		}
-		for i := 0; i < len(blockParam.List); i++ {
-			p := &blockParam.List[i]
-			if _, found := st.variables[p.Identifier]; !found {
-				if p.Expression == nil {
-					st.variables[p.Identifier] = valueBoolFALSE
-				} else {
-					st.variables[p.Identifier] = st.evalPrimaryExpressionGroup(p.Expression)
-				}
-			}
-		}
-	}
-
-	mycontent := st.content
-	if content != nil {
-		myscope := st.scope
-		st.content = func(st *Runtime, expression Expression) {
-			outscope := st.scope
-			outcontent := st.content
-
-			st.scope = myscope
-			st.content = mycontent
-
-			if expression != nil {
-				context := st.context
-				st.context = st.evalPrimaryExpressionGroup(expression)
-				st.executeList(content)
-				st.context = context
-			} else {
-				st.executeList(content)
-			}
-
-			st.scope = outscope
-			st.content = outcontent
-		}
-	}
-
-	if expression != nil {
-		context := st.context
-		st.context = st.evalPrimaryExpressionGroup(expression)
-		st.executeList(block.List)
-		st.context = context
-	} else {
-		st.executeList(block.List)
-	}
-
-	st.content = mycontent
-	if needNewScope {
-		st.releaseScope()
-	}
-}
-
-func (st *Runtime) executeList(list *ListNode) (returnValue reflect.Value) {
-	inNewScope := false // to use just one scope for multiple actions with variable declarations
-
-	for i := 0; i < len(list.Nodes); i++ {
-		node := list.Nodes[i]
-		switch node.Type() {
-
-		case NodeText:
-			node := node.(*TextNode)
-			_, err := st.Writer.Write(node.Text)
-			if err != nil {
-				node.error(err)
-			}
-		case NodeAction:
-			node := node.(*ActionNode)
-			if node.Set != nil {
-				if node.Set.Let {
-					if !inNewScope {
-						st.newScope()
-						inNewScope = true
-						defer st.releaseScope()
-					}
-					st.executeLetList(node.Set)
-				} else {
-					st.executeSetList(node.Set)
-				}
-			}
-			if node.Pipe != nil {
-				v, safeWriter := st.evalPipelineExpression(node.Pipe)
-				if !safeWriter && v.IsValid() {
-					if v.Type().Implements(rendererType) {
-						v.Interface().(Renderer).Render(st)
-					} else {
-						_, err := fastprinter.PrintValue(st.escapeeWriter, v)
-						if err != nil {
-							node.error(err)
-						}
-					}
-				}
-			}
-		case NodeIf:
-			node := node.(*IfNode)
-			var isLet bool
-			if node.Set != nil {
-				if node.Set.Let {
-					isLet = true
-					st.newScope()
-					st.executeLetList(node.Set)
-				} else {
-					st.executeSetList(node.Set)
-				}
-			}
-
-			if isTrue(st.evalPrimaryExpressionGroup(node.Expression)) {
-				returnValue = st.executeList(node.List)
-			} else if node.ElseList != nil {
-				returnValue = st.executeList(node.ElseList)
-			}
-			if isLet {
-				st.releaseScope()
-			}
-		case NodeRange:
-			node := node.(*RangeNode)
-			var expression reflect.Value
-
-			isSet := node.Set != nil
-			isLet := false
-			keyVarSlot := 0
-			valVarSlot := -1
-
-			context := st.context
-
-			if isSet {
-				if len(node.Set.Left) > 1 {
-					valVarSlot = 1
-				}
-				expression = st.evalPrimaryExpressionGroup(node.Set.Right[0])
-				if node.Set.Let {
-					isLet = true
-					st.newScope()
-				}
-			} else {
-				expression = st.evalPrimaryExpressionGroup(node.Expression)
-			}
-
-			ranger, cleanup := getRanger(expression)
-			if !ranger.ProvidesIndex() {
-				if len(node.Set.Left) > 1 {
-					// two-vars assignment with ranger that doesn't provide an index
-					node.error(errors.New("two-var range over ranger that does not provide an index"))
-				}
-				keyVarSlot, valVarSlot = -1, 0
-			}
-
-			indexValue, rangeValue, end := ranger.Range()
-			if !end {
-				for !end && !returnValue.IsValid() {
-					if isSet {
-						if isLet {
-							if keyVarSlot >= 0 {
-								st.variables[node.Set.Left[keyVarSlot].String()] = indexValue
-							}
-							if valVarSlot >= 0 {
-								st.variables[node.Set.Left[valVarSlot].String()] = rangeValue
-							}
-						} else {
-							if keyVarSlot >= 0 {
-								st.executeSet(node.Set.Left[keyVarSlot], indexValue)
-							}
-							if valVarSlot >= 0 {
-								st.executeSet(node.Set.Left[valVarSlot], rangeValue)
-							}
-						}
-					}
-					if valVarSlot < 0 {
-						st.context = rangeValue
-					}
-					returnValue = st.executeList(node.List)
-					indexValue, rangeValue, end = ranger.Range()
-				}
-			} else if node.ElseList != nil {
-				returnValue = st.executeList(node.ElseList)
-			}
-			cleanup()
-			st.context = context
-			if isLet {
-				st.releaseScope()
-			}
-		case NodeTry:
-			node := node.(*TryNode)
-			returnValue = st.executeTry(node)
-		case NodeYield:
-			node := node.(*YieldNode)
-			if node.IsContent {
-				if st.content != nil {
-					st.content(st, node.Expression)
-				}
-			} else {
-				block, has := st.getBlock(node.Name)
-				if has == false || block == nil {
-					node.errorf("unresolved block %q!!", node.Name)
-				}
-				st.executeYieldBlock(block, block.Parameters, node.Parameters, node.Expression, node.Content)
-			}
-		case NodeBlock:
-			node := node.(*BlockNode)
-			block, has := st.getBlock(node.Name)
-			if has == false {
-				block = node
-			}
-			st.executeYieldBlock(block, block.Parameters, block.Parameters, block.Expression, block.Content)
-		case NodeInclude:
-			node := node.(*IncludeNode)
-			returnValue = st.executeInclude(node)
-		case NodeReturn:
-			node := node.(*ReturnNode)
-			returnValue = st.evalPrimaryExpressionGroup(node.Value)
-		}
-	}
-
-	return returnValue
-}
-
-func (st *Runtime) executeTry(try *TryNode) (returnValue reflect.Value) {
-	writer := st.Writer
-	buf := new(bytes.Buffer)
-
-	defer func() {
-		r := recover()
-
-		// copy buffered render output to writer only if no panic occured
-		if r == nil {
-			io.Copy(writer, buf)
-		} else {
-			// st.Writer is already set to its original value since the later defer ran first
-			if try.Catch != nil {
-				if try.Catch.Err != nil {
-					st.newScope()
-					st.scope.variables[try.Catch.Err.Ident] = reflect.ValueOf(r)
-				}
-				if try.Catch.List != nil {
-					returnValue = st.executeList(try.Catch.List)
-				}
-				if try.Catch.Err != nil {
-					st.releaseScope()
-				}
-			}
-		}
-	}()
-
-	st.Writer = buf
-	defer func() { st.Writer = writer }()
-
-	return st.executeList(try.List)
-}
-
-func (st *Runtime) executeInclude(node *IncludeNode) (returnValue reflect.Value) {
-	var templatePath string
-	name := st.evalPrimaryExpressionGroup(node.Name)
-	if name.Type().Implements(stringerType) {
-		templatePath = name.String()
-	} else if name.Kind() == reflect.String {
-		templatePath = name.String()
-	} else {
-		node.errorf("evaluating name of template to include: unexpected expression type %q", getTypeString(name))
-	}
-
-	t, err := st.set.getSiblingTemplate(templatePath, node.TemplatePath)
-	if err != nil {
-		node.error(err)
-		return reflect.Value{}
-	}
-
-	st.newScope()
-	defer st.releaseScope()
-
-	st.blocks = t.processedBlocks
-
-	var context reflect.Value
-	if node.Context != nil {
-		context = st.context
-		defer func() { st.context = context }()
-		st.context = st.evalPrimaryExpressionGroup(node.Context)
-	}
-
-	Root := t.Root
-	for t.extends != nil {
-		t = t.extends
-		Root = t.Root
-	}
-
-	return st.executeList(Root)
-}
-
-var (
-	valueBoolTRUE  = reflect.ValueOf(true)
-	valueBoolFALSE = reflect.ValueOf(false)
-)
-
-func (st *Runtime) evalPrimaryExpressionGroup(node Expression) reflect.Value {
-	switch node.Type() {
-	case NodeAdditiveExpr:
-		return st.evalAdditiveExpression(node.(*AdditiveExprNode))
-	case NodeMultiplicativeExpr:
-		return st.evalMultiplicativeExpression(node.(*MultiplicativeExprNode))
-	case NodeComparativeExpr:
-		return st.evalComparativeExpression(node.(*ComparativeExprNode))
-	case NodeNumericComparativeExpr:
-		return st.evalNumericComparativeExpression(node.(*NumericComparativeExprNode))
-	case NodeLogicalExpr:
-		return st.evalLogicalExpression(node.(*LogicalExprNode))
-	case NodeNotExpr:
-		return reflect.ValueOf(!isTrue(st.evalPrimaryExpressionGroup(node.(*NotExprNode).Expr)))
-	case NodeTernaryExpr:
-		node := node.(*TernaryExprNode)
-		if isTrue(st.evalPrimaryExpressionGroup(node.Boolean)) {
-			return st.evalPrimaryExpressionGroup(node.Left)
-		}
-		return st.evalPrimaryExpressionGroup(node.Right)
-	case NodeCallExpr:
-		node := node.(*CallExprNode)
-		baseExpr := st.evalBaseExpressionGroup(node.BaseExpr)
-		if baseExpr.Kind() != reflect.Func {
-			node.errorf("node %q is not func kind %q", node.BaseExpr, baseExpr.Type())
-		}
-		return st.evalCallExpression(baseExpr, node.Args)
-	case NodeIndexExpr:
-		node := node.(*IndexExprNode)
-		base := st.evalPrimaryExpressionGroup(node.Base)
-		index := st.evalPrimaryExpressionGroup(node.Index)
-
-		resolved, err := resolveIndex(base, index)
-		if err != nil {
-			node.error(err)
-		}
-		return resolved
-	case NodeSliceExpr:
-		node := node.(*SliceExprNode)
-		baseExpression := st.evalPrimaryExpressionGroup(node.Base)
-
-		var index, length int
-		if node.Index != nil {
-			indexExpression := st.evalPrimaryExpressionGroup(node.Index)
-			if canNumber(indexExpression.Kind()) {
-				index = int(castInt64(indexExpression))
-			} else {
-				node.Index.errorf("non numeric value in index expression kind %s", indexExpression.Kind().String())
-			}
-		}
-
-		if node.EndIndex != nil {
-			indexExpression := st.evalPrimaryExpressionGroup(node.EndIndex)
-			if canNumber(indexExpression.Kind()) {
-				length = int(castInt64(indexExpression))
-			} else {
-				node.EndIndex.errorf("non numeric value in index expression kind %s", indexExpression.Kind().String())
-			}
-		} else {
-			length = baseExpression.Len()
-		}
-
-		return baseExpression.Slice(index, length)
-	}
-	return st.evalBaseExpressionGroup(node)
-}
-
-// notNil returns false when v.IsValid() == false
-// or when v's kind can be nil and v.IsNil() == true
-func notNil(v reflect.Value) bool {
-	if !v.IsValid() {
-		return false
-	}
-	switch v.Kind() {
-	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return !v.IsNil()
-	default:
-		return true
-	}
-}
-
-func (st *Runtime) isSet(node Node) (ok bool) {
-	defer func() {
-		if r := recover(); r != nil {
-			// something panicked while evaluating node
-			ok = false
-		}
-	}()
-
-	nodeType := node.Type()
-
-	switch nodeType {
-	case NodeIndexExpr:
-		node := node.(*IndexExprNode)
-		if !st.isSet(node.Base) || !st.isSet(node.Index) {
-			return false
-		}
-
-		base := st.evalPrimaryExpressionGroup(node.Base)
-		index := st.evalPrimaryExpressionGroup(node.Index)
-
-		resolved, err := resolveIndex(base, index)
-		return err == nil && notNil(resolved)
-	case NodeIdentifier:
-		value, err := st.resolve(node.String())
-		return err == nil && notNil(value)
-	case NodeField:
-		node := node.(*FieldNode)
-		resolved := st.context
-		for i := 0; i < len(node.Ident); i++ {
-			var err error
-			resolved, err = resolveIndex(resolved, reflect.ValueOf(node.Ident[i]))
-			if err != nil || !notNil(resolved) {
-				return false
-			}
-		}
-	case NodeChain:
-		node := node.(*ChainNode)
-		resolved, err := st.evalChainNodeExpression(node)
-		return err == nil && notNil(resolved)
-	default:
-		//todo: maybe work some edge cases
-		if !(nodeType > beginExpressions && nodeType < endExpressions) {
-			node.errorf("unexpected %q node in isset clause", node)
-		}
-	}
-	return true
-}
-
-func (st *Runtime) evalNumericComparativeExpression(node *NumericComparativeExprNode) reflect.Value {
-	left, right := st.evalPrimaryExpressionGroup(node.Left), st.evalPrimaryExpressionGroup(node.Right)
-	isTrue := false
-	kind := left.Kind()
-
-	// if the left value is not a float and the right is, we need to promote the left value to a float before the comparison
-	// this is necessary for expressions like 4 < 1.23
-	needFloatPromotion := !isFloat(kind) && isFloat(right.Kind())
-
-	switch node.Operator.typ {
-	case itemGreat:
-		if isInt(kind) {
-			if needFloatPromotion {
-				isTrue = float64(left.Int()) > right.Float()
-			} else {
-				isTrue = left.Int() > toInt(right)
-			}
-		} else if isFloat(kind) {
-			isTrue = left.Float() > toFloat(right)
-		} else if isUint(kind) {
-			if needFloatPromotion {
-				isTrue = float64(left.Uint()) > right.Float()
-			} else {
-				isTrue = left.Uint() > toUint(right)
-			}
-		} else {
-			node.Left.errorf("a non numeric value in numeric comparative expression")
-		}
-	case itemGreatEquals:
-		if isInt(kind) {
-			if needFloatPromotion {
-				isTrue = float64(left.Int()) >= right.Float()
-			} else {
-				isTrue = left.Int() >= toInt(right)
-			}
-		} else if isFloat(kind) {
-			isTrue = left.Float() >= toFloat(right)
-		} else if isUint(kind) {
-			if needFloatPromotion {
-				isTrue = float64(left.Uint()) >= right.Float()
-			} else {
-				isTrue = left.Uint() >= toUint(right)
-			}
-		} else {
-			node.Left.errorf("a non numeric value in numeric comparative expression")
-		}
-	case itemLess:
-		if isInt(kind) {
-			if needFloatPromotion {
-				isTrue = float64(left.Int()) < right.Float()
-			} else {
-				isTrue = left.Int() < toInt(right)
-			}
-		} else if isFloat(kind) {
-			isTrue = left.Float() < toFloat(right)
-		} else if isUint(kind) {
-			if needFloatPromotion {
-				isTrue = float64(left.Uint()) < right.Float()
-			} else {
-				isTrue = left.Uint() < toUint(right)
-			}
-		} else {
-			node.Left.errorf("a non numeric value in numeric comparative expression")
-		}
-	case itemLessEquals:
-		if isInt(kind) {
-			if needFloatPromotion {
-				isTrue = float64(left.Int()) <= right.Float()
-			} else {
-				isTrue = left.Int() <= toInt(right)
-			}
-		} else if isFloat(kind) {
-			isTrue = left.Float() <= toFloat(right)
-		} else if isUint(kind) {
-			if needFloatPromotion {
-				isTrue = float64(left.Uint()) <= right.Float()
-			} else {
-				isTrue = left.Uint() <= toUint(right)
-			}
-		} else {
-			node.Left.errorf("a non numeric value in numeric comparative expression")
-		}
-	}
-	return reflect.ValueOf(isTrue)
-}
-
-func (st *Runtime) evalLogicalExpression(node *LogicalExprNode) reflect.Value {
-	truthy := isTrue(st.evalPrimaryExpressionGroup(node.Left))
-	if node.Operator.typ == itemAnd {
-		truthy = truthy && isTrue(st.evalPrimaryExpressionGroup(node.Right))
-	} else {
-		truthy = truthy || isTrue(st.evalPrimaryExpressionGroup(node.Right))
-	}
-	return reflect.ValueOf(truthy)
-}
-
-func (st *Runtime) evalComparativeExpression(node *ComparativeExprNode) reflect.Value {
-	left, right := st.evalPrimaryExpressionGroup(node.Left), st.evalPrimaryExpressionGroup(node.Right)
-	equal := checkEquality(left, right)
-	if node.Operator.typ == itemNotEquals {
-		return reflect.ValueOf(!equal)
-	}
-	return reflect.ValueOf(equal)
-}
-
-func toInt(v reflect.Value) int64 {
-	kind := v.Kind()
-	if isInt(kind) {
-		return v.Int()
-	} else if isFloat(kind) {
-		return int64(v.Float())
-	} else if isUint(kind) {
-		return int64(v.Uint())
-	} else if kind == reflect.String {
-		n, e := strconv.ParseInt(v.String(), 10, 0)
-		if e != nil {
-			panic(e)
-		}
-		return n
-	} else if kind == reflect.Bool {
-		if v.Bool() {
-			return 1
-		}
-		return 0
-	}
-	panic(fmt.Errorf("type: %q can't be converted to int64", v.Type()))
-}
-
-func toUint(v reflect.Value) uint64 {
-	kind := v.Kind()
-	if isUint(kind) {
-		return v.Uint()
-	} else if isInt(kind) {
-		return uint64(v.Int())
-	} else if isFloat(kind) {
-		return uint64(v.Float())
-	} else if kind == reflect.String {
-		n, e := strconv.ParseUint(v.String(), 10, 0)
-		if e != nil {
-			panic(e)
-		}
-		return n
-	} else if kind == reflect.Bool {
-		if v.Bool() {
-			return 1
-		}
-		return 0
-	}
-	panic(fmt.Errorf("type: %q can't be converted to uint64", v.Type()))
-}
-
-func toFloat(v reflect.Value) float64 {
-	kind := v.Kind()
-	if isFloat(kind) {
-		return v.Float()
-	} else if isInt(kind) {
-		return float64(v.Int())
-	} else if isUint(kind) {
-		return float64(v.Uint())
-	} else if kind == reflect.String {
-		n, e := strconv.ParseFloat(v.String(), 64)
-		if e != nil {
-			panic(e)
-		}
-		return n
-	} else if kind == reflect.Bool {
-		if v.Bool() {
-			return 1
-		}
-		return 0
-	}
-	panic(fmt.Errorf("type: %q can't be converted to float64", v.Type()))
-}
-
-func (st *Runtime) evalMultiplicativeExpression(node *MultiplicativeExprNode) reflect.Value {
-	left, right := st.evalPrimaryExpressionGroup(node.Left), st.evalPrimaryExpressionGroup(node.Right)
-	kind := left.Kind()
-	// if the left value is not a float and the right is, we need to promote the left value to a float before the calculation
-	// this is necessary for expressions like 4*1.23
-	needFloatPromotion := !isFloat(kind) && isFloat(right.Kind())
-	switch node.Operator.typ {
-	case itemMul:
-		if isInt(kind) {
-			if needFloatPromotion {
-				// do the promotion and calculate
-				left = reflect.ValueOf(float64(left.Int()) * right.Float())
-			} else {
-				// do not need float promotion
-				left = reflect.ValueOf(left.Int() * toInt(right))
-			}
-		} else if isFloat(kind) {
-			left = reflect.ValueOf(left.Float() * toFloat(right))
-		} else if isUint(kind) {
-			if needFloatPromotion {
-				left = reflect.ValueOf(float64(left.Uint()) * right.Float())
-			} else {
-				left = reflect.ValueOf(left.Uint() * toUint(right))
-			}
-		} else {
-			node.Left.errorf("a non numeric value in multiplicative expression")
-		}
-	case itemDiv:
-		if isInt(kind) {
-			if needFloatPromotion {
-				left = reflect.ValueOf(float64(left.Int()) / right.Float())
-			} else {
-				left = reflect.ValueOf(left.Int() / toInt(right))
-			}
-		} else if isFloat(kind) {
-			left = reflect.ValueOf(left.Float() / toFloat(right))
-		} else if isUint(kind) {
-			if needFloatPromotion {
-				left = reflect.ValueOf(float64(left.Uint()) / right.Float())
-			} else {
-				left = reflect.ValueOf(left.Uint() / toUint(right))
-			}
-		} else {
-			node.Left.errorf("a non numeric value in multiplicative expression")
-		}
-	case itemMod:
-		if isInt(kind) {
-			left = reflect.ValueOf(left.Int() % toInt(right))
-		} else if isFloat(kind) {
-			left = reflect.ValueOf(int64(left.Float()) % toInt(right))
-		} else if isUint(kind) {
-			left = reflect.ValueOf(left.Uint() % toUint(right))
-		} else {
-			node.Left.errorf("a non numeric value in multiplicative expression")
-		}
-	}
-	return left
-}
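
// promoteAndMultiply is a minimal, hypothetical sketch of the float-promotion rule
// applied above, assuming the left operand holds an integer value: when the right
// operand is a float, the left side is promoted first, so 4 * 1.23 yields 4.92 instead
// of a truncated integer product. It reuses the isInt/isFloat/toInt helpers in this file.
func promoteAndMultiply(left, right reflect.Value) reflect.Value {
	if isInt(left.Kind()) && isFloat(right.Kind()) {
		// promote the int side to float64 before multiplying
		return reflect.ValueOf(float64(left.Int()) * right.Float())
	}
	// both operands stay in the integer domain
	return reflect.ValueOf(left.Int() * toInt(right))
}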
-
-func (st *Runtime) evalAdditiveExpression(node *AdditiveExprNode) reflect.Value {
-
-	isAdditive := node.Operator.typ == itemAdd
-	if node.Left == nil {
-		right := st.evalPrimaryExpressionGroup(node.Right)
-		kind := right.Kind()
-		// todo: optimize
-		if isInt(kind) {
-			if isAdditive {
-				return reflect.ValueOf(+right.Int())
-			} else {
-				return reflect.ValueOf(-right.Int())
-			}
-		} else if isUint(kind) {
-			if isAdditive {
-				return right
-			} else {
-				return reflect.ValueOf(-int64(right.Uint()))
-			}
-		} else if isFloat(kind) {
-			if isAdditive {
-				return reflect.ValueOf(+right.Float())
-			} else {
-				return reflect.ValueOf(-right.Float())
-			}
-		}
-		node.Right.errorf("additive expression: right side %s (%s) is not a numeric value (no left side)", node.Right, getTypeString(right))
-	}
-
-	left, right := st.evalPrimaryExpressionGroup(node.Left), st.evalPrimaryExpressionGroup(node.Right)
-	kind := left.Kind()
-	// if the left value is not a float and the right is, we need to promote the left value to a float before the calculation
-	// this is necessary for expressions like 4+1.23
-	needFloatPromotion := !isFloat(kind) && kind != reflect.String && isFloat(right.Kind())
-	if needFloatPromotion {
-		if isInt(kind) {
-			if isAdditive {
-				left = reflect.ValueOf(float64(left.Int()) + right.Float())
-			} else {
-				left = reflect.ValueOf(float64(left.Int()) - right.Float())
-			}
-		} else if isUint(kind) {
-			if isAdditive {
-				left = reflect.ValueOf(float64(left.Uint()) + right.Float())
-			} else {
-				left = reflect.ValueOf(float64(left.Uint()) - right.Float())
-			}
-		} else {
-			node.Left.errorf("additive expression: left side (%s (%s) needs float promotion but neither int nor uint)", node.Left, getTypeString(left))
-		}
-	} else {
-		if isInt(kind) {
-			if isAdditive {
-				left = reflect.ValueOf(left.Int() + toInt(right))
-			} else {
-				left = reflect.ValueOf(left.Int() - toInt(right))
-			}
-		} else if isFloat(kind) {
-			if isAdditive {
-				left = reflect.ValueOf(left.Float() + toFloat(right))
-			} else {
-				left = reflect.ValueOf(left.Float() - toFloat(right))
-			}
-		} else if isUint(kind) {
-			if isAdditive {
-				left = reflect.ValueOf(left.Uint() + toUint(right))
-			} else {
-				left = reflect.ValueOf(left.Uint() - toUint(right))
-			}
-		} else if kind == reflect.String {
-			if !isAdditive {
-				node.Right.errorf("minus sign is not allowed with strings")
-			}
-			// converts []byte (and alias types of []byte) to string
-			if right.Kind() == reflect.Slice && right.Type().Elem().Kind() == reflect.Uint8 {
-				right = right.Convert(left.Type())
-			}
-			left = reflect.ValueOf(left.String() + fmt.Sprint(right))
-		} else {
-			node.Left.errorf("additive expression: left side %s (%s) is not a numeric value", node.Left, getTypeString(left))
-		}
-	}
-
-	return left
-}
-
-func getTypeString(value reflect.Value) string {
-	if value.IsValid() {
-		return value.Type().String()
-	}
-	return "<invalid>"
-}
-
-func (st *Runtime) evalBaseExpressionGroup(node Node) reflect.Value {
-	switch node.Type() {
-	case NodeNil:
-		return reflect.ValueOf(nil)
-	case NodeBool:
-		if node.(*BoolNode).True {
-			return valueBoolTRUE
-		}
-		return valueBoolFALSE
-	case NodeString:
-		return reflect.ValueOf(&node.(*StringNode).Text).Elem()
-	case NodeIdentifier:
-		resolved, err := st.resolve(node.(*IdentifierNode).Ident)
-		if err != nil {
-			node.error(err)
-		}
-		return resolved
-	case NodeField:
-		node := node.(*FieldNode)
-		resolved := st.context
-		for i := 0; i < len(node.Ident); i++ {
-			field, err := resolveIndex(resolved, reflect.ValueOf(node.Ident[i]))
-			if err != nil {
-				node.errorf("%v", err)
-			}
-			if !field.IsValid() {
-				node.errorf("there is no field or method '%s' in %s (.%s)", node.Ident[i], getTypeString(resolved), strings.Join(node.Ident, "."))
-			}
-			resolved = field
-		}
-		return resolved
-	case NodeChain:
-		resolved, err := st.evalChainNodeExpression(node.(*ChainNode))
-		if err != nil {
-			node.error(err)
-		}
-		return resolved
-	case NodeNumber:
-		node := node.(*NumberNode)
-		if node.IsFloat {
-			return reflect.ValueOf(&node.Float64).Elem()
-		}
-
-		if node.IsInt {
-			return reflect.ValueOf(&node.Int64).Elem()
-		}
-
-		if node.IsUint {
-			return reflect.ValueOf(&node.Uint64).Elem()
-		}
-	}
-	node.errorf("unexpected node type %s while evaluating unary expression", node)
-	return reflect.Value{}
-}
-
-func (st *Runtime) evalCallExpression(baseExpr reflect.Value, args []Expression, values ...reflect.Value) reflect.Value {
-
-	if funcType.AssignableTo(baseExpr.Type()) {
-		return baseExpr.Interface().(Func)(Arguments{runtime: st, argExpr: args, argVal: values})
-	}
-
-	i := len(args) + len(values)
-	var returns []reflect.Value
-	if i <= 10 {
-		returns = reflect_Call10(i, st, baseExpr, args, values...)
-	} else {
-		returns = reflect_Call(make([]reflect.Value, i, i), st, baseExpr, args, values...)
-	}
-
-	if len(returns) == 0 {
-		return reflect.Value{}
-	}
-
-	return returns[0]
-}
-
-func (st *Runtime) evalCommandExpression(node *CommandNode) (reflect.Value, bool) {
-	term := st.evalPrimaryExpressionGroup(node.BaseExpr)
-	if node.Args != nil {
-		if term.Kind() == reflect.Func {
-			if term.Type() == safeWriterType {
-				st.evalSafeWriter(term, node)
-				return reflect.Value{}, true
-			}
-			return st.evalCallExpression(term, node.Args), false
-		} else {
-			node.Args[0].errorf("command %q type %s is not func", node.Args[0], term.Type())
-		}
-	}
-	return term, false
-}
-
-func (st *Runtime) evalChainNodeExpression(node *ChainNode) (reflect.Value, error) {
-	resolved := st.evalPrimaryExpressionGroup(node.Node)
-
-	for i := 0; i < len(node.Field); i++ {
-		field, err := resolveIndex(resolved, reflect.ValueOf(node.Field[i]))
-		if err != nil {
-			return reflect.Value{}, err
-		}
-		if !field.IsValid() {
-			if resolved.Kind() == reflect.Map && i == len(node.Field)-1 {
-				// return reflect.Zero(resolved.Type().Elem()), nil
-				return reflect.Value{}, nil
-			}
-			return reflect.Value{}, fmt.Errorf("there is no field or method '%s' in %s (%s)", node.Field[i], getTypeString(resolved), node)
-		}
-		resolved = field
-	}
-
-	return resolved, nil
-}
-
-type escapeWriter struct {
-	rawWriter  io.Writer
-	safeWriter SafeWriter
-}
-
-func (w *escapeWriter) Write(b []byte) (int, error) {
-	w.safeWriter(w.rawWriter, b)
-	return 0, nil
-}
-
-func (st *Runtime) evalSafeWriter(term reflect.Value, node *CommandNode, v ...reflect.Value) {
-
-	sw := &escapeWriter{rawWriter: st.Writer, safeWriter: term.Interface().(SafeWriter)}
-	for i := 0; i < len(v); i++ {
-		fastprinter.PrintValue(sw, v[i])
-	}
-	for i := 0; i < len(node.Args); i++ {
-		fastprinter.PrintValue(sw, st.evalPrimaryExpressionGroup(node.Args[i]))
-	}
-}
-
-func (st *Runtime) evalCommandPipeExpression(node *CommandNode, value reflect.Value) (reflect.Value, bool) {
-	term := st.evalPrimaryExpressionGroup(node.BaseExpr)
-	if term.Kind() == reflect.Func {
-		if term.Type() == safeWriterType {
-			st.evalSafeWriter(term, node, value)
-			return reflect.Value{}, true
-		}
-		return st.evalCallExpression(term, node.Args, value), false
-	} else {
-		node.BaseExpr.errorf("pipe command %q type %s is not func", node.BaseExpr, term.Type())
-	}
-	return term, false
-}
-
-func (st *Runtime) evalPipelineExpression(node *PipeNode) (value reflect.Value, safeWriter bool) {
-	value, safeWriter = st.evalCommandExpression(node.Cmds[0])
-	for i := 1; i < len(node.Cmds); i++ {
-		if safeWriter {
-			node.Cmds[i].errorf("unexpected command %s, writer command should be the last command", node.Cmds[i])
-		}
-		value, safeWriter = st.evalCommandPipeExpression(node.Cmds[i], value)
-	}
-	return
-}
-
-func reflect_Call(arguments []reflect.Value, st *Runtime, fn reflect.Value, args []Expression, values ...reflect.Value) []reflect.Value {
-	typ := fn.Type()
-	numIn := typ.NumIn()
-
-	isVariadic := typ.IsVariadic()
-	if isVariadic {
-		numIn--
-	}
-	i, j := 0, 0
-
-	for ; i < numIn && i < len(values); i++ {
-		in := typ.In(i)
-		term := values[i]
-		if !term.Type().AssignableTo(in) {
-			term = term.Convert(in)
-		}
-		arguments[i] = term
-	}
-
-	if isVariadic {
-		in := typ.In(numIn).Elem()
-		for ; i < len(values); i++ {
-			term := values[i]
-			if !term.Type().AssignableTo(in) {
-				term = term.Convert(in)
-			}
-			arguments[i] = term
-		}
-	}
-
-	for ; i < numIn && j < len(args); i, j = i+1, j+1 {
-		in := typ.In(i)
-		term := st.evalPrimaryExpressionGroup(args[j])
-		if !term.Type().AssignableTo(in) {
-			term = term.Convert(in)
-		}
-		arguments[i] = term
-	}
-
-	if isVariadic {
-		in := typ.In(numIn).Elem()
-		for ; j < len(args); i, j = i+1, j+1 {
-			term := st.evalPrimaryExpressionGroup(args[j])
-			if !term.Type().AssignableTo(in) {
-				term = term.Convert(in)
-			}
-			arguments[i] = term
-		}
-	}
-	return fn.Call(arguments[0:i])
-}
-
-func reflect_Call10(i int, st *Runtime, fn reflect.Value, args []Expression, values ...reflect.Value) []reflect.Value {
-	var arguments [10]reflect.Value
-	return reflect_Call(arguments[0:i], st, fn, args, values...)
-}
-
-func isUint(kind reflect.Kind) bool {
-	return kind >= reflect.Uint && kind <= reflect.Uint64
-}
-func isInt(kind reflect.Kind) bool {
-	return kind >= reflect.Int && kind <= reflect.Int64
-}
-func isFloat(kind reflect.Kind) bool {
-	return kind == reflect.Float32 || kind == reflect.Float64
-}
-
-// checkEquality of two reflect values in the semantic of the jet runtime
-func checkEquality(v1, v2 reflect.Value) bool {
-	v1 = indirectInterface(v1)
-	v2 = indirectInterface(v2)
-
-	if !v1.IsValid() || !v2.IsValid() {
-		return v1.IsValid() == v2.IsValid()
-	}
-
-	v1Type := v1.Type()
-	v2Type := v2.Type()
-
-	// fast path
-	if v1Type != v2Type && !v2Type.AssignableTo(v1Type) && !v2Type.ConvertibleTo(v1Type) {
-		return false
-	}
-
-	kind := v1.Kind()
-	if isInt(kind) {
-		return v1.Int() == toInt(v2)
-	}
-	if isFloat(kind) {
-		return v1.Float() == toFloat(v2)
-	}
-	if isUint(kind) {
-		return v1.Uint() == toUint(v2)
-	}
-
-	switch kind {
-	case reflect.Bool:
-		return v1.Bool() == isTrue(v2)
-	case reflect.String:
-		return v1.String() == v2.String()
-	case reflect.Array:
-		vlen := v1.Len()
-		if vlen != v2.Len() {
-			return false
-		}
-		for i := 0; i < vlen; i++ {
-			if !checkEquality(v1.Index(i), v2.Index(i)) {
-				return false
-			}
-		}
-		return true
-	case reflect.Slice:
-		if v1.IsNil() != v2.IsNil() {
-			return false
-		}
-
-		vlen := v1.Len()
-		if vlen != v2.Len() {
-			return false
-		}
-
-		if v1.CanAddr() && v2.CanAddr() && v1.Pointer() == v2.Pointer() {
-			return true
-		}
-
-		for i := 0; i < vlen; i++ {
-			if !checkEquality(v1.Index(i), v2.Index(i)) {
-				return false
-			}
-		}
-		return true
-	case reflect.Interface:
-		if v1.IsNil() || v2.IsNil() {
-			return v1.IsNil() == v2.IsNil()
-		}
-		return checkEquality(v1.Elem(), v2.Elem())
-	case reflect.Ptr:
-		return v1.Pointer() == v2.Pointer()
-	case reflect.Struct:
-		numField := v1.NumField()
-		for i, n := 0, numField; i < n; i++ {
-			if !checkEquality(v1.Field(i), v2.Field(i)) {
-				return false
-			}
-		}
-		return true
-	case reflect.Map:
-		if v1.IsNil() != v2.IsNil() {
-			return false
-		}
-		if v1.Len() != v2.Len() {
-			return false
-		}
-		if v1.Pointer() == v2.Pointer() {
-			return true
-		}
-		for _, k := range v1.MapKeys() {
-			val1 := v1.MapIndex(k)
-			val2 := v2.MapIndex(k)
-			if !val1.IsValid() || !val2.IsValid() || !checkEquality(val1, val2) {
-				return false
-			}
-		}
-		return true
-	case reflect.Func:
-		return v1.IsNil() && v2.IsNil()
-	default:
-		// Normal equality suffices
-		return v1.Interface() == v2.Interface()
-	}
-}
-
-func isTrue(v reflect.Value) bool {
-	return v.IsValid() && !v.IsZero()
-}
-
-func canNumber(kind reflect.Kind) bool {
-	return isInt(kind) || isUint(kind) || isFloat(kind)
-}
-
-func castInt64(v reflect.Value) int64 {
-	kind := v.Kind()
-	switch {
-	case isInt(kind):
-		return v.Int()
-	case isUint(kind):
-		return int64(v.Uint())
-	case isFloat(kind):
-		return int64(v.Float())
-	}
-	return 0
-}
-
-var cachedStructsMutex = sync.RWMutex{}
-var cachedStructsFieldIndex = map[reflect.Type]map[string][]int{}
-
-// from text/template's exec.go:
-//
-// indirect returns the item at the end of indirection, and a bool to indicate
-// if it's nil. If the returned bool is true, the returned value's kind will be
-// either a pointer or interface.
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
-	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
-		if v.IsNil() {
-			return v, true
-		}
-	}
-	return v, false
-}
-
-// indirectInterface returns the concrete value in an interface value, or else v itself.
-// That is, if v represents the interface value x, the result is the same as reflect.ValueOf(x):
-// the fact that x was an interface value is forgotten.
-func indirectInterface(v reflect.Value) reflect.Value {
-	if v.Kind() == reflect.Interface {
-		return v.Elem()
-	}
-	return v
-}
-
-// indirectEface is the same as indirectInterface, but only indirects through v if its type
-// is the empty interface and its value is not nil.
-func indirectEface(v reflect.Value) reflect.Value {
-	if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 && !v.IsNil() {
-		return v.Elem()
-	}
-	return v
-}
-
-// mostly copied from text/template's evalField() (exec.go):
-func resolveIndex(v, index reflect.Value) (reflect.Value, error) {
-	if !v.IsValid() {
-		return reflect.Value{}, fmt.Errorf("there is no field or method '%s' in %s (%s)", index, v, getTypeString(v))
-	}
-
-	v, isNil := indirect(v)
-	if v.Kind() == reflect.Interface && isNil {
-		// Calling a method on a nil interface can't work. The
-		// MethodByName method call below would panic.
-		return reflect.Value{}, fmt.Errorf("nil pointer evaluating %s.%s", v.Type(), index)
-	}
-
-	// Unless it's an interface, need to get to a value of type *T to guarantee
-	// we see all methods of T and *T.
-	if index.Kind() == reflect.String {
-		ptr := v
-		if ptr.Kind() != reflect.Interface && ptr.Kind() != reflect.Ptr && ptr.CanAddr() {
-			ptr = ptr.Addr()
-		}
-		if method := ptr.MethodByName(index.String()); method.IsValid() {
-			return method, nil
-		}
-	}
-
-	// It's not a method on v; so now:
-	//  - if v is array/slice/string, use index as numeric index
-	//  - if v is a struct, use index as field name
-	//  - if v is a map, use index as key
-	//  - if v is (still) a pointer, indexing will fail but we check for nil to get a useful error
-	switch v.Kind() {
-	case reflect.Array, reflect.Slice, reflect.String:
-		x, err := indexArg(index, v.Len())
-		if err != nil {
-			return reflect.Value{}, err
-		}
-		return indirectEface(v.Index(x)), nil
-	case reflect.Struct:
-		if index.Kind() != reflect.String {
-			return reflect.Value{}, fmt.Errorf("can't use %s (%s, not string) as field name in struct type %s", index, index.Type(), v.Type())
-		}
-		tField, ok := v.Type().FieldByName(index.String())
-		if ok {
-			field := v.FieldByIndex(tField.Index)
-			if tField.PkgPath != "" { // field is unexported
-				return reflect.Value{}, fmt.Errorf("%s is an unexported field of struct type %s", index.String(), v.Type())
-			}
-			return indirectEface(field), nil
-		}
-		return reflect.Value{}, fmt.Errorf("can't use %s as field name in struct type %s", index, v.Type())
-	case reflect.Map:
-		// If it's a map, attempt to use the field name as a key.
-		if !index.Type().ConvertibleTo(v.Type().Key()) {
-			return reflect.Value{}, fmt.Errorf("can't use %s (%s) as key for map of type %s", index, index.Type(), v.Type())
-		}
-		index = index.Convert(v.Type().Key()) // noop in most cases, but not expensive
-		return indirectEface(v.MapIndex(index)), nil
-	case reflect.Ptr:
-		etyp := v.Type().Elem()
-		if etyp.Kind() == reflect.Struct && index.Kind() == reflect.String {
-			if _, ok := etyp.FieldByName(index.String()); !ok {
-				// If there's no such field, say "can't evaluate"
-				// instead of "nil pointer evaluating".
-				break
-			}
-		}
-		if isNil {
-			return reflect.Value{}, fmt.Errorf("nil pointer evaluating %s.%s", v.Type(), index)
-		}
-	}
-	return reflect.Value{}, fmt.Errorf("can't evaluate index %s (%s) in type %s", index, index.Type(), v.Type())
-}
-
-// from Go's text/template's funcs.go:
-//
-// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
-func indexArg(index reflect.Value, cap int) (int, error) {
-	var x int64
-	switch index.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		x = index.Int()
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		x = int64(index.Uint())
-	case reflect.Float32, reflect.Float64:
-		x = int64(index.Float())
-	case reflect.Invalid:
-		return 0, fmt.Errorf("cannot index slice/array/string with nil")
-	default:
-		return 0, fmt.Errorf("cannot index slice/array/string with type %s", index.Type())
-	}
-	if int(x) < 0 || int(x) >= cap {
-		return 0, fmt.Errorf("index out of range: %d", x)
-	}
-	return int(x), nil
-}
-
-func buildCache(typ reflect.Type, cache map[string][]int, parent []int) {
-	numFields := typ.NumField()
-	max := len(parent) + 1
-
-	for i := 0; i < numFields; i++ {
-
-		index := make([]int, max)
-		copy(index, parent)
-		index[len(parent)] = i
-
-		field := typ.Field(i)
-		if field.Anonymous {
-			typ := field.Type
-			if typ.Kind() == reflect.Struct {
-				buildCache(typ, cache, index)
-			}
-		}
-		cache[field.Name] = index
-	}
-}
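
// As a concrete illustration of the cache layout buildCache produces (the example
// types are hypothetical): for
//
//	type Base struct{ ID int }
//	type User struct {
//		Base
//		Name string
//	}
//
// the resulting map is {"Base": [0], "ID": [0, 0], "Name": [1]}; each value is the
// index path that reflect's FieldByIndex expects, with anonymous embedded structs
// flattened so promoted fields resolve by name.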

+ 0 - 160
vendor/github.com/CloudyKit/jet/v4/func.go

@@ -1,160 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jet
-
-import (
-	"fmt"
-	"reflect"
-	"time"
-)
-
-// Arguments holds the arguments passed to jet.Func.
-type Arguments struct {
-	runtime *Runtime
-	argExpr []Expression
-	argVal  []reflect.Value
-}
-
-// IsSet checks whether an argument is set or not. It behaves like the built-in isset function.
-func (a *Arguments) IsSet(argumentIndex int) bool {
-	return a.runtime.isSet(a.argExpr[argumentIndex])
-}
-
-// Get gets an argument by index.
-func (a *Arguments) Get(argumentIndex int) reflect.Value {
-	if argumentIndex < len(a.argVal) {
-		return a.argVal[argumentIndex]
-	}
-	if argumentIndex < len(a.argVal)+len(a.argExpr) {
-		return a.runtime.evalPrimaryExpressionGroup(a.argExpr[argumentIndex-len(a.argVal)])
-	}
-	return reflect.Value{}
-}
-
-// Panicf panics with formatted error message.
-func (a *Arguments) Panicf(format string, v ...interface{}) {
-	panic(fmt.Errorf(format, v...))
-}
-
-// RequireNumOfArguments panics if the number of arguments is not in the range specified by min and max.
-// Pass -1 for min or max to indicate that there is no minimum or maximum, respectively.
-func (a *Arguments) RequireNumOfArguments(funcname string, min, max int) {
-	num := len(a.argExpr) + len(a.argVal)
-	if min >= 0 && num < min {
-		a.Panicf("unexpected number of arguments in a call to %s", funcname)
-	} else if max >= 0 && num > max {
-		a.Panicf("unexpected number of arguments in a call to %s", funcname)
-	}
-}
-
-// NumOfArguments returns the number of arguments
-func (a *Arguments) NumOfArguments() int {
-	return len(a.argExpr) + len(a.argVal)
-}
-
-// Runtime returns the Runtime context.
-func (a *Arguments) Runtime() *Runtime {
-	return a.runtime
-}
-
-// ParseInto parses the arguments into the provided pointers. It returns an error if the number of pointers passed in is
-// smaller than the number of arguments, if any argument's value is invalid according to Go's reflect package, if an argument can't
-// be used as the value the pointer passed in at the corresponding position points to, or if an unhandled pointer type is encountered.
-// Allowed pointer types are pointers to interface{}, int, int64, float64, bool, string, time.Time, reflect.Value, []interface{},
-// and map[string]interface{}. If a pointer to a reflect.Value is passed in, the argument will be assigned as-is to the value pointed to.
-// For pointers to int or float types, type conversion is performed automatically if necessary.
-func (a *Arguments) ParseInto(ptrs ...interface{}) error {
-	if len(ptrs) < a.NumOfArguments() {
-		return fmt.Errorf("have %d arguments, but only %d pointers to parse into", a.NumOfArguments(), len(ptrs))
-	}
-
-	for i := 0; i < a.NumOfArguments(); i++ {
-		arg, ptr := indirectEface(a.Get(i)), ptrs[i]
-		ok := false
-
-		if !arg.IsValid() {
-			return fmt.Errorf("argument at position %d is not a valid value", i)
-		}
-
-		switch p := ptr.(type) {
-		case *reflect.Value:
-			*p, ok = arg, true
-		case *int:
-			switch arg.Kind() {
-			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-				*p, ok = int(arg.Int()), true
-			case reflect.Float32, reflect.Float64:
-				*p, ok = int(arg.Float()), true
-			default:
-				return fmt.Errorf("could not parse %v (%s) into %v (%T)", arg, arg.Type(), ptr, ptr)
-			}
-		case *int64:
-			switch arg.Kind() {
-			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-				*p, ok = arg.Int(), true
-			case reflect.Float32, reflect.Float64:
-				*p, ok = int64(arg.Float()), true
-			default:
-				return fmt.Errorf("could not parse %v (%s) into %v (%T)", arg, arg.Type(), ptr, ptr)
-			}
-		case *float64:
-			switch arg.Kind() {
-			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-				*p, ok = float64(arg.Int()), true
-			case reflect.Float32, reflect.Float64:
-				*p, ok = arg.Float(), true
-			default:
-				return fmt.Errorf("could not parse %v (%s) into %v (%T)", arg, arg.Type(), ptr, ptr)
-			}
-		}
-
-		if ok {
-			continue
-		}
-
-		if !arg.CanInterface() {
-			return fmt.Errorf("argument at position %d can't be accessed via Interface()", i)
-		}
-		val := arg.Interface()
-
-		switch p := ptr.(type) {
-		case *interface{}:
-			*p, ok = val, true
-		case *bool:
-			*p, ok = val.(bool)
-		case *string:
-			*p, ok = val.(string)
-		case *time.Time:
-			*p, ok = val.(time.Time)
-		case *[]interface{}:
-			*p, ok = val.([]interface{})
-		case *map[string]interface{}:
-			*p, ok = val.(map[string]interface{})
-		default:
-			return fmt.Errorf("trying to parse %v into %v: unhandled value type %T", arg, p, val)
-		}
-
-		if !ok {
-			return fmt.Errorf("could not parse %v (%s) into %v (%T)", arg, arg.Type(), ptr, ptr)
-		}
-	}
-
-	return nil
-}
-
-// Func is a function type that the runtime calls directly, which is faster than calling through reflect.
-// If a function is called many times during the execution of a template, consider implementing
-// a wrapper for it that satisfies Func.
-type Func func(Arguments) reflect.Value
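
// sum is a minimal, hypothetical sketch of a custom Func built on the Arguments helpers
// above; the name and its registration are illustrative only. Implementing Func avoids
// the reflect-based call path for helpers that templates invoke frequently.
var sum Func = func(args Arguments) reflect.Value {
	args.RequireNumOfArguments("sum", 2, 2)
	var a, b float64
	if err := args.ParseInto(&a, &b); err != nil {
		args.Panicf("sum: %v", err)
	}
	return reflect.ValueOf(a + b)
}

// A Func like this is typically registered on the template set (e.g. via AddGlobalFunc)
// and then called from templates as sum(1, 2).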

+ 0 - 684
vendor/github.com/CloudyKit/jet/v4/lex.go

@@ -1,684 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jet
-
-import (
-	"fmt"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
-	typ itemType // The type of this item.
-	pos Pos      // The starting position, in bytes, of this item in the input string.
-	val string   // The value of this item.
-}
-
-func (i item) String() string {
-	switch {
-	case i.typ == itemEOF:
-		return "EOF"
-	case i.typ == itemError:
-		return i.val
-	case i.typ > itemKeyword:
-		return fmt.Sprintf("<%s>", i.val)
-	case len(i.val) > 10:
-		return fmt.Sprintf("%.10q...", i.val)
-	}
-	return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
-	itemError        itemType = iota // error occurred; value is text of error
-	itemBool                         // boolean constant
-	itemChar                         // printable ASCII character; grab bag for comma etc.
-	itemCharConstant                 // character constant
-	itemComplex                      // complex constant (1+2i); imaginary is just a number
-	itemEOF
-	itemField      // alphanumeric identifier starting with '.'
-	itemIdentifier // alphanumeric identifier not starting with '.'
-	itemLeftDelim  // left action delimiter
-	itemLeftParen  // '(' inside action
-	itemNumber     // simple number, including imaginary
-	itemPipe       // pipe symbol
-	itemRawString  // raw quoted string (includes quotes)
-	itemRightDelim // right action delimiter
-	itemRightParen // ')' inside action
-	itemSpace      // run of spaces separating arguments
-	itemString     // quoted string (includes quotes)
-	itemText       // plain text
-	itemAssign
-	itemEquals
-	itemNotEquals
-	itemGreat
-	itemGreatEquals
-	itemLess
-	itemLessEquals
-	itemComma
-	itemSemicolon
-	itemAdd
-	itemMinus
-	itemMul
-	itemDiv
-	itemMod
-	itemColon
-	itemTernary
-	itemLeftBrackets
-	itemRightBrackets
-	// Keywords appear after all the rest.
-	itemKeyword // used only to delimit the keywords
-	itemExtends
-	itemImport
-	itemInclude
-	itemBlock
-	itemEnd
-	itemYield
-	itemContent
-	itemIf
-	itemElse
-	itemRange
-	itemTry
-	itemCatch
-	itemReturn
-	itemAnd
-	itemOr
-	itemNot
-	itemNil
-	itemMSG
-	itemTrans
-)
-
-var key = map[string]itemType{
-	"extends": itemExtends,
-	"import":  itemImport,
-
-	"include": itemInclude,
-	"block":   itemBlock,
-	"end":     itemEnd,
-	"yield":   itemYield,
-	"content": itemContent,
-
-	"if":   itemIf,
-	"else": itemElse,
-
-	"range": itemRange,
-
-	"try":   itemTry,
-	"catch": itemCatch,
-
-	"return": itemReturn,
-
-	"and": itemAnd,
-	"or":  itemOr,
-	"not": itemNot,
-
-	"nil": itemNil,
-
-	"msg":   itemMSG,
-	"trans": itemTrans,
-}
-
-const eof = -1
-
-const (
-	defaultLeftDelim  = "{{"
-	defaultRightDelim = "}}"
-	leftComment       = "{*"
-	rightComment      = "*}"
-)
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
-	name       string    // the name of the input; used only for error reports
-	input      string    // the string being scanned
-	state      stateFn   // the next lexing function to enter
-	pos        Pos       // current position in the input
-	start      Pos       // start position of this item
-	width      Pos       // width of last rune read from input
-	lastPos    Pos       // position of most recent item returned by nextItem
-	items      chan item // channel of scanned items
-	parenDepth int       // nesting depth of ( ) exprs
-	lastType   itemType
-	leftDelim  string
-	rightDelim string
-}
-
-func (l *lexer) setDelimiters(leftDelim, rightDelim string) {
-	if leftDelim != "" {
-		l.leftDelim = leftDelim
-	}
-	if rightDelim != "" {
-		l.rightDelim = rightDelim
-	}
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() rune {
-	if int(l.pos) >= len(l.input) {
-		l.width = 0
-		return eof
-	}
-	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
-	l.width = Pos(w)
-	l.pos += l.width
-	return r
-}
-
-// peek returns but does not consume the next rune in the input.
-func (l *lexer) peek() rune {
-	r := l.next()
-	l.backup()
-	return r
-}
-
-// backup steps back one rune. Can only be called once per call of next.
-func (l *lexer) backup() {
-	l.pos -= l.width
-}
-
-// emit passes an item back to the client.
-func (l *lexer) emit(t itemType) {
-	l.lastType = t
-	l.items <- item{t, l.start, l.input[l.start:l.pos]}
-	l.start = l.pos
-}
-
-// ignore skips over the pending input before this point.
-func (l *lexer) ignore() {
-	l.start = l.pos
-}
-
-// accept consumes the next rune if it's from the valid set.
-func (l *lexer) accept(valid string) bool {
-	if strings.IndexRune(valid, l.next()) >= 0 {
-		return true
-	}
-	l.backup()
-	return false
-}
-
-// acceptRun consumes a run of runes from the valid set.
-func (l *lexer) acceptRun(valid string) {
-	for strings.IndexRune(valid, l.next()) >= 0 {
-	}
-	l.backup()
-}
-
-// lineNumber reports which line we're on, based on the position of
-// the previous item returned by nextItem. Doing it this way
-// means we don't have to worry about peek double counting.
-func (l *lexer) lineNumber() int {
-	return 1 + strings.Count(l.input[:l.lastPos], "\n")
-}
-
-// errorf returns an error token and terminates the scan by passing
-// back a nil pointer that will be the next state, terminating l.nextItem.
-func (l *lexer) errorf(format string, args ...interface{}) stateFn {
-	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
-	return nil
-}
-
-// nextItem returns the next item from the input.
-// Called by the parser, not in the lexing goroutine.
-func (l *lexer) nextItem() item {
-	item := <-l.items
-	l.lastPos = item.pos
-	return item
-}
-
-// drain drains the output so the lexing goroutine will exit.
-// Called by the parser, not in the lexing goroutine.
-func (l *lexer) drain() {
-	for range l.items {
-	}
-}
-
-// lex creates a new scanner for the input string.
-func lex(name, input string, run bool) *lexer {
-	l := &lexer{
-		name:       name,
-		input:      input,
-		items:      make(chan item),
-		leftDelim:  defaultLeftDelim,
-		rightDelim: defaultRightDelim,
-	}
-	if run {
-		l.run()
-	}
-	return l
-}
-
-// run runs the state machine for the lexer.
-func (l *lexer) run() {
-	go func() {
-		for l.state = lexText; l.state != nil; {
-			l.state = l.state(l)
-		}
-		close(l.items)
-	}()
-}
-
-// state functions
-func lexText(l *lexer) stateFn {
-	for {
-		if i := strings.IndexByte(l.input[l.pos:], l.leftDelim[0]); i == -1 {
-			l.pos = Pos(len(l.input))
-			break
-		} else {
-			l.pos += Pos(i)
-			if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
-				if l.pos > l.start {
-					l.emit(itemText)
-				}
-				return lexLeftDelim
-			}
-			if strings.HasPrefix(l.input[l.pos:], leftComment) {
-				if l.pos > l.start {
-					l.emit(itemText)
-				}
-				return lexComment
-			}
-		}
-		if l.next() == eof {
-			break
-		}
-	}
-	// Correctly reached EOF.
-	if l.pos > l.start {
-		l.emit(itemText)
-	}
-	l.emit(itemEOF)
-	return nil
-}
-
-func lexLeftDelim(l *lexer) stateFn {
-	l.pos += Pos(len(l.leftDelim))
-	l.emit(itemLeftDelim)
-	l.parenDepth = 0
-	return lexInsideAction
-}
-
-// lexComment scans a comment. The left comment marker is known to be present.
-func lexComment(l *lexer) stateFn {
-	l.pos += Pos(len(leftComment))
-	i := strings.Index(l.input[l.pos:], rightComment)
-	if i < 0 {
-		return l.errorf("unclosed comment")
-	}
-	l.pos += Pos(i + len(rightComment))
-	l.ignore()
-	return lexText
-}
-
-// lexRightDelim scans the right delimiter, which is known to be present.
-func lexRightDelim(l *lexer) stateFn {
-	l.pos += Pos(len(l.rightDelim))
-	l.emit(itemRightDelim)
-	return lexText
-}
-
-// lexInsideAction scans the elements inside action delimiters.
-func lexInsideAction(l *lexer) stateFn {
-	// Either number, quoted string, or identifier.
-	// Spaces separate arguments; runs of spaces turn into itemSpace.
-	// Pipe symbols separate and are emitted.
-	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
-		if l.parenDepth == 0 {
-			return lexRightDelim
-		}
-		return l.errorf("unclosed left paren")
-	}
-	switch r := l.next(); {
-	case r == eof:
-		return l.errorf("unclosed action")
-	case isSpace(r):
-		return lexSpace
-	case r == ',':
-		l.emit(itemComma)
-	case r == ';':
-		l.emit(itemSemicolon)
-	case r == '*':
-		l.emit(itemMul)
-	case r == '/':
-		l.emit(itemDiv)
-	case r == '%':
-		l.emit(itemMod)
-	case r == '-':
-
-		if r := l.peek(); '0' <= r && r <= '9' &&
-			itemAdd != l.lastType &&
-			itemMinus != l.lastType &&
-			itemNumber != l.lastType &&
-			itemIdentifier != l.lastType &&
-			itemString != l.lastType &&
-			itemRawString != l.lastType &&
-			itemCharConstant != l.lastType &&
-			itemBool != l.lastType &&
-			itemField != l.lastType &&
-			itemChar != l.lastType &&
-			itemTrans != l.lastType {
-			l.backup()
-			return lexNumber
-		}
-		l.emit(itemMinus)
-	case r == '+':
-		if r := l.peek(); '0' <= r && r <= '9' &&
-			itemAdd != l.lastType &&
-			itemMinus != l.lastType &&
-			itemNumber != l.lastType &&
-			itemIdentifier != l.lastType &&
-			itemString != l.lastType &&
-			itemRawString != l.lastType &&
-			itemCharConstant != l.lastType &&
-			itemBool != l.lastType &&
-			itemField != l.lastType &&
-			itemChar != l.lastType &&
-			itemTrans != l.lastType {
-			l.backup()
-			return lexNumber
-		}
-		l.emit(itemAdd)
-	case r == '?':
-		l.emit(itemTernary)
-	case r == '&':
-		if l.next() == '&' {
-			l.emit(itemAnd)
-		} else {
-			l.backup()
-		}
-	case r == '<':
-		if l.next() == '=' {
-			l.emit(itemLessEquals)
-		} else {
-			l.backup()
-			l.emit(itemLess)
-		}
-	case r == '>':
-		if l.next() == '=' {
-			l.emit(itemGreatEquals)
-		} else {
-			l.backup()
-			l.emit(itemGreat)
-		}
-	case r == '!':
-		if l.next() == '=' {
-			l.emit(itemNotEquals)
-		} else {
-			l.backup()
-			l.emit(itemNot)
-		}
-
-	case r == '=':
-		if l.next() == '=' {
-			l.emit(itemEquals)
-		} else {
-			l.backup()
-			l.emit(itemAssign)
-		}
-	case r == ':':
-		if l.next() == '=' {
-			l.emit(itemAssign)
-		} else {
-			l.backup()
-			l.emit(itemColon)
-		}
-	case r == '|':
-		if l.next() == '|' {
-			l.emit(itemOr)
-		} else {
-			l.backup()
-			l.emit(itemPipe)
-		}
-	case r == '"':
-		return lexQuote
-	case r == '`':
-		return lexRawQuote
-	case r == '\'':
-		return lexChar
-	case r == '.':
-		// special look-ahead for ".field" so we don't break l.backup().
-		if l.pos < Pos(len(l.input)) {
-			r := l.input[l.pos]
-			if r < '0' || '9' < r {
-				return lexField
-			}
-		}
-		fallthrough // '.' can start a number.
-	case '0' <= r && r <= '9':
-		l.backup()
-		return lexNumber
-	case isAlphaNumeric(r):
-		l.backup()
-		return lexIdentifier
-	case r == '[':
-		l.emit(itemLeftBrackets)
-	case r == ']':
-		l.emit(itemRightBrackets)
-	case r == '(':
-		l.emit(itemLeftParen)
-		l.parenDepth++
-	case r == ')':
-		l.emit(itemRightParen)
-		l.parenDepth--
-		if l.parenDepth < 0 {
-			return l.errorf("unexpected right paren %#U", r)
-		}
-	case r <= unicode.MaxASCII && unicode.IsPrint(r):
-		l.emit(itemChar)
-		return lexInsideAction
-	default:
-		return l.errorf("unrecognized character in action: %#U", r)
-	}
-	return lexInsideAction
-}
-
-// lexSpace scans a run of space characters.
-// One space has already been seen.
-func lexSpace(l *lexer) stateFn {
-	for isSpace(l.peek()) {
-		l.next()
-	}
-	l.emit(itemSpace)
-	return lexInsideAction
-}
-
-// lexIdentifier scans an alphanumeric.
-func lexIdentifier(l *lexer) stateFn {
-Loop:
-	for {
-		switch r := l.next(); {
-		case isAlphaNumeric(r):
-		// absorb.
-		default:
-			l.backup()
-			word := l.input[l.start:l.pos]
-			if !l.atTerminator() {
-				return l.errorf("bad character %#U", r)
-			}
-			switch {
-			case key[word] > itemKeyword:
-				l.emit(key[word])
-			case word[0] == '.':
-				l.emit(itemField)
-			case word == "true", word == "false":
-				l.emit(itemBool)
-			default:
-				l.emit(itemIdentifier)
-			}
-			break Loop
-		}
-	}
-	return lexInsideAction
-}
-
-// lexField scans a field: .Alphanumeric.
-// The . has been scanned.
-func lexField(l *lexer) stateFn {
-
-	if l.atTerminator() {
-		// Nothing interesting follows -> "." or "$".
-		l.emit(itemIdentifier)
-		return lexInsideAction
-	}
-
-	var r rune
-	for {
-		r = l.next()
-		if !isAlphaNumeric(r) {
-			l.backup()
-			break
-		}
-	}
-	if !l.atTerminator() {
-		return l.errorf("bad character %#U", r)
-	}
-	l.emit(itemField)
-	return lexInsideAction
-}
-
-// atTerminator reports whether the input is at valid termination character to
-// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
-// like "$x+2" not being acceptable without a space, in case we decide one
-// day to implement arithmetic.
-func (l *lexer) atTerminator() bool {
-	r := l.peek()
-	if isSpace(r) {
-		return true
-	}
-	switch r {
-	case eof, '.', ',', '|', ':', ')', '=', '(', ';', '?', '[', ']', '+', '-', '/', '%', '*', '&', '!', '<', '>':
-		return true
-	}
-	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
-	// succeed but should fail) but only in extremely rare cases caused by willfully
-	// bad choice of delimiter.
-	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
-		return true
-	}
-	return false
-}
-
-// lexChar scans a character constant. The initial quote is already
-// scanned. Syntax checking is done by the parser.
-func lexChar(l *lexer) stateFn {
-Loop:
-	for {
-		switch l.next() {
-		case '\\':
-			if r := l.next(); r != eof && r != '\n' {
-				break
-			}
-			fallthrough
-		case eof, '\n':
-			return l.errorf("unterminated character constant")
-		case '\'':
-			break Loop
-		}
-	}
-	l.emit(itemCharConstant)
-	return lexInsideAction
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-func lexNumber(l *lexer) stateFn {
-	if !l.scanNumber() {
-		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
-	}
-
-	l.emit(itemNumber)
-	return lexInsideAction
-}
-
-func (l *lexer) scanNumber() bool {
-	// Optional leading sign.
-	l.accept("+-")
-	// Is it hex?
-	digits := "0123456789"
-	if l.accept("0") && l.accept("xX") {
-		digits = "0123456789abcdefABCDEF"
-	}
-	l.acceptRun(digits)
-	if l.accept(".") {
-		l.acceptRun(digits)
-	}
-	if l.accept("eE") {
-		l.accept("+-")
-		l.acceptRun("0123456789")
-	}
-	//Is it imaginary?
-	l.accept("i")
-	//Next thing mustn't be alphanumeric.
-	if isAlphaNumeric(l.peek()) {
-		l.next()
-		return false
-	}
-	return true
-}
-
-// lexQuote scans a quoted string.
-func lexQuote(l *lexer) stateFn {
-Loop:
-	for {
-		switch l.next() {
-		case '\\':
-			if r := l.next(); r != eof && r != '\n' {
-				break
-			}
-			fallthrough
-		case eof, '\n':
-			return l.errorf("unterminated quoted string")
-		case '"':
-			break Loop
-		}
-	}
-	l.emit(itemString)
-	return lexInsideAction
-}
-
-// lexRawQuote scans a raw quoted string.
-func lexRawQuote(l *lexer) stateFn {
-Loop:
-	for {
-		switch l.next() {
-		case eof:
-			return l.errorf("unterminated raw quoted string")
-		case '`':
-			break Loop
-		}
-	}
-	l.emit(itemRawString)
-	return lexInsideAction
-}
-
-// isSpace reports whether r is a space character.
-func isSpace(r rune) bool {
-	return r == ' ' || r == '\t' || r == '\r' || r == '\n'
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-func isAlphaNumeric(r rune) bool {
-	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}

+ 0 - 60
vendor/github.com/CloudyKit/jet/v4/loader.go

@@ -1,60 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jet
-
-import (
-	"io"
-	"os"
-	"path/filepath"
-)
-
-// Loader is a minimal interface required for loading templates.
-type Loader interface {
-	// Exists checks for template existence.
-	Exists(path string) (string, bool)
-	// Open opens the underlying reader with template content.
-	Open(path string) (io.ReadCloser, error)
-}
-
-// OSFileSystemLoader implements Loader interface using OS file system (os.File).
-type OSFileSystemLoader struct {
-	dir string
-}
-
-// compile time check that we implement Loader
-var _ Loader = (*OSFileSystemLoader)(nil)
-
-// NewOSFileSystemLoader returns an initialized OSFileSystemLoader.
-func NewOSFileSystemLoader(dirPath string) *OSFileSystemLoader {
-	return &OSFileSystemLoader{
-		dir: dirPath,
-	}
-}
-
-// Open opens a file from OS file system.
-func (l *OSFileSystemLoader) Open(path string) (io.ReadCloser, error) {
-	return os.Open(filepath.Join(l.dir, path))
-}
-
-// Exists checks if the template name exists by walking the list of template paths
-// returns true if the template file was found
-func (l *OSFileSystemLoader) Exists(path string) (string, bool) {
-	path = filepath.Join(l.dir, path)
-	stat, err := os.Stat(path)
-	if err == nil && !stat.IsDir() {
-		return path, true
-	}
-	return "", false
-}
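
// inMemLoader is a minimal, hypothetical sketch of an alternative Loader implementation
// that serves templates from a map, which can be convenient in tests; it would need the
// "strings" and "io/ioutil" imports in addition to the ones above.
type inMemLoader struct {
	templates map[string]string // template path -> template source
}

// Exists reports whether the named template is present in the map.
func (l *inMemLoader) Exists(path string) (string, bool) {
	_, ok := l.templates[path]
	return path, ok
}

// Open returns a reader over the in-memory template source.
func (l *inMemLoader) Open(path string) (io.ReadCloser, error) {
	source, ok := l.templates[path]
	if !ok {
		return nil, os.ErrNotExist
	}
	return ioutil.NopCloser(strings.NewReader(source)), nil
}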

+ 0 - 695
vendor/github.com/CloudyKit/jet/v4/node.go

@@ -1,695 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jet
-
-import (
-	"bytes"
-	"fmt"
-)
-
-var textFormat = "%s" //Changed to "%q" in tests for better error messages.
-
-type Node interface {
-	Type() NodeType
-	String() string
-	Position() Pos
-	line() int
-	error(error)
-	errorf(string, ...interface{})
-}
-
-type Expression interface {
-	Node
-}
-
-// Pos represents a byte position in the original input text from which
-// this template was parsed.
-type Pos int
-
-func (p Pos) Position() Pos {
-	return p
-}
-
-// NodeType identifies the type of a parse tree node.
-type NodeType int
-
-type NodeBase struct {
-	TemplatePath string
-	Line         int
-	NodeType
-	Pos
-}
-
-func (node *NodeBase) line() int {
-	return node.Line
-}
-
-func (node *NodeBase) error(err error) {
-	node.errorf("%s", err)
-}
-
-func (node *NodeBase) errorf(format string, v ...interface{}) {
-	panic(fmt.Errorf("Jet Runtime Error (%q:%d): %s", node.TemplatePath, node.Line, fmt.Sprintf(format, v...)))
-}
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
-	return t
-}
-
-const (
-	NodeText       NodeType = iota //Plain text.
-	NodeAction                     //A non-control action such as a field evaluation.
-	NodeChain                      //A sequence of field accesses.
-	NodeCommand                    //An element of a pipeline.
-	NodeField                      //A field or method name.
-	NodeIdentifier                 //An identifier; always a function name.
-	NodeList                       //A list of Nodes.
-	NodePipe                       //A pipeline of commands.
-	NodeSet
-	//NodeWith                       //A with action.
-	NodeInclude
-	NodeBlock
-	nodeEnd //An end action. Not added to tree.
-	NodeYield
-	nodeContent
-	NodeIf    //An if action.
-	nodeElse  //An else action. Not added to tree.
-	NodeRange //A range action.
-	NodeTry
-	nodeCatch
-	NodeReturn
-	beginExpressions
-	NodeString //A string constant.
-	NodeNil    //An untyped nil constant.
-	NodeNumber //A numerical constant.
-	NodeBool   //A boolean constant.
-	NodeAdditiveExpr
-	NodeMultiplicativeExpr
-	NodeComparativeExpr
-	NodeNumericComparativeExpr
-	NodeLogicalExpr
-	NodeCallExpr
-	NodeNotExpr
-	NodeTernaryExpr
-	NodeIndexExpr
-	NodeSliceExpr
-	endExpressions
-)
-
-// Nodes.
-
-// ListNode holds a sequence of nodes.
-type ListNode struct {
-	NodeBase
-	Nodes []Node //The element nodes in lexical order.
-}
-
-func (l *ListNode) append(n Node) {
-	l.Nodes = append(l.Nodes, n)
-}
-
-func (l *ListNode) String() string {
-	b := new(bytes.Buffer)
-	for _, n := range l.Nodes {
-		fmt.Fprint(b, n)
-	}
-	return b.String()
-}
-
-// TextNode holds plain text.
-type TextNode struct {
-	NodeBase
-	Text []byte
-}
-
-func (t *TextNode) String() string {
-	return fmt.Sprintf(textFormat, t.Text)
-}
-
-// PipeNode holds a pipeline with optional declaration
-type PipeNode struct {
-	NodeBase
-	Cmds     []*CommandNode //The commands in lexical order.
-}
-
-func (p *PipeNode) append(command *CommandNode) {
-	p.Cmds = append(p.Cmds, command)
-}
-
-func (p *PipeNode) String() string {
-	s := ""
-	for i, c := range p.Cmds {
-		if i > 0 {
-			s += " | "
-		}
-		s += c.String()
-	}
-	return s
-}
-
-// ActionNode holds an action (something bounded by delimiters).
-// Control actions have their own nodes; ActionNode represents simple
-// ones such as field evaluations and parenthesized pipelines.
-type ActionNode struct {
-	NodeBase
-	Set  *SetNode
-	Pipe *PipeNode
-}
-
-func (a *ActionNode) String() string {
-	if a.Set != nil {
-		if a.Pipe == nil {
-			return fmt.Sprintf("{{%s}}", a.Set)
-		}
-		return fmt.Sprintf("{{%s;%s}}", a.Set, a.Pipe)
-	}
-	return fmt.Sprintf("{{%s}}", a.Pipe)
-}
-
-// CommandNode holds a command (a pipeline inside an evaluating action).
-type CommandNode struct {
-	NodeBase
-	CallExprNode
-}
-
-func (c *CommandNode) append(arg Node) {
-	c.Args = append(c.Args, arg)
-}
-
-func (c *CommandNode) String() string {
-	if c.Args == nil {
-		return c.BaseExpr.String()
-	}
-
-	arguments := ""
-	for i, expr := range c.Args {
-		if i > 0 {
-			arguments += ", "
-		}
-		arguments += expr.String()
-	}
-	return fmt.Sprintf("%s(%s)", c.BaseExpr, arguments)
-}
-
-// IdentifierNode holds an identifier.
-type IdentifierNode struct {
-	NodeBase
-	Ident string //The identifier's name.
-}
-
-func (i *IdentifierNode) String() string {
-	return i.Ident
-}
-
-// NilNode holds the special identifier 'nil' representing an untyped nil constant.
-type NilNode struct {
-	NodeBase
-}
-
-func (n *NilNode) String() string {
-	return "nil"
-}
-
-// FieldNode holds a field (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The period is dropped from each ident.
-type FieldNode struct {
-	NodeBase
-	Ident []string //The identifiers in lexical order.
-}
-
-func (f *FieldNode) String() string {
-	s := ""
-	for _, id := range f.Ident {
-		s += "." + id
-	}
-	return s
-}
-
-// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The periods are dropped from each ident.
-type ChainNode struct {
-	NodeBase
-	Node  Node
-	Field []string //The identifiers in lexical order.
-}
-
-// Add adds the named field (which should start with a period) to the end of the chain.
-func (c *ChainNode) Add(field string) {
-	if len(field) == 0 || field[0] != '.' {
-		panic("no dot in field")
-	}
-	field = field[1:] //Remove leading dot.
-	if field == "" {
-		panic("empty field")
-	}
-	c.Field = append(c.Field, field)
-}
-
-func (c *ChainNode) String() string {
-	s := c.Node.String()
-	if _, ok := c.Node.(*PipeNode); ok {
-		s = "(" + s + ")"
-	}
-	for _, field := range c.Field {
-		s += "." + field
-	}
-	return s
-}
-
-// BoolNode holds a boolean constant.
-type BoolNode struct {
-	NodeBase
-	True bool //The value of the boolean constant.
-}
-
-func (b *BoolNode) String() string {
-	if b.True {
-		return "true"
-	}
-	return "false"
-}
-
-// NumberNode holds a number: signed or unsigned integer, float, or complex.
-// The value is parsed and stored under all the types that can represent the value.
-// This simulates in a small amount of code the behavior of Go's ideal constants.
-type NumberNode struct {
-	NodeBase
-
-	IsInt      bool       //Number has an integral value.
-	IsUint     bool       //Number has an unsigned integral value.
-	IsFloat    bool       //Number has a floating-point value.
-	IsComplex  bool       //Number is complex.
-	Int64      int64      //The signed integer value.
-	Uint64     uint64     //The unsigned integer value.
-	Float64    float64    //The floating-point value.
-	Complex128 complex128 //The complex value.
-	Text       string     //The original textual representation from the input.
-}
-
-// simplifyComplex pulls out any other types that are represented by the complex number.
-// These all require that the imaginary part be zero.
-func (n *NumberNode) simplifyComplex() {
-	n.IsFloat = imag(n.Complex128) == 0
-	if n.IsFloat {
-		n.Float64 = real(n.Complex128)
-		n.IsInt = float64(int64(n.Float64)) == n.Float64
-		if n.IsInt {
-			n.Int64 = int64(n.Float64)
-		}
-		n.IsUint = float64(uint64(n.Float64)) == n.Float64
-		if n.IsUint {
-			n.Uint64 = uint64(n.Float64)
-		}
-	}
-}
-
-func (n *NumberNode) String() string {
-	return n.Text
-}
-
-// StringNode holds a string constant. The value has been "unquoted".
-type StringNode struct {
-	NodeBase
-
-	Quoted string //The original text of the string, with quotes.
-	Text   string //The string, after quote processing.
-}
-
-func (s *StringNode) String() string {
-	return s.Quoted
-}
-
-// endNode represents an {{end}} action.
-// It does not appear in the final parse tree.
-type endNode struct {
-	NodeBase
-}
-
-func (e *endNode) String() string {
-	return "{{end}}"
-}
-
-// contentNode represents a {{content}} action.
-// It does not appear in the final parse tree.
-type contentNode struct {
-	NodeBase
-}
-
-func (e *contentNode) String() string {
-	return "{{content}}"
-}
-
-// elseNode represents an {{else}} action. Does not appear in the final tree.
-type elseNode struct {
-	NodeBase //The line number in the input. Deprecated: Kept for compatibility.
-}
-
-func (e *elseNode) String() string {
-	return "{{else}}"
-}
-
-// SetNode represents a set action, ident( ',' ident)* '=' expression ( ',' expression )*
-type SetNode struct {
-	NodeBase
-	Let                bool
-	IndexExprGetLookup bool
-	Left               []Expression
-	Right              []Expression
-}
-
-func (set *SetNode) String() string {
-	var s = ""
-
-	for i, v := range set.Left {
-		if i > 0 {
-			s += ", "
-		}
-		s += v.String()
-	}
-
-	if set.Let {
-		s += ":="
-	} else {
-		s += "="
-	}
-
-	for i, v := range set.Right {
-		if i > 0 {
-			s += ", "
-		}
-		s += v.String()
-	}
-
-	return s
-}
-
-// BranchNode is the common representation of if, range, and with.
-type BranchNode struct {
-	NodeBase
-	Set        *SetNode
-	Expression Expression
-	List       *ListNode
-	ElseList   *ListNode
-}
-
-func (b *BranchNode) String() string {
-
-	if b.NodeType == NodeRange {
-		s := ""
-		if b.Set != nil {
-			s = b.Set.String()
-		} else {
-			s = b.Expression.String()
-		}
-
-		if b.ElseList != nil {
-			return fmt.Sprintf("{{range %s}}%s{{else}}%s{{end}}", s, b.List, b.ElseList)
-		}
-		return fmt.Sprintf("{{range %s}}%s{{end}}", s, b.List)
-	} else {
-		s := ""
-		if b.Set != nil {
-			s = b.Set.String() + ";"
-		}
-		if b.ElseList != nil {
-			return fmt.Sprintf("{{if %s%s}}%s{{else}}%s{{end}}", s, b.Expression, b.List, b.ElseList)
-		}
-		return fmt.Sprintf("{{if %s%s}}%s{{end}}", s, b.Expression, b.List)
-	}
-}
-
-// IfNode represents an {{if}} action and its commands.
-type IfNode struct {
-	BranchNode
-}
-
-// RangeNode represents a {{range}} action and its commands.
-type RangeNode struct {
-	BranchNode
-}
-
-type BlockParameter struct {
-	Identifier string
-	Expression Expression
-}
-
-type BlockParameterList struct {
-	NodeBase
-	List []BlockParameter
-}
-
-func (bplist *BlockParameterList) Param(name string) (Expression, int) {
-	for i := 0; i < len(bplist.List); i++ {
-		param := &bplist.List[i]
-		if param.Identifier == name {
-			return param.Expression, i
-		}
-	}
-	return nil, -1
-}
-
-func (bplist *BlockParameterList) String() (str string) {
-	buff := bytes.NewBuffer(nil)
-	for _, bp := range bplist.List {
-		if bp.Identifier == "" {
-			fmt.Fprintf(buff, "%s,", bp.Expression)
-		} else {
-			if bp.Expression == nil {
-				fmt.Fprintf(buff, "%s,", bp.Identifier)
-			} else {
-				fmt.Fprintf(buff, "%s=%s,", bp.Identifier, bp.Expression)
-			}
-		}
-	}
-	if buff.Len() > 0 {
-		str = buff.String()[0 : buff.Len()-1]
-	}
-	return
-}
-
-// BlockNode represents a {{block }} action.
-type BlockNode struct {
-	NodeBase        //The line number in the input. Deprecated: Kept for compatibility.
-	Name     string //The name of the template (unquoted).
-
-	Parameters *BlockParameterList
-	Expression Expression //The command to evaluate as dot for the template.
-
-	List    *ListNode
-	Content *ListNode
-}
-
-func (t *BlockNode) String() string {
-	if t.Content != nil {
-		if t.Expression == nil {
-			return fmt.Sprintf("{{block %s(%s)}}%s{{content}}%s{{end}}", t.Name, t.Parameters, t.List, t.Content)
-		}
-		return fmt.Sprintf("{{block %s(%s) %s}}%s{{content}}%s{{end}}", t.Name, t.Parameters, t.Expression, t.List, t.Content)
-	}
-	if t.Expression == nil {
-		return fmt.Sprintf("{{block %s(%s)}}%s{{end}}", t.Name, t.Parameters, t.List)
-	}
-	return fmt.Sprintf("{{block %s(%s) %s}}%s{{end}}", t.Name, t.Parameters, t.Expression, t.List)
-}
-
-// YieldNode represents a {{yield}} action
-type YieldNode struct {
-	NodeBase          //The line number in the input. Deprecated: Kept for compatibility.
-	Name       string //The name of the template (unquoted).
-	Parameters *BlockParameterList
-	Expression Expression //The command to evaluate as dot for the template.
-	Content    *ListNode
-	IsContent  bool
-}
-
-func (t *YieldNode) String() string {
-	if t.IsContent {
-		if t.Expression == nil {
-			return "{{yield content}}"
-		}
-		return fmt.Sprintf("{{yield content %s}}", t.Expression)
-	}
-
-	if t.Content != nil {
-		if t.Expression == nil {
-			return fmt.Sprintf("{{yield %s(%s) content}}%s{{end}}", t.Name, t.Parameters, t.Content)
-		}
-		return fmt.Sprintf("{{yield %s(%s) %s content}}%s{{end}}", t.Name, t.Parameters, t.Expression, t.Content)
-	}
-
-	if t.Expression == nil {
-		return fmt.Sprintf("{{yield %s(%s)}}", t.Name, t.Parameters)
-	}
-	return fmt.Sprintf("{{yield %s(%s) %s}}", t.Name, t.Parameters, t.Expression)
-}
-
-// IncludeNode represents a {{include }} action.
-type IncludeNode struct {
-	NodeBase
-	Name    Expression
-	Context Expression
-}
-
-func (t *IncludeNode) String() string {
-	if t.Context == nil {
-		return fmt.Sprintf("{{include %s}}", t.Name)
-	}
-	return fmt.Sprintf("{{include %s %s}}", t.Name, t.Context)
-}
-
-type binaryExprNode struct {
-	NodeBase
-	Operator    item
-	Left, Right Expression
-}
-
-func (node *binaryExprNode) String() string {
-	return fmt.Sprintf("%s %s %s", node.Left, node.Operator.val, node.Right)
-}
-
-// AdditiveExprNode represents an add or subtract expression
-// ex: expression ( '+' | '-' ) expression
-type AdditiveExprNode struct {
-	binaryExprNode
-}
-
-// MultiplicativeExprNode represents a multiplication, division, or modulo expression
-// ex: expression ( '*' | '/' | '%' ) expression
-type MultiplicativeExprNode struct {
-	binaryExprNode
-}
-
-// LogicalExprNode represents a boolean expression, 'and' or 'or'
-// ex: expression ( '&&' | '||' ) expression
-type LogicalExprNode struct {
-	binaryExprNode
-}
-
-// ComparativeExprNode represents a comparative expression
-// ex: expression ( '==' | '!=' ) expression
-type ComparativeExprNode struct {
-	binaryExprNode
-}
-
-// NumericComparativeExprNode represents a numeric comparative expression
-// ex: expression ( '<' | '>' | '<=' | '>=' ) expression
-type NumericComparativeExprNode struct {
-	binaryExprNode
-}
-
-// NotExprNode represents a negation expression
-// ex: '!' expression
-type NotExprNode struct {
-	NodeBase
-	Expr Expression
-}
-
-func (s *NotExprNode) String() string {
-	return fmt.Sprintf("!%s", s.Expr)
-}
-
-// CallExprNode represents a call expression
-// ex: expression '(' (expression (',' expression)* )? ')'
-type CallExprNode struct {
-	NodeBase
-	BaseExpr Expression
-	Args     []Expression
-}
-
-func (s *CallExprNode) String() string {
-	arguments := ""
-	for i, expr := range s.Args {
-		if i > 0 {
-			arguments += ", "
-		}
-		arguments += expr.String()
-	}
-	return fmt.Sprintf("%s(%s)", s.BaseExpr, arguments)
-}
-
-// TernaryExprNode represents a ternary expression,
-// ex: expression '?' expression ':' expression
-type TernaryExprNode struct {
-	NodeBase
-	Boolean, Left, Right Expression
-}
-
-func (s *TernaryExprNode) String() string {
-	return fmt.Sprintf("%s?%s:%s", s.Boolean, s.Left, s.Right)
-}
-
-type IndexExprNode struct {
-	NodeBase
-	Base  Expression
-	Index Expression
-}
-
-func (s *IndexExprNode) String() string {
-	return fmt.Sprintf("%s[%s]", s.Base, s.Index)
-}
-
-type SliceExprNode struct {
-	NodeBase
-	Base     Expression
-	Index    Expression
-	EndIndex Expression
-}
-
-func (s *SliceExprNode) String() string {
-	var indexString, endIndexString string
-	if s.Index != nil {
-		indexString = s.Index.String()
-	}
-	if s.EndIndex != nil {
-		endIndexString = s.EndIndex.String()
-	}
-	return fmt.Sprintf("%s[%s:%s]", s.Base, indexString, endIndexString)
-}
-
-type ReturnNode struct {
-	NodeBase
-	Value Expression
-}
-
-func (n *ReturnNode) String() string {
-	return fmt.Sprintf("return %v", n.Value)
-}
-
-type TryNode struct {
-	NodeBase
-	List  *ListNode
-	Catch *catchNode
-}
-
-func (n *TryNode) String() string {
-	if n.Catch != nil {
-		return fmt.Sprintf("{{try}}%s%s", n.List, n.Catch)
-	}
-	return fmt.Sprintf("{{try}}%s{{end}}", n.List)
-}
-
-type catchNode struct {
-	NodeBase
-	Err  *IdentifierNode
-	List *ListNode
-}
-
-func (n *catchNode) String() string {
-	return fmt.Sprintf("{{catch %s}}%s{{end}}", n.Err, n.List)
-}

+ 0 - 1004
vendor/github.com/CloudyKit/jet/v4/parse.go

@@ -1,1004 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jet
-
-import (
-	"bytes"
-	"fmt"
-	"runtime"
-	"strconv"
-	"strings"
-)
-
-func unquote(text string) (string, error) {
-	return strconv.Unquote(text)
-}
-
-// Template is the representation of a single parsed template.
-type Template struct {
-	Name      string // name of the template represented by the tree.
-	ParseName string // name of the top-level template during parsing, for error messages.
-
-	set     *Set
-	extends *Template
-	imports []*Template
-
-	processedBlocks map[string]*BlockNode
-	passedBlocks    map[string]*BlockNode
-	Root            *ListNode // top-level root of the tree.
-
-	text string // text parsed to create the template (or its parent)
-
-	// Parsing only; cleared after parse.
-	lex       *lexer
-	token     [3]item // three-token lookahead for parser.
-	peekCount int
-}
-
-// next returns the next token.
-func (t *Template) next() item {
-	if t.peekCount > 0 {
-		t.peekCount--
-	} else {
-		t.token[0] = t.lex.nextItem()
-	}
-	return t.token[t.peekCount]
-}
-
-// backup backs the input stream up one token.
-func (t *Template) backup() {
-	t.peekCount++
-}
-
-// backup2 backs the input stream up two tokens.
-// The zeroth token is already there.
-func (t *Template) backup2(t1 item) {
-	t.token[1] = t1
-	t.peekCount = 2
-}
-
-// backup3 backs the input stream up three tokens
-// The zeroth token is already there.
-func (t *Template) backup3(t2, t1 item) {
-	// Reverse order: we're pushing back.
-	t.token[1] = t1
-	t.token[2] = t2
-	t.peekCount = 3
-}
-
-// peek returns but does not consume the next token.
-func (t *Template) peek() item {
-	if t.peekCount > 0 {
-		return t.token[t.peekCount-1]
-	}
-	t.peekCount = 1
-	t.token[0] = t.lex.nextItem()
-	return t.token[0]
-}
-
-// nextNonSpace returns the next non-space token.
-func (t *Template) nextNonSpace() (token item) {
-	for {
-		token = t.next()
-		if token.typ != itemSpace {
-			break
-		}
-	}
-	return token
-}
-
-// peekNonSpace returns but does not consume the next non-space token.
-func (t *Template) peekNonSpace() (token item) {
-	for {
-		token = t.next()
-		if token.typ != itemSpace {
-			break
-		}
-	}
-	t.backup()
-	return token
-}
-
-// errorf formats the error and terminates processing.
-func (t *Template) errorf(format string, args ...interface{}) {
-	t.Root = nil
-	format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
-	panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (t *Template) error(err error) {
-	t.errorf("%s", err)
-}
-
-// expect consumes the next token and guarantees it has the required type.
-func (t *Template) expect(expectedType itemType, context, expected string) item {
-	token := t.nextNonSpace()
-	if token.typ != expectedType {
-		t.unexpected(token, context, expected)
-	}
-	return token
-}
-
-func (t *Template) expectRightDelim(context string) item {
-	return t.expect(itemRightDelim, context, "closing delimiter")
-}
-
-// expectOneOf consumes the next token and guarantees it has one of the required types.
-func (t *Template) expectOneOf(expected1, expected2 itemType, context, expectedAs string) item {
-	token := t.nextNonSpace()
-	if token.typ != expected1 && token.typ != expected2 {
-		t.unexpected(token, context, expectedAs)
-	}
-	return token
-}
-
-// unexpected complains about the token and terminates processing.
-func (t *Template) unexpected(token item, context, expected string) {
-	switch {
-	case token.typ == itemImport,
-		token.typ == itemExtends:
-		t.errorf("parsing %s: unexpected keyword '%s' ('%s' statements must be at the beginning of the template)", context, token.val, token.val)
-	case token.typ > itemKeyword:
-		t.errorf("parsing %s: unexpected keyword '%s' (expected %s)", context, token.val, expected)
-	default:
-		t.errorf("parsing %s: unexpected token '%s' (expected %s)", context, token.val, expected)
-	}
-}
-
-// recover is the handler that turns panics into returns from the top level of Parse.
-func (t *Template) recover(errp *error) {
-	e := recover()
-	if e != nil {
-		if _, ok := e.(runtime.Error); ok {
-			panic(e)
-		}
-		if t != nil {
-			t.lex.drain()
-			t.stopParse()
-		}
-		*errp = e.(error)
-	}
-	return
-}
-
-func (s *Set) parse(name, text string) (t *Template, err error) {
-	t = &Template{
-		Name:         name,
-		ParseName:    name,
-		text:         text,
-		set:          s,
-		passedBlocks: make(map[string]*BlockNode),
-	}
-	defer t.recover(&err)
-
-	lexer := lex(name, text, false)
-	lexer.setDelimiters(s.leftDelim, s.rightDelim)
-	lexer.run()
-	t.startParse(lexer)
-	t.parseTemplate()
-	t.stopParse()
-
-	if t.extends != nil {
-		t.addBlocks(t.extends.processedBlocks)
-	}
-
-	for _, _import := range t.imports {
-		t.addBlocks(_import.processedBlocks)
-	}
-
-	t.addBlocks(t.passedBlocks)
-
-	return t, err
-}
-
-func (t *Template) expectString(context string) string {
-	token := t.expectOneOf(itemString, itemRawString, context, "string literal")
-	s, err := unquote(token.val)
-	if err != nil {
-		t.error(err)
-	}
-	return s
-}
-
-// parseTemplate is the top-level parser for a template. It parses the leading
-// {{extends}}/{{import}} clauses and then text or actions. It runs to EOF.
-func (t *Template) parseTemplate() (next Node) {
-	t.Root = t.newList(t.peek().pos)
-	// {{ extends|import stringLiteral }}
-	for t.peek().typ != itemEOF {
-		delim := t.next()
-		if delim.typ == itemText && strings.TrimSpace(delim.val) == "" {
-			continue //skips empty text nodes
-		}
-		if delim.typ == itemLeftDelim {
-			token := t.nextNonSpace()
-			if token.typ == itemExtends || token.typ == itemImport {
-				s := t.expectString("extends|import")
-				if token.typ == itemExtends {
-					if t.extends != nil {
-						t.errorf("Unexpected extends clause: each template can only extend one template")
-					} else if len(t.imports) > 0 {
-						t.errorf("Unexpected extends clause: the 'extends' clause should come before all import clauses")
-					}
-					var err error
-					t.extends, err = t.set.getSiblingTemplate(s, t.Name)
-					if err != nil {
-						t.error(err)
-					}
-				} else {
-					tt, err := t.set.getSiblingTemplate(s, t.Name)
-					if err != nil {
-						t.error(err)
-					}
-					t.imports = append(t.imports, tt)
-				}
-				t.expect(itemRightDelim, "extends|import", "closing delimiter")
-			} else {
-				t.backup2(delim)
-				break
-			}
-		} else {
-			t.backup()
-			break
-		}
-	}
-
-	for t.peek().typ != itemEOF {
-		switch n := t.textOrAction(); n.Type() {
-		case nodeEnd, nodeElse, nodeContent:
-			t.errorf("unexpected %s", n)
-		default:
-			t.Root.append(n)
-		}
-	}
-	return nil
-}
-
-// startParse initializes the parser, using the lexer.
-func (t *Template) startParse(lex *lexer) {
-	t.Root = nil
-	t.lex = lex
-}
-
-// stopParse terminates parsing.
-func (t *Template) stopParse() {
-	t.lex = nil
-}
-
-// IsEmptyTree reports whether this tree (node) is empty of everything but space.
-func IsEmptyTree(n Node) bool {
-	switch n := n.(type) {
-	case nil:
-		return true
-	case *ActionNode:
-	case *IfNode:
-	case *ListNode:
-		for _, node := range n.Nodes {
-			if !IsEmptyTree(node) {
-				return false
-			}
-		}
-		return true
-	case *RangeNode:
-	case *IncludeNode:
-	case *TextNode:
-		return len(bytes.TrimSpace(n.Text)) == 0
-	case *BlockNode:
-	case *YieldNode:
-	default:
-		panic("unknown node: " + n.String())
-	}
-	return false
-}
-
-func (t *Template) blockParametersList(isDeclaring bool, context string) *BlockParameterList {
-	block := &BlockParameterList{}
-
-	t.expect(itemLeftParen, context, "opening parenthesis")
-	for {
-		var expression Expression
-		next := t.nextNonSpace()
-		if next.typ == itemIdentifier {
-			identifier := next.val
-			next2 := t.nextNonSpace()
-			switch next2.typ {
-			case itemComma, itemRightParen:
-				block.List = append(block.List, BlockParameter{Identifier: identifier})
-				next = next2
-			case itemAssign:
-				expression, next = t.parseExpression(context)
-				block.List = append(block.List, BlockParameter{Identifier: identifier, Expression: expression})
-			default:
-				if !isDeclaring {
-					switch next2.typ {
-					case itemComma, itemRightParen:
-					default:
-						t.backup2(next)
-						expression, next = t.parseExpression(context)
-						block.List = append(block.List, BlockParameter{Expression: expression})
-					}
-				} else {
-					t.unexpected(next2, context, "comma, assignment, or closing parenthesis")
-				}
-			}
-		} else if !isDeclaring {
-			switch next.typ {
-			case itemComma, itemRightParen:
-			default:
-				t.backup()
-				expression, next = t.parseExpression(context)
-				block.List = append(block.List, BlockParameter{Expression: expression})
-			}
-		}
-
-		if next.typ != itemComma {
-			t.backup()
-			break
-		}
-	}
-	t.expect(itemRightParen, context, "closing parenthesis")
-	return block
-}
-
-func (t *Template) parseBlock() Node {
-	const context = "block clause"
-	var pipe Expression
-
-	name := t.expect(itemIdentifier, context, "name")
-	bplist := t.blockParametersList(true, context)
-
-	if t.peekNonSpace().typ != itemRightDelim {
-		pipe = t.expression(context, "context")
-	}
-
-	t.expectRightDelim(context)
-
-	list, end := t.itemList(nodeContent, nodeEnd)
-	var contentList *ListNode
-
-	if end.Type() == nodeContent {
-		contentList, end = t.itemList(nodeEnd)
-	}
-
-	block := t.newBlock(name.pos, t.lex.lineNumber(), name.val, bplist, pipe, list, contentList)
-	t.passedBlocks[block.Name] = block
-	return block
-}
-
-func (t *Template) parseYield() Node {
-	const context = "yield clause"
-
-	var (
-		pipe    Expression
-		name    item
-		bplist  *BlockParameterList
-		content *ListNode
-	)
-
-	// parse block name
-	name = t.nextNonSpace()
-	if name.typ == itemContent {
-		// content yield {{yield content}}
-		if t.peekNonSpace().typ != itemRightDelim {
-			pipe = t.expression(context, "content context")
-		}
-		t.expectRightDelim(context)
-		return t.newYield(name.pos, t.lex.lineNumber(), "", nil, pipe, nil, true)
-	} else if name.typ != itemIdentifier {
-		t.unexpected(name, context, "block name")
-	}
-
-	// parse block parameters
-	bplist = t.blockParametersList(false, context)
-
-	// parse optional context & content
-	typ := t.peekNonSpace().typ
-	if typ == itemRightDelim {
-		t.expectRightDelim(context)
-	} else {
-		if typ != itemContent {
-			// parse context expression
-			pipe = t.expression("yield", "context")
-			typ = t.peekNonSpace().typ
-		}
-		if typ == itemRightDelim {
-			t.expectRightDelim(context)
-		} else if typ == itemContent {
-			// parse content from following nodes (until {{end}})
-			t.nextNonSpace()
-			t.expectRightDelim(context)
-			content, _ = t.itemList(nodeEnd)
-		} else {
-			t.unexpected(t.nextNonSpace(), context, "content keyword or closing delimiter")
-		}
-	}
-
-	return t.newYield(name.pos, t.lex.lineNumber(), name.val, bplist, pipe, content, false)
-}
-
-func (t *Template) parseInclude() Node {
-	var context Expression
-	name := t.expression("include", "template name")
-	if t.peekNonSpace().typ != itemRightDelim {
-		context = t.expression("include", "context")
-	}
-	t.expectRightDelim("include invocation")
-	return t.newInclude(name.Position(), t.lex.lineNumber(), name, context)
-}
-
-func (t *Template) parseReturn() Node {
-	value := t.expression("return", "value")
-	t.expectRightDelim("return")
-	return t.newReturn(value.Position(), t.lex.lineNumber(), value)
-}
-
-// itemList:
-//	textOrAction*
-// Terminates at any of the given nodes, returned separately.
-func (t *Template) itemList(terminatedBy ...NodeType) (list *ListNode, next Node) {
-	list = t.newList(t.peekNonSpace().pos)
-	for t.peekNonSpace().typ != itemEOF {
-		n := t.textOrAction()
-		for _, terminatorType := range terminatedBy {
-			if n.Type() == terminatorType {
-				return list, n
-			}
-		}
-		list.append(n)
-	}
-	t.errorf("unexpected EOF")
-	return
-}
-
-// textOrAction:
-//	text | action
-func (t *Template) textOrAction() Node {
-	switch token := t.nextNonSpace(); token.typ {
-	case itemText:
-		return t.newText(token.pos, token.val)
-	case itemLeftDelim:
-		return t.action()
-	default:
-		t.unexpected(token, "input", "text or action")
-	}
-	return nil
-}
-
-func (t *Template) action() (n Node) {
-	switch token := t.nextNonSpace(); token.typ {
-	case itemInclude:
-		return t.parseInclude()
-	case itemBlock:
-		return t.parseBlock()
-	case itemEnd:
-		return t.endControl()
-	case itemYield:
-		return t.parseYield()
-	case itemContent:
-		return t.contentControl()
-	case itemIf:
-		return t.ifControl()
-	case itemElse:
-		return t.elseControl()
-	case itemRange:
-		return t.rangeControl()
-	case itemTry:
-		return t.parseTry()
-	case itemCatch:
-		return t.parseCatch()
-	case itemReturn:
-		return t.parseReturn()
-	}
-
-	t.backup()
-	action := t.newAction(t.peek().pos, t.lex.lineNumber())
-
-	expr := t.assignmentOrExpression("command")
-	if expr.Type() == NodeSet {
-		action.Set = expr.(*SetNode)
-		expr = nil
-		if t.expectOneOf(itemSemicolon, itemRightDelim, "command", "semicolon or right delimiter").typ == itemSemicolon {
-			expr = t.expression("command", "pipeline base expression")
-		}
-	}
-	if expr != nil {
-		action.Pipe = t.pipeline("command", expr)
-	}
-	return action
-}
-
-func (t *Template) logicalExpression(context string) (Expression, item) {
-	left, endtoken := t.comparativeExpression(context)
-	for endtoken.typ == itemAnd || endtoken.typ == itemOr {
-		right, rightendtoken := t.comparativeExpression(context)
-		left, endtoken = t.newLogicalExpr(left.Position(), t.lex.lineNumber(), left, right, endtoken), rightendtoken
-	}
-	return left, endtoken
-}
-
-func (t *Template) parseExpression(context string) (Expression, item) {
-	expression, endtoken := t.logicalExpression(context)
-	if endtoken.typ == itemTernary {
-		var left, right Expression
-		left, endtoken = t.parseExpression(context)
-		if endtoken.typ != itemColon {
-			t.unexpected(endtoken, "ternary expression", "colon in ternary expression")
-		}
-		right, endtoken = t.parseExpression(context)
-		expression = t.newTernaryExpr(expression.Position(), t.lex.lineNumber(), expression, left, right)
-	}
-	return expression, endtoken
-}
-
-func (t *Template) comparativeExpression(context string) (Expression, item) {
-	left, endtoken := t.numericComparativeExpression(context)
-	for endtoken.typ == itemEquals || endtoken.typ == itemNotEquals {
-		right, rightendtoken := t.numericComparativeExpression(context)
-		left, endtoken = t.newComparativeExpr(left.Position(), t.lex.lineNumber(), left, right, endtoken), rightendtoken
-	}
-	return left, endtoken
-}
-
-func (t *Template) numericComparativeExpression(context string) (Expression, item) {
-	left, endtoken := t.additiveExpression(context)
-	for endtoken.typ >= itemGreat && endtoken.typ <= itemLessEquals {
-		right, rightendtoken := t.additiveExpression(context)
-		left, endtoken = t.newNumericComparativeExpr(left.Position(), t.lex.lineNumber(), left, right, endtoken), rightendtoken
-	}
-	return left, endtoken
-}
-
-func (t *Template) additiveExpression(context string) (Expression, item) {
-	left, endtoken := t.multiplicativeExpression(context)
-	for endtoken.typ == itemAdd || endtoken.typ == itemMinus {
-		right, rightendtoken := t.multiplicativeExpression(context)
-		left, endtoken = t.newAdditiveExpr(left.Position(), t.lex.lineNumber(), left, right, endtoken), rightendtoken
-	}
-	return left, endtoken
-}
-
-func (t *Template) multiplicativeExpression(context string) (left Expression, endtoken item) {
-	left, endtoken = t.unaryExpression(context)
-	for endtoken.typ >= itemMul && endtoken.typ <= itemMod {
-		right, rightendtoken := t.unaryExpression(context)
-		left, endtoken = t.newMultiplicativeExpr(left.Position(), t.lex.lineNumber(), left, right, endtoken), rightendtoken
-	}
-
-	return left, endtoken
-}
-
-func (t *Template) unaryExpression(context string) (Expression, item) {
-	next := t.nextNonSpace()
-	switch next.typ {
-	case itemNot:
-		expr, endToken := t.comparativeExpression(context)
-		return t.newNotExpr(expr.Position(), t.lex.lineNumber(), expr), endToken
-	case itemMinus, itemAdd:
-		return t.newAdditiveExpr(next.pos, t.lex.lineNumber(), nil, t.operand("additive expression"), next), t.nextNonSpace()
-	default:
-		t.backup()
-	}
-	operand := t.operand(context)
-	return operand, t.nextNonSpace()
-}
-
-func (t *Template) assignmentOrExpression(context string) (operand Expression) {
-	t.peekNonSpace()
-	line := t.lex.lineNumber()
-	var right, left []Expression
-
-	var isSet bool
-	var isLet bool
-	var returned item
-	operand, returned = t.parseExpression(context)
-	pos := operand.Position()
-	if returned.typ == itemComma || returned.typ == itemAssign {
-		isSet = true
-	} else {
-		if operand == nil {
-			t.unexpected(returned, context, "operand")
-		}
-		t.backup()
-		return operand
-	}
-
-	if isSet {
-	leftloop:
-		for {
-			switch operand.Type() {
-			case NodeField, NodeChain, NodeIdentifier:
-				left = append(left, operand)
-			default:
-				t.errorf("unexpected node in assign")
-			}
-
-			switch returned.typ {
-			case itemComma:
-				operand, returned = t.parseExpression(context)
-			case itemAssign:
-				isLet = returned.val == ":="
-				break leftloop
-			default:
-				t.unexpected(returned, "assignment", "comma or assignment")
-			}
-		}
-
-		if isLet {
-			for _, operand := range left {
-				if operand.Type() != NodeIdentifier {
-					t.errorf("unexpected node type %s in variable declaration", operand)
-				}
-			}
-		}
-
-		for {
-			operand, returned = t.parseExpression("assignment")
-			right = append(right, operand)
-			if returned.typ != itemComma {
-				t.backup()
-				break
-			}
-		}
-
-		var isIndexExprGetLookup bool
-
-		if context == "range" {
-			if len(left) > 2 || len(right) > 1 {
-				t.errorf("unexpected number of operands in assign on range")
-			}
-		} else {
-			if len(left) != len(right) {
-				if len(left) == 2 && len(right) == 1 && right[0].Type() == NodeIndexExpr {
-					isIndexExprGetLookup = true
-				} else {
-					t.errorf("unexpected number of operands in assign")
-				}
-			}
-		}
-		operand = t.newSet(pos, line, isLet, isIndexExprGetLookup, left, right)
-		return
-
-	}
-	return
-}
-
-func (t *Template) expression(context, as string) Expression {
-	expr, tk := t.parseExpression(context)
-	if expr == nil {
-		t.unexpected(tk, context, as)
-	}
-	t.backup()
-	return expr
-}
-
-func (t *Template) pipeline(context string, baseExprMutate Expression) (pipe *PipeNode) {
-	pos := t.peekNonSpace().pos
-	pipe = t.newPipeline(pos, t.lex.lineNumber())
-
-	if baseExprMutate == nil {
-		pipe.errorf("parsing pipeline: first expression cannot be nil")
-	}
-	pipe.append(t.command(baseExprMutate))
-
-	for {
-		token := t.expectOneOf(itemPipe, itemRightDelim, "pipeline", "pipe or right delimiter")
-		if token.typ == itemRightDelim {
-			break
-		}
-		token = t.nextNonSpace()
-		switch token.typ {
-		case itemField, itemIdentifier:
-			t.backup()
-			pipe.append(t.command(nil))
-		default:
-			t.unexpected(token, "pipeline", "field or identifier")
-		}
-	}
-
-	return
-}
-
-func (t *Template) command(baseExpr Expression) *CommandNode {
-	cmd := t.newCommand(t.peekNonSpace().pos)
-
-	if baseExpr == nil {
-		baseExpr = t.expression("command", "name")
-	}
-
-	if baseExpr.Type() == NodeCallExpr {
-		call := baseExpr.(*CallExprNode)
-		cmd.BaseExpr = call.BaseExpr
-		cmd.Args = call.Args
-		return cmd
-	}
-
-	cmd.BaseExpr = baseExpr
-
-	next := t.nextNonSpace()
-	switch next.typ {
-	case itemColon:
-		cmd.Args = t.parseArguments()
-	default:
-		t.backup()
-	}
-
-	if cmd.BaseExpr == nil {
-		t.errorf("empty command")
-	}
-
-	return cmd
-}
-
-// operand:
-//	term .Field*
-// An operand is a space-separated component of a command,
-// a term possibly followed by field accesses.
-// A nil return means the next item is not an operand.
-func (t *Template) operand(context string) Expression {
-	node := t.term()
-	if node == nil {
-		t.unexpected(t.next(), context, "term")
-	}
-RESET:
-	if t.peek().typ == itemField {
-		chain := t.newChain(t.peek().pos, node)
-		for t.peekNonSpace().typ == itemField {
-			chain.Add(t.next().val)
-		}
-		// Compatibility with the original API: if the term is of type NodeField,
-		// just put more fields on the original.
-		// Otherwise, keep the Chain node.
-		// Obvious parsing errors involving literal values are detected here.
-		// More complex error cases will have to be handled at execution time.
-		switch node.Type() {
-		case NodeField:
-			node = t.newField(chain.Position(), chain.String())
-		case NodeBool, NodeString, NodeNumber, NodeNil:
-			t.errorf("unexpected . after term %q", node.String())
-		default:
-			node = chain
-		}
-	}
-	nodeTYPE := node.Type()
-	if nodeTYPE == NodeIdentifier ||
-		nodeTYPE == NodeCallExpr ||
-		nodeTYPE == NodeField ||
-		nodeTYPE == NodeChain ||
-		nodeTYPE == NodeIndexExpr {
-		switch t.nextNonSpace().typ {
-		case itemLeftParen:
-			callExpr := t.newCallExpr(node.Position(), t.lex.lineNumber(), node)
-			callExpr.Args = t.parseArguments()
-			t.expect(itemRightParen, "call expression", "closing parenthesis")
-			node = callExpr
-			goto RESET
-		case itemLeftBrackets:
-			base := node
-			var index Expression
-			var next item
-
-			// a leading colon means this is a slice expression
-			if t.peekNonSpace().typ != itemColon {
-				index, next = t.parseExpression("index|slice expression")
-			} else {
-				next = t.nextNonSpace()
-			}
-
-			switch next.typ {
-			case itemColon:
-				var endIndex Expression
-				if t.peekNonSpace().typ != itemRightBrackets {
-					endIndex = t.expression("slice expression", "end index")
-				}
-				node = t.newSliceExpr(node.Position(), node.line(), base, index, endIndex)
-			case itemRightBrackets:
-				node = t.newIndexExpr(node.Position(), node.line(), base, index)
-				fallthrough
-			default:
-				t.backup()
-			}
-
-			t.expect(itemRightBrackets, "index expression", "closing bracket")
-			goto RESET
-		default:
-			t.backup()
-		}
-	}
-	return node
-}
-
-func (t *Template) parseArguments() []Expression {
-	args := []Expression{}
-	context := "call expression argument list"
-loop:
-	for t.peekNonSpace().typ != itemRightParen {
-		expr, endtoken := t.parseExpression(context)
-		args = append(args, expr)
-		switch endtoken.typ {
-		case itemComma:
-			// continue with closing parens (allowed because of multiline syntax) or next arg
-		default:
-			t.backup()
-			break loop
-		}
-	}
-	return args
-}
-
-func (t *Template) parseControl(allowElseIf bool, context string) (pos Pos, line int, set *SetNode, expression Expression, list, elseList *ListNode) {
-	line = t.lex.lineNumber()
-
-	expression = t.assignmentOrExpression(context)
-	pos = expression.Position()
-	if expression.Type() == NodeSet {
-		set = expression.(*SetNode)
-		if context != "range" {
-			t.expect(itemSemicolon, context, "semicolon between assignment and expression")
-			expression = t.expression(context, "expression after assignment")
-		} else {
-			expression = nil
-		}
-	}
-
-	t.expectRightDelim(context)
-	var next Node
-	list, next = t.itemList(nodeElse, nodeEnd)
-	if next.Type() == nodeElse {
-		if allowElseIf && t.peek().typ == itemIf {
-			// Special case for "else if". If the "else" is followed immediately by an "if",
-			// the elseControl will have left the "if" token pending. Treat
-			//	{{if a}}_{{else if b}}_{{end}}
-			// as
-			//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
-			// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
-			// is assumed. This technique works even for long if-else-if chains.
-			t.next() // Consume the "if" token.
-			elseList = t.newList(next.Position())
-			elseList.append(t.ifControl())
-			// Do not consume the next item - only one {{end}} required.
-		} else {
-			elseList, next = t.itemList(nodeEnd)
-		}
-	}
-	return pos, line, set, expression, list, elseList
-}
-
-// If:
-//	{{if expression}} itemList {{end}}
-//	{{if expression}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Template) ifControl() Node {
-	return t.newIf(t.parseControl(true, "if"))
-}
-
-// Range:
-//	{{range expression}} itemList {{end}}
-//	{{range expression}} itemList {{else}} itemList {{end}}
-// Range keyword is past.
-func (t *Template) rangeControl() Node {
-	return t.newRange(t.parseControl(false, "range"))
-}
-
-// End:
-//	{{end}}
-// End keyword is past.
-func (t *Template) endControl() Node {
-	return t.newEnd(t.expectRightDelim("end").pos)
-}
-
-// Content:
-//	{{content}}
-// Content keyword is past.
-func (t *Template) contentControl() Node {
-	return t.newContent(t.expectRightDelim("content").pos)
-}
-
-// Else:
-//	{{else}}
-// Else keyword is past.
-func (t *Template) elseControl() Node {
-	// Special case for "else if".
-	peek := t.peekNonSpace()
-	if peek.typ == itemIf {
-		// We see "{{else if ..." but in effect rewrite it to "{{else}}{{if ...".
-		return t.newElse(peek.pos, t.lex.lineNumber())
-	}
-	return t.newElse(t.expectRightDelim("else").pos, t.lex.lineNumber())
-}
-
-// Try-catch:
-//	{{try}}
-//    itemList
-//  {{catch <ident>}}
-//    itemList
-//  {{end}}
-// try keyword is past.
-func (t *Template) parseTry() *TryNode {
-	var recov *catchNode
-	line := t.lex.lineNumber()
-	pos := t.expectRightDelim("try").pos
-	list, next := t.itemList(nodeCatch, nodeEnd)
-	if next.Type() == nodeCatch {
-		recov = next.(*catchNode)
-	}
-
-	return t.newTry(pos, line, list, recov)
-}
-
-// catch:
-//  {{catch <ident>}}
-//    itemList
-//  {{end}}
-// catch keyword is past.
-func (t *Template) parseCatch() *catchNode {
-	line := t.lex.lineNumber()
-	var errVar *IdentifierNode
-	peek := t.peekNonSpace()
-	if peek.typ != itemRightDelim {
-		_errVar := t.term()
-		if typ := _errVar.Type(); typ != NodeIdentifier {
-			t.errorf("unexpected node type '%s' in catch", typ)
-		}
-		errVar = _errVar.(*IdentifierNode)
-	}
-	t.expectRightDelim("catch")
-	list, _ := t.itemList(nodeEnd)
-	return t.newCatch(peek.pos, line, errVar, list)
-}
-
-// term:
-//	literal (number, string, nil, boolean)
-//	function (identifier)
-//	.
-//	.Field
-//	variable
-//	'(' expression ')'
-// A term is a simple "expression".
-// A nil return means the next item is not a term.
-func (t *Template) term() Node {
-	switch token := t.nextNonSpace(); token.typ {
-	case itemError:
-		t.errorf("%s", token.val)
-	case itemIdentifier:
-		return t.newIdentifier(token.val, token.pos, t.lex.lineNumber())
-	case itemNil:
-		return t.newNil(token.pos)
-	case itemField:
-		return t.newField(token.pos, token.val)
-	case itemBool:
-		return t.newBool(token.pos, token.val == "true")
-	case itemCharConstant, itemComplex, itemNumber:
-		number, err := t.newNumber(token.pos, token.val, token.typ)
-		if err != nil {
-			t.error(err)
-		}
-		return number
-	case itemLeftParen:
-		pipe := t.expression("parenthesized expression", "expression")
-		if token := t.next(); token.typ != itemRightParen {
-			t.unexpected(token, "parenthesized expression", "closing parenthesis")
-		}
-		return pipe
-	case itemString, itemRawString:
-		s, err := unquote(token.val)
-		if err != nil {
-			t.error(err)
-		}
-		return t.newString(token.pos, token.val, s)
-	}
-	t.backup()
-	return nil
-}

+ 0 - 8
vendor/github.com/CloudyKit/jet/v4/profile.sh

@@ -1,8 +0,0 @@
-#!/usr/bin/env bash
-
-go test -run="^$" -bench="Range" -benchmem -c -cpuprofile=./pprof.out
-go test -run="^$" -bench="Range" -benchmem -cpuprofile=./pprof.out
-go tool pprof --pdf --focus="$1" jet.test pprof.out >> out.pdf
-rm jet.test
-rm pprof.out
-open out.pdf

+ 0 - 152
vendor/github.com/CloudyKit/jet/v4/ranger.go

@@ -1,152 +0,0 @@
-package jet
-
-import (
-	"fmt"
-	"reflect"
-	"sync"
-)
-
-// Ranger describes an interface for types that iterate over something.
-// Implementing this interface means the ranger will be used when it's
-// encountered on the right hand side of a range's "let" expression.
-type Ranger interface {
-	Range() (reflect.Value, reflect.Value, bool)
-	ProvidesIndex() bool
-}
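The `Ranger` interface above is the extension point that the pooled rangers in the rest of this file implement. As a hedged, minimal sketch (not part of the vendored code; the `countdown` type and its fields are invented for illustration), a user-defined ranger could look like this:

```go
package example

import (
	"reflect"

	"github.com/CloudyKit/jet/v4"
)

// countdown is a hypothetical Ranger that yields the values n, n-1, ..., 0
// along with a running index, so ProvidesIndex reports true.
type countdown struct {
	n, i int
}

var _ jet.Ranger = (*countdown)(nil)

func (c *countdown) Range() (index, value reflect.Value, end bool) {
	if c.i > c.n {
		end = true // iteration finished; index and value stay zero Values
		return
	}
	index = reflect.ValueOf(c.i)
	value = reflect.ValueOf(c.n - c.i)
	c.i++
	return
}

func (c *countdown) ProvidesIndex() bool { return true }
```

Because its method set satisfies `Ranger`, a value of this type on the right-hand side of a template `range` would be iterated directly, bypassing the reflection-based rangers defined below.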
-
-type intsRanger struct {
-	i, from, to int
-}
-
-var _ Ranger = &intsRanger{}
-
-func (r *intsRanger) Range() (index, value reflect.Value, end bool) {
-	index = reflect.ValueOf(r.i)
-	value = reflect.ValueOf(r.from + r.i)
-	end = r.i == r.to-r.from
-	r.i++
-	return
-}
-
-func (r *intsRanger) ProvidesIndex() bool { return true }
-
-type pooledRanger interface {
-	Ranger
-	Setup(reflect.Value)
-}
-
-type sliceRanger struct {
-	v reflect.Value
-	i int
-}
-
-var _ Ranger = &sliceRanger{}
-var _ pooledRanger = &sliceRanger{}
-
-func (r *sliceRanger) Setup(v reflect.Value) {
-	r.i = 0
-	r.v = v
-}
-
-func (r *sliceRanger) Range() (index, value reflect.Value, end bool) {
-	if r.i == r.v.Len() {
-		end = true
-		return
-	}
-	index = reflect.ValueOf(r.i)
-	value = r.v.Index(r.i)
-	r.i++
-	return
-}
-
-func (r *sliceRanger) ProvidesIndex() bool { return true }
-
-type mapRanger struct {
-	iter    *reflect.MapIter
-	hasMore bool
-}
-
-var _ Ranger = &mapRanger{}
-var _ pooledRanger = &mapRanger{}
-
-func (r *mapRanger) Setup(v reflect.Value) {
-	r.iter = v.MapRange()
-	r.hasMore = r.iter.Next()
-}
-
-func (r *mapRanger) Range() (key, value reflect.Value, end bool) {
-	if !r.hasMore {
-		end = true
-		return
-	}
-	key, value = r.iter.Key(), r.iter.Value()
-	r.hasMore = r.iter.Next()
-	return
-}
-
-func (r *mapRanger) ProvidesIndex() bool { return true }
-
-type chanRanger struct {
-	v reflect.Value
-}
-
-var _ Ranger = &chanRanger{}
-var _ pooledRanger = &chanRanger{}
-
-func (r *chanRanger) Setup(v reflect.Value) {
-	r.v = v
-}
-
-func (r *chanRanger) Range() (_, value reflect.Value, end bool) {
-	v, ok := r.v.Recv()
-	value, end = v, !ok
-	return
-}
-
-func (r *chanRanger) ProvidesIndex() bool { return false }
-
-// ranger pooling
-
-var (
-	poolSliceRanger = &sync.Pool{
-		New: func() interface{} {
-			return new(sliceRanger)
-		},
-	}
-
-	poolsByKind = map[reflect.Kind]*sync.Pool{
-		reflect.Slice: poolSliceRanger,
-		reflect.Array: poolSliceRanger,
-		reflect.Map: &sync.Pool{
-			New: func() interface{} {
-				return new(mapRanger)
-			},
-		},
-		reflect.Chan: &sync.Pool{
-			New: func() interface{} {
-				return new(chanRanger)
-			},
-		},
-	}
-)
-
-func getRanger(v reflect.Value) (r Ranger, cleanup func()) {
-	t := v.Type()
-	if t.Implements(rangerType) {
-		return v.Interface().(Ranger), func() { /* no cleanup needed */ }
-	}
-
-	v, isNil := indirect(v)
-	if isNil {
-		panic(fmt.Errorf("cannot range over nil pointer/interface (%s)", t))
-	}
-
-	pool, ok := poolsByKind[v.Kind()]
-	if !ok {
-		panic(fmt.Errorf("value %v (type %s) is not rangeable", v, t))
-	}
-
-	pr := pool.Get().(pooledRanger)
-	pr.Setup(v)
-	return pr, func() { pool.Put(pr) }
-}

+ 0 - 11
vendor/github.com/CloudyKit/jet/v4/stress.bash

@@ -1,11 +0,0 @@
-#!/usr/bin/env bash -e
-
-go test -c
-# comment above and uncomment below to enable the race builder
-#go test -c -race
-PKG=$(basename $(pwd))
-
-while true ; do
-        export GOMAXPROCS=$[ 1 + $[ RANDOM % 128 ]]
-        ./$PKG.test $@ 2>&1
-done

+ 0 - 327
vendor/github.com/CloudyKit/jet/v4/template.go

@@ -1,327 +0,0 @@
-// Copyright 2016 José Santos <henrique_1609@me.com>
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Jet is a fast and dynamic template engine for the Go programming language. Its feature set
-// includes very fast template execution, a dynamic and flexible language, template inheritance,
-// a low number of allocations, and special interfaces that allow even further optimizations.
-
-package jet
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"path/filepath"
-	"reflect"
-	"sync"
-	"text/template"
-)
-
-var defaultExtensions = []string{
-	"", // in case the path is given with the correct extension already
-	".jet",
-	".html.jet",
-	".jet.html",
-}
-
-// Set is responsible for loading, parsing, and caching templates and their relations;
-// every jet template is associated with one Set.
-// Create a Set with one of the constructors below (e.g. jet.NewSet); each returns a *Set.
-type Set struct {
-	loader          Loader
-	templates       map[string]*Template // parsed templates
-	escapee         SafeWriter           // escapee to use at runtime
-	globals         VarMap               // global scope for this template set
-	tmx             *sync.RWMutex        // template parsing mutex
-	gmx             *sync.RWMutex        // global variables map mutex
-	extensions      []string
-	developmentMode bool
-	leftDelim       string
-	rightDelim      string
-}
-
-// SetDevelopmentMode sets development mode on or off; in development mode templates are recompiled on every run
-func (s *Set) SetDevelopmentMode(b bool) *Set {
-	s.developmentMode = b
-	return s
-}
-
-func (s *Set) LookupGlobal(key string) (val interface{}, found bool) {
-	s.gmx.RLock()
-	val, found = s.globals[key]
-	s.gmx.RUnlock()
-	return
-}
-
-// AddGlobal adds or sets a global variable in the Set
-func (s *Set) AddGlobal(key string, i interface{}) *Set {
-	s.gmx.Lock()
-	s.globals[key] = reflect.ValueOf(i)
-	s.gmx.Unlock()
-	return s
-}
-
-func (s *Set) AddGlobalFunc(key string, fn Func) *Set {
-	return s.AddGlobal(key, fn)
-}
-
-// NewSetLoader creates a new set with a custom Loader
-func NewSetLoader(escapee SafeWriter, loader Loader) *Set {
-	return &Set{
-		loader:     loader,
-		templates:  map[string]*Template{},
-		escapee:    escapee,
-		globals:    VarMap{},
-		tmx:        &sync.RWMutex{},
-		gmx:        &sync.RWMutex{},
-		extensions: append([]string{}, defaultExtensions...),
-	}
-}
-
-// NewHTMLSetLoader creates a new set with a custom Loader, escaping HTML by default
-func NewHTMLSetLoader(loader Loader) *Set {
-	return NewSetLoader(template.HTMLEscape, loader)
-}
-
-// NewSet creates a new set; dir is the directory to be searched for templates
-func NewSet(escapee SafeWriter, dir string) *Set {
-	return NewSetLoader(escapee, &OSFileSystemLoader{dir: dir})
-}
-
-// NewHTMLSet creates a new set that escapes HTML by default; dir is the directory to be searched for templates
-func NewHTMLSet(dir string) *Set {
-	return NewSet(template.HTMLEscape, dir)
-}
-
-// Delims sets the delimiters to the specified strings. Parsed templates will
-// inherit the settings. Not setting them leaves them at the default: {{ and }}.
-func (s *Set) Delims(left, right string) {
-	s.leftDelim = left
-	s.rightDelim = right
-}
-
-func (s *Set) getTemplateFromCache(path string) (t *Template, ok bool) {
-	// check path with all possible extensions in cache
-	for _, extension := range s.extensions {
-		canonicalPath := path + extension
-		if t, found := s.templates[canonicalPath]; found {
-			return t, true
-		}
-	}
-	return nil, false
-}
-
-func (s *Set) getTemplateFromLoader(path string) (t *Template, err error) {
-	// check path with all possible extensions in loader
-	for _, extension := range s.extensions {
-		canonicalPath := path + extension
-		if _, found := s.loader.Exists(canonicalPath); found {
-			return s.loadFromFile(canonicalPath)
-		}
-	}
-	return nil, fmt.Errorf("template %s could not be found", path)
-}
-
-// GetTemplate tries to find (and load, if not yet loaded) the template at the specified path.
-//
-// for example, GetTemplate("catalog/products.list") with extensions set to []string{"", ".html.jet",".jet"}
-// will try to look for:
-//     1. catalog/products.list
-//     2. catalog/products.list.html.jet
-//     3. catalog/products.list.jet
-// in the set's templates cache, and if it can't find the template it will try to load the same paths via
-// the loader, and, if parsed successfully, cache the template.
-func (s *Set) GetTemplate(path string) (t *Template, err error) {
-	if !s.developmentMode {
-		s.tmx.RLock()
-		t, found := s.getTemplateFromCache(path)
-		if found {
-			s.tmx.RUnlock()
-			return t, nil
-		}
-		s.tmx.RUnlock()
-	}
-
-	t, err = s.getTemplateFromLoader(path)
-	if err == nil && !s.developmentMode {
-		// load template into cache
-		s.tmx.Lock()
-		s.templates[t.Name] = t
-		s.tmx.Unlock()
-	}
-	return t, err
-}
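Tying the constructors, the cached lookup above, and `Execute` together, a minimal usage sketch might look like the following; the `./views` directory and the `index` template name are assumptions made for illustration only:

```go
package main

import (
	"os"

	"github.com/CloudyKit/jet/v4"
)

func main() {
	// Templates are resolved relative to ./views, trying the default
	// extensions ("", ".jet", ".html.jet", ".jet.html") in order.
	views := jet.NewHTMLSet("./views")

	t, err := views.GetTemplate("index")
	if err != nil {
		panic(err)
	}

	// VarMap carries per-execution variables; the last argument becomes the context (dot).
	vars := make(jet.VarMap).Set("title", "Hello, Jet")
	if err := t.Execute(os.Stdout, vars, nil); err != nil {
		panic(err)
	}
}
```

Outside development mode the parsed template is cached in the set, so repeated `GetTemplate` calls for the same path only pay the read-lock cost.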
-
-// same as GetTemplate, but assumes the caller already called s.tmx.RLock(), and
-// doesn't cache a template when found through the loader
-func (s *Set) getTemplate(path string) (t *Template, err error) {
-	if !s.developmentMode {
-		t, found := s.getTemplateFromCache(path)
-		if found {
-			return t, nil
-		}
-	}
-
-	return s.getTemplateFromLoader(path)
-}
-
-func (s *Set) getSiblingTemplate(path, siblingPath string) (t *Template, err error) {
-	if !filepath.IsAbs(filepath.Clean(path)) {
-		siblingDir := filepath.Dir(siblingPath)
-		path = filepath.Join(siblingDir, path)
-	}
-	return s.getTemplate(path)
-}
-
-// Parse parses the template without adding it to the set's templates cache.
-func (s *Set) Parse(path, content string) (*Template, error) {
-	s.tmx.RLock()
-	t, err := s.parse(path, content)
-	s.tmx.RUnlock()
-
-	return t, err
-}
-
-func (s *Set) loadFromFile(path string) (template *Template, err error) {
-	f, err := s.loader.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	content, err := ioutil.ReadAll(f)
-	if err != nil {
-		return nil, err
-	}
-	return s.parse(path, string(content))
-}
-
-func (s *Set) LoadTemplate(path, content string) (*Template, error) {
-	if s.developmentMode {
-		s.tmx.RLock()
-		defer s.tmx.RUnlock()
-		return s.parse(path, content)
-	}
-
-	// fast path (t from cache)
-	s.tmx.RLock()
-	if t, found := s.templates[path]; found {
-		s.tmx.RUnlock()
-		return t, nil
-	}
-	s.tmx.RUnlock()
-
-	// not found, parse and cache
-	s.tmx.Lock()
-	defer s.tmx.Unlock()
-
-	t, err := s.parse(path, content)
-	if err == nil {
-		s.templates[path] = t
-	}
-	return t, err
-}
-
-// SetExtensions sets extensions.
-func (s *Set) SetExtensions(extensions []string) {
-	s.extensions = extensions
-}
-
-func (t *Template) String() (template string) {
-	if t.extends != nil {
-		template += fmt.Sprintf("{{extends %q}}", t.extends.ParseName)
-	}
-
-	for k, _import := range t.imports {
-		if t.extends == nil && k == 0 {
-			template += fmt.Sprintf("{{import %q}}", _import.ParseName)
-		} else {
-			template += fmt.Sprintf("\n{{import %q}}", _import.ParseName)
-		}
-	}
-
-	if t.extends != nil || len(t.imports) > 0 {
-		if len(t.Root.Nodes) > 0 {
-			template += "\n" + t.Root.String()
-		}
-	} else {
-		template += t.Root.String()
-	}
-	return
-}
-
-func (t *Template) addBlocks(blocks map[string]*BlockNode) {
-	if len(blocks) == 0 {
-		return
-	}
-	if t.processedBlocks == nil {
-		t.processedBlocks = make(map[string]*BlockNode)
-	}
-	for key, value := range blocks {
-		t.processedBlocks[key] = value
-	}
-}
-
-type VarMap map[string]reflect.Value
-
-func (scope VarMap) Set(name string, v interface{}) VarMap {
-	scope[name] = reflect.ValueOf(v)
-	return scope
-}
-
-func (scope VarMap) SetFunc(name string, v Func) VarMap {
-	scope[name] = reflect.ValueOf(v)
-	return scope
-}
-
-func (scope VarMap) SetWriter(name string, v SafeWriter) VarMap {
-	scope[name] = reflect.ValueOf(v)
-	return scope
-}
-
-// Execute executes the template, writing the output to w
-func (t *Template) Execute(w io.Writer, variables VarMap, data interface{}) error {
-	return t.ExecuteI18N(nil, w, variables, data)
-}
-
-type Translator interface {
-	Msg(key, defaultValue string) string
-	Trans(format, defaultFormat string, v ...interface{}) string
-}
-
-func (t *Template) ExecuteI18N(translator Translator, w io.Writer, variables VarMap, data interface{}) (err error) {
-	st := pool_State.Get().(*Runtime)
-	defer st.recover(&err)
-
-	st.blocks = t.processedBlocks
-	st.translator = translator
-	st.variables = variables
-	st.set = t.set
-	st.Writer = w
-
-	// resolve extended template
-	for t.extends != nil {
-		t = t.extends
-	}
-
-	if data != nil {
-		st.context = reflect.ValueOf(data)
-	}
-
-	st.executeList(t.Root)
-	return
-}
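`Translator` is a two-method interface, so plugging translations into `ExecuteI18N` only requires a type with `Msg` and `Trans`. A hedged sketch follows; the `mapTranslator` type and its key scheme are invented for illustration and are not part of the vendored code:

```go
package example

import (
	"fmt"
	"io"

	"github.com/CloudyKit/jet/v4"
)

// mapTranslator is a hypothetical Translator backed by a plain map from
// message keys (or format keys) to translated strings.
type mapTranslator map[string]string

func (m mapTranslator) Msg(key, defaultValue string) string {
	if v, ok := m[key]; ok {
		return v
	}
	return defaultValue
}

func (m mapTranslator) Trans(format, defaultFormat string, v ...interface{}) string {
	if f, ok := m[format]; ok {
		return fmt.Sprintf(f, v...)
	}
	return fmt.Sprintf(defaultFormat, v...)
}

// renderLocalized shows how a translator is handed to ExecuteI18N.
func renderLocalized(t *jet.Template, w io.Writer, data interface{}) error {
	tr := mapTranslator{"greeting": "Hallo"}
	return t.ExecuteI18N(tr, w, nil, data)
}
```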

+ 0 - 25
vendor/github.com/ajg/form/.travis.yml

@@ -1,25 +0,0 @@
-## Copyright 2014 Alvaro J. Genial. All rights reserved.
-## Use of this source code is governed by a BSD-style
-## license that can be found in the LICENSE file.
-
-language: go
-
-go:
-  - tip
-  - 1.6
-  - 1.5
-  - 1.4
-  - 1.3
-  # 1.2
-
-before_install:
-  # - go get -v golang.org/x/tools/cmd/cover
-  # - go get -v golang.org/x/tools/cmd/vet
-  # - go get -v golang.org/x/lint/golint
-  - export PATH=$PATH:/home/travis/gopath/bin
-
-script:
-  - go build -v ./...
-  - go test -v -cover ./...
-  - go vet ./...
-  # - golint .

+ 0 - 27
vendor/github.com/ajg/form/LICENSE

@@ -1,27 +0,0 @@
-Copyright (c) 2014 Alvaro J. Genial. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 0 - 247
vendor/github.com/ajg/form/README.md

@@ -1,247 +0,0 @@
-form
-====
-
-A Form Encoding & Decoding Package for Go, written by [Alvaro J. Genial](http://alva.ro).
-
-[![Build Status](https://travis-ci.org/ajg/form.png?branch=master)](https://travis-ci.org/ajg/form)
-[![GoDoc](https://godoc.org/github.com/ajg/form?status.png)](https://godoc.org/github.com/ajg/form)
-
-Synopsis
---------
-
-This library is designed to allow seamless, high-fidelity encoding and decoding of arbitrary data in `application/x-www-form-urlencoded` format and as [`url.Values`](http://golang.org/pkg/net/url/#Values). It is intended to be useful primarily in dealing with web forms and URI query strings, both of which natively employ said format.
-
-Unsurprisingly, `form` is modeled after other Go [`encoding`](http://golang.org/pkg/encoding/) packages, in particular [`encoding/json`](http://golang.org/pkg/encoding/json/), and follows the same conventions (see below for more.) It aims to automatically handle any kind of concrete Go [data value](#values) (i.e., not functions, channels, etc.) while providing mechanisms for custom behavior.
-
-Status
-------
-
-The implementation is in usable shape and is fairly well tested with its accompanying test suite. The API is unlikely to change much, but still may. Lastly, the code has not yet undergone a security review to ensure it is free of vulnerabilities. Please file an issue or send a pull request for fixes & improvements.
-
-Dependencies
-------------
-
-The only requirement is [Go 1.2](http://golang.org/doc/go1.2) or later.
-
-Usage
------
-
-```go
-import "github.com/ajg/form"
-// or: "gopkg.in/ajg/form.v1"
-```
-
-Given a type like the following...
-
-```go
-type User struct {
-	Name         string            `form:"name"`
-	Email        string            `form:"email"`
-	Joined       time.Time         `form:"joined,omitempty"`
-	Posts        []int             `form:"posts"`
-	Preferences  map[string]string `form:"prefs"`
-	Avatar       []byte            `form:"avatar"`
-	PasswordHash int64             `form:"-"`
-}
-```
-
-...it is easy to encode data of that type...
-
-
-```go
-func PostUser(url string, u User) error {
-	var c http.Client
-	_, err := c.PostForm(url, form.EncodeToValues(u))
-	return err
-}
-```
-
-...as well as decode it...
-
-
-```go
-func Handler(w http.ResponseWriter, r *http.Request) {
-	var u User
-
-	d := form.NewDecoder(r.Body)
-	if err := d.Decode(&u); err != nil {
-		http.Error(w, "Form could not be decoded", http.StatusBadRequest)
-		return
-	}
-
-	fmt.Fprintf(w, "Decoded: %#v", u)
-}
-```
-
-...without having to do any grunt work.
-
-Field Tags
-----------
-
-Like other encoding packages, `form` supports the following options for fields:
-
- - `` `form:"-"` ``: Causes the field to be ignored during encoding and decoding.
- - `` `form:"<name>"` ``: Overrides the field's name; useful especially when dealing with external identifiers in camelCase, as are commonly found on the web.
- - `` `form:",omitempty"` ``: Elides the field during encoding if it is empty (typically meaning equal to the type's zero value.)
- - `` `form:"<name>,omitempty"` ``: The way to combine the two options above.
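
As a quick illustration of the tag options above, here is a minimal, hypothetical sketch (the `Account` type is made up for this example) showing how they shape the encoded output:

```go
package main

import (
	"fmt"

	"github.com/ajg/form"
)

// Account exists only to illustrate the field tag options; it is not part of the package.
type Account struct {
	ID    int    `form:"id"`              // renamed field
	Email string `form:"email,omitempty"` // elided when empty
	Token string `form:"-"`               // always ignored
}

func main() {
	s, err := form.EncodeToString(Account{ID: 7, Token: "secret"})
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // Prints "id=7": Email is elided because it is empty, and Token is skipped.
}
```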
-
-Values
-------
-
-### Simple Values
-
-Values of the following types are all considered simple:
-
- - `bool`
- - `int`, `int8`, `int16`, `int32`, `int64`, `rune`
- - `uint`, `uint8`, `uint16`, `uint32`, `uint64`, `byte`
- - `float32`, `float64`
- - `complex64`, `complex128`
- - `string`
- - `[]byte` (see note)
- - [`time.Time`](http://golang.org/pkg/time/#Time)
- - [`url.URL`](http://golang.org/pkg/net/url/#URL)
- - An alias of any of the above
- - A pointer to any of the above
-
-### Composite Values
-
-A composite value is one that can contain other values. Values of the following kinds...
-
- - Maps
- - Slices; except `[]byte` (see note)
- - Structs; except [`time.Time`](http://golang.org/pkg/time/#Time) and [`url.URL`](http://golang.org/pkg/net/url/#URL)
- - Arrays
- - An alias of any of the above
- - A pointer to any of the above
-
-...are considered composites in general, unless they implement custom marshaling/unmarshaling. Composite values are encoded as a flat mapping of paths to values, where the paths are constructed by joining the parent and child paths with a period (`.`).
-
-(Note: a byte slice is treated as a `string` by default because it's more efficient, but can also be decoded as a slice—i.e., with indexes.)
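
To make the path flattening concrete, here is a small, hypothetical fragment (assuming the same imports as the usage example above) showing how a nested struct becomes a dotted key:

```go
type Address struct {
	City string `form:"city"`
}

type Person struct {
	Home Address `form:"home"`
}

s, _ := form.EncodeToString(Person{Home: Address{City: "Lyon"}})
fmt.Println(s) // home.city=Lyon: the parent path "home" and the child path "city" are joined with '.'
```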
-
-### Untyped Values
-
-While encouraged, it is not necessary to define a type (e.g. a `struct`) in order to use `form`, since it is able to encode and decode untyped data generically using the following rules:
-
- - Simple values will be treated as a `string`.
- - Composite values will be treated as a `map[string]interface{}`, itself able to contain nested values (both scalar and compound) ad infinitum.
- - However, if there is a value (of any supported type) already present in a map for a given key, then it will be used when possible, rather than being replaced with a generic value as specified above; this makes it possible to handle partially typed, dynamic or schema-less values.
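
For instance, decoding generically into a plain `map[string]interface{}` (the query string below is purely illustrative) might look like this:

```go
var data map[string]interface{}

if err := form.DecodeString(&data, "name=Alice&prefs.theme=dark"); err != nil {
	// handle the error
}

// data["name"] holds the string "Alice", and data["prefs"] holds a
// map[string]interface{} containing {"theme": "dark"}.
```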
-
-### Zero Values
-
-By default, and without custom marshaling, zero values (also known as empty/default values) are encoded as the empty string. To disable this behavior, meaning to keep zero values in their literal form (e.g. `0` for integral types), `Encoder` offers a `KeepZeros` setter method, which will do just that when set to `true`.
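
A brief sketch of the difference (the `Counter` type is hypothetical, and `bytes` is assumed to be imported):

```go
type Counter struct {
	Name string `form:"name"`
	Hits int    `form:"hits"`
}

c := Counter{Name: "api"} // Hits keeps its zero value

var plain, kept bytes.Buffer
form.NewEncoder(&plain).Encode(c)                // hits=&name=api
form.NewEncoder(&kept).KeepZeros(true).Encode(c) // hits=0&name=api
```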
-
-### Unsupported Values
-
-Values of the following kinds aren't supported and, if present, must be ignored.
-
- - Channel
- - Function
- - Unsafe pointer
- - An alias of any of the above
- - A pointer to any of the above
-
-Custom Marshaling
------------------
-
-There is a default (generally lossless) marshaling & unmarshaling scheme for any concrete data value in Go, which is good enough in most cases. However, it is possible to override it and use a custom scheme. For instance, a "binary" field could be marshaled more efficiently using [base64](http://golang.org/pkg/encoding/base64/) to prevent it from being percent-escaped during serialization to `application/x-www-form-urlencoded` format.
-
-Because `form` provides support for [`encoding.TextMarshaler`](http://golang.org/pkg/encoding/#TextMarshaler) and [`encoding.TextUnmarshaler`](http://golang.org/pkg/encoding/#TextUnmarshaler) it is easy to do that; for instance, like this:
-
-```go
-import "encoding"
-
-type Binary []byte
-
-var (
-	_ encoding.TextMarshaler   = &Binary{}
-	_ encoding.TextUnmarshaler = &Binary{}
-)
-
-func (b Binary) MarshalText() ([]byte, error) {
-	return []byte(base64.URLEncoding.EncodeToString([]byte(b))), nil
-}
-
-func (b *Binary) UnmarshalText(text []byte) error {
-	bs, err := base64.URLEncoding.DecodeString(string(text))
-	if err == nil {
-		*b = Binary(bs)
-	}
-	return err
-}
-```
-
-Now any value with type `Binary` will automatically be encoded using the [URL](http://golang.org/pkg/encoding/base64/#URLEncoding) variant of base64. It is left as an exercise to the reader to improve upon this scheme by eliminating the need for padding (which, besides being superfluous, uses `=`, a character that will end up percent-escaped.)
-
-Keys
-----
-
-In theory any value can be a key as long as it has a string representation. However, by default, periods have special meaning to `form`, and thus, under the hood (i.e. in encoded form) they are transparently escaped using a preceding backslash (`\`). Backslashes within keys, themselves, are also escaped in this manner (e.g. as `\\`) in order to permit representing `\.` itself (as `\\\.`).
-
-(Note: it is normally unnecessary to deal with this issue unless keys are being constructed manually—e.g. literally embedded in HTML or in a URI.)
-
-The default delimiter and escape characters used for encoding and decoding composite keys can be changed using the `DelimitWith` and `EscapeWith` setter methods of `Encoder` and `Decoder`, respectively. For example...
-
-```go
-package main
-
-import (
-	"os"
-
-	"github.com/ajg/form"
-)
-
-func main() {
-	type B struct {
-		Qux string `form:"qux"`
-	}
-	type A struct {
-		FooBar B `form:"foo.bar"`
-	}
-	a := A{FooBar: B{"XYZ"}}
-	os.Stdout.WriteString("Default: ")
-	form.NewEncoder(os.Stdout).Encode(a)
-	os.Stdout.WriteString("\nCustom:  ")
-	form.NewEncoder(os.Stdout).DelimitWith('/').Encode(a)
-	os.Stdout.WriteString("\n")
-}
-
-```
-
-...will produce...
-
-```
-Default: foo%5C.bar.qux=XYZ
-Custom:  foo.bar%2Fqux=XYZ
-```
-
-(`%5C` and `%2F` represent `\` and `/`, respectively.)
-
-Limitations
------------
-
- - Circular (self-referential) values are untested.
-
-Future Work
------------
-
-The following items would be nice to have in the future—though they are not being worked on yet:
-
- - An option to treat all values as if they had been tagged with `omitempty`.
- - An option to automatically treat all field names in `camelCase` or `underscore_case`.
- - Built-in support for the types in [`math/big`](http://golang.org/pkg/math/big/).
- - Built-in support for the types in [`image/color`](http://golang.org/pkg/image/color/).
- - Improve encoding/decoding by reading/writing directly from/to the `io.Reader`/`io.Writer` when possible, rather than going through an intermediate representation (i.e. `node`) which requires more memory.
-
-(Feel free to implement any of these and then send a pull request.)
-
-Related Work
-------------
-
- - Package [gorilla/schema](https://github.com/gorilla/schema), which only implements decoding.
- - Package [google/go-querystring](https://github.com/google/go-querystring), which only implements encoding.
-
-License
--------
-
-This library is distributed under a BSD-style [LICENSE](./LICENSE).

+ 0 - 4
vendor/github.com/ajg/form/TODO.md

@@ -1,4 +0,0 @@
-TODO
-====
-
-  - Document IgnoreCase and IgnoreUnknownKeys in README.
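
Until that documentation lands, a rough sketch of both options (reusing `w`, `r` and `u` from the README's `Handler` example) looks like this:

```go
d := form.NewDecoder(r.Body)
d.IgnoreUnknownKeys(true) // don't error on form keys that have no matching field
d.IgnoreCase(true)        // let "EMAIL=..." populate a field named or tagged "email"

if err := d.Decode(&u); err != nil {
	http.Error(w, "Form could not be decoded", http.StatusBadRequest)
	return
}
```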

+ 0 - 370
vendor/github.com/ajg/form/decode.go

@@ -1,370 +0,0 @@
-// Copyright 2014 Alvaro J. Genial. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package form
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/url"
-	"reflect"
-	"strconv"
-	"time"
-)
-
-// NewDecoder returns a new form Decoder.
-func NewDecoder(r io.Reader) *Decoder {
-	return &Decoder{r, defaultDelimiter, defaultEscape, false, false}
-}
-
-// Decoder decodes data from a form (application/x-www-form-urlencoded).
-type Decoder struct {
-	r             io.Reader
-	d             rune
-	e             rune
-	ignoreUnknown bool
-	ignoreCase    bool
-}
-
-// DelimitWith sets r as the delimiter used for composite keys by Decoder d and returns the latter; it is '.' by default.
-func (d *Decoder) DelimitWith(r rune) *Decoder {
-	d.d = r
-	return d
-}
-
-// EscapeWith sets r as the escape used for delimiters (and to escape itself) by Decoder d and returns the latter; it is '\\' by default.
-func (d *Decoder) EscapeWith(r rune) *Decoder {
-	d.e = r
-	return d
-}
-
-// Decode reads in and decodes form-encoded data into dst.
-func (d Decoder) Decode(dst interface{}) error {
-	bs, err := ioutil.ReadAll(d.r)
-	if err != nil {
-		return err
-	}
-	vs, err := url.ParseQuery(string(bs))
-	if err != nil {
-		return err
-	}
-	v := reflect.ValueOf(dst)
-	return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
-}
-
-// IgnoreUnknownKeys if set to true it will make the Decoder ignore values
-// that are not found in the destination object instead of returning an error.
-func (d *Decoder) IgnoreUnknownKeys(ignoreUnknown bool) {
-	d.ignoreUnknown = ignoreUnknown
-}
-
-// IgnoreCase if set to true it will make the Decoder try to set values in the
-// destination object even if the case does not match.
-func (d *Decoder) IgnoreCase(ignoreCase bool) {
-	d.ignoreCase = ignoreCase
-}
-
-// DecodeString decodes src into dst.
-func (d Decoder) DecodeString(dst interface{}, src string) error {
-	vs, err := url.ParseQuery(src)
-	if err != nil {
-		return err
-	}
-	v := reflect.ValueOf(dst)
-	return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
-}
-
-// DecodeValues decodes vs into dst.
-func (d Decoder) DecodeValues(dst interface{}, vs url.Values) error {
-	v := reflect.ValueOf(dst)
-	return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
-}
-
-// DecodeString decodes src into dst.
-func DecodeString(dst interface{}, src string) error {
-	return NewDecoder(nil).DecodeString(dst, src)
-}
-
-// DecodeValues decodes vs into dst.
-func DecodeValues(dst interface{}, vs url.Values) error {
-	return NewDecoder(nil).DecodeValues(dst, vs)
-}
-
-func (d Decoder) decodeNode(v reflect.Value, n node) (err error) {
-	defer func() {
-		if e := recover(); e != nil {
-			err = fmt.Errorf("%v", e)
-		}
-	}()
-
-	if v.Kind() == reflect.Slice {
-		return fmt.Errorf("could not decode directly into slice; use pointer to slice")
-	}
-	d.decodeValue(v, n)
-	return nil
-}
-
-func (d Decoder) decodeValue(v reflect.Value, x interface{}) {
-	t := v.Type()
-	k := v.Kind()
-
-	if k == reflect.Ptr && v.IsNil() {
-		v.Set(reflect.New(t.Elem()))
-	}
-
-	if unmarshalValue(v, x) {
-		return
-	}
-
-	empty := isEmpty(x)
-
-	switch k {
-	case reflect.Ptr:
-		d.decodeValue(v.Elem(), x)
-		return
-	case reflect.Interface:
-		if !v.IsNil() {
-			d.decodeValue(v.Elem(), x)
-			return
-
-		} else if empty {
-			return // Allow nil interfaces only if empty.
-		} else {
-			panic("form: cannot decode non-empty value into nil interface")
-		}
-	}
-
-	if empty {
-		v.Set(reflect.Zero(t)) // Treat the empty string as the zero value.
-		return
-	}
-
-	switch k {
-	case reflect.Struct:
-		if t.ConvertibleTo(timeType) {
-			d.decodeTime(v, x)
-		} else if t.ConvertibleTo(urlType) {
-			d.decodeURL(v, x)
-		} else {
-			d.decodeStruct(v, x)
-		}
-	case reflect.Slice:
-		d.decodeSlice(v, x)
-	case reflect.Array:
-		d.decodeArray(v, x)
-	case reflect.Map:
-		d.decodeMap(v, x)
-	case reflect.Invalid, reflect.Uintptr, reflect.UnsafePointer, reflect.Chan, reflect.Func:
-		panic(t.String() + " has unsupported kind " + k.String())
-	default:
-		d.decodeBasic(v, x)
-	}
-}
-
-func (d Decoder) decodeStruct(v reflect.Value, x interface{}) {
-	t := v.Type()
-	for k, c := range getNode(x) {
-		if f, ok := findField(v, k, d.ignoreCase); !ok && k == "" {
-			panic(getString(x) + " cannot be decoded as " + t.String())
-		} else if !ok {
-			if !d.ignoreUnknown {
-				panic(k + " doesn't exist in " + t.String())
-			}
-		} else if !f.CanSet() {
-			panic(k + " cannot be set in " + t.String())
-		} else {
-			d.decodeValue(f, c)
-		}
-	}
-}
-
-func (d Decoder) decodeMap(v reflect.Value, x interface{}) {
-	t := v.Type()
-	if v.IsNil() {
-		v.Set(reflect.MakeMap(t))
-	}
-	for k, c := range getNode(x) {
-		i := reflect.New(t.Key()).Elem()
-		d.decodeValue(i, k)
-
-		w := v.MapIndex(i)
-		if w.IsValid() { // We have an actual element value to decode into.
-			if w.Kind() == reflect.Interface {
-				w = w.Elem()
-			}
-			w = reflect.New(w.Type()).Elem()
-		} else if t.Elem().Kind() != reflect.Interface { // The map's element type is concrete.
-			w = reflect.New(t.Elem()).Elem()
-		} else {
-			// The best we can do here is to decode as either a string (for scalars) or a map[string]interface {} (for the rest).
-			// We could try to guess the type based on the string (e.g. true/false => bool) but that'll get ugly fast,
-			// especially if we have to guess the kind (slice vs. array vs. map) and index type (e.g. string, int, etc.)
-			switch c.(type) {
-			case node:
-				w = reflect.MakeMap(stringMapType)
-			case string:
-				w = reflect.New(stringType).Elem()
-			default:
-				panic("value is neither node nor string")
-			}
-		}
-
-		d.decodeValue(w, c)
-		v.SetMapIndex(i, w)
-	}
-}
-
-func (d Decoder) decodeArray(v reflect.Value, x interface{}) {
-	t := v.Type()
-	for k, c := range getNode(x) {
-		i, err := strconv.Atoi(k)
-		if err != nil {
-			panic(k + " is not a valid index for type " + t.String())
-		}
-		if l := v.Len(); i >= l {
-			panic("index is above array size")
-		}
-		d.decodeValue(v.Index(i), c)
-	}
-}
-
-func (d Decoder) decodeSlice(v reflect.Value, x interface{}) {
-	t := v.Type()
-	if t.Elem().Kind() == reflect.Uint8 {
-		// Allow, but don't require, byte slices to be encoded as a single string.
-		if s, ok := x.(string); ok {
-			v.SetBytes([]byte(s))
-			return
-		}
-	}
-
-	// NOTE: Implicit indexing is currently done at the parseValues level,
-	//       so if an implicitKey reaches here it will always replace the last.
-	implicit := 0
-	for k, c := range getNode(x) {
-		var i int
-		if k == implicitKey {
-			i = implicit
-			implicit++
-		} else {
-			explicit, err := strconv.Atoi(k)
-			if err != nil {
-				panic(k + " is not a valid index for type " + t.String())
-			}
-			i = explicit
-			implicit = explicit + 1
-		}
-		// "Extend" the slice if it's too short.
-		if l := v.Len(); i >= l {
-			delta := i - l + 1
-			v.Set(reflect.AppendSlice(v, reflect.MakeSlice(t, delta, delta)))
-		}
-		d.decodeValue(v.Index(i), c)
-	}
-}
-
-func (d Decoder) decodeBasic(v reflect.Value, x interface{}) {
-	t := v.Type()
-	switch k, s := t.Kind(), getString(x); k {
-	case reflect.Bool:
-		if b, e := strconv.ParseBool(s); e == nil {
-			v.SetBool(b)
-		} else {
-			panic("could not parse bool from " + strconv.Quote(s))
-		}
-	case reflect.Int,
-		reflect.Int8,
-		reflect.Int16,
-		reflect.Int32,
-		reflect.Int64:
-		if i, e := strconv.ParseInt(s, 10, 64); e == nil {
-			v.SetInt(i)
-		} else {
-			panic("could not parse int from " + strconv.Quote(s))
-		}
-	case reflect.Uint,
-		reflect.Uint8,
-		reflect.Uint16,
-		reflect.Uint32,
-		reflect.Uint64:
-		if u, e := strconv.ParseUint(s, 10, 64); e == nil {
-			v.SetUint(u)
-		} else {
-			panic("could not parse uint from " + strconv.Quote(s))
-		}
-	case reflect.Float32,
-		reflect.Float64:
-		if f, e := strconv.ParseFloat(s, 64); e == nil {
-			v.SetFloat(f)
-		} else {
-			panic("could not parse float from " + strconv.Quote(s))
-		}
-	case reflect.Complex64,
-		reflect.Complex128:
-		var c complex128
-		if n, err := fmt.Sscanf(s, "%g", &c); n == 1 && err == nil {
-			v.SetComplex(c)
-		} else {
-			panic("could not parse complex from " + strconv.Quote(s))
-		}
-	case reflect.String:
-		v.SetString(s)
-	default:
-		panic(t.String() + " has unsupported kind " + k.String())
-	}
-}
-
-func (d Decoder) decodeTime(v reflect.Value, x interface{}) {
-	t := v.Type()
-	s := getString(x)
-	// TODO: Find a more efficient way to do this.
-	for _, f := range allowedTimeFormats {
-		if p, err := time.Parse(f, s); err == nil {
-			v.Set(reflect.ValueOf(p).Convert(v.Type()))
-			return
-		}
-	}
-	panic("cannot decode string `" + s + "` as " + t.String())
-}
-
-func (d Decoder) decodeURL(v reflect.Value, x interface{}) {
-	t := v.Type()
-	s := getString(x)
-	if u, err := url.Parse(s); err == nil {
-		v.Set(reflect.ValueOf(*u).Convert(v.Type()))
-		return
-	}
-	panic("cannot decode string `" + s + "` as " + t.String())
-}
-
-var allowedTimeFormats = []string{
-	"2006-01-02T15:04:05.999999999Z07:00",
-	"2006-01-02T15:04:05.999999999Z07",
-	"2006-01-02T15:04:05.999999999Z",
-	"2006-01-02T15:04:05.999999999",
-	"2006-01-02T15:04:05Z07:00",
-	"2006-01-02T15:04:05Z07",
-	"2006-01-02T15:04:05Z",
-	"2006-01-02T15:04:05",
-	"2006-01-02T15:04Z",
-	"2006-01-02T15:04",
-	"2006-01-02T15Z",
-	"2006-01-02T15",
-	"2006-01-02",
-	"2006-01",
-	"2006",
-	"15:04:05.999999999Z07:00",
-	"15:04:05.999999999Z07",
-	"15:04:05.999999999Z",
-	"15:04:05.999999999",
-	"15:04:05Z07:00",
-	"15:04:05Z07",
-	"15:04:05Z",
-	"15:04:05",
-	"15:04Z",
-	"15:04",
-	"15Z",
-	"15",
-}

+ 0 - 388
vendor/github.com/ajg/form/encode.go

@@ -1,388 +0,0 @@
-// Copyright 2014 Alvaro J. Genial. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package form
-
-import (
-	"encoding"
-	"errors"
-	"fmt"
-	"io"
-	"net/url"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// NewEncoder returns a new form Encoder.
-func NewEncoder(w io.Writer) *Encoder {
-	return &Encoder{w, defaultDelimiter, defaultEscape, false}
-}
-
-// Encoder provides a way to encode to a Writer.
-type Encoder struct {
-	w io.Writer
-	d rune
-	e rune
-	z bool
-}
-
-// DelimitWith sets r as the delimiter used for composite keys by Encoder e and returns the latter; it is '.' by default.
-func (e *Encoder) DelimitWith(r rune) *Encoder {
-	e.d = r
-	return e
-}
-
-// EscapeWith sets r as the escape used for delimiters (and to escape itself) by Encoder e and returns the latter; it is '\\' by default.
-func (e *Encoder) EscapeWith(r rune) *Encoder {
-	e.e = r
-	return e
-}
-
-// KeepZeros sets whether Encoder e should keep zero (default) values in their literal form when encoding, and returns the former; by default zero values are not kept, but are rather encoded as the empty string.
-func (e *Encoder) KeepZeros(z bool) *Encoder {
-	e.z = z
-	return e
-}
-
-// Encode encodes dst as form and writes it out using the Encoder's Writer.
-func (e Encoder) Encode(dst interface{}) error {
-	v := reflect.ValueOf(dst)
-	n, err := encodeToNode(v, e.z)
-	if err != nil {
-		return err
-	}
-	s := n.values(e.d, e.e).Encode()
-	l, err := io.WriteString(e.w, s)
-	switch {
-	case err != nil:
-		return err
-	case l != len(s):
-		return errors.New("could not write data completely")
-	}
-	return nil
-}
-
-// EncodeToString encodes dst as a form and returns it as a string.
-func EncodeToString(dst interface{}) (string, error) {
-	v := reflect.ValueOf(dst)
-	n, err := encodeToNode(v, false)
-	if err != nil {
-		return "", err
-	}
-	vs := n.values(defaultDelimiter, defaultEscape)
-	return vs.Encode(), nil
-}
-
-// EncodeToValues encodes dst as a form and returns it as Values.
-func EncodeToValues(dst interface{}) (url.Values, error) {
-	v := reflect.ValueOf(dst)
-	n, err := encodeToNode(v, false)
-	if err != nil {
-		return nil, err
-	}
-	vs := n.values(defaultDelimiter, defaultEscape)
-	return vs, nil
-}
-
-func encodeToNode(v reflect.Value, z bool) (n node, err error) {
-	defer func() {
-		if e := recover(); e != nil {
-			err = fmt.Errorf("%v", e)
-		}
-	}()
-	return getNode(encodeValue(v, z)), nil
-}
-
-func encodeValue(v reflect.Value, z bool) interface{} {
-	t := v.Type()
-	k := v.Kind()
-
-	if s, ok := marshalValue(v); ok {
-		return s
-	} else if !z && isEmptyValue(v) {
-		return "" // Treat the zero value as the empty string.
-	}
-
-	switch k {
-	case reflect.Ptr, reflect.Interface:
-		return encodeValue(v.Elem(), z)
-	case reflect.Struct:
-		if t.ConvertibleTo(timeType) {
-			return encodeTime(v)
-		} else if t.ConvertibleTo(urlType) {
-			return encodeURL(v)
-		}
-		return encodeStruct(v, z)
-	case reflect.Slice:
-		return encodeSlice(v, z)
-	case reflect.Array:
-		return encodeArray(v, z)
-	case reflect.Map:
-		return encodeMap(v, z)
-	case reflect.Invalid, reflect.Uintptr, reflect.UnsafePointer, reflect.Chan, reflect.Func:
-		panic(t.String() + " has unsupported kind " + t.Kind().String())
-	default:
-		return encodeBasic(v)
-	}
-}
-
-func encodeStruct(v reflect.Value, z bool) interface{} {
-	t := v.Type()
-	n := node{}
-	for i := 0; i < t.NumField(); i++ {
-		f := t.Field(i)
-		k, oe := fieldInfo(f)
-
-		if k == "-" {
-			continue
-		} else if fv := v.Field(i); oe && isEmptyValue(fv) {
-			delete(n, k)
-		} else {
-			n[k] = encodeValue(fv, z)
-		}
-	}
-	return n
-}
-
-func encodeMap(v reflect.Value, z bool) interface{} {
-	n := node{}
-	for _, i := range v.MapKeys() {
-		k := getString(encodeValue(i, z))
-		n[k] = encodeValue(v.MapIndex(i), z)
-	}
-	return n
-}
-
-func encodeArray(v reflect.Value, z bool) interface{} {
-	n := node{}
-	for i := 0; i < v.Len(); i++ {
-		n[strconv.Itoa(i)] = encodeValue(v.Index(i), z)
-	}
-	return n
-}
-
-func encodeSlice(v reflect.Value, z bool) interface{} {
-	t := v.Type()
-	if t.Elem().Kind() == reflect.Uint8 {
-		return string(v.Bytes()) // Encode byte slices as a single string by default.
-	}
-	n := node{}
-	for i := 0; i < v.Len(); i++ {
-		n[strconv.Itoa(i)] = encodeValue(v.Index(i), z)
-	}
-	return n
-}
-
-func encodeTime(v reflect.Value) string {
-	t := v.Convert(timeType).Interface().(time.Time)
-	if t.Year() == 0 && (t.Month() == 0 || t.Month() == 1) && (t.Day() == 0 || t.Day() == 1) {
-		return t.Format("15:04:05.999999999Z07:00")
-	} else if t.Hour() == 0 && t.Minute() == 0 && t.Second() == 0 && t.Nanosecond() == 0 {
-		return t.Format("2006-01-02")
-	}
-	return t.Format("2006-01-02T15:04:05.999999999Z07:00")
-}
-
-func encodeURL(v reflect.Value) string {
-	u := v.Convert(urlType).Interface().(url.URL)
-	return u.String()
-}
-
-func encodeBasic(v reflect.Value) string {
-	t := v.Type()
-	switch k := t.Kind(); k {
-	case reflect.Bool:
-		return strconv.FormatBool(v.Bool())
-	case reflect.Int,
-		reflect.Int8,
-		reflect.Int16,
-		reflect.Int32,
-		reflect.Int64:
-		return strconv.FormatInt(v.Int(), 10)
-	case reflect.Uint,
-		reflect.Uint8,
-		reflect.Uint16,
-		reflect.Uint32,
-		reflect.Uint64:
-		return strconv.FormatUint(v.Uint(), 10)
-	case reflect.Float32:
-		return strconv.FormatFloat(v.Float(), 'g', -1, 32)
-	case reflect.Float64:
-		return strconv.FormatFloat(v.Float(), 'g', -1, 64)
-	case reflect.Complex64, reflect.Complex128:
-		s := fmt.Sprintf("%g", v.Complex())
-		return strings.TrimSuffix(strings.TrimPrefix(s, "("), ")")
-	case reflect.String:
-		return v.String()
-	}
-	panic(t.String() + " has unsupported kind " + t.Kind().String())
-}
-
-func isEmptyValue(v reflect.Value) bool {
-	switch t := v.Type(); v.Kind() {
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
-		return v.Len() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Complex64, reflect.Complex128:
-		return v.Complex() == 0
-	case reflect.Interface, reflect.Ptr:
-		return v.IsNil()
-	case reflect.Struct:
-		if t.ConvertibleTo(timeType) {
-			return v.Convert(timeType).Interface().(time.Time).IsZero()
-		}
-		return reflect.DeepEqual(v, reflect.Zero(t))
-	}
-	return false
-}
-
-// canIndexOrdinally returns whether a value contains an ordered sequence of elements.
-func canIndexOrdinally(v reflect.Value) bool {
-	if !v.IsValid() {
-		return false
-	}
-	switch t := v.Type(); t.Kind() {
-	case reflect.Ptr, reflect.Interface:
-		return canIndexOrdinally(v.Elem())
-	case reflect.Slice, reflect.Array:
-		return true
-	}
-	return false
-}
-
-func fieldInfo(f reflect.StructField) (k string, oe bool) {
-	if f.PkgPath != "" { // Skip private fields.
-		return omittedKey, oe
-	}
-
-	k = f.Name
-	tag := f.Tag.Get("form")
-	if tag == "" {
-		return k, oe
-	}
-
-	ps := strings.SplitN(tag, ",", 2)
-	if ps[0] != "" {
-		k = ps[0]
-	}
-	if len(ps) == 2 {
-		oe = ps[1] == "omitempty"
-	}
-	return k, oe
-}
-
-func findField(v reflect.Value, n string, ignoreCase bool) (reflect.Value, bool) {
-	t := v.Type()
-	l := v.NumField()
-
-	var lowerN string
-	caseInsensitiveMatch := -1
-	if ignoreCase {
-		lowerN = strings.ToLower(n)
-	}
-
-	// First try named fields.
-	for i := 0; i < l; i++ {
-		f := t.Field(i)
-		k, _ := fieldInfo(f)
-		if k == omittedKey {
-			continue
-		} else if n == k {
-			return v.Field(i), true
-		} else if ignoreCase && lowerN == strings.ToLower(k) {
-			caseInsensitiveMatch = i
-		}
-	}
-
-	// If no exact match was found try case insensitive match.
-	if caseInsensitiveMatch != -1 {
-		return v.Field(caseInsensitiveMatch), true
-	}
-
-	// Then try anonymous (embedded) fields.
-	for i := 0; i < l; i++ {
-		f := t.Field(i)
-		k, _ := fieldInfo(f)
-		if k == omittedKey || !f.Anonymous { // || k != "" ?
-			continue
-		}
-		fv := v.Field(i)
-		fk := fv.Kind()
-		for fk == reflect.Ptr || fk == reflect.Interface {
-			fv = fv.Elem()
-			fk = fv.Kind()
-		}
-
-		if fk != reflect.Struct {
-			continue
-		}
-		if ev, ok := findField(fv, n, ignoreCase); ok {
-			return ev, true
-		}
-	}
-
-	return reflect.Value{}, false
-}
-
-var (
-	stringType    = reflect.TypeOf(string(""))
-	stringMapType = reflect.TypeOf(map[string]interface{}{})
-	timeType      = reflect.TypeOf(time.Time{})
-	timePtrType   = reflect.TypeOf(&time.Time{})
-	urlType       = reflect.TypeOf(url.URL{})
-)
-
-func skipTextMarshalling(t reflect.Type) bool {
-	/*// Skip time.Time because its text unmarshaling is overly rigid:
-	return t == timeType || t == timePtrType*/
-	// Skip time.Time & convertibles because its text unmarshaling is overly rigid:
-	return t.ConvertibleTo(timeType) || t.ConvertibleTo(timePtrType)
-}
-
-func unmarshalValue(v reflect.Value, x interface{}) bool {
-	if skipTextMarshalling(v.Type()) {
-		return false
-	}
-
-	tu, ok := v.Interface().(encoding.TextUnmarshaler)
-	if !ok && !v.CanAddr() {
-		return false
-	} else if !ok {
-		return unmarshalValue(v.Addr(), x)
-	}
-
-	s := getString(x)
-	if err := tu.UnmarshalText([]byte(s)); err != nil {
-		panic(err)
-	}
-	return true
-}
-
-func marshalValue(v reflect.Value) (string, bool) {
-	if skipTextMarshalling(v.Type()) {
-		return "", false
-	}
-
-	tm, ok := v.Interface().(encoding.TextMarshaler)
-	if !ok && !v.CanAddr() {
-		return "", false
-	} else if !ok {
-		return marshalValue(v.Addr())
-	}
-
-	bs, err := tm.MarshalText()
-	if err != nil {
-		panic(err)
-	}
-	return string(bs), true
-}

+ 0 - 14
vendor/github.com/ajg/form/form.go

@@ -1,14 +0,0 @@
-// Copyright 2014 Alvaro J. Genial. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package form implements encoding and decoding of application/x-www-form-urlencoded data.
-package form
-
-const (
-	implicitKey = "_"
-	omittedKey  = "-"
-
-	defaultDelimiter = '.'
-	defaultEscape    = '\\'
-)

+ 0 - 152
vendor/github.com/ajg/form/node.go

@@ -1,152 +0,0 @@
-// Copyright 2014 Alvaro J. Genial. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package form
-
-import (
-	"net/url"
-	"strconv"
-	"strings"
-)
-
-type node map[string]interface{}
-
-func (n node) values(d, e rune) url.Values {
-	vs := url.Values{}
-	n.merge(d, e, "", &vs)
-	return vs
-}
-
-func (n node) merge(d, e rune, p string, vs *url.Values) {
-	for k, x := range n {
-		switch y := x.(type) {
-		case string:
-			vs.Add(p+escape(d, e, k), y)
-		case node:
-			y.merge(d, e, p+escape(d, e, k)+string(d), vs)
-		default:
-			panic("value is neither string nor node")
-		}
-	}
-}
-
-// TODO: Add tests for implicit indexing.
-func parseValues(d, e rune, vs url.Values, canIndexFirstLevelOrdinally bool) node {
-	// NOTE: Because of the flattening of potentially multiple strings to one key, implicit indexing works:
-	//    i. At the first level;   e.g. Foo.Bar=A&Foo.Bar=B     becomes 0.Foo.Bar=A&1.Foo.Bar=B
-	//   ii. At the last level;    e.g. Foo.Bar._=A&Foo.Bar._=B becomes Foo.Bar.0=A&Foo.Bar.1=B
-	// TODO: At in-between levels; e.g. Foo._.Bar=A&Foo._.Bar=B becomes Foo.0.Bar=A&Foo.1.Bar=B
-	//       (This last one requires that there only be one placeholder in order for it to be unambiguous.)
-
-	m := map[string]string{}
-	for k, ss := range vs {
-		indexLastLevelOrdinally := strings.HasSuffix(k, string(d)+implicitKey)
-
-		for i, s := range ss {
-			if canIndexFirstLevelOrdinally {
-				k = strconv.Itoa(i) + string(d) + k
-			} else if indexLastLevelOrdinally {
-				k = strings.TrimSuffix(k, implicitKey) + strconv.Itoa(i)
-			}
-
-			m[k] = s
-		}
-	}
-
-	n := node{}
-	for k, s := range m {
-		n = n.split(d, e, k, s)
-	}
-	return n
-}
-
-func splitPath(d, e rune, path string) (k, rest string) {
-	esc := false
-	for i, r := range path {
-		switch {
-		case !esc && r == e:
-			esc = true
-		case !esc && r == d:
-			return unescape(d, e, path[:i]), path[i+1:]
-		default:
-			esc = false
-		}
-	}
-	return unescape(d, e, path), ""
-}
-
-func (n node) split(d, e rune, path, s string) node {
-	k, rest := splitPath(d, e, path)
-	if rest == "" {
-		return add(n, k, s)
-	}
-	if _, ok := n[k]; !ok {
-		n[k] = node{}
-	}
-
-	c := getNode(n[k])
-	n[k] = c.split(d, e, rest, s)
-	return n
-}
-
-func add(n node, k, s string) node {
-	if n == nil {
-		return node{k: s}
-	}
-
-	if _, ok := n[k]; ok {
-		panic("key " + k + " already set")
-	}
-
-	n[k] = s
-	return n
-}
-
-func isEmpty(x interface{}) bool {
-	switch y := x.(type) {
-	case string:
-		return y == ""
-	case node:
-		if s, ok := y[""].(string); ok {
-			return s == ""
-		}
-		return false
-	}
-	panic("value is neither string nor node")
-}
-
-func getNode(x interface{}) node {
-	switch y := x.(type) {
-	case string:
-		return node{"": y}
-	case node:
-		return y
-	}
-	panic("value is neither string nor node")
-}
-
-func getString(x interface{}) string {
-	switch y := x.(type) {
-	case string:
-		return y
-	case node:
-		if s, ok := y[""].(string); ok {
-			return s
-		}
-		return ""
-	}
-	panic("value is neither string nor node")
-}
-
-func escape(d, e rune, s string) string {
-	s = strings.Replace(s, string(e), string(e)+string(e), -1) // Escape the escape    (\ => \\)
-	s = strings.Replace(s, string(d), string(e)+string(d), -1) // Escape the delimiter (. => \.)
-	return s
-}
-
-func unescape(d, e rune, s string) string {
-	s = strings.Replace(s, string(e)+string(d), string(d), -1) // Unescape the delimiter (\. => .)
-	s = strings.Replace(s, string(e)+string(e), string(e), -1) // Unescape the escape    (\\ => \)
-	return s
-}

+ 0 - 18
vendor/github.com/ajg/form/pre-commit.sh

@@ -1,18 +0,0 @@
-#!/bin/bash -eu
-
-# TODO: Only colorize messages given a suitable terminal.
-# FIXME: Handle case in which no stash entry is created due to no changes.
-
-printf "\e[30m=== PRE-COMMIT STARTING ===\e[m\n"
-git stash save --quiet --keep-index --include-untracked
-
-if go build -v ./... && go test -v -cover ./... && go vet ./... && golint . && travis-lint; then
-  result=$?
-  printf "\e[32m=== PRE-COMMIT SUCCEEDED ===\e[m\n"
-else
-  result=$?
-  printf "\e[31m=== PRE-COMMIT FAILED ===\e[m\n"
-fi
-
-git stash pop --quiet
-exit $result

+ 0 - 3
vendor/github.com/aymerick/raymond/.gitmodules

@@ -1,3 +0,0 @@
-[submodule "mustache"]
-	path = mustache
-	url = git://github.com/mustache/spec.git

+ 0 - 10
vendor/github.com/aymerick/raymond/.travis.yml

@@ -1,10 +0,0 @@
----
-language: go
-
-go:
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
-  - 1.7
-  - tip

+ 0 - 46
vendor/github.com/aymerick/raymond/BENCHMARKS.md

@@ -1,46 +0,0 @@
-# Benchmarks
-
-Hardware: MacBookPro11,1 - Intel Core i5 - 2.6 GHz - 8 GB RAM
-
-With:
-
-    - handlebars.js #8cba84df119c317fcebc49fb285518542ca9c2d0
-    - raymond #7bbaaf50ed03c96b56687d7fa6c6e04e02375a98
-
-
-## handlebars.js (ops/ms)
-
-        arguments          198 ±4 (5)
-        array-each         568 ±23 (5)
-        array-mustache     522 ±18 (4)
-        complex             71 ±7 (3)
-        data                67 ±2 (3)
-        depth-1             47 ±2 (3)
-        depth-2             14 ±1 (2)
-        object-mustache   1099 ±47 (5)
-        object             907 ±58 (4)
-        partial-recursion   46 ±3 (4)
-        partial             68 ±3 (3)
-        paths             1650 ±50 (3)
-        string            2552 ±157 (3)
-        subexpression      141 ±2 (4)
-        variables         2671 ±83 (4)
-
-
-## raymond
-
-        BenchmarkArguments          200000     6642 ns/op   151 ops/ms
-        BenchmarkArrayEach          100000    19584 ns/op    51 ops/ms
-        BenchmarkArrayMustache      100000    17305 ns/op    58 ops/ms
-        BenchmarkComplex            30000     50270 ns/op    20 ops/ms
-        BenchmarkData               50000     25551 ns/op    39 ops/ms
-        BenchmarkDepth1             100000    20162 ns/op    50 ops/ms
-        BenchmarkDepth2             30000     47782 ns/op    21 ops/ms
-        BenchmarkObjectMustache     200000     7668 ns/op   130 ops/ms
-        BenchmarkObject             200000     8843 ns/op   113 ops/ms
-        BenchmarkPartialRecursion   50000     23139 ns/op    43 ops/ms
-        BenchmarkPartial            50000     31015 ns/op    32 ops/ms
-        BenchmarkPath               200000     8997 ns/op   111 ops/ms
-        BenchmarkString             1000000    1879 ns/op   532 ops/ms
-        BenchmarkSubExpression      300000     4935 ns/op   203 ops/ms
-        BenchmarkVariables          200000     6478 ns/op   154 ops/ms

+ 0 - 42
vendor/github.com/aymerick/raymond/CHANGELOG.md

@@ -1,42 +0,0 @@
-# Raymond Changelog
-
-### HEAD
-
-- [IMPROVEMENT] Add `RemoveHelper` and `RemoveAllHelpers` functions
-
-### Raymond 2.0.2 _(March 22, 2018)_
-
-- [IMPROVEMENT] Add the #equal helper (#7)
-- [IMPROVEMENT] Add struct tag template variable support (#8)
-
-### Raymond 2.0.1 _(June 01, 2016)_
-
-- [BUGFIX] Removes data races [#3](https://github.com/aymerick/raymond/issues/3) - Thanks [@markbates](https://github.com/markbates)
-
-### Raymond 2.0.0 _(May 01, 2016)_
-
-- [BUGFIX] Fixes passing of context in helper options [#2](https://github.com/aymerick/raymond/issues/2) - Thanks [@GhostRussia](https://github.com/GhostRussia)
-- [BREAKING] Renames and unexports constants:
-
-  - `handlebars.DUMP_TPL`
-  - `lexer.ESCAPED_ESCAPED_OPEN_MUSTACHE`
-  - `lexer.ESCAPED_OPEN_MUSTACHE`
-  - `lexer.OPEN_MUSTACHE`
-  - `lexer.CLOSE_MUSTACHE`
-  - `lexer.CLOSE_STRIP_MUSTACHE`
-  - `lexer.CLOSE_UNESCAPED_STRIP_MUSTACHE`
-  - `lexer.DUMP_TOKEN_POS`
-  - `lexer.DUMP_ALL_TOKENS_VAL`
-
-
-### Raymond 1.1.0 _(June 15, 2015)_
-
-- Permits template references using lowercase versions of struct field names.
-- Adds `ParseFile()` function.
-- Adds `RegisterPartialFile()`, `RegisterPartialFiles()` and `Clone()` methods on `Template`.
-- Helpers can now be struct methods.
-- Ensures safe concurrent access to helpers and partials.
-
-### Raymond 1.0.0 _(June 09, 2015)_
-
-- This is the first release. Raymond supports almost all handlebars features. See https://github.com/aymerick/raymond#limitations for a list of differences with the javascript implementation.

+ 0 - 22
vendor/github.com/aymerick/raymond/LICENSE

@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Aymerick JEHANNE
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-

+ 0 - 1427
vendor/github.com/aymerick/raymond/README.md

@@ -1,1427 +0,0 @@
-# raymond [![Build Status](https://secure.travis-ci.org/aymerick/raymond.svg?branch=master)](http://travis-ci.org/aymerick/raymond) [![GoDoc](https://godoc.org/github.com/aymerick/raymond?status.svg)](http://godoc.org/github.com/aymerick/raymond)
-
-Handlebars for [golang](https://golang.org) with the same features as [handlebars.js](http://handlebarsjs.com) `3.0`.
-
-The full API documentation is available here: <http://godoc.org/github.com/aymerick/raymond>.
-
-![Raymond Logo](https://github.com/aymerick/raymond/blob/master/raymond.png?raw=true "Raymond")
-
-
-# Table of Contents
-
-- [Quick Start](#quick-start)
-- [Correct Usage](#correct-usage)
-- [Context](#context)
-- [HTML Escaping](#html-escaping)
-- [Helpers](#helpers)
-  - [Template Helpers](#template-helpers)
-  - [Built-In Helpers](#built-in-helpers)
-    - [The `if` block helper](#the-if-block-helper)
-    - [The `unless` block helper](#the-unless-block-helper)
-    - [The `each` block helper](#the-each-block-helper)
-    - [The `with` block helper](#the-with-block-helper)
-    - [The `lookup` helper](#the-lookup-helper)
-    - [The `log` helper](#the-log-helper)
-    - [The `equal` helper](#the-equal-helper)
-  - [Block Helpers](#block-helpers)
-    - [Block Evaluation](#block-evaluation)
-    - [Conditional](#conditional)
-    - [Else Block Evaluation](#else-block-evaluation)
-    - [Block Parameters](#block-parameters)
-  - [Helper Parameters](#helper-parameters)
-    - [Automatic conversion](#automatic-conversion)
-  - [Options Argument](#options-argument)
-    - [Context Values](#context-values)
-    - [Helper Hash Arguments](#helper-hash-arguments)
-    - [Private Data](#private-data)
-  - [Utilities](#utilites)
-    - [`Str()`](#str)
-    - [`IsTrue()`](#istrue)
-- [Context Functions](#context-functions)
-- [Partials](#partials)
-  - [Template Partials](#template-partials)
-  - [Global Partials](#global-partials)
-  - [Dynamic Partials](#dynamic-partials)
-  - [Partial Contexts](#partial-contexts)
-  - [Partial Parameters](#partial-parameters)
-- [Utility Functions](#utility-functions)
-- [Mustache](#mustache)
-- [Limitations](#limitations)
-- [Handlebars Lexer](#handlebars-lexer)
-- [Handlebars Parser](#handlebars-parser)
-- [Test](#test)
-- [References](#references)
-- [Others Implementations](#others-implementations)
-
-
-## Quick Start
-
-    $ go get github.com/aymerick/raymond
-
-The quick and dirty way of rendering a handlebars template:
-
-```go
-package main
-
-import (
-    "fmt"
-
-    "github.com/aymerick/raymond"
-)
-
-func main() {
-    tpl := `<div class="entry">
-  <h1>{{title}}</h1>
-  <div class="body">
-    {{body}}
-  </div>
-</div>
-`
-
-    ctx := map[string]string{
-        "title": "My New Post",
-        "body":  "This is my first post!",
-    }
-
-    result, err := raymond.Render(tpl, ctx)
-    if err != nil {
-        panic("Please report a bug :)")
-    }
-
-    fmt.Print(result)
-}
-```
-
-Displays:
-
-```html
-<div class="entry">
-  <h1>My New Post</h1>
-  <div class="body">
-    This is my first post!
-  </div>
-</div>
-```
-
-Please note that the template is parsed every time you call the `Render()` function, so you probably want to read the next section.
-
-
-## Correct Usage
-
-To avoid parsing a template several times, use the `Parse()` and `Exec()` functions:
-
-```go
-package main
-
-import (
-    "fmt"
-
-    "github.com/aymerick/raymond"
-)
-
-func main() {
-    source := `<div class="entry">
-  <h1>{{title}}</h1>
-  <div class="body">
-    {{body}}
-  </div>
-</div>
-`
-
-    ctxList := []map[string]string{
-        {
-            "title": "My New Post",
-            "body":  "This is my first post!",
-        },
-        {
-            "title": "Here is another post",
-            "body":  "This is my second post!",
-        },
-    }
-
-    // parse template
-    tpl, err := raymond.Parse(source)
-    if err != nil {
-        panic(err)
-    }
-
-    for _, ctx := range ctxList {
-        // render template
-        result, err := tpl.Exec(ctx)
-        if err != nil {
-            panic(err)
-        }
-
-        fmt.Print(result)
-    }
-}
-
-```
-
-Displays:
-
-```html
-<div class="entry">
-  <h1>My New Post</h1>
-  <div class="body">
-    This is my first post!
-  </div>
-</div>
-<div class="entry">
-  <h1>Here is another post</h1>
-  <div class="body">
-    This is my second post!
-  </div>
-</div>
-```
-
-You can use `MustParse()` and `MustExec()` functions if you don't want to deal with errors:
-
-```go
-// parse template
-tpl := raymond.MustParse(source)
-
-// render template
-result := tpl.MustExec(ctx)
-```
-
-
-## Context
-
-The rendering context can contain any type of values, including `array`, `slice`, `map`, `struct` and `func`.
-
-When using structs, be warned that only exported fields are accessible. However, you can reference exported fields in a template by their lowercase names. For example, both `{{author.firstName}}` and `{{Author.FirstName}}` references give the same result, as long as `Author` and `FirstName` are exported struct fields.
-
-Moreover, you can use the `handlebars` struct tag to specify a template variable name different from the struct field name.
-
-```go
-package main
-
-import (
-  "fmt"
-
-  "github.com/aymerick/raymond"
-)
-
-func main() {
-    source := `<div class="post">
-  <h1>By {{author.firstName}} {{author.lastName}}</h1>
-  <div class="body">{{body}}</div>
-
-  <h1>Comments</h1>
-
-  {{#each comments}}
-  <h2>By {{author.firstName}} {{author.lastName}}</h2>
-  <div class="body">{{content}}</div>
-  {{/each}}
-</div>`
-
-    type Person struct {
-        FirstName string
-        LastName  string
-    }
-
-    type Comment struct {
-        Author Person
-        Body   string `handlebars:"content"`
-    }
-
-    type Post struct {
-        Author   Person
-        Body     string
-        Comments []Comment
-    }
-
-    ctx := Post{
-        Person{"Jean", "Valjean"},
-        "Life is difficult",
-        []Comment{
-            Comment{
-                Person{"Marcel", "Beliveau"},
-                "LOL!",
-            },
-        },
-    }
-
-    output := raymond.MustRender(source, ctx)
-
-    fmt.Print(output)
-}
-```
-
-Output:
-
-```html
-<div class="post">
-  <h1>By Jean Valjean</h1>
-  <div class="body">Life is difficult</div>
-
-  <h1>Comments</h1>
-
-  <h2>By Marcel Beliveau</h2>
-  <div class="body">LOL!</div>
-</div>
-```
-
-## HTML Escaping
-
-By default, the result of a mustache expression is HTML escaped. Use the triple mustache `{{{` to output unescaped values.
-
-```go
-source := `<div class="entry">
-  <h1>{{title}}</h1>
-  <div class="body">
-    {{{body}}}
-  </div>
-</div>
-`
-
-ctx := map[string]string{
-    "title": "All about <p> Tags",
-    "body":  "<p>This is a post about &lt;p&gt; tags</p>",
-}
-
-tpl := raymond.MustParse(source)
-result := tpl.MustExec(ctx)
-
-fmt.Print(result)
-```
-
-Output:
-
-```html
-<div class="entry">
-  <h1>All about &lt;p&gt; Tags</h1>
-  <div class="body">
-    <p>This is a post about &lt;p&gt; tags</p>
-  </div>
-</div>
-```
-
-When returning HTML from a helper, you should return a `SafeString` if you don't want it to be escaped by default. When using `SafeString` all unknown or unsafe data should be manually escaped with the `Escape` method.
-
-```go
-raymond.RegisterHelper("link", func(url, text string) raymond.SafeString {
-    return raymond.SafeString("<a href='" + raymond.Escape(url) + "'>" + raymond.Escape(text) + "</a>")
-})
-
-tpl := raymond.MustParse("{{link url text}}")
-
-ctx := map[string]string{
-    "url":  "http://www.aymerick.com/",
-    "text": "This is a <em>cool</em> website",
-}
-
-result := tpl.MustExec(ctx)
-fmt.Print(result)
-```
-
-Output:
-
-```html
-<a href='http://www.aymerick.com/'>This is a &lt;em&gt;cool&lt;/em&gt; website</a>
-```
-
-
-## Helpers
-
-Helpers can be accessed from any context in a template. You can register a helper with the `RegisterHelper` function.
-
-For example:
-
-```html
-<div class="post">
-  <h1>By {{fullName author}}</h1>
-  <div class="body">{{body}}</div>
-
-  <h1>Comments</h1>
-
-  {{#each comments}}
-  <h2>By {{fullName author}}</h2>
-  <div class="body">{{body}}</div>
-  {{/each}}
-</div>
-```
-
-With this context and helper:
-
-```go
-ctx := map[string]interface{}{
-    "author": map[string]string{"firstName": "Jean", "lastName": "Valjean"},
-    "body":   "Life is difficult",
-    "comments": []map[string]interface{}{{
-        "author": map[string]string{"firstName": "Marcel", "lastName": "Beliveau"},
-        "body":   "LOL!",
-    }},
-}
-
-raymond.RegisterHelper("fullName", func(person map[string]string) string {
-    return person["firstName"] + " " + person["lastName"]
-})
-```
-
-Outputs:
-
-```html
-<div class="post">
-  <h1>By Jean Valjean</h1>
-  <div class="body">Life is difficult</div>
-
-  <h1>Comments</h1>
-
-  <h2>By Marcel Beliveau</h2>
-  <div class="body">LOL!</div>
-</div>
-```
-
-Helper arguments can be any type.
-
-The following example uses structs instead of maps and produces the same output as the previous one:
-
-```html
-<div class="post">
-  <h1>By {{fullName author}}</h1>
-  <div class="body">{{body}}</div>
-
-  <h1>Comments</h1>
-
-  {{#each comments}}
-  <h2>By {{fullName author}}</h2>
-  <div class="body">{{body}}</div>
-  {{/each}}
-</div>
-```
-
-With this context and helper:
-
-```go
-type Post struct {
-    Author   Person
-    Body     string
-    Comments []Comment
-}
-
-type Person struct {
-    FirstName string
-    LastName  string
-}
-
-type Comment struct {
-    Author Person
-    Body   string
-}
-
-ctx := Post{
-    Person{"Jean", "Valjean"},
-    "Life is difficult",
-    []Comment{
-        Comment{
-            Person{"Marcel", "Beliveau"},
-            "LOL!",
-        },
-    },
-}
-
-raymond.RegisterHelper("fullName", func(person Person) string {
-    return person.FirstName + " " + person.LastName
-})
-```
-
-You can unregister global helpers with `RemoveHelper` and `RemoveAllHelpers` functions:
-
-```go
-raymond.RemoveHelper("fullname")
-```
-
-```go
-raymond.RemoveAllHelpers()
-```
-
-
-### Template Helpers
-
-You can register a helper on a specific template, and in that case that helper will be available to that template only:
-
-```go
-tpl := raymond.MustParse("User: {{fullName user.firstName user.lastName}}")
-
-tpl.RegisterHelper("fullName", func(firstName, lastName string) string {
-  return firstName + " " + lastName
-})
-```
-
-
-### Built-In Helpers
-
-Those built-in helpers are available to all templates.
-
-
-#### The `if` block helper
-
-You can use the `if` helper to conditionally render a block. If its argument returns `false`, `nil`, `0`, `""`, an empty array, an empty slice or an empty map, then raymond will not render the block.
-
-```html
-<div class="entry">
-  {{#if author}}
-    <h1>{{firstName}} {{lastName}}</h1>
-  {{/if}}
-</div>
-```
-
-When using a block expression, you can specify a template section to run if the expression returns a falsy value. That section, marked by `{{else}}` is called an "else section".
-
-```html
-<div class="entry">
-  {{#if author}}
-    <h1>{{firstName}} {{lastName}}</h1>
-  {{else}}
-    <h1>Unknown Author</h1>
-  {{/if}}
-</div>
-```
-
-You can chain several blocks. For example that template:
-
-```html
-{{#if isActive}}
-  <img src="star.gif" alt="Active">
-{{else if isInactive}}
-  <img src="cry.gif" alt="Inactive">
-{{else}}
-  <img src="wat.gif" alt="Unknown">
-{{/if}}
-```
-
-With that context:
-
-```go
-ctx := map[string]interface{}{
-    "isActive":   false,
-    "isInactive": false,
-}
-```
-
-Outputs:
-
-```html
- <img src="wat.gif" alt="Unknown">
-```
-
-
-#### The `unless` block helper
-
-You can use the `unless` helper as the inverse of the `if` helper. Its block will be rendered if the expression returns a falsy value.
-
-```html
-<div class="entry">
-  {{#unless license}}
-  <h3 class="warning">WARNING: This entry does not have a license!</h3>
-  {{/unless}}
-</div>
-```
-
-
-#### The `each` block helper
-
-You can iterate over an array, a slice, a map or a struct instance using this built-in `each` helper. Inside the block, you can use `this` to reference the element being iterated over.
-
-For example:
-
-```html
-<ul class="people">
-  {{#each people}}
-    <li>{{this}}</li>
-  {{/each}}
-</ul>
-```
-
-With this context:
-
-```go
-map[string]interface{}{
-    "people": []string{
-        "Marcel", "Jean-Claude", "Yvette",
-    },
-}
-```
-
-Outputs:
-
-```html
-<ul class="people">
-  <li>Marcel</li>
-  <li>Jean-Claude</li>
-  <li>Yvette</li>
-</ul>
-```
-
-You can optionally provide an `{{else}}` section which will display only when the passed argument is an empty array, an empty slice or an empty map (a `struct` instance is never considered empty).
-
-```html
-{{#each paragraphs}}
-  <p>{{this}}</p>
-{{else}}
-  <p class="empty">No content</p>
-{{/each}}
-```
-
-When looping through items in `each`, you can optionally reference the current loop index via `{{@index}}`.
-
-```html
-{{#each array}}
-  {{@index}}: {{this}}
-{{/each}}
-```
-
-Additionally for map and struct instance iteration, `{{@key}}` references the current map key or struct field name:
-
-```html
-{{#each map}}
-  {{@key}}: {{this}}
-{{/each}}
-```
-
-The first and last steps of iteration are noted via the `@first` and `@last` variables.
-
-
-#### The `with` block helper
-
-You can shift the context for a section of a template by using the built-in `with` block helper.
-
-```html
-<div class="entry">
-  <h1>{{title}}</h1>
-
-  {{#with author}}
-  <h2>By {{firstName}} {{lastName}}</h2>
-  {{/with}}
-</div>
-```
-
-With this context:
-
-```go
-map[string]interface{}{
-    "title": "My first post!",
-    "author": map[string]string{
-        "firstName": "Jean",
-        "lastName":  "Valjean",
-    },
-}
-```
-
-Outputs:
-
-```html
-<div class="entry">
-  <h1>My first post!</h1>
-
-  <h2>By Jean Valjean</h2>
-</div>
-```
-
-You can optionally provide an `{{else}}` section which will display only when the passed argument is falsy.
-
-```html
-{{#with author}}
-  <p>{{name}}</p>
-{{else}}
-  <p class="empty">No content</p>
-{{/with}}
-```
-
-
-#### The `lookup` helper
-
-The `lookup` helper allows for dynamic parameter resolution using handlebars variables.
-
-```html
-{{#each bar}}
-  {{lookup ../foo @index}}
-{{/each}}
-```
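
To make that snippet concrete, here is a hedged sketch with a made-up context; `lookup` resolves the parent context's `foo` at the current `@index`:

```go
tpl := raymond.MustParse(`{{#each bar}}{{lookup ../foo @index}},{{/each}}`)

ctx := map[string]interface{}{
	"foo": []string{"alpha", "beta"},
	"bar": []string{"x", "y"},
}

fmt.Print(tpl.MustExec(ctx)) // expected output: alpha,beta,
```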
-
-
-#### The `log` helper
-
-The `log` helper allows for logging while rendering a template.
-
-```html
-{{log "Look at me!"}}
-```
-
-Note that the handlebars.js `@level` variable is not supported.
-
-
-#### The `equal` helper
-
-The `equal` helper renders a block if the string versions of both arguments are equal.
-
-For example that template:
-
-```html
-{{#equal foo "bar"}}foo is bar{{/equal}}
-{{#equal foo baz}}foo is the same as baz{{/equal}}
-{{#equal nb 0}}nothing{{/equal}}
-{{#equal nb 1}}there is one{{/equal}}
-{{#equal nb "1"}}everything is stringified before comparison{{/equal}}
-```
-
-With that context:
-
-```go
-ctx := map[string]interface{}{
-    "foo": "bar",
-    "baz": "bar",
-    "nb":  1,
-}
-```
-
-Outputs:
-
-```html
-foo is bar
-foo is the same as baz
-
-there is one
-everything is stringified before comparison
-```
-
-
-### Block Helpers
-
-Block helpers make it possible to define custom iterators and other functionality that can invoke the passed block with a new context.
-
-
-#### Block Evaluation
-
-As an example, let's define a block helper that adds some markup to the wrapped text.
-
-```html
-<div class="entry">
-  <h1>{{title}}</h1>
-  <div class="body">
-    {{#bold}}{{body}}{{/bold}}
-  </div>
-</div>
-```
-
-The `bold` helper will add markup to make its text bold.
-
-```go
-raymond.RegisterHelper("bold", func(options *raymond.Options) raymond.SafeString {
-    return raymond.SafeString(`<div class="mybold">` + options.Fn() + "</div>")
-})
-```
-
-A helper evaluates the block content with the current context by calling `options.Fn()`.
-
-If you want to evaluate the block with another context, use `options.FnWith(ctx)`, like this French version of the built-in `with` block helper:
-
-```go
-raymond.RegisterHelper("avec", func(context interface{}, options *raymond.Options) string {
-    return options.FnWith(context)
-})
-```
-
-With that template:
-
-```html
-{{#avec obj.text}}{{this}}{{/avec}}
-```
-
-
-#### Conditional
-
-Let's write a French version of the `if` block helper:
-
-```go
-source := `{{#si yep}}YEP !{{/si}}`
-
-ctx := map[string]interface{}{"yep": true}
-
-raymond.RegisterHelper("si", func(conditional bool, options *raymond.Options) string {
-    if conditional {
-        return options.Fn()
-    }
-    return ""
-})
-```
-
-Note that since the first parameter of the helper is typed as `bool`, an automatic conversion is made if the corresponding context value is not a boolean. So this helper works with that context too:
-
-```go
-ctx := map[string]interface{}{"yep": "message"}
-```
-
-Here, `"message"` is converted to `true` because it is a non-empty string. See the `IsTrue()` function for more information on boolean conversion.
-
-
-#### Else Block Evaluation
-
-We can enhance the `si` block helper to evaluate the `else block` by calling `options.Inverse()` when the conditional is false:
-
-```go
-source := `{{#si yep}}YEP !{{else}}NOP !{{/si}}`
-
-ctx := map[string]interface{}{"yep": false}
-
-raymond.RegisterHelper("si", func(conditional bool, options *raymond.Options) string {
-    if conditional {
-        return options.Fn()
-    }
-    return options.Inverse()
-})
-```
-
-Outputs:
-```
-NOP !
-```
-
-
-#### Block Parameters
-
-It's possible to receive named parameters from supporting helpers.
-
-```html
-{{#each users as |user userId|}}
-  Id: {{userId}} Name: {{user.name}}
-{{/each}}
-```
-
-In this particular example, `user` will have the same value as the current context and `userId` will have the index/key value for the iteration.
-
-This allows for nested helpers to avoid name conflicts.
-
-For example:
-
-```html
-{{#each users as |user userId|}}
-  {{#each user.books as |book bookId|}}
-    User: {{userId}} Book: {{bookId}}
-  {{/each}}
-{{/each}}
-```
-
-With this context:
-
-```go
-ctx := map[string]interface{}{
-    "users": map[string]interface{}{
-        "marcel": map[string]interface{}{
-            "books": map[string]interface{}{
-                "book1": "My first book",
-                "book2": "My second book",
-            },
-        },
-        "didier": map[string]interface{}{
-            "books": map[string]interface{}{
-                "bookA": "Good book",
-                "bookB": "Bad book",
-            },
-        },
-    },
-}
-```
-
-Outputs:
-
-```html
-  User: marcel Book: book1
-  User: marcel Book: book2
-  User: didier Book: bookA
-  User: didier Book: bookB
-```
-
-As you can see, the second block parameter is the map key. When using structs, it is the struct field name.
-
-When using arrays and slices, the second parameter is the element index:
-
-```go
-ctx := map[string]interface{}{
-    "users": []map[string]interface{}{
-        {
-            "id": "marcel",
-            "books": []map[string]interface{}{
-                {"id": "book1", "title": "My first book"},
-                {"id": "book2", "title": "My second book"},
-            },
-        },
-        {
-            "id": "didier",
-            "books": []map[string]interface{}{
-                {"id": "bookA", "title": "Good book"},
-                {"id": "bookB", "title": "Bad book"},
-            },
-        },
-    },
-}
-```
-
-Outputs:
-
-```html
-    User: 0 Book: 0
-    User: 0 Book: 1
-    User: 1 Book: 0
-    User: 1 Book: 1
-```
-
-
-### Helper Parameters
-
-When calling a helper in a template, raymond expects the same number of arguments as the number of helper function parameters.
-
-So this template:
-
-```html
-{{add a}}
-```
-
-With this helper:
-
-```go
-raymond.RegisterHelper("add", func(val1, val2 int) string {
-    return strconv.Itoa(val1 + val2)
-})
-```
-
-Will simply panic, because the helper is called with one argument whereas it expects two.
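-
-To avoid the panic, simply pass as many arguments as the helper declares. A minimal sketch using the `add` helper above:
-
-```go
-source := `{{add a b}}`
-
-ctx := map[string]interface{}{
-    "a": 1,
-    "b": 2,
-}
-
-tpl := raymond.MustParse(source)
-fmt.Print(tpl.MustExec(ctx))
-// Outputs: 3
-```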
-
-
-#### Automatic conversion
-
-Let's create a `concat` helper that expects two strings and concatenates them:
-
-```go
-source := `{{concat a b}}`
-
-ctx := map[string]interface{}{
-    "a": "Jean",
-    "b": "Valjean",
-}
-
-raymond.RegisterHelper("concat", func(val1, val2 string) string {
-    return val1 + " " + val2
-})
-```
-
-Everything goes well: two strings are passed as arguments to the helper, which outputs:
-
-```html
-Jean Valjean
-```
-
-But what happens if there is a type other than `string` in the context? For example:
-
-```go
-ctx := map[string]interface{}{
-    "a": 10,
-    "b": "Valjean",
-}
-```
-
-Actually, raymond performs automatic string conversion. Because the first parameter of the helper is typed as `string`, the first argument is converted from the integer `10` to `"10"`, and the helper outputs:
-
-```html
-10 Valjean
-```
-
-Note that this kind of automatic conversion is done with `bool` type too, thanks to the `IsTrue()` function.
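-
-As a quick sketch of that boolean conversion (the `isActive` helper and `status` key are purely illustrative names):
-
-```go
-source := `{{isActive status}}`
-
-ctx := map[string]interface{}{
-    // not a bool: it is converted with IsTrue(), so a non-empty string becomes true
-    "status": "enabled",
-}
-
-raymond.RegisterHelper("isActive", func(active bool) string {
-    if active {
-        return "ACTIVE"
-    }
-    return "INACTIVE"
-})
-// Outputs: ACTIVE
-```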
-
-
-### Options Argument
-
-If a helper needs the `Options` argument, just add it at the end of the helper parameters:
-
-```go
-raymond.RegisterHelper("add", func(val1, val2 int, options *raymond.Options) string {
-    return strconv.Itoa(val1 + val2) + " " + options.ValueStr("bananas")
-})
-```
-
-Thanks to the `options` argument, helpers have access to the current evaluation context and to the hash arguments, and they can manipulate private data variables.
-
-The `Options` argument is also required for block helpers to evaluate the block and the "else block".
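-
-For instance, a sketch of calling that `add` helper; the `bananas` key is just an illustrative context value read through `options.ValueStr()`:
-
-```go
-source := `{{add a b}}`
-
-ctx := map[string]interface{}{
-    "a":       1,
-    "b":       2,
-    "bananas": "bananas",
-}
-
-// with the `add` helper registered as above, this renders: 3 bananas
-tpl := raymond.MustParse(source)
-fmt.Print(tpl.MustExec(ctx))
-```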
-
-
-#### Context Values
-
-Helpers fetch current context values with `options.Value()` and `options.ValueStr()`.
-
-`Value()` returns an `interface{}` and lets the helper do the type assertions whereas `ValueStr()` automatically converts the value to a `string`.
-
-For example:
-
-```go
-source := `{{concat a b}}`
-
-ctx := map[string]interface{}{
-    "a":      "Marcel",
-    "b":      "Beliveau",
-    "suffix": "FOREVER !",
-}
-
-raymond.RegisterHelper("concat", func(val1, val2 string, options *raymond.Options) string {
-    return val1 + " " + val2 + " " + options.ValueStr("suffix")
-})
-```
-
-Outputs:
-
-```html
-Marcel Beliveau FOREVER !
-```
-
-Helpers can get the entire current context with `options.Ctx()`, which returns an `interface{}`.
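-
-For example, a minimal sketch of a helper (the name `keys` is illustrative) that inspects the whole context via `options.Ctx()`; the type assertion is the helper's responsibility:
-
-```go
-raymond.RegisterHelper("keys", func(options *raymond.Options) string {
-    result := ""
-
-    // options.Ctx() returns the current context as an interface{}
-    if ctx, ok := options.Ctx().(map[string]interface{}); ok {
-        for key := range ctx {
-            result += key + " "
-        }
-    }
-
-    return result
-})
-```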
-
-
-#### Helper Hash Arguments
-
-Helpers access hash arguments with `options.HashProp()` and `options.HashStr()`.
-
-`HashProp()` returns an `interface{}` and lets the helper do the type assertions whereas `HashStr()` automatically converts the value to a `string`.
-
-For example:
-
-```go
-source := `{{concat suffix first=a second=b}}`
-
-ctx := map[string]interface{}{
-    "a":      "Marcel",
-    "b":      "Beliveau",
-    "suffix": "FOREVER !",
-}
-
-raymond.RegisterHelper("concat", func(suffix string, options *raymond.Options) string {
-    return options.HashStr("first") + " " + options.HashStr("second") + " " + suffix
-})
-```
-
-Outputs:
-
-```html
-Marcel Beliveau FOREVER !
-```
-
-Helpers can get the full hash with `options.Hash()`, which returns a `map[string]interface{}`.
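-
-Similarly, a sketch of a helper (the name `attrs` is illustrative) that walks the full hash returned by `options.Hash()`:
-
-```go
-source := `<div{{attrs id="foo" class="bar"}}></div>`
-
-raymond.RegisterHelper("attrs", func(options *raymond.Options) raymond.SafeString {
-    result := ""
-
-    // options.Hash() returns all hash arguments as a map[string]interface{}
-    for name, val := range options.Hash() {
-        result += " " + name + `="` + raymond.Escape(raymond.Str(val)) + `"`
-    }
-
-    return raymond.SafeString(result)
-})
-```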
-
-
-#### Private Data
-
-Helpers access private data variables with `options.Data()` and `options.DataStr()`.
-
-`Data()` returns an `interface{}` and lets the helper do the type assertions whereas `DataStr()` automatically converts the value to a `string`.
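-
-As a hedged sketch, here is a helper (names are illustrative) that reads the `@index` private variable set by the built-in `each` helper, assuming it is called inside the `{{#each}}` block:
-
-```go
-source := `{{#each items}}{{row this}}{{/each}}`
-
-ctx := map[string]interface{}{
-    "items": []string{"foo", "bar"},
-}
-
-raymond.RegisterHelper("row", func(val string, options *raymond.Options) string {
-    // @index is private data set for each iteration
-    return options.DataStr("index") + ": " + val + "\n"
-})
-
-// Outputs:
-//   0: foo
-//   1: bar
-```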
-
-Helpers can get the entire current data frame with `options.DataFrame()` that returns a `*DataFrame`.
-
-For helpers that need to inject their own private data frame, use `options.NewDataFrame()` to create the frame and `options.FnData()` to evaluate the block with that frame.
-
-For example:
-
-```go
-source := `{{#voodoo kind=a}}Voodoo is {{@magix}}{{/voodoo}}`
-
-ctx := map[string]interface{}{
-    "a": "awesome",
-}
-
-raymond.RegisterHelper("voodoo", func(options *raymond.Options) string {
-    // create data frame with @magix data
-    frame := options.NewDataFrame()
-    frame.Set("magix", options.HashProp("kind"))
-
-    // evaluates block with new data frame
-    return options.FnData(frame)
-})
-```
-
-Helpers that need to evaluate the block with a private data frame and a new context can call `options.FnCtxData()`.
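-
-As a sketch only: a variant of the `avec` helper that also injects private data. The exact parameter order of `FnCtxData()` is an assumption here (new context first, then the data frame):
-
-```go
-raymond.RegisterHelper("avecData", func(context interface{}, options *raymond.Options) string {
-    frame := options.NewDataFrame()
-    frame.Set("magix", "enabled")
-
-    // assumption: the new context comes first, then the private data frame
-    return options.FnCtxData(context, frame)
-})
-```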
-
-
-### Utilities
-
-In addition to `Escape()`, raymond provides utility functions that can be useful for helpers.
-
-
-#### `Str()`
-
-`Str()` converts its parameter to a `string`.
-
-Numbers:
-
-```go
-raymond.Str(3) + " foos and " + raymond.Str(-1.25) + " bars"
-// Outputs: "3 foos and -1.25 bars"
-```
-
-Booleans:
-
-```go
-"everything is " + raymond.Str(true) + " and nothing is " + raymond.Str(false)
-// Outputs: "everything is true and nothing is false"
-```
-
-Maps:
-
-```go
-raymond.Str(map[string]string{"foo": "bar"})
-// Outputs: "map[foo:bar]"
-```
-
-Arrays and Slices:
-
-```go
-raymond.Str([]interface{}{true, 10, "foo", 5, "bar"})
-// Outputs: "true10foo5bar"
-```
-
-
-#### `IsTrue()`
-
-`IsTrue()` returns the truthiness of its parameter.
-
-It returns `false` when its parameter is either:
-
-  - an empty array
-  - an empty slice
-  - an empty map
-  - `""`
-  - `nil`
-  - `0`
-  - `false`
-
-For all other values, `IsTrue()` returns `true`.
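-
-A few illustrative calls:
-
-```go
-raymond.IsTrue("")        // false
-raymond.IsTrue(0)         // false
-raymond.IsTrue(nil)       // false
-raymond.IsTrue("message") // true
-raymond.IsTrue(10)        // true
-```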
-
-
-## Context Functions
-
-In addition to helpers, lambdas found in the context are evaluated.
-
-For example, that template and context:
-
-```go
-source := "I {{feeling}} you"
-
-ctx := map[string]interface{}{
-    "feeling": func() string {
-        rand.Seed(time.Now().UTC().UnixNano())
-
-        feelings := []string{"hate", "love"}
-        return feelings[rand.Intn(len(feelings))]
-    },
-}
-```
-
-Randomly renders `I hate you` or `I love you`.
-
-Those context functions behave like helper functions: they can be called with parameters and they can have an `Options` argument.
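-
-For instance, a sketch of a context function that takes a parameter (the `shout` and `name` keys are illustrative):
-
-```go
-source := `Hello {{shout name}}`
-
-ctx := map[string]interface{}{
-    "name": "marcel",
-    "shout": func(val string) string {
-        return strings.ToUpper(val) + " !"
-    },
-}
-
-// Outputs: Hello MARCEL !
-```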
-
-
-## Partials
-
-### Template Partials
-
-You can register template partials before execution:
-
-```go
-tpl := raymond.MustParse("{{> foo}} baz")
-tpl.RegisterPartial("foo", "<span>bar</span>")
-
-result := tpl.MustExec(nil)
-fmt.Print(result)
-```
-
-Output:
-
-```html
-<span>bar</span> baz
-```
-
-You can register several partials at once:
-
-```go
-tpl := raymond.MustParse("{{> foo}} and {{> baz}}")
-tpl.RegisterPartials(map[string]string{
-    "foo": "<span>bar</span>",
-    "baz": "<span>bat</span>",
-})
-
-result := tpl.MustExec(nil)
-fmt.Print(result)
-```
-
-Output:
-
-```html
-<span>bar</span> and <span>bat</span>
-```
-
-
-### Global Partials
-
-You can register global partials that will be accessible to all templates:
-
-```go
-raymond.RegisterPartial("foo", "<span>bar</span>")
-
-tpl := raymond.MustParse("{{> foo}} baz")
-result := tpl.MustExec(nil)
-fmt.Print(result)
-```
-
-Or:
-
-```go
-raymond.RegisterPartials(map[string]string{
-    "foo": "<span>bar</span>",
-    "baz": "<span>bat</span>",
-})
-
-tpl := raymond.MustParse("{{> foo}} and {{> baz}}")
-result := tpl.MustExec(nil)
-fmt.Print(result)
-```
-
-
-### Dynamic Partials
-
-It's possible to dynamically select the partial to be executed by using subexpression syntax.
-
-For example, that template randomly evaluates the `foo` or `baz` partial:
-
-```go
-tpl := raymond.MustParse("{{> (whichPartial) }}")
-tpl.RegisterPartials(map[string]string{
-    "foo": "<span>bar</span>",
-    "baz": "<span>bat</span>",
-})
-
-ctx := map[string]interface{}{
-    "whichPartial": func() string {
-        rand.Seed(time.Now().UTC().UnixNano())
-
-        names := []string{"foo", "baz"}
-        return names[rand.Intn(len(names))]
-    },
-}
-
-result := tpl.MustExec(ctx)
-fmt.Print(result)
-```
-
-
-### Partial Contexts
-
-It's possible to execute partials on a custom context by passing in the context to the partial call.
-
-For example:
-
-```go
-tpl := raymond.MustParse("User: {{> userDetails user }}")
-tpl.RegisterPartial("userDetails", "{{firstname}} {{lastname}}")
-
-ctx := map[string]interface{}{
-    "user": map[string]string{
-        "firstname": "Jean",
-        "lastname":  "Valjean",
-    },
-}
-
-result := tpl.MustExec(ctx)
-fmt.Print(result)
-```
-
-Displays:
-
-```html
-User: Jean Valjean
-```
-
-
-### Partial Parameters
-
-Custom data can be passed to partials through hash parameters.
-
-For example:
-
-```go
-tpl := raymond.MustParse("{{> myPartial name=hero }}")
-tpl.RegisterPartial("myPartial", "My hero is {{name}}")
-
-ctx := map[string]interface{}{
-    "hero": "Goldorak",
-}
-
-result := tpl.MustExec(ctx)
-fmt.Print(result)
-```
-
-Displays:
-
-```html
-My hero is Goldorak
-```
-
-
-## Utility Functions
-
-You can use the following utility functions to parse and register partials from files (see the sketch after this list):
-
-- `ParseFile()` - reads a file and returns the parsed template
-- `Template.RegisterPartialFile()` - reads a file and registers its content as a partial with given name
-- `Template.RegisterPartialFiles()` - reads several files and registers them as partials; the filename base is used as the partial name
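-
-A hedged sketch of using them. The file paths are hypothetical, and `RegisterPartialFile()` is assumed to take the file path first, then the partial name, and to return an error:
-
-```go
-// parse a template from disk
-tpl, err := raymond.ParseFile("templates/page.hbs")
-if err != nil {
-    panic(err)
-}
-
-// register the content of a file as the "header" partial (assumed signature)
-if err := tpl.RegisterPartialFile("templates/header.hbs", "header"); err != nil {
-    panic(err)
-}
-
-result := tpl.MustExec(nil)
-fmt.Print(result)
-```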
-
-
-## Mustache
-
-Handlebars is a superset of [mustache](https://mustache.github.io) but it differs on those points:
-
-- Alternative delimiters are not supported
-- There is no recursive lookup
-
-
-## Limitations
-
-These handlebars options are currently NOT implemented:
-
-- `compat` - enables recursive field lookup
-- `knownHelpers` - list of helpers that are known to exist (truthy) at template execution time
-- `knownHelpersOnly` - allows further optimizations based on the known helpers list
-- `trackIds` - include the id names used to resolve parameters for helpers
-- `noEscape` - disables HTML escaping globally
-- `strict` - templates will throw rather than silently ignore missing fields
-- `assumeObjects` - removes object existence checks when traversing paths
-- `preventIndent` - disables the auto-indentation of nested partials
-- `stringParams` - resolves a parameter to its name if the value isn't present in the context stack
-
-These handlebars features are currently NOT implemented:
-
-- raw block content is not passed as a parameter to helpers
-- `blockHelperMissing` - helper called when a helper can not be directly resolved
-- `helperMissing` - helper called when a potential helper expression was not found
-- `@contextPath` - value set in `trackIds` mode that records the lookup path for the current context
-- `@level` - log level
-
-
-## Handlebars Lexer
-
-You should not use the lexer directly, but for your information here is an example:
-
-```go
-package main
-
-import (
-    "fmt"
-
-    "github.com/aymerick/raymond/lexer"
-)
-
-func main() {
-    source := "You know {{nothing}} John Snow"
-
-    output := ""
-
-    lex := lexer.Scan(source)
-    for {
-        // consume next token
-        token := lex.NextToken()
-
-        output += fmt.Sprintf(" %s", token)
-
-        // stops when all tokens have been consumed, or on error
-        if token.Kind == lexer.TokenEOF || token.Kind == lexer.TokenError {
-            break
-        }
-    }
-
-    fmt.Print(output)
-}
-```
-
-Outputs:
-
-```
-Content{"You know "} Open{"{{"} ID{"nothing"} Close{"}}"} Content{" John Snow"} EOF
-```
-
-
-## Handlebars Parser
-
-You should not use the parser directly, but for your information here is an example:
-
-```go
-package main
-
-import (
-    "fmt"
-
-    "github.com/aymerick/raymond/ast"
-    "github.com/aymerick/raymond/parser"
-)
-
-func main() {
-    source := "You know {{nothing}} John Snow"
-
-    // parse template
-    program, err := parser.Parse(source)
-    if err != nil {
-        panic(err)
-    }
-
-    // print AST
-    output := ast.Print(program)
-
-    fmt.Print(output)
-}
-```
-
-Outputs:
-
-```
-CONTENT[ 'You know ' ]
-{{ PATH:nothing [] }}
-CONTENT[ ' John Snow' ]
-```
-
-
-## Test
-
-First, fetch mustache tests:
-
-    $ git submodule update --init
-
-To run all tests:
-
-    $ go test ./...
-
-To filter tests:
-
-    $ go test -run="Partials"
-
-To run all tests and all benchmarks:
-
-    $ go test -bench . ./...
-
-To test with race detection:
-
-    $ go test -race ./...
-
-
-## References
-
-  - <http://handlebarsjs.com/>
-  - <https://mustache.github.io/mustache.5.html>
-  - <https://github.com/golang/go/tree/master/src/text/template>
-  - <https://www.youtube.com/watch?v=HxaD_trXwRE>
-
-
-## Other Implementations
-
-- [handlebars.js](http://handlebarsjs.com) - javascript
-- [handlebars.java](https://github.com/jknack/handlebars.java) - java
-- [handlebars.rb](https://github.com/cowboyd/handlebars.rb) - ruby
-- [handlebars.php](https://github.com/XaminProject/handlebars.php) - php
-- [handlebars-objc](https://github.com/Bertrand/handlebars-objc) - Objective C
-- [rumblebars](https://github.com/nicolas-cherel/rumblebars) - rust

+ 0 - 1
vendor/github.com/aymerick/raymond/VERSION

@@ -1 +0,0 @@
-2.0.2

+ 0 - 785
vendor/github.com/aymerick/raymond/ast/node.go

@@ -1,785 +0,0 @@
-// Package ast provides structures to represent a handlebars Abstract Syntax Tree, and a Visitor interface to visit that tree.
-package ast
-
-import (
-	"fmt"
-	"strconv"
-)
-
-// References:
-//   - https://github.com/wycats/handlebars.js/blob/master/lib/handlebars/compiler/ast.js
-//   - https://github.com/wycats/handlebars.js/blob/master/docs/compiler-api.md
-//   - https://github.com/golang/go/blob/master/src/text/template/parse/node.go
-
-// Node is an element in the AST.
-type Node interface {
-	// node type
-	Type() NodeType
-
-	// location of node in original input string
-	Location() Loc
-
-	// string representation, used for debugging
-	String() string
-
-	// accepts visitor
-	Accept(Visitor) interface{}
-}
-
-// Visitor is the interface to visit an AST.
-type Visitor interface {
-	VisitProgram(*Program) interface{}
-
-	// statements
-	VisitMustache(*MustacheStatement) interface{}
-	VisitBlock(*BlockStatement) interface{}
-	VisitPartial(*PartialStatement) interface{}
-	VisitContent(*ContentStatement) interface{}
-	VisitComment(*CommentStatement) interface{}
-
-	// expressions
-	VisitExpression(*Expression) interface{}
-	VisitSubExpression(*SubExpression) interface{}
-	VisitPath(*PathExpression) interface{}
-
-	// literals
-	VisitString(*StringLiteral) interface{}
-	VisitBoolean(*BooleanLiteral) interface{}
-	VisitNumber(*NumberLiteral) interface{}
-
-	// miscellaneous
-	VisitHash(*Hash) interface{}
-	VisitHashPair(*HashPair) interface{}
-}
-
-// NodeType represents an AST Node type.
-type NodeType int
-
-// Type returns itself, and permits struct includers to satisfy that part of Node interface.
-func (t NodeType) Type() NodeType {
-	return t
-}
-
-const (
-	// NodeProgram is the program node
-	NodeProgram NodeType = iota
-
-	// NodeMustache is the mustache statement node
-	NodeMustache
-
-	// NodeBlock is the block statement node
-	NodeBlock
-
-	// NodePartial is the partial statement node
-	NodePartial
-
-	// NodeContent is the content statement node
-	NodeContent
-
-	// NodeComment is the comment statement node
-	NodeComment
-
-	// NodeExpression is the expression node
-	NodeExpression
-
-	// NodeSubExpression is the subexpression node
-	NodeSubExpression
-
-	// NodePath is the expression path node
-	NodePath
-
-	// NodeBoolean is the literal boolean node
-	NodeBoolean
-
-	// NodeNumber is the literal number node
-	NodeNumber
-
-	// NodeString is the literal string node
-	NodeString
-
-	// NodeHash is the hash node
-	NodeHash
-
-	// NodeHashPair is the hash pair node
-	NodeHashPair
-)
-
-// Loc represents the position of a parsed node in source file.
-type Loc struct {
-	Pos  int // Byte position
-	Line int // Line number
-}
-
-// Location returns itself, and permits struct includers to satisfy that part of Node interface.
-func (l Loc) Location() Loc {
-	return l
-}
-
-// Strip describes node whitespace management.
-type Strip struct {
-	Open  bool
-	Close bool
-
-	OpenStandalone   bool
-	CloseStandalone  bool
-	InlineStandalone bool
-}
-
-// NewStrip instantiates a Strip for given open and close mustaches.
-func NewStrip(openStr, closeStr string) *Strip {
-	return &Strip{
-		Open:  (len(openStr) > 2) && openStr[2] == '~',
-		Close: (len(closeStr) > 2) && closeStr[len(closeStr)-3] == '~',
-	}
-}
-
-// NewStripForStr instantiates a Strip for given tag.
-func NewStripForStr(str string) *Strip {
-	return &Strip{
-		Open:  (len(str) > 2) && str[2] == '~',
-		Close: (len(str) > 2) && str[len(str)-3] == '~',
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (s *Strip) String() string {
-	return fmt.Sprintf("Open: %t, Close: %t, OpenStandalone: %t, CloseStandalone: %t, InlineStandalone: %t", s.Open, s.Close, s.OpenStandalone, s.CloseStandalone, s.InlineStandalone)
-}
-
-//
-// Program
-//
-
-// Program represents a program node.
-type Program struct {
-	NodeType
-	Loc
-
-	Body        []Node // [ Statement ... ]
-	BlockParams []string
-	Chained     bool
-
-	// whitespace management
-	Strip *Strip
-}
-
-// NewProgram instantiates a new program node.
-func NewProgram(pos int, line int) *Program {
-	return &Program{
-		NodeType: NodeProgram,
-		Loc:      Loc{pos, line},
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *Program) String() string {
-	return fmt.Sprintf("Program{Pos: %d}", node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *Program) Accept(visitor Visitor) interface{} {
-	return visitor.VisitProgram(node)
-}
-
-// AddStatement adds given statement to program.
-func (node *Program) AddStatement(statement Node) {
-	node.Body = append(node.Body, statement)
-}
-
-//
-// Mustache Statement
-//
-
-// MustacheStatement represents a mustache node.
-type MustacheStatement struct {
-	NodeType
-	Loc
-
-	Unescaped  bool
-	Expression *Expression
-
-	// whitespace management
-	Strip *Strip
-}
-
-// NewMustacheStatement instantiates a new mustache node.
-func NewMustacheStatement(pos int, line int, unescaped bool) *MustacheStatement {
-	return &MustacheStatement{
-		NodeType:  NodeMustache,
-		Loc:       Loc{pos, line},
-		Unescaped: unescaped,
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *MustacheStatement) String() string {
-	return fmt.Sprintf("Mustache{Pos: %d}", node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *MustacheStatement) Accept(visitor Visitor) interface{} {
-	return visitor.VisitMustache(node)
-}
-
-//
-// Block Statement
-//
-
-// BlockStatement represents a block node.
-type BlockStatement struct {
-	NodeType
-	Loc
-
-	Expression *Expression
-
-	Program *Program
-	Inverse *Program
-
-	// whitespace management
-	OpenStrip    *Strip
-	InverseStrip *Strip
-	CloseStrip   *Strip
-}
-
-// NewBlockStatement instantiates a new block node.
-func NewBlockStatement(pos int, line int) *BlockStatement {
-	return &BlockStatement{
-		NodeType: NodeBlock,
-		Loc:      Loc{pos, line},
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *BlockStatement) String() string {
-	return fmt.Sprintf("Block{Pos: %d}", node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *BlockStatement) Accept(visitor Visitor) interface{} {
-	return visitor.VisitBlock(node)
-}
-
-//
-// Partial Statement
-//
-
-// PartialStatement represents a partial node.
-type PartialStatement struct {
-	NodeType
-	Loc
-
-	Name   Node   // PathExpression | SubExpression
-	Params []Node // [ Expression ... ]
-	Hash   *Hash
-
-	// whitespace management
-	Strip  *Strip
-	Indent string
-}
-
-// NewPartialStatement instantiates a new partial node.
-func NewPartialStatement(pos int, line int) *PartialStatement {
-	return &PartialStatement{
-		NodeType: NodePartial,
-		Loc:      Loc{pos, line},
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *PartialStatement) String() string {
-	return fmt.Sprintf("Partial{Name:%s, Pos:%d}", node.Name, node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *PartialStatement) Accept(visitor Visitor) interface{} {
-	return visitor.VisitPartial(node)
-}
-
-//
-// Content Statement
-//
-
-// ContentStatement represents a content node.
-type ContentStatement struct {
-	NodeType
-	Loc
-
-	Value    string
-	Original string
-
-	// whitespace management
-	RightStripped bool
-	LeftStripped  bool
-}
-
-// NewContentStatement instantiates a new content node.
-func NewContentStatement(pos int, line int, val string) *ContentStatement {
-	return &ContentStatement{
-		NodeType: NodeContent,
-		Loc:      Loc{pos, line},
-
-		Value:    val,
-		Original: val,
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *ContentStatement) String() string {
-	return fmt.Sprintf("Content{Value:'%s', Pos:%d}", node.Value, node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *ContentStatement) Accept(visitor Visitor) interface{} {
-	return visitor.VisitContent(node)
-}
-
-//
-// Comment Statement
-//
-
-// CommentStatement represents a comment node.
-type CommentStatement struct {
-	NodeType
-	Loc
-
-	Value string
-
-	// whitespace management
-	Strip *Strip
-}
-
-// NewCommentStatement instantiates a new comment node.
-func NewCommentStatement(pos int, line int, val string) *CommentStatement {
-	return &CommentStatement{
-		NodeType: NodeComment,
-		Loc:      Loc{pos, line},
-
-		Value: val,
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *CommentStatement) String() string {
-	return fmt.Sprintf("Comment{Value:'%s', Pos:%d}", node.Value, node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *CommentStatement) Accept(visitor Visitor) interface{} {
-	return visitor.VisitComment(node)
-}
-
-//
-// Expression
-//
-
-// Expression represents an expression node.
-type Expression struct {
-	NodeType
-	Loc
-
-	Path   Node   // PathExpression | StringLiteral | BooleanLiteral | NumberLiteral
-	Params []Node // [ Expression ... ]
-	Hash   *Hash
-}
-
-// NewExpression instantiates a new expression node.
-func NewExpression(pos int, line int) *Expression {
-	return &Expression{
-		NodeType: NodeExpression,
-		Loc:      Loc{pos, line},
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *Expression) String() string {
-	return fmt.Sprintf("Expr{Path:%s, Pos:%d}", node.Path, node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *Expression) Accept(visitor Visitor) interface{} {
-	return visitor.VisitExpression(node)
-}
-
-// HelperName returns helper name, or an empty string if this expression can't be a helper.
-func (node *Expression) HelperName() string {
-	path, ok := node.Path.(*PathExpression)
-	if !ok {
-		return ""
-	}
-
-	if path.Data || (len(path.Parts) != 1) || (path.Depth > 0) || path.Scoped {
-		return ""
-	}
-
-	return path.Parts[0]
-}
-
-// FieldPath returns path expression representing a field path, or nil if this is not a field path.
-func (node *Expression) FieldPath() *PathExpression {
-	path, ok := node.Path.(*PathExpression)
-	if !ok {
-		return nil
-	}
-
-	return path
-}
-
-// LiteralStr returns the string representation of literal value, with a boolean set to false if this is not a literal.
-func (node *Expression) LiteralStr() (string, bool) {
-	return LiteralStr(node.Path)
-}
-
-// Canonical returns the canonical form of expression node as a string.
-func (node *Expression) Canonical() string {
-	if str, ok := HelperNameStr(node.Path); ok {
-		return str
-	}
-
-	return ""
-}
-
-// HelperNameStr returns the string representation of a helper name, with a boolean set to false if this is not a valid helper name.
-//
-// helperName : path | dataName | STRING | NUMBER | BOOLEAN | UNDEFINED | NULL
-func HelperNameStr(node Node) (string, bool) {
-	// PathExpression
-	if str, ok := PathExpressionStr(node); ok {
-		return str, ok
-	}
-
-	// Literal
-	if str, ok := LiteralStr(node); ok {
-		return str, ok
-	}
-
-	return "", false
-}
-
-// PathExpressionStr returns the string representation of path expression value, with a boolean set to false if this is not a path expression.
-func PathExpressionStr(node Node) (string, bool) {
-	if path, ok := node.(*PathExpression); ok {
-		result := path.Original
-
-		// "[foo bar]"" => "foo bar"
-		if (len(result) >= 2) && (result[0] == '[') && (result[len(result)-1] == ']') {
-			result = result[1 : len(result)-1]
-		}
-
-		return result, true
-	}
-
-	return "", false
-}
-
-// LiteralStr returns the string representation of literal value, with a boolean set to false if this is not a literal.
-func LiteralStr(node Node) (string, bool) {
-	if lit, ok := node.(*StringLiteral); ok {
-		return lit.Value, true
-	}
-
-	if lit, ok := node.(*BooleanLiteral); ok {
-		return lit.Canonical(), true
-	}
-
-	if lit, ok := node.(*NumberLiteral); ok {
-		return lit.Canonical(), true
-	}
-
-	return "", false
-}
-
-//
-// SubExpression
-//
-
-// SubExpression represents a subexpression node.
-type SubExpression struct {
-	NodeType
-	Loc
-
-	Expression *Expression
-}
-
-// NewSubExpression instantiates a new subexpression node.
-func NewSubExpression(pos int, line int) *SubExpression {
-	return &SubExpression{
-		NodeType: NodeSubExpression,
-		Loc:      Loc{pos, line},
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *SubExpression) String() string {
-	return fmt.Sprintf("Sexp{Path:%s, Pos:%d}", node.Expression.Path, node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *SubExpression) Accept(visitor Visitor) interface{} {
-	return visitor.VisitSubExpression(node)
-}
-
-//
-// Path Expression
-//
-
-// PathExpression represents a path expression node.
-type PathExpression struct {
-	NodeType
-	Loc
-
-	Original string
-	Depth    int
-	Parts    []string
-	Data     bool
-	Scoped   bool
-}
-
-// NewPathExpression instantiates a new path expression node.
-func NewPathExpression(pos int, line int, data bool) *PathExpression {
-	result := &PathExpression{
-		NodeType: NodePath,
-		Loc:      Loc{pos, line},
-
-		Data: data,
-	}
-
-	if data {
-		result.Original = "@"
-	}
-
-	return result
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *PathExpression) String() string {
-	return fmt.Sprintf("Path{Original:'%s', Pos:%d}", node.Original, node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *PathExpression) Accept(visitor Visitor) interface{} {
-	return visitor.VisitPath(node)
-}
-
-// Part adds path part.
-func (node *PathExpression) Part(part string) {
-	node.Original += part
-
-	switch part {
-	case "..":
-		node.Depth++
-		node.Scoped = true
-	case ".", "this":
-		node.Scoped = true
-	default:
-		node.Parts = append(node.Parts, part)
-	}
-}
-
-// Sep adds path separator.
-func (node *PathExpression) Sep(separator string) {
-	node.Original += separator
-}
-
-// IsDataRoot returns true if path expression is @root.
-func (node *PathExpression) IsDataRoot() bool {
-	return node.Data && (node.Parts[0] == "root")
-}
-
-//
-// String Literal
-//
-
-// StringLiteral represents a string node.
-type StringLiteral struct {
-	NodeType
-	Loc
-
-	Value string
-}
-
-// NewStringLiteral instantiates a new string node.
-func NewStringLiteral(pos int, line int, val string) *StringLiteral {
-	return &StringLiteral{
-		NodeType: NodeString,
-		Loc:      Loc{pos, line},
-
-		Value: val,
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *StringLiteral) String() string {
-	return fmt.Sprintf("String{Value:'%s', Pos:%d}", node.Value, node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *StringLiteral) Accept(visitor Visitor) interface{} {
-	return visitor.VisitString(node)
-}
-
-//
-// Boolean Literal
-//
-
-// BooleanLiteral represents a boolean node.
-type BooleanLiteral struct {
-	NodeType
-	Loc
-
-	Value    bool
-	Original string
-}
-
-// NewBooleanLiteral instantiates a new boolean node.
-func NewBooleanLiteral(pos int, line int, val bool, original string) *BooleanLiteral {
-	return &BooleanLiteral{
-		NodeType: NodeBoolean,
-		Loc:      Loc{pos, line},
-
-		Value:    val,
-		Original: original,
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *BooleanLiteral) String() string {
-	return fmt.Sprintf("Boolean{Value:%s, Pos:%d}", node.Canonical(), node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *BooleanLiteral) Accept(visitor Visitor) interface{} {
-	return visitor.VisitBoolean(node)
-}
-
-// Canonical returns the canonical form of boolean node as a string (ie. "true" | "false").
-func (node *BooleanLiteral) Canonical() string {
-	if node.Value {
-		return "true"
-	}
-
-	return "false"
-}
-
-//
-// Number Literal
-//
-
-// NumberLiteral represents a number node.
-type NumberLiteral struct {
-	NodeType
-	Loc
-
-	Value    float64
-	IsInt    bool
-	Original string
-}
-
-// NewNumberLiteral instantiates a new number node.
-func NewNumberLiteral(pos int, line int, val float64, isInt bool, original string) *NumberLiteral {
-	return &NumberLiteral{
-		NodeType: NodeNumber,
-		Loc:      Loc{pos, line},
-
-		Value:    val,
-		IsInt:    isInt,
-		Original: original,
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *NumberLiteral) String() string {
-	return fmt.Sprintf("Number{Value:%s, Pos:%d}", node.Canonical(), node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *NumberLiteral) Accept(visitor Visitor) interface{} {
-	return visitor.VisitNumber(node)
-}
-
-// Canonical returns the canonical form of number node as a string (eg: "12", "-1.51").
-func (node *NumberLiteral) Canonical() string {
-	prec := -1
-	if node.IsInt {
-		prec = 0
-	}
-	return strconv.FormatFloat(node.Value, 'f', prec, 64)
-}
-
-// Number returns an integer or a float.
-func (node *NumberLiteral) Number() interface{} {
-	if node.IsInt {
-		return int(node.Value)
-	}
-
-	return node.Value
-}
-
-//
-// Hash
-//
-
-// Hash represents a hash node.
-type Hash struct {
-	NodeType
-	Loc
-
-	Pairs []*HashPair
-}
-
-// NewHash instantiates a new hash node.
-func NewHash(pos int, line int) *Hash {
-	return &Hash{
-		NodeType: NodeHash,
-		Loc:      Loc{pos, line},
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *Hash) String() string {
-	result := fmt.Sprintf("Hash{[%d", node.Loc.Pos)
-
-	for i, p := range node.Pairs {
-		if i > 0 {
-			result += ", "
-		}
-		result += p.String()
-	}
-
-	return result + fmt.Sprintf("], Pos:%d}", node.Loc.Pos)
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *Hash) Accept(visitor Visitor) interface{} {
-	return visitor.VisitHash(node)
-}
-
-//
-// HashPair
-//
-
-// HashPair represents a hash pair node.
-type HashPair struct {
-	NodeType
-	Loc
-
-	Key string
-	Val Node // Expression
-}
-
-// NewHashPair instantiates a new hash pair node.
-func NewHashPair(pos int, line int) *HashPair {
-	return &HashPair{
-		NodeType: NodeHashPair,
-		Loc:      Loc{pos, line},
-	}
-}
-
-// String returns a string representation of receiver that can be used for debugging.
-func (node *HashPair) String() string {
-	return node.Key + "=" + node.Val.String()
-}
-
-// Accept is the receiver entry point for visitors.
-func (node *HashPair) Accept(visitor Visitor) interface{} {
-	return visitor.VisitHashPair(node)
-}

+ 0 - 279
vendor/github.com/aymerick/raymond/ast/print.go

@@ -1,279 +0,0 @@
-package ast
-
-import (
-	"fmt"
-	"strings"
-)
-
-// printVisitor implements the Visitor interface to print an AST.
-type printVisitor struct {
-	buf   string
-	depth int
-
-	original bool
-	inBlock  bool
-}
-
-func newPrintVisitor() *printVisitor {
-	return &printVisitor{}
-}
-
-// Print returns a string representation of given AST, that can be used for debugging purposes.
-func Print(node Node) string {
-	visitor := newPrintVisitor()
-	node.Accept(visitor)
-	return visitor.output()
-}
-
-func (v *printVisitor) output() string {
-	return v.buf
-}
-
-func (v *printVisitor) indent() {
-	for i := 0; i < v.depth; {
-		v.buf += "  "
-		i++
-	}
-}
-
-func (v *printVisitor) str(val string) {
-	v.buf += val
-}
-
-func (v *printVisitor) nl() {
-	v.str("\n")
-}
-
-func (v *printVisitor) line(val string) {
-	v.indent()
-	v.str(val)
-	v.nl()
-}
-
-//
-// Visitor interface
-//
-
-// Statements
-
-// VisitProgram implements corresponding Visitor interface method
-func (v *printVisitor) VisitProgram(node *Program) interface{} {
-	if len(node.BlockParams) > 0 {
-		v.line("BLOCK PARAMS: [ " + strings.Join(node.BlockParams, " ") + " ]")
-	}
-
-	for _, n := range node.Body {
-		n.Accept(v)
-	}
-
-	return nil
-}
-
-// VisitMustache implements corresponding Visitor interface method
-func (v *printVisitor) VisitMustache(node *MustacheStatement) interface{} {
-	v.indent()
-	v.str("{{ ")
-
-	node.Expression.Accept(v)
-
-	v.str(" }}")
-	v.nl()
-
-	return nil
-}
-
-// VisitBlock implements corresponding Visitor interface method
-func (v *printVisitor) VisitBlock(node *BlockStatement) interface{} {
-	v.inBlock = true
-
-	v.line("BLOCK:")
-	v.depth++
-
-	node.Expression.Accept(v)
-
-	if node.Program != nil {
-		v.line("PROGRAM:")
-		v.depth++
-		node.Program.Accept(v)
-		v.depth--
-	}
-
-	if node.Inverse != nil {
-		// if node.Program != nil {
-		// 	v.depth++
-		// }
-
-		v.line("{{^}}")
-		v.depth++
-		node.Inverse.Accept(v)
-		v.depth--
-
-		// if node.Program != nil {
-		// 	v.depth--
-		// }
-	}
-
-	v.inBlock = false
-
-	return nil
-}
-
-// VisitPartial implements corresponding Visitor interface method
-func (v *printVisitor) VisitPartial(node *PartialStatement) interface{} {
-	v.indent()
-	v.str("{{> PARTIAL:")
-
-	v.original = true
-	node.Name.Accept(v)
-	v.original = false
-
-	if len(node.Params) > 0 {
-		v.str(" ")
-		node.Params[0].Accept(v)
-	}
-
-	// hash
-	if node.Hash != nil {
-		v.str(" ")
-		node.Hash.Accept(v)
-	}
-
-	v.str(" }}")
-	v.nl()
-
-	return nil
-}
-
-// VisitContent implements corresponding Visitor interface method
-func (v *printVisitor) VisitContent(node *ContentStatement) interface{} {
-	v.line("CONTENT[ '" + node.Value + "' ]")
-
-	return nil
-}
-
-// VisitComment implements corresponding Visitor interface method
-func (v *printVisitor) VisitComment(node *CommentStatement) interface{} {
-	v.line("{{! '" + node.Value + "' }}")
-
-	return nil
-}
-
-// Expressions
-
-// VisitExpression implements corresponding Visitor interface method
-func (v *printVisitor) VisitExpression(node *Expression) interface{} {
-	if v.inBlock {
-		v.indent()
-	}
-
-	// path
-	node.Path.Accept(v)
-
-	// params
-	v.str(" [")
-	for i, n := range node.Params {
-		if i > 0 {
-			v.str(", ")
-		}
-		n.Accept(v)
-	}
-	v.str("]")
-
-	// hash
-	if node.Hash != nil {
-		v.str(" ")
-		node.Hash.Accept(v)
-	}
-
-	if v.inBlock {
-		v.nl()
-	}
-
-	return nil
-}
-
-// VisitSubExpression implements corresponding Visitor interface method
-func (v *printVisitor) VisitSubExpression(node *SubExpression) interface{} {
-	node.Expression.Accept(v)
-
-	return nil
-}
-
-// VisitPath implements corresponding Visitor interface method
-func (v *printVisitor) VisitPath(node *PathExpression) interface{} {
-	if v.original {
-		v.str(node.Original)
-	} else {
-		path := strings.Join(node.Parts, "/")
-
-		result := ""
-		if node.Data {
-			result += "@"
-		}
-
-		v.str(result + "PATH:" + path)
-	}
-
-	return nil
-}
-
-// Literals
-
-// VisitString implements corresponding Visitor interface method
-func (v *printVisitor) VisitString(node *StringLiteral) interface{} {
-	if v.original {
-		v.str(node.Value)
-	} else {
-		v.str("\"" + node.Value + "\"")
-	}
-
-	return nil
-}
-
-// VisitBoolean implements corresponding Visitor interface method
-func (v *printVisitor) VisitBoolean(node *BooleanLiteral) interface{} {
-	if v.original {
-		v.str(node.Original)
-	} else {
-		v.str(fmt.Sprintf("BOOLEAN{%s}", node.Canonical()))
-	}
-
-	return nil
-}
-
-// VisitNumber implements corresponding Visitor interface method
-func (v *printVisitor) VisitNumber(node *NumberLiteral) interface{} {
-	if v.original {
-		v.str(node.Original)
-	} else {
-		v.str(fmt.Sprintf("NUMBER{%s}", node.Canonical()))
-	}
-
-	return nil
-}
-
-// Miscellaneous
-
-// VisitHash implements corresponding Visitor interface method
-func (v *printVisitor) VisitHash(node *Hash) interface{} {
-	v.str("HASH{")
-
-	for i, p := range node.Pairs {
-		if i > 0 {
-			v.str(", ")
-		}
-		p.Accept(v)
-	}
-
-	v.str("}")
-
-	return nil
-}
-
-// VisitHashPair implements corresponding Visitor interface method
-func (v *printVisitor) VisitHashPair(node *HashPair) interface{} {
-	v.str(node.Key + "=")
-	node.Val.Accept(v)
-
-	return nil
-}

+ 0 - 95
vendor/github.com/aymerick/raymond/data_frame.go

@@ -1,95 +0,0 @@
-package raymond
-
-import "reflect"
-
-// DataFrame represents a private data frame.
-//
-// Cf. private variables documentation at: http://handlebarsjs.com/block_helpers.html
-type DataFrame struct {
-	parent *DataFrame
-	data   map[string]interface{}
-}
-
-// NewDataFrame instantiates a new private data frame.
-func NewDataFrame() *DataFrame {
-	return &DataFrame{
-		data: make(map[string]interface{}),
-	}
-}
-
-// Copy instantiates a new private data frame with receiver as parent.
-func (p *DataFrame) Copy() *DataFrame {
-	result := NewDataFrame()
-
-	for k, v := range p.data {
-		result.data[k] = v
-	}
-
-	result.parent = p
-
-	return result
-}
-
-// newIterDataFrame instantiates a new private data frame with receiver as parent and with iteration data set (@index, @key, @first, @last)
-func (p *DataFrame) newIterDataFrame(length int, i int, key interface{}) *DataFrame {
-	result := p.Copy()
-
-	result.Set("index", i)
-	result.Set("key", key)
-	result.Set("first", i == 0)
-	result.Set("last", i == length-1)
-
-	return result
-}
-
-// Set sets a data value.
-func (p *DataFrame) Set(key string, val interface{}) {
-	p.data[key] = val
-}
-
-// Get gets a data value.
-func (p *DataFrame) Get(key string) interface{} {
-	return p.find([]string{key})
-}
-
-// find gets a deep data value
-//
-// @todo This is NOT consistent with the way we resolve data in template (cf. `evalDataPathExpression()`) ! FIX THAT !
-func (p *DataFrame) find(parts []string) interface{} {
-	data := p.data
-
-	for i, part := range parts {
-		val := data[part]
-		if val == nil {
-			return nil
-		}
-
-		if i == len(parts)-1 {
-			// found
-			return val
-		}
-
-		valValue := reflect.ValueOf(val)
-		if valValue.Kind() != reflect.Map {
-			// not found
-			return nil
-		}
-
-		// continue
-		data = mapStringInterface(valValue)
-	}
-
-	// not found
-	return nil
-}
-
-// mapStringInterface converts any `map` to `map[string]interface{}`
-func mapStringInterface(value reflect.Value) map[string]interface{} {
-	result := make(map[string]interface{})
-
-	for _, key := range value.MapKeys() {
-		result[strValue(key)] = value.MapIndex(key).Interface()
-	}
-
-	return result
-}

+ 0 - 65
vendor/github.com/aymerick/raymond/escape.go

@@ -1,65 +0,0 @@
-package raymond
-
-import (
-	"bytes"
-	"strings"
-)
-
-//
-// That whole file is borrowed from https://github.com/golang/go/tree/master/src/html/escape.go
-//
-// With changes:
-//    &#39 => &apos;
-//    &#34 => &quot;
-//
-// To stay in sync with JS implementation, and make mustache tests pass.
-//
-
-type writer interface {
-	WriteString(string) (int, error)
-}
-
-const escapedChars = `&'<>"`
-
-func escape(w writer, s string) error {
-	i := strings.IndexAny(s, escapedChars)
-	for i != -1 {
-		if _, err := w.WriteString(s[:i]); err != nil {
-			return err
-		}
-		var esc string
-		switch s[i] {
-		case '&':
-			esc = "&amp;"
-		case '\'':
-			esc = "&apos;"
-		case '<':
-			esc = "&lt;"
-		case '>':
-			esc = "&gt;"
-		case '"':
-			esc = "&quot;"
-		default:
-			panic("unrecognized escape character")
-		}
-		s = s[i+1:]
-		if _, err := w.WriteString(esc); err != nil {
-			return err
-		}
-		i = strings.IndexAny(s, escapedChars)
-	}
-	_, err := w.WriteString(s)
-	return err
-}
-
-// Escape escapes special HTML characters.
-//
-// It can be used by helpers that return a SafeString and that need to escape some content by themselves.
-func Escape(s string) string {
-	if strings.IndexAny(s, escapedChars) == -1 {
-		return s
-	}
-	var buf bytes.Buffer
-	escape(&buf, s)
-	return buf.String()
-}

+ 0 - 1005
vendor/github.com/aymerick/raymond/eval.go

@@ -1,1005 +0,0 @@
-package raymond
-
-import (
-	"bytes"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-
-	"github.com/aymerick/raymond/ast"
-)
-
-var (
-	// @note borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
-	errorType       = reflect.TypeOf((*error)(nil)).Elem()
-	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-
-	zero reflect.Value
-)
-
-// evalVisitor evaluates a handlebars template with context
-type evalVisitor struct {
-	tpl *Template
-
-	// contexts stack
-	ctx []reflect.Value
-
-	// current data frame (chained with parent)
-	dataFrame *DataFrame
-
-	// block parameters stack
-	blockParams []map[string]interface{}
-
-	// block statements stack
-	blocks []*ast.BlockStatement
-
-	// expressions stack
-	exprs []*ast.Expression
-
-	// memoize expressions that were function calls
-	exprFunc map[*ast.Expression]bool
-
-	// used for info on panic
-	curNode ast.Node
-}
-
-// newEvalVisitor instantiates a new evaluation visitor with given context and initial private data frame
-//
-// If privData is nil, then a default data frame is created
-func newEvalVisitor(tpl *Template, ctx interface{}, privData *DataFrame) *evalVisitor {
-	frame := privData
-	if frame == nil {
-		frame = NewDataFrame()
-	}
-
-	return &evalVisitor{
-		tpl:       tpl,
-		ctx:       []reflect.Value{reflect.ValueOf(ctx)},
-		dataFrame: frame,
-		exprFunc:  make(map[*ast.Expression]bool),
-	}
-}
-
-// at sets current node
-func (v *evalVisitor) at(node ast.Node) {
-	v.curNode = node
-}
-
-//
-// Contexts stack
-//
-
-// pushCtx pushes new context to the stack
-func (v *evalVisitor) pushCtx(ctx reflect.Value) {
-	v.ctx = append(v.ctx, ctx)
-}
-
-// popCtx pops last context from stack
-func (v *evalVisitor) popCtx() reflect.Value {
-	if len(v.ctx) == 0 {
-		return zero
-	}
-
-	var result reflect.Value
-	result, v.ctx = v.ctx[len(v.ctx)-1], v.ctx[:len(v.ctx)-1]
-
-	return result
-}
-
-// rootCtx returns root context
-func (v *evalVisitor) rootCtx() reflect.Value {
-	return v.ctx[0]
-}
-
-// curCtx returns current context
-func (v *evalVisitor) curCtx() reflect.Value {
-	return v.ancestorCtx(0)
-}
-
-// ancestorCtx returns ancestor context
-func (v *evalVisitor) ancestorCtx(depth int) reflect.Value {
-	index := len(v.ctx) - 1 - depth
-	if index < 0 {
-		return zero
-	}
-
-	return v.ctx[index]
-}
-
-//
-// Private data frame
-//
-
-// setDataFrame sets new data frame
-func (v *evalVisitor) setDataFrame(frame *DataFrame) {
-	v.dataFrame = frame
-}
-
-// popDataFrame sets back parent data frame
-func (v *evalVisitor) popDataFrame() {
-	v.dataFrame = v.dataFrame.parent
-}
-
-//
-// Block Parameters stack
-//
-
-// pushBlockParams pushes new block params to the stack
-func (v *evalVisitor) pushBlockParams(params map[string]interface{}) {
-	v.blockParams = append(v.blockParams, params)
-}
-
-// popBlockParams pops last block params from stack
-func (v *evalVisitor) popBlockParams() map[string]interface{} {
-	var result map[string]interface{}
-
-	if len(v.blockParams) == 0 {
-		return result
-	}
-
-	result, v.blockParams = v.blockParams[len(v.blockParams)-1], v.blockParams[:len(v.blockParams)-1]
-	return result
-}
-
-// blockParam iterates on stack to find given block parameter, and returns its value or nil if not found
-func (v *evalVisitor) blockParam(name string) interface{} {
-	for i := len(v.blockParams) - 1; i >= 0; i-- {
-		for k, v := range v.blockParams[i] {
-			if name == k {
-				return v
-			}
-		}
-	}
-
-	return nil
-}
-
-//
-// Blocks stack
-//
-
-// pushBlock pushes new block statement to stack
-func (v *evalVisitor) pushBlock(block *ast.BlockStatement) {
-	v.blocks = append(v.blocks, block)
-}
-
-// popBlock pops last block statement from stack
-func (v *evalVisitor) popBlock() *ast.BlockStatement {
-	if len(v.blocks) == 0 {
-		return nil
-	}
-
-	var result *ast.BlockStatement
-	result, v.blocks = v.blocks[len(v.blocks)-1], v.blocks[:len(v.blocks)-1]
-
-	return result
-}
-
-// curBlock returns current block statement
-func (v *evalVisitor) curBlock() *ast.BlockStatement {
-	if len(v.blocks) == 0 {
-		return nil
-	}
-
-	return v.blocks[len(v.blocks)-1]
-}
-
-//
-// Expressions stack
-//
-
-// pushExpr pushes new expression to stack
-func (v *evalVisitor) pushExpr(expression *ast.Expression) {
-	v.exprs = append(v.exprs, expression)
-}
-
-// popExpr pops last expression from stack
-func (v *evalVisitor) popExpr() *ast.Expression {
-	if len(v.exprs) == 0 {
-		return nil
-	}
-
-	var result *ast.Expression
-	result, v.exprs = v.exprs[len(v.exprs)-1], v.exprs[:len(v.exprs)-1]
-
-	return result
-}
-
-// curExpr returns current expression
-func (v *evalVisitor) curExpr() *ast.Expression {
-	if len(v.exprs) == 0 {
-		return nil
-	}
-
-	return v.exprs[len(v.exprs)-1]
-}
-
-//
-// Error functions
-//
-
-// errPanic panics
-func (v *evalVisitor) errPanic(err error) {
-	panic(fmt.Errorf("Evaluation error: %s\nCurrent node:\n\t%s", err, v.curNode))
-}
-
-// errorf panics with a custom message
-func (v *evalVisitor) errorf(format string, args ...interface{}) {
-	v.errPanic(fmt.Errorf(format, args...))
-}
-
-//
-// Evaluation
-//
-
-// evalProgram evaluates program with given context and returns string result
-func (v *evalVisitor) evalProgram(program *ast.Program, ctx interface{}, data *DataFrame, key interface{}) string {
-	blockParams := make(map[string]interface{})
-
-	// compute block params
-	if len(program.BlockParams) > 0 {
-		blockParams[program.BlockParams[0]] = ctx
-	}
-
-	if (len(program.BlockParams) > 1) && (key != nil) {
-		blockParams[program.BlockParams[1]] = key
-	}
-
-	// push contexts
-	if len(blockParams) > 0 {
-		v.pushBlockParams(blockParams)
-	}
-
-	ctxVal := reflect.ValueOf(ctx)
-	if ctxVal.IsValid() {
-		v.pushCtx(ctxVal)
-	}
-
-	if data != nil {
-		v.setDataFrame(data)
-	}
-
-	// evaluate program
-	result, _ := program.Accept(v).(string)
-
-	// pop contexts
-	if data != nil {
-		v.popDataFrame()
-	}
-
-	if ctxVal.IsValid() {
-		v.popCtx()
-	}
-
-	if len(blockParams) > 0 {
-		v.popBlockParams()
-	}
-
-	return result
-}
-
-// evalPath evaluates all path parts with given context
-func (v *evalVisitor) evalPath(ctx reflect.Value, parts []string, exprRoot bool) (reflect.Value, bool) {
-	partResolved := false
-
-	for i := 0; i < len(parts); i++ {
-		part := parts[i]
-
-		// "[foo bar]"" => "foo bar"
-		if (len(part) >= 2) && (part[0] == '[') && (part[len(part)-1] == ']') {
-			part = part[1 : len(part)-1]
-		}
-
-		ctx = v.evalField(ctx, part, exprRoot)
-		if !ctx.IsValid() {
-			break
-		}
-
-		// we resolved at least one part of path
-		partResolved = true
-	}
-
-	return ctx, partResolved
-}
-
-// evalField evaluates field with given context
-func (v *evalVisitor) evalField(ctx reflect.Value, fieldName string, exprRoot bool) reflect.Value {
-	result := zero
-
-	ctx, _ = indirect(ctx)
-	if !ctx.IsValid() {
-		return result
-	}
-
-	// check if this is a method call
-	result, isMeth := v.evalMethod(ctx, fieldName, exprRoot)
-	if !isMeth {
-		switch ctx.Kind() {
-		case reflect.Struct:
-			// example: firstName => FirstName
-			expFieldName := strings.Title(fieldName)
-
-			// check if struct have this field and that it is exported
-			if tField, ok := ctx.Type().FieldByName(expFieldName); ok && (tField.PkgPath == "") {
-				// struct field
-				result = ctx.FieldByIndex(tField.Index)
-				break
-			}
-
-			// attempts to find template variable name as a struct tag
-			result = v.evalStructTag(ctx, fieldName)
-		case reflect.Map:
-			nameVal := reflect.ValueOf(fieldName)
-			if nameVal.Type().AssignableTo(ctx.Type().Key()) {
-				// map key
-				result = ctx.MapIndex(nameVal)
-			}
-		case reflect.Array, reflect.Slice:
-			if i, err := strconv.Atoi(fieldName); (err == nil) && (i < ctx.Len()) {
-				result = ctx.Index(i)
-			}
-		}
-	}
-
-	// check if result is a function
-	result, _ = indirect(result)
-	if result.Kind() == reflect.Func {
-		result = v.evalFieldFunc(fieldName, result, exprRoot)
-	}
-
-	return result
-}
-
-// evalMethod tries to evaluate given method name, and returns a boolean to indicate if this was a method call
-func (v *evalVisitor) evalMethod(ctx reflect.Value, name string, exprRoot bool) (reflect.Value, bool) {
-	if ctx.Kind() != reflect.Interface && ctx.CanAddr() {
-		ctx = ctx.Addr()
-	}
-
-	method := ctx.MethodByName(name)
-	if !method.IsValid() {
-		// example: subject() => Subject()
-		method = ctx.MethodByName(strings.Title(name))
-	}
-
-	if !method.IsValid() {
-		return zero, false
-	}
-
-	return v.evalFieldFunc(name, method, exprRoot), true
-}
-
-// evalFieldFunc evaluates given function
-func (v *evalVisitor) evalFieldFunc(name string, funcVal reflect.Value, exprRoot bool) reflect.Value {
-	ensureValidHelper(name, funcVal)
-
-	var options *Options
-	if exprRoot {
-		// create function arg with all params/hash
-		expr := v.curExpr()
-		options = v.helperOptions(expr)
-
-		// ok, that expression was a function call
-		v.exprFunc[expr] = true
-	} else {
-		// we are not at root of expression, so we are a parameter... and we don't like
-		// infinite loops caused by trying to parse ourself forever
-		options = newEmptyOptions(v)
-	}
-
-	return v.callFunc(name, funcVal, options)
-}
-
-// evalStructTag checks for the existence of a struct tag containing the
-// name of the variable in the template. This allows for a template variable to
-// be separated from the field in the struct.
-func (v *evalVisitor) evalStructTag(ctx reflect.Value, name string) reflect.Value {
-	val := reflect.ValueOf(ctx.Interface())
-
-	for i := 0; i < val.NumField(); i++ {
-		field := val.Type().Field(i)
-		tag := field.Tag.Get("handlebars")
-		if tag == name {
-			return val.Field(i)
-		}
-	}
-
-	return zero
-}
-
-// findBlockParam returns node's block parameter
-func (v *evalVisitor) findBlockParam(node *ast.PathExpression) (string, interface{}) {
-	if len(node.Parts) > 0 {
-		name := node.Parts[0]
-		if value := v.blockParam(name); value != nil {
-			return name, value
-		}
-	}
-
-	return "", nil
-}
-
-// evalPathExpression evaluates a path expression
-func (v *evalVisitor) evalPathExpression(node *ast.PathExpression, exprRoot bool) interface{} {
-	var result interface{}
-
-	if name, value := v.findBlockParam(node); value != nil {
-		// block parameter value
-
-		// We push a new context so we can evaluate the path expression (note: this may be a bad idea).
-		//
-		// Example:
-		//   {{#foo as |bar|}}
-		//     {{bar.baz}}
-		//   {{/foo}}
-		//
-		// With data:
-		//   {"foo": {"baz": "bat"}}
-		newCtx := map[string]interface{}{name: value}
-
-		v.pushCtx(reflect.ValueOf(newCtx))
-		result = v.evalCtxPathExpression(node, exprRoot)
-		v.popCtx()
-	} else {
-		ctxTried := false
-
-		if node.IsDataRoot() {
-			// context path
-			result = v.evalCtxPathExpression(node, exprRoot)
-
-			ctxTried = true
-		}
-
-		if (result == nil) && node.Data {
-			// if it is @root, then we tried to evaluate with root context but nothing was found
-			// so let's try with private data
-
-			// private data
-			result = v.evalDataPathExpression(node, exprRoot)
-		}
-
-		if (result == nil) && !ctxTried {
-			// context path
-			result = v.evalCtxPathExpression(node, exprRoot)
-		}
-	}
-
-	return result
-}
-
-// evalDataPathExpression evaluates a private data path expression
-func (v *evalVisitor) evalDataPathExpression(node *ast.PathExpression, exprRoot bool) interface{} {
-	// find data frame
-	frame := v.dataFrame
-	for i := node.Depth; i > 0; i-- {
-		if frame.parent == nil {
-			return nil
-		}
-		frame = frame.parent
-	}
-
-	// resolve data
-	// @note Can be changed to v.evalCtx() as context can't be an array
-	result, _ := v.evalCtxPath(reflect.ValueOf(frame.data), node.Parts, exprRoot)
-	return result
-}
-
-// evalCtxPathExpression evaluates a context path expression
-func (v *evalVisitor) evalCtxPathExpression(node *ast.PathExpression, exprRoot bool) interface{} {
-	v.at(node)
-
-	if node.IsDataRoot() {
-		// `@root` - remove the first part
-		parts := node.Parts[1:len(node.Parts)]
-
-		result, _ := v.evalCtxPath(v.rootCtx(), parts, exprRoot)
-		return result
-	}
-
-	return v.evalDepthPath(node.Depth, node.Parts, exprRoot)
-}
-
-// evalDepthPath iterates on contexts, starting at given depth, until there is one that resolve given path parts
-func (v *evalVisitor) evalDepthPath(depth int, parts []string, exprRoot bool) interface{} {
-	var result interface{}
-	partResolved := false
-
-	ctx := v.ancestorCtx(depth)
-
-	for (result == nil) && ctx.IsValid() && (depth <= len(v.ctx) && !partResolved) {
-		// try with context
-		result, partResolved = v.evalCtxPath(ctx, parts, exprRoot)
-
-		// As soon as we find the first part of a path, we must not try to resolve with parent context if result is finally `nil`
-		// Reference: "Dotted Names - Context Precedence" mustache test
-		if !partResolved && (result == nil) {
-			// try with previous context
-			depth++
-			ctx = v.ancestorCtx(depth)
-		}
-	}
-
-	return result
-}
-
-// evalCtxPath evaluates path with given context
-func (v *evalVisitor) evalCtxPath(ctx reflect.Value, parts []string, exprRoot bool) (interface{}, bool) {
-	var result interface{}
-	partResolved := false
-
-	switch ctx.Kind() {
-	case reflect.Array, reflect.Slice:
-		// Array context
-		var results []interface{}
-
-		for i := 0; i < ctx.Len(); i++ {
-			value, _ := v.evalPath(ctx.Index(i), parts, exprRoot)
-			if value.IsValid() {
-				results = append(results, value.Interface())
-			}
-		}
-
-		result = results
-	default:
-		// NOT array context
-		var value reflect.Value
-
-		value, partResolved = v.evalPath(ctx, parts, exprRoot)
-		if value.IsValid() {
-			result = value.Interface()
-		}
-	}
-
-	return result, partResolved
-}
-
-//
-// Helpers
-//
-
-// isHelperCall returns true if given expression is a helper call
-func (v *evalVisitor) isHelperCall(node *ast.Expression) bool {
-	if helperName := node.HelperName(); helperName != "" {
-		return v.findHelper(helperName) != zero
-	}
-	return false
-}
-
-// findHelper finds given helper
-func (v *evalVisitor) findHelper(name string) reflect.Value {
-	// check template helpers
-	if h := v.tpl.findHelper(name); h != zero {
-		return h
-	}
-
-	// check global helpers
-	return findHelper(name)
-}
-
-// callFunc calls function with given options
-func (v *evalVisitor) callFunc(name string, funcVal reflect.Value, options *Options) reflect.Value {
-	params := options.Params()
-
-	funcType := funcVal.Type()
-
-	// @todo Is there a better way to do that ?
-	strType := reflect.TypeOf("")
-	boolType := reflect.TypeOf(true)
-
-	// check parameters number
-	addOptions := false
-	numIn := funcType.NumIn()
-
-	if numIn == len(params)+1 {
-		lastArgType := funcType.In(numIn - 1)
-		if reflect.TypeOf(options).AssignableTo(lastArgType) {
-			addOptions = true
-		}
-	}
-
-	if !addOptions && (len(params) != numIn) {
-		v.errorf("Helper '%s' called with wrong number of arguments, needed %d but got %d", name, numIn, len(params))
-	}
-
-	// check and collect arguments
-	args := make([]reflect.Value, numIn)
-	for i, param := range params {
-		arg := reflect.ValueOf(param)
-		argType := funcType.In(i)
-
-		if !arg.IsValid() {
-			if canBeNil(argType) {
-				arg = reflect.Zero(argType)
-			} else if argType.Kind() == reflect.String {
-				arg = reflect.ValueOf("")
-			} else {
-				// @todo Maybe we can panic on that
-				return reflect.Zero(strType)
-			}
-		}
-
-		if !arg.Type().AssignableTo(argType) {
-			if strType.AssignableTo(argType) {
-				// convert parameter to string
-				arg = reflect.ValueOf(strValue(arg))
-			} else if boolType.AssignableTo(argType) {
-				// convert parameter to bool
-				val, _ := isTrueValue(arg)
-				arg = reflect.ValueOf(val)
-			} else {
-				v.errorf("Helper %s called with argument %d with type %s but it should be %s", name, i, arg.Type(), argType)
-			}
-		}
-
-		args[i] = arg
-	}
-
-	if addOptions {
-		args[numIn-1] = reflect.ValueOf(options)
-	}
-
-	result := funcVal.Call(args)
-
-	return result[0]
-}
-
-// callHelper invokes the helper function for the given expression node
-func (v *evalVisitor) callHelper(name string, helper reflect.Value, node *ast.Expression) interface{} {
-	result := v.callFunc(name, helper, v.helperOptions(node))
-	if !result.IsValid() {
-		return nil
-	}
-
-	// @todo We maybe want to ensure here that helper returned a string or a SafeString
-	return result.Interface()
-}
-
-// helperOptions computes helper options argument from an expression
-func (v *evalVisitor) helperOptions(node *ast.Expression) *Options {
-	var params []interface{}
-	var hash map[string]interface{}
-
-	for _, paramNode := range node.Params {
-		param := paramNode.Accept(v)
-		params = append(params, param)
-	}
-
-	if node.Hash != nil {
-		hash, _ = node.Hash.Accept(v).(map[string]interface{})
-	}
-
-	return newOptions(v, params, hash)
-}
-
-//
-// Partials
-//
-
-// findPartial finds given partial
-func (v *evalVisitor) findPartial(name string) *partial {
-	// check template partials
-	if p := v.tpl.findPartial(name); p != nil {
-		return p
-	}
-
-	// check global partials
-	return findPartial(name)
-}
-
-// partialContext computes partial context
-func (v *evalVisitor) partialContext(node *ast.PartialStatement) reflect.Value {
-	if nb := len(node.Params); nb > 1 {
-		v.errorf("Unsupported number of partial arguments: %d", nb)
-	}
-
-	if (len(node.Params) > 0) && (node.Hash != nil) {
-		v.errorf("Passing both context and named parameters to a partial is not allowed")
-	}
-
-	if len(node.Params) == 1 {
-		return reflect.ValueOf(node.Params[0].Accept(v))
-	}
-
-	if node.Hash != nil {
-		hash, _ := node.Hash.Accept(v).(map[string]interface{})
-		return reflect.ValueOf(hash)
-	}
-
-	return zero
-}
-
-// evalPartial evaluates a partial
-func (v *evalVisitor) evalPartial(p *partial, node *ast.PartialStatement) string {
-	// get partial template
-	partialTpl, err := p.template()
-	if err != nil {
-		v.errPanic(err)
-	}
-
-	// push partial context
-	ctx := v.partialContext(node)
-	if ctx.IsValid() {
-		v.pushCtx(ctx)
-	}
-
-	// evaluate partial template
-	result, _ := partialTpl.program.Accept(v).(string)
-
-	// indent partial
-	result = indentLines(result, node.Indent)
-
-	if ctx.IsValid() {
-		v.popCtx()
-	}
-
-	return result
-}
-
-// indentLines indents all lines of given string
-func indentLines(str string, indent string) string {
-	if indent == "" {
-		return str
-	}
-
-	var indented []string
-
-	lines := strings.Split(str, "\n")
-	for i, line := range lines {
-		if (i == (len(lines) - 1)) && (line == "") {
-			// input string ends with a new line
-			indented = append(indented, line)
-		} else {
-			indented = append(indented, indent+line)
-		}
-	}
-
-	return strings.Join(indented, "\n")
-}
-
-//
-// Functions
-//
-
-// wasFuncCall returns true if given expression was a function call
-func (v *evalVisitor) wasFuncCall(node *ast.Expression) bool {
-	// check if expression was tagged as a function call
-	return v.exprFunc[node]
-}
-
-//
-// Visitor interface
-//
-
-// Statements
-
-// VisitProgram implements corresponding Visitor interface method
-func (v *evalVisitor) VisitProgram(node *ast.Program) interface{} {
-	v.at(node)
-
-	buf := new(bytes.Buffer)
-
-	for _, n := range node.Body {
-		if str := Str(n.Accept(v)); str != "" {
-			if _, err := buf.Write([]byte(str)); err != nil {
-				v.errPanic(err)
-			}
-		}
-	}
-
-	return buf.String()
-}
-
-// VisitMustache implements corresponding Visitor interface method
-func (v *evalVisitor) VisitMustache(node *ast.MustacheStatement) interface{} {
-	v.at(node)
-
-	// evaluate expression
-	expr := node.Expression.Accept(v)
-
-	// check if this is a safe string
-	isSafe := isSafeString(expr)
-
-	// get string value
-	str := Str(expr)
-	if !isSafe && !node.Unescaped {
-		// escape html
-		str = Escape(str)
-	}
-
-	return str
-}
-
-// VisitBlock implements corresponding Visitor interface method
-func (v *evalVisitor) VisitBlock(node *ast.BlockStatement) interface{} {
-	v.at(node)
-
-	v.pushBlock(node)
-
-	var result interface{}
-
-	// evaluate expression
-	expr := node.Expression.Accept(v)
-
-	if v.isHelperCall(node.Expression) || v.wasFuncCall(node.Expression) {
-		// it is the responsibility of the helper/function to evaluate block
-		result = expr
-	} else {
-		val := reflect.ValueOf(expr)
-
-		truth, _ := isTrueValue(val)
-		if truth {
-			if node.Program != nil {
-				switch val.Kind() {
-				case reflect.Array, reflect.Slice:
-					concat := ""
-
-					// Array context
-					for i := 0; i < val.Len(); i++ {
-						// Computes new private data frame
-						frame := v.dataFrame.newIterDataFrame(val.Len(), i, nil)
-
-						// Evaluate program
-						concat += v.evalProgram(node.Program, val.Index(i).Interface(), frame, i)
-					}
-
-					result = concat
-				default:
-					// NOT array
-					result = v.evalProgram(node.Program, expr, nil, nil)
-				}
-			}
-		} else if node.Inverse != nil {
-			result, _ = node.Inverse.Accept(v).(string)
-		}
-	}
-
-	v.popBlock()
-
-	return result
-}
-
-// VisitPartial implements corresponding Visitor interface method
-func (v *evalVisitor) VisitPartial(node *ast.PartialStatement) interface{} {
-	v.at(node)
-
-	// partialName: helperName | sexpr
-	name, ok := ast.HelperNameStr(node.Name)
-	if !ok {
-		if subExpr, ok := node.Name.(*ast.SubExpression); ok {
-			name, _ = subExpr.Accept(v).(string)
-		}
-	}
-
-	if name == "" {
-		v.errorf("Unexpected partial name: %q", node.Name)
-	}
-
-	partial := v.findPartial(name)
-	if partial == nil {
-		v.errorf("Partial not found: %s", name)
-	}
-
-	return v.evalPartial(partial, node)
-}
-
-// VisitContent implements corresponding Visitor interface method
-func (v *evalVisitor) VisitContent(node *ast.ContentStatement) interface{} {
-	v.at(node)
-
-	// write content as is
-	return node.Value
-}
-
-// VisitComment implements corresponding Visitor interface method
-func (v *evalVisitor) VisitComment(node *ast.CommentStatement) interface{} {
-	v.at(node)
-
-	// ignore comments
-	return ""
-}
-
-// Expressions
-
-// VisitExpression implements corresponding Visitor interface method
-func (v *evalVisitor) VisitExpression(node *ast.Expression) interface{} {
-	v.at(node)
-
-	var result interface{}
-	done := false
-
-	v.pushExpr(node)
-
-	// helper call
-	if helperName := node.HelperName(); helperName != "" {
-		if helper := v.findHelper(helperName); helper != zero {
-			result = v.callHelper(helperName, helper, node)
-			done = true
-		}
-	}
-
-	if !done {
-		// literal
-		if literal, ok := node.LiteralStr(); ok {
-			if val := v.evalField(v.curCtx(), literal, true); val.IsValid() {
-				result = val.Interface()
-				done = true
-			}
-		}
-	}
-
-	if !done {
-		// field path
-		if path := node.FieldPath(); path != nil {
-			// @todo Find a cleaner way ! Don't break the pattern !
-			// this is an exception to visitor pattern, because we need to pass the info
-			// that this path is at root of current expression
-			if val := v.evalPathExpression(path, true); val != nil {
-				result = val
-			}
-		}
-	}
-
-	v.popExpr()
-
-	return result
-}
-
-// VisitSubExpression implements corresponding Visitor interface method
-func (v *evalVisitor) VisitSubExpression(node *ast.SubExpression) interface{} {
-	v.at(node)
-
-	return node.Expression.Accept(v)
-}
-
-// VisitPath implements corresponding Visitor interface method
-func (v *evalVisitor) VisitPath(node *ast.PathExpression) interface{} {
-	return v.evalPathExpression(node, false)
-}
-
-// Literals
-
-// VisitString implements corresponding Visitor interface method
-func (v *evalVisitor) VisitString(node *ast.StringLiteral) interface{} {
-	v.at(node)
-
-	return node.Value
-}
-
-// VisitBoolean implements corresponding Visitor interface method
-func (v *evalVisitor) VisitBoolean(node *ast.BooleanLiteral) interface{} {
-	v.at(node)
-
-	return node.Value
-}
-
-// VisitNumber implements corresponding Visitor interface method
-func (v *evalVisitor) VisitNumber(node *ast.NumberLiteral) interface{} {
-	v.at(node)
-
-	return node.Number()
-}
-
-// Miscellaneous
-
-// VisitHash implements corresponding Visitor interface method
-func (v *evalVisitor) VisitHash(node *ast.Hash) interface{} {
-	v.at(node)
-
-	result := make(map[string]interface{})
-
-	for _, pair := range node.Pairs {
-		if value := pair.Accept(v); value != nil {
-			result[pair.Key] = value
-		}
-	}
-
-	return result
-}
-
-// VisitHashPair implements corresponding Visitor interface method
-func (v *evalVisitor) VisitHashPair(node *ast.HashPair) interface{} {
-	v.at(node)
-
-	return node.Val.Accept(v)
-}
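
The visitor methods above form raymond's internal evaluation engine; templates are normally rendered through the package's public entry point rather than by driving the visitor directly. A minimal end-to-end sketch, assuming the top-level raymond.Render helper (defined in template.go, outside this hunk) and an illustrative template and context:

package main

import (
	"fmt"

	"github.com/aymerick/raymond"
)

func main() {
	source := "<h1>{{title}}</h1><p>{{body.content}}</p>"

	ctx := map[string]interface{}{
		"title": "My New Post",
		"body":  map[string]string{"content": "This is my first post!"},
	}

	// Render parses the source into an AST, then walks it with the
	// evalVisitor shown above (VisitProgram, VisitMustache, ...).
	result, err := raymond.Render(source, ctx)
	if err != nil {
		panic(err)
	}

	fmt.Println(result)
	// <h1>My New Post</h1><p>This is my first post!</p>
}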

+ 0 - 398
vendor/github.com/aymerick/raymond/helper.go

@@ -1,398 +0,0 @@
-package raymond
-
-import (
-	"fmt"
-	"log"
-	"reflect"
-	"sync"
-)
-
-// Options represents the options argument provided to helpers and context functions.
-type Options struct {
-	// evaluation visitor
-	eval *evalVisitor
-
-	// params
-	params []interface{}
-	hash   map[string]interface{}
-}
-
-// helpers stores all globally registered helpers
-var helpers = make(map[string]reflect.Value)
-
-// protects global helpers
-var helpersMutex sync.RWMutex
-
-func init() {
-	// register builtin helpers
-	RegisterHelper("if", ifHelper)
-	RegisterHelper("unless", unlessHelper)
-	RegisterHelper("with", withHelper)
-	RegisterHelper("each", eachHelper)
-	RegisterHelper("log", logHelper)
-	RegisterHelper("lookup", lookupHelper)
-	RegisterHelper("equal", equalHelper)
-}
-
-// RegisterHelper registers a global helper. That helper will be available to all templates.
-func RegisterHelper(name string, helper interface{}) {
-	helpersMutex.Lock()
-	defer helpersMutex.Unlock()
-
-	if helpers[name] != zero {
-		panic(fmt.Errorf("Helper already registered: %s", name))
-	}
-
-	val := reflect.ValueOf(helper)
-	ensureValidHelper(name, val)
-
-	helpers[name] = val
-}
-
-// RegisterHelpers registers several global helpers. Those helpers will be available to all templates.
-func RegisterHelpers(helpers map[string]interface{}) {
-	for name, helper := range helpers {
-		RegisterHelper(name, helper)
-	}
-}
-
-// RemoveHelper unregisters a global helper
-func RemoveHelper(name string) {
-	helpersMutex.Lock()
-	defer helpersMutex.Unlock()
-
-	delete(helpers, name)
-}
-
-// RemoveAllHelpers unregisters all global helpers
-func RemoveAllHelpers() {
-	helpersMutex.Lock()
-	defer helpersMutex.Unlock()
-
-	helpers = make(map[string]reflect.Value)
-}
-
-// ensureValidHelper panics if given helper is not valid
-func ensureValidHelper(name string, funcValue reflect.Value) {
-	if funcValue.Kind() != reflect.Func {
-		panic(fmt.Errorf("Helper must be a function: %s", name))
-	}
-
-	funcType := funcValue.Type()
-
-	if funcType.NumOut() != 1 {
-		panic(fmt.Errorf("Helper function must return a string or a SafeString: %s", name))
-	}
-
-	// @todo Check if first returned value is a string, SafeString or interface{} ?
-}
-
-// findHelper finds a globally registered helper
-func findHelper(name string) reflect.Value {
-	helpersMutex.RLock()
-	defer helpersMutex.RUnlock()
-
-	return helpers[name]
-}
-
-// newOptions instantiates a new Options
-func newOptions(eval *evalVisitor, params []interface{}, hash map[string]interface{}) *Options {
-	return &Options{
-		eval:   eval,
-		params: params,
-		hash:   hash,
-	}
-}
-
-// newEmptyOptions instantiates a new empty Options
-func newEmptyOptions(eval *evalVisitor) *Options {
-	return &Options{
-		eval: eval,
-		hash: make(map[string]interface{}),
-	}
-}
-
-//
-// Context Values
-//
-
-// Value returns field value from current context.
-func (options *Options) Value(name string) interface{} {
-	value := options.eval.evalField(options.eval.curCtx(), name, false)
-	if !value.IsValid() {
-		return nil
-	}
-
-	return value.Interface()
-}
-
-// ValueStr returns string representation of field value from current context.
-func (options *Options) ValueStr(name string) string {
-	return Str(options.Value(name))
-}
-
-// Ctx returns current evaluation context.
-func (options *Options) Ctx() interface{} {
-	return options.eval.curCtx().Interface()
-}
-
-//
-// Hash Arguments
-//
-
-// HashProp returns hash property.
-func (options *Options) HashProp(name string) interface{} {
-	return options.hash[name]
-}
-
-// HashStr returns string representation of hash property.
-func (options *Options) HashStr(name string) string {
-	return Str(options.hash[name])
-}
-
-// Hash returns entire hash.
-func (options *Options) Hash() map[string]interface{} {
-	return options.hash
-}
-
-//
-// Parameters
-//
-
-// Param returns parameter at given position.
-func (options *Options) Param(pos int) interface{} {
-	if len(options.params) > pos {
-		return options.params[pos]
-	}
-
-	return nil
-}
-
-// ParamStr returns string representation of parameter at given position.
-func (options *Options) ParamStr(pos int) string {
-	return Str(options.Param(pos))
-}
-
-// Params returns all parameters.
-func (options *Options) Params() []interface{} {
-	return options.params
-}
-
-//
-// Private data
-//
-
-// Data returns private data value.
-func (options *Options) Data(name string) interface{} {
-	return options.eval.dataFrame.Get(name)
-}
-
-// DataStr returns string representation of private data value.
-func (options *Options) DataStr(name string) string {
-	return Str(options.eval.dataFrame.Get(name))
-}
-
-// DataFrame returns current private data frame.
-func (options *Options) DataFrame() *DataFrame {
-	return options.eval.dataFrame
-}
-
-// NewDataFrame instantiates a new data frame that is a copy of current evaluation data frame.
-//
-// Parent of returned data frame is set to current evaluation data frame.
-func (options *Options) NewDataFrame() *DataFrame {
-	return options.eval.dataFrame.Copy()
-}
-
-// newIterDataFrame instantiates a new data frame and sets iteration-specific vars
-func (options *Options) newIterDataFrame(length int, i int, key interface{}) *DataFrame {
-	return options.eval.dataFrame.newIterDataFrame(length, i, key)
-}
-
-//
-// Evaluation
-//
-
-// evalBlock evaluates block with given context, private data and iteration key
-func (options *Options) evalBlock(ctx interface{}, data *DataFrame, key interface{}) string {
-	result := ""
-
-	if block := options.eval.curBlock(); (block != nil) && (block.Program != nil) {
-		result = options.eval.evalProgram(block.Program, ctx, data, key)
-	}
-
-	return result
-}
-
-// Fn evaluates block with current evaluation context.
-func (options *Options) Fn() string {
-	return options.evalBlock(nil, nil, nil)
-}
-
-// FnCtxData evaluates block with given context and private data frame.
-func (options *Options) FnCtxData(ctx interface{}, data *DataFrame) string {
-	return options.evalBlock(ctx, data, nil)
-}
-
-// FnWith evaluates block with given context.
-func (options *Options) FnWith(ctx interface{}) string {
-	return options.evalBlock(ctx, nil, nil)
-}
-
-// FnData evaluates block with given private data frame.
-func (options *Options) FnData(data *DataFrame) string {
-	return options.evalBlock(nil, data, nil)
-}
-
-// Inverse evaluates "else block".
-func (options *Options) Inverse() string {
-	result := ""
-	if block := options.eval.curBlock(); (block != nil) && (block.Inverse != nil) {
-		result, _ = block.Inverse.Accept(options.eval).(string)
-	}
-
-	return result
-}
-
-// Eval evaluates field for given context.
-func (options *Options) Eval(ctx interface{}, field string) interface{} {
-	if ctx == nil {
-		return nil
-	}
-
-	if field == "" {
-		return nil
-	}
-
-	val := options.eval.evalField(reflect.ValueOf(ctx), field, false)
-	if !val.IsValid() {
-		return nil
-	}
-
-	return val.Interface()
-}
-
-//
-// Misc
-//
-
-// isIncludableZero returns true if 'includeZero' option is set and first param is the number 0
-func (options *Options) isIncludableZero() bool {
-	b, ok := options.HashProp("includeZero").(bool)
-	if ok && b {
-		nb, ok := options.Param(0).(int)
-		if ok && nb == 0 {
-			return true
-		}
-	}
-
-	return false
-}
-
-//
-// Builtin helpers
-//
-
-// #if block helper
-func ifHelper(conditional interface{}, options *Options) interface{} {
-	if options.isIncludableZero() || IsTrue(conditional) {
-		return options.Fn()
-	}
-
-	return options.Inverse()
-}
-
-// #unless block helper
-func unlessHelper(conditional interface{}, options *Options) interface{} {
-	if options.isIncludableZero() || IsTrue(conditional) {
-		return options.Inverse()
-	}
-
-	return options.Fn()
-}
-
-// #with block helper
-func withHelper(context interface{}, options *Options) interface{} {
-	if IsTrue(context) {
-		return options.FnWith(context)
-	}
-
-	return options.Inverse()
-}
-
-// #each block helper
-func eachHelper(context interface{}, options *Options) interface{} {
-	if !IsTrue(context) {
-		return options.Inverse()
-	}
-
-	result := ""
-
-	val := reflect.ValueOf(context)
-	switch val.Kind() {
-	case reflect.Array, reflect.Slice:
-		for i := 0; i < val.Len(); i++ {
-			// computes private data
-			data := options.newIterDataFrame(val.Len(), i, nil)
-
-			// evaluates block
-			result += options.evalBlock(val.Index(i).Interface(), data, i)
-		}
-	case reflect.Map:
-		// note: a go hash is not ordered, so result may vary, this behaviour differs from the JS implementation
-		keys := val.MapKeys()
-		for i := 0; i < len(keys); i++ {
-			key := keys[i].Interface()
-			ctx := val.MapIndex(keys[i]).Interface()
-
-			// computes private data
-			data := options.newIterDataFrame(len(keys), i, key)
-
-			// evaluates block
-			result += options.evalBlock(ctx, data, key)
-		}
-	case reflect.Struct:
-		var exportedFields []int
-
-		// collect exported fields only
-		for i := 0; i < val.NumField(); i++ {
-			if tField := val.Type().Field(i); tField.PkgPath == "" {
-				exportedFields = append(exportedFields, i)
-			}
-		}
-
-		for i, fieldIndex := range exportedFields {
-			key := val.Type().Field(fieldIndex).Name
-			ctx := val.Field(fieldIndex).Interface()
-
-			// computes private data
-			data := options.newIterDataFrame(len(exportedFields), i, key)
-
-			// evaluates block
-			result += options.evalBlock(ctx, data, key)
-		}
-	}
-
-	return result
-}
-
-// #log helper
-func logHelper(message string) interface{} {
-	log.Print(message)
-	return ""
-}
-
-// #lookup helper
-func lookupHelper(obj interface{}, field string, options *Options) interface{} {
-	return Str(options.Eval(obj, field))
-}
-
-// #equal helper
-// Ref: https://github.com/aymerick/raymond/issues/7
-func equalHelper(a interface{}, b interface{}, options *Options) interface{} {
-	if Str(a) == Str(b) {
-		return options.Fn()
-	}
-
-	return ""
-}
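
A usage sketch of the helper API removed above: RegisterHelper accepts any function, optionally taking a trailing *Options argument that exposes the block body (Fn), the {{else}} block (Inverse), positional params and hash arguments. The helper name, template and data below are illustrative only, and the sketch again assumes the top-level raymond.Render entry point:

package main

import (
	"fmt"

	"github.com/aymerick/raymond"
)

func main() {
	// A custom block helper: the inverse of the builtin `equal` helper above.
	raymond.RegisterHelper("notEqual", func(a, b interface{}, options *raymond.Options) interface{} {
		if raymond.Str(a) != raymond.Str(b) {
			return options.Fn() // render the block body
		}
		return options.Inverse() // render the {{else}} block, if any
	})

	tpl := `{{#notEqual status "ok"}}ALERT: {{status}}{{else}}all good{{/notEqual}}`

	out, err := raymond.Render(tpl, map[string]string{"status": "degraded"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // ALERT: degraded
}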

+ 0 - 639
vendor/github.com/aymerick/raymond/lexer/lexer.go

@@ -1,639 +0,0 @@
-// Package lexer provides a handlebars tokenizer.
-package lexer
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-// References:
-//   - https://github.com/wycats/handlebars.js/blob/master/src/handlebars.l
-//   - https://github.com/golang/go/blob/master/src/text/template/parse/lex.go
-
-const (
-	// Mustaches detection
-	escapedEscapedOpenMustache  = "\\\\{{"
-	escapedOpenMustache         = "\\{{"
-	openMustache                = "{{"
-	closeMustache               = "}}"
-	closeStripMustache          = "~}}"
-	closeUnescapedStripMustache = "}~}}"
-)
-
-const eof = -1
-
-// lexFunc represents a function that returns the next lexer function.
-type lexFunc func(*Lexer) lexFunc
-
-// Lexer is a lexical analyzer.
-type Lexer struct {
-	input    string     // input to scan
-	name     string     // lexer name, used for testing purpose
-	tokens   chan Token // channel of scanned tokens
-	nextFunc lexFunc    // the next function to execute
-
-	pos   int // current byte position in input string
-	line  int // current line position in input string
-	width int // size of last rune scanned from input string
-	start int // start position of the token we are scanning
-
-	// the shameful contextual properties needed because `nextFunc` is not enough
-	closeComment *regexp.Regexp // regexp to scan close of current comment
-	rawBlock     bool           // are we parsing a raw block content ?
-}
-
-var (
-	lookheadChars        = `[\s` + regexp.QuoteMeta("=~}/)|") + `]`
-	literalLookheadChars = `[\s` + regexp.QuoteMeta("~})") + `]`
-
-	// characters not allowed in an identifier
-	unallowedIDChars = " \n\t!\"#%&'()*+,./;<=>@[\\]^`{|}~"
-
-	// regular expressions
-	rID                  = regexp.MustCompile(`^[^` + regexp.QuoteMeta(unallowedIDChars) + `]+`)
-	rDotID               = regexp.MustCompile(`^\.` + lookheadChars)
-	rTrue                = regexp.MustCompile(`^true` + literalLookheadChars)
-	rFalse               = regexp.MustCompile(`^false` + literalLookheadChars)
-	rOpenRaw             = regexp.MustCompile(`^\{\{\{\{`)
-	rCloseRaw            = regexp.MustCompile(`^\}\}\}\}`)
-	rOpenEndRaw          = regexp.MustCompile(`^\{\{\{\{/`)
-	rOpenEndRawLookAhead = regexp.MustCompile(`\{\{\{\{/`)
-	rOpenUnescaped       = regexp.MustCompile(`^\{\{~?\{`)
-	rCloseUnescaped      = regexp.MustCompile(`^\}~?\}\}`)
-	rOpenBlock           = regexp.MustCompile(`^\{\{~?#`)
-	rOpenEndBlock        = regexp.MustCompile(`^\{\{~?/`)
-	rOpenPartial         = regexp.MustCompile(`^\{\{~?>`)
-	// {{^}} or {{else}}
-	rInverse          = regexp.MustCompile(`^(\{\{~?\^\s*~?\}\}|\{\{~?\s*else\s*~?\}\})`)
-	rOpenInverse      = regexp.MustCompile(`^\{\{~?\^`)
-	rOpenInverseChain = regexp.MustCompile(`^\{\{~?\s*else`)
-	// {{ or {{&
-	rOpen            = regexp.MustCompile(`^\{\{~?&?`)
-	rClose           = regexp.MustCompile(`^~?\}\}`)
-	rOpenBlockParams = regexp.MustCompile(`^as\s+\|`)
-	// {{!--  ... --}}
-	rOpenCommentDash  = regexp.MustCompile(`^\{\{~?!--\s*`)
-	rCloseCommentDash = regexp.MustCompile(`^\s*--~?\}\}`)
-	// {{! ... }}
-	rOpenComment  = regexp.MustCompile(`^\{\{~?!\s*`)
-	rCloseComment = regexp.MustCompile(`^\s*~?\}\}`)
-)
-
-// Scan scans given input.
-//
-// Tokens can then be fetched sequentially thanks to NextToken() function on returned lexer.
-func Scan(input string) *Lexer {
-	return scanWithName(input, "")
-}
-
-// scanWithName scans given input, with a name used for testing
-//
-// Tokens can then be fetched sequentially thanks to NextToken() function on returned lexer.
-func scanWithName(input string, name string) *Lexer {
-	result := &Lexer{
-		input:  input,
-		name:   name,
-		tokens: make(chan Token),
-		line:   1,
-	}
-
-	go result.run()
-
-	return result
-}
-
-// Collect scans and collect all tokens.
-//
-// This should be used for debugging purposes only. You should use Scan() and lexer.NextToken() functions instead.
-func Collect(input string) []Token {
-	var result []Token
-
-	l := Scan(input)
-	for {
-		token := l.NextToken()
-		result = append(result, token)
-
-		if token.Kind == TokenEOF || token.Kind == TokenError {
-			break
-		}
-	}
-
-	return result
-}
-
-// NextToken returns the next scanned token.
-func (l *Lexer) NextToken() Token {
-	result := <-l.tokens
-
-	return result
-}
-
-// run starts lexical analysis
-func (l *Lexer) run() {
-	for l.nextFunc = lexContent; l.nextFunc != nil; {
-		l.nextFunc = l.nextFunc(l)
-	}
-}
-
-// next returns next character from input, or eof if there is nothing left to scan
-func (l *Lexer) next() rune {
-	if l.pos >= len(l.input) {
-		l.width = 0
-		return eof
-	}
-
-	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
-	l.width = w
-	l.pos += l.width
-
-	return r
-}
-
-func (l *Lexer) produce(kind TokenKind, val string) {
-	l.tokens <- Token{kind, val, l.start, l.line}
-
-	// scanning a new token
-	l.start = l.pos
-
-	// update line number
-	l.line += strings.Count(val, "\n")
-}
-
-// emit emits a new scanned token
-func (l *Lexer) emit(kind TokenKind) {
-	l.produce(kind, l.input[l.start:l.pos])
-}
-
-// emitContent emits scanned content
-func (l *Lexer) emitContent() {
-	if l.pos > l.start {
-		l.emit(TokenContent)
-	}
-}
-
-// emitString emits a scanned string
-func (l *Lexer) emitString(delimiter rune) {
-	str := l.input[l.start:l.pos]
-
-	// replace escaped delimiters
-	str = strings.Replace(str, "\\"+string(delimiter), string(delimiter), -1)
-
-	l.produce(TokenString, str)
-}
-
-// peek returns but does not consume the next character in the input
-func (l *Lexer) peek() rune {
-	r := l.next()
-	l.backup()
-	return r
-}
-
-// backup steps back one character
-//
-// WARNING: Can only be called once per call of next
-func (l *Lexer) backup() {
-	l.pos -= l.width
-}
-
-// ignore skips all characters that have been scanned up to the current position
-func (l *Lexer) ignore() {
-	l.start = l.pos
-}
-
-// accept scans the next character if it is included in given string
-func (l *Lexer) accept(valid string) bool {
-	if strings.IndexRune(valid, l.next()) >= 0 {
-		return true
-	}
-
-	l.backup()
-
-	return false
-}
-
-// acceptRun scans all following characters that are part of given string
-func (l *Lexer) acceptRun(valid string) {
-	for strings.IndexRune(valid, l.next()) >= 0 {
-	}
-
-	l.backup()
-}
-
-// errorf emits an error token
-func (l *Lexer) errorf(format string, args ...interface{}) lexFunc {
-	l.tokens <- Token{TokenError, fmt.Sprintf(format, args...), l.start, l.line}
-	return nil
-}
-
-// isString returns true if content at current scanning position starts with given string
-func (l *Lexer) isString(str string) bool {
-	return strings.HasPrefix(l.input[l.pos:], str)
-}
-
-// findRegexp returns the first string from current scanning position that matches given regular expression
-func (l *Lexer) findRegexp(r *regexp.Regexp) string {
-	return r.FindString(l.input[l.pos:])
-}
-
-// indexRegexp returns the index of the first string from current scanning position that matches given regular expression
-//
-// It returns -1 if not found
-func (l *Lexer) indexRegexp(r *regexp.Regexp) int {
-	loc := r.FindStringIndex(l.input[l.pos:])
-	if loc == nil {
-		return -1
-	}
-	return loc[0]
-}
-
-// lexContent scans content (ie: not between mustaches)
-func lexContent(l *Lexer) lexFunc {
-	var next lexFunc
-
-	if l.rawBlock {
-		if i := l.indexRegexp(rOpenEndRawLookAhead); i != -1 {
-			// {{{{/
-			l.rawBlock = false
-			l.pos += i
-
-			next = lexOpenMustache
-		} else {
-			return l.errorf("Unclosed raw block")
-		}
-	} else if l.isString(escapedEscapedOpenMustache) {
-		// \\{{
-
-		// emit content with only one escaped escape
-		l.next()
-		l.emitContent()
-
-		// ignore second escaped escape
-		l.next()
-		l.ignore()
-
-		next = lexContent
-	} else if l.isString(escapedOpenMustache) {
-		// \{{
-		next = lexEscapedOpenMustache
-	} else if str := l.findRegexp(rOpenCommentDash); str != "" {
-		// {{!--
-		l.closeComment = rCloseCommentDash
-
-		next = lexComment
-	} else if str := l.findRegexp(rOpenComment); str != "" {
-		// {{!
-		l.closeComment = rCloseComment
-
-		next = lexComment
-	} else if l.isString(openMustache) {
-		// {{
-		next = lexOpenMustache
-	}
-
-	if next != nil {
-		// emit scanned content
-		l.emitContent()
-
-		// scan next token
-		return next
-	}
-
-	// scan next rune
-	if l.next() == eof {
-		// emit scanned content
-		l.emitContent()
-
-		// this is over
-		l.emit(TokenEOF)
-		return nil
-	}
-
-	// continue content scanning
-	return lexContent
-}
-
-// lexEscapedOpenMustache scans \{{
-func lexEscapedOpenMustache(l *Lexer) lexFunc {
-	// ignore escape character
-	l.next()
-	l.ignore()
-
-	// scan mustaches
-	for l.peek() == '{' {
-		l.next()
-	}
-
-	return lexContent
-}
-
-// lexOpenMustache scans {{
-func lexOpenMustache(l *Lexer) lexFunc {
-	var str string
-	var tok TokenKind
-
-	nextFunc := lexExpression
-
-	if str = l.findRegexp(rOpenEndRaw); str != "" {
-		tok = TokenOpenEndRawBlock
-	} else if str = l.findRegexp(rOpenRaw); str != "" {
-		tok = TokenOpenRawBlock
-		l.rawBlock = true
-	} else if str = l.findRegexp(rOpenUnescaped); str != "" {
-		tok = TokenOpenUnescaped
-	} else if str = l.findRegexp(rOpenBlock); str != "" {
-		tok = TokenOpenBlock
-	} else if str = l.findRegexp(rOpenEndBlock); str != "" {
-		tok = TokenOpenEndBlock
-	} else if str = l.findRegexp(rOpenPartial); str != "" {
-		tok = TokenOpenPartial
-	} else if str = l.findRegexp(rInverse); str != "" {
-		tok = TokenInverse
-		nextFunc = lexContent
-	} else if str = l.findRegexp(rOpenInverse); str != "" {
-		tok = TokenOpenInverse
-	} else if str = l.findRegexp(rOpenInverseChain); str != "" {
-		tok = TokenOpenInverseChain
-	} else if str = l.findRegexp(rOpen); str != "" {
-		tok = TokenOpen
-	} else {
-		// this is rotten
-		panic("Current pos MUST be an opening mustache")
-	}
-
-	l.pos += len(str)
-	l.emit(tok)
-
-	return nextFunc
-}
-
-// lexCloseMustache scans }} or ~}}
-func lexCloseMustache(l *Lexer) lexFunc {
-	var str string
-	var tok TokenKind
-
-	if str = l.findRegexp(rCloseRaw); str != "" {
-		// }}}}
-		tok = TokenCloseRawBlock
-	} else if str = l.findRegexp(rCloseUnescaped); str != "" {
-		// }}}
-		tok = TokenCloseUnescaped
-	} else if str = l.findRegexp(rClose); str != "" {
-		// }}
-		tok = TokenClose
-	} else {
-		// this is rotten
-		panic("Current pos MUST be a closing mustache")
-	}
-
-	l.pos += len(str)
-	l.emit(tok)
-
-	return lexContent
-}
-
-// lexExpression scans inside mustaches
-func lexExpression(l *Lexer) lexFunc {
-	// search close mustache delimiter
-	if l.isString(closeMustache) || l.isString(closeStripMustache) || l.isString(closeUnescapedStripMustache) {
-		return lexCloseMustache
-	}
-
-	// search some patterns before advancing scanning position
-
-	// "as |"
-	if str := l.findRegexp(rOpenBlockParams); str != "" {
-		l.pos += len(str)
-		l.emit(TokenOpenBlockParams)
-		return lexExpression
-	}
-
-	// ..
-	if l.isString("..") {
-		l.pos += len("..")
-		l.emit(TokenID)
-		return lexExpression
-	}
-
-	// .
-	if str := l.findRegexp(rDotID); str != "" {
-		l.pos += len(".")
-		l.emit(TokenID)
-		return lexExpression
-	}
-
-	// true
-	if str := l.findRegexp(rTrue); str != "" {
-		l.pos += len("true")
-		l.emit(TokenBoolean)
-		return lexExpression
-	}
-
-	// false
-	if str := l.findRegexp(rFalse); str != "" {
-		l.pos += len("false")
-		l.emit(TokenBoolean)
-		return lexExpression
-	}
-
-	// let's scan next character
-	switch r := l.next(); {
-	case r == eof:
-		return l.errorf("Unclosed expression")
-	case isIgnorable(r):
-		return lexIgnorable
-	case r == '(':
-		l.emit(TokenOpenSexpr)
-	case r == ')':
-		l.emit(TokenCloseSexpr)
-	case r == '=':
-		l.emit(TokenEquals)
-	case r == '@':
-		l.emit(TokenData)
-	case r == '"' || r == '\'':
-		l.backup()
-		return lexString
-	case r == '/' || r == '.':
-		l.emit(TokenSep)
-	case r == '|':
-		l.emit(TokenCloseBlockParams)
-	case r == '+' || r == '-' || (r >= '0' && r <= '9'):
-		l.backup()
-		return lexNumber
-	case r == '[':
-		return lexPathLiteral
-	case strings.IndexRune(unallowedIDChars, r) < 0:
-		l.backup()
-		return lexIdentifier
-	default:
-		return l.errorf("Unexpected character in expression: '%c'", r)
-	}
-
-	return lexExpression
-}
-
-// lexComment scans {{!-- or {{!
-func lexComment(l *Lexer) lexFunc {
-	if str := l.findRegexp(l.closeComment); str != "" {
-		l.pos += len(str)
-		l.emit(TokenComment)
-
-		return lexContent
-	}
-
-	if r := l.next(); r == eof {
-		return l.errorf("Unclosed comment")
-	}
-
-	return lexComment
-}
-
-// lexIgnorable scans all following ignorable characters
-func lexIgnorable(l *Lexer) lexFunc {
-	for isIgnorable(l.peek()) {
-		l.next()
-	}
-	l.ignore()
-
-	return lexExpression
-}
-
-// lexString scans a string
-func lexString(l *Lexer) lexFunc {
-	// get string delimiter
-	delim := l.next()
-	var prev rune
-
-	// ignore delimiter
-	l.ignore()
-
-	for {
-		r := l.next()
-		if r == eof || r == '\n' {
-			return l.errorf("Unterminated string")
-		}
-
-		if (r == delim) && (prev != '\\') {
-			break
-		}
-
-		prev = r
-	}
-
-	// remove end delimiter
-	l.backup()
-
-	// emit string
-	l.emitString(delim)
-
-	// skip end delimiter
-	l.next()
-	l.ignore()
-
-	return lexExpression
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-//
-// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/parse/lex.go
-func lexNumber(l *Lexer) lexFunc {
-	if !l.scanNumber() {
-		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
-	}
-	if sign := l.peek(); sign == '+' || sign == '-' {
-		// Complex: 1+2i. No spaces, must end in 'i'.
-		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
-			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
-		}
-		l.emit(TokenNumber)
-	} else {
-		l.emit(TokenNumber)
-	}
-	return lexExpression
-}
-
-// scanNumber scans a number
-//
-// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/parse/lex.go
-func (l *Lexer) scanNumber() bool {
-	// Optional leading sign.
-	l.accept("+-")
-
-	// Is it hex?
-	digits := "0123456789"
-
-	if l.accept("0") && l.accept("xX") {
-		digits = "0123456789abcdefABCDEF"
-	}
-
-	l.acceptRun(digits)
-
-	if l.accept(".") {
-		l.acceptRun(digits)
-	}
-
-	if l.accept("eE") {
-		l.accept("+-")
-		l.acceptRun("0123456789")
-	}
-
-	// Is it imaginary?
-	l.accept("i")
-
-	// Next thing mustn't be alphanumeric.
-	if isAlphaNumeric(l.peek()) {
-		l.next()
-		return false
-	}
-
-	return true
-}
-
-// lexIdentifier scans an ID
-func lexIdentifier(l *Lexer) lexFunc {
-	str := l.findRegexp(rID)
-	if len(str) == 0 {
-		// this is rotten
-		panic("Identifier expected")
-	}
-
-	l.pos += len(str)
-	l.emit(TokenID)
-
-	return lexExpression
-}
-
-// lexPathLiteral scans an [ID]
-func lexPathLiteral(l *Lexer) lexFunc {
-	for {
-		r := l.next()
-		if r == eof || r == '\n' {
-			return l.errorf("Unterminated path literal")
-		}
-
-		if r == ']' {
-			break
-		}
-	}
-
-	l.emit(TokenID)
-
-	return lexExpression
-}
-
-// isIgnorable returns true if given character is ignorable (ie. whitespace or line feed)
-func isIgnorable(r rune) bool {
-	return r == ' ' || r == '\t' || r == '\n'
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-//
-// NOTE borrowed from https://github.com/golang/go/tree/master/src/text/template/parse/lex.go
-func isAlphaNumeric(r rune) bool {
-	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
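
The lexer removed above is mostly consumed internally by the parser, but it can be driven directly for debugging; a short sketch using the Collect convenience (equivalent to a Scan / NextToken loop):

package main

import (
	"fmt"

	"github.com/aymerick/raymond/lexer"
)

func main() {
	// Collect runs the lexFunc state machine (lexContent, lexExpression, ...)
	// until TokenEOF or TokenError and returns every scanned token.
	for _, tok := range lexer.Collect(`Hello {{name}}!`) {
		fmt.Println(tok) // e.g. Content{"Hello "}, Open{"{{"}, ID{"name"}, Close{"}}"}, ...
	}
}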

+ 0 - 183
vendor/github.com/aymerick/raymond/lexer/token.go

@@ -1,183 +0,0 @@
-package lexer
-
-import "fmt"
-
-const (
-	// TokenError represents an error
-	TokenError TokenKind = iota
-
-	// TokenEOF represents an End Of File
-	TokenEOF
-
-	//
-	// Mustache delimiters
-	//
-
-	// TokenOpen is the OPEN token
-	TokenOpen
-
-	// TokenClose is the CLOSE token
-	TokenClose
-
-	// TokenOpenRawBlock is the OPEN_RAW_BLOCK token
-	TokenOpenRawBlock
-
-	// TokenCloseRawBlock is the CLOSE_RAW_BLOCK token
-	TokenCloseRawBlock
-
-	// TokenOpenEndRawBlock is the END_RAW_BLOCK token
-	TokenOpenEndRawBlock
-
-	// TokenOpenUnescaped is the OPEN_UNESCAPED token
-	TokenOpenUnescaped
-
-	// TokenCloseUnescaped is the CLOSE_UNESCAPED token
-	TokenCloseUnescaped
-
-	// TokenOpenBlock is the OPEN_BLOCK token
-	TokenOpenBlock
-
-	// TokenOpenEndBlock is the OPEN_ENDBLOCK token
-	TokenOpenEndBlock
-
-	// TokenInverse is the INVERSE token
-	TokenInverse
-
-	// TokenOpenInverse is the OPEN_INVERSE token
-	TokenOpenInverse
-
-	// TokenOpenInverseChain is the OPEN_INVERSE_CHAIN token
-	TokenOpenInverseChain
-
-	// TokenOpenPartial is the OPEN_PARTIAL token
-	TokenOpenPartial
-
-	// TokenComment is the COMMENT token
-	TokenComment
-
-	//
-	// Inside mustaches
-	//
-
-	// TokenOpenSexpr is the OPEN_SEXPR token
-	TokenOpenSexpr
-
-	// TokenCloseSexpr is the CLOSE_SEXPR token
-	TokenCloseSexpr
-
-	// TokenEquals is the EQUALS token
-	TokenEquals
-
-	// TokenData is the DATA token
-	TokenData
-
-	// TokenSep is the SEP token
-	TokenSep
-
-	// TokenOpenBlockParams is the OPEN_BLOCK_PARAMS token
-	TokenOpenBlockParams
-
-	// TokenCloseBlockParams is the CLOSE_BLOCK_PARAMS token
-	TokenCloseBlockParams
-
-	//
-	// Tokens with content
-	//
-
-	// TokenContent is the CONTENT token
-	TokenContent
-
-	// TokenID is the ID token
-	TokenID
-
-	// TokenString is the STRING token
-	TokenString
-
-	// TokenNumber is the NUMBER token
-	TokenNumber
-
-	// TokenBoolean is the BOOLEAN token
-	TokenBoolean
-)
-
-const (
-	// Option to generate token position in its string representation
-	dumpTokenPos = false
-
-	// Option to generate values for all token kinds for their string representations
-	dumpAllTokensVal = true
-)
-
-// TokenKind represents a Token type.
-type TokenKind int
-
-// Token represents a scanned token.
-type Token struct {
-	Kind TokenKind // Token kind
-	Val  string    // Token value
-
-	Pos  int // Byte position in input string
-	Line int // Line number in input string
-}
-
-// tokenName maps each token kind to its display name
-var tokenName = map[TokenKind]string{
-	TokenError:            "Error",
-	TokenEOF:              "EOF",
-	TokenContent:          "Content",
-	TokenComment:          "Comment",
-	TokenOpen:             "Open",
-	TokenClose:            "Close",
-	TokenOpenUnescaped:    "OpenUnescaped",
-	TokenCloseUnescaped:   "CloseUnescaped",
-	TokenOpenBlock:        "OpenBlock",
-	TokenOpenEndBlock:     "OpenEndBlock",
-	TokenOpenRawBlock:     "OpenRawBlock",
-	TokenCloseRawBlock:    "CloseRawBlock",
-	TokenOpenEndRawBlock:  "OpenEndRawBlock",
-	TokenOpenBlockParams:  "OpenBlockParams",
-	TokenCloseBlockParams: "CloseBlockParams",
-	TokenInverse:          "Inverse",
-	TokenOpenInverse:      "OpenInverse",
-	TokenOpenInverseChain: "OpenInverseChain",
-	TokenOpenPartial:      "OpenPartial",
-	TokenOpenSexpr:        "OpenSexpr",
-	TokenCloseSexpr:       "CloseSexpr",
-	TokenID:               "ID",
-	TokenEquals:           "Equals",
-	TokenString:           "String",
-	TokenNumber:           "Number",
-	TokenBoolean:          "Boolean",
-	TokenData:             "Data",
-	TokenSep:              "Sep",
-}
-
-// String returns the token kind string representation for debugging.
-func (k TokenKind) String() string {
-	s := tokenName[k]
-	if s == "" {
-		return fmt.Sprintf("Token-%d", int(k))
-	}
-	return s
-}
-
-// String returns the token string representation for debugging.
-func (t Token) String() string {
-	result := ""
-
-	if dumpTokenPos {
-		result += fmt.Sprintf("%d:", t.Pos)
-	}
-
-	result += fmt.Sprintf("%s", t.Kind)
-
-	if (dumpAllTokensVal || (t.Kind >= TokenContent)) && len(t.Val) > 0 {
-		if len(t.Val) > 100 {
-			result += fmt.Sprintf("{%.20q...}", t.Val)
-		} else {
-			result += fmt.Sprintf("{%q}", t.Val)
-		}
-	}
-
-	return result
-}

+ 0 - 846
vendor/github.com/aymerick/raymond/parser/parser.go

@@ -1,846 +0,0 @@
-// Package parser provides a handlebars syntax analyser. It consumes the tokens provided by the lexer to build an AST.
-package parser
-
-import (
-	"fmt"
-	"regexp"
-	"runtime"
-	"strconv"
-
-	"github.com/aymerick/raymond/ast"
-	"github.com/aymerick/raymond/lexer"
-)
-
-// References:
-//   - https://github.com/wycats/handlebars.js/blob/master/src/handlebars.yy
-//   - https://github.com/golang/go/blob/master/src/text/template/parse/parse.go
-
-// parser is a syntax analyzer.
-type parser struct {
-	// Lexer
-	lex *lexer.Lexer
-
-	// Root node
-	root ast.Node
-
-	// Tokens parsed but not consumed yet
-	tokens []*lexer.Token
-
-	// All tokens have been retrieved from lexer
-	lexOver bool
-}
-
-var (
-	rOpenComment  = regexp.MustCompile(`^\{\{~?!-?-?`)
-	rCloseComment = regexp.MustCompile(`-?-?~?\}\}$`)
-	rOpenAmp      = regexp.MustCompile(`^\{\{~?&`)
-)
-
-// new instantiates a new parser
-func new(input string) *parser {
-	return &parser{
-		lex: lexer.Scan(input),
-	}
-}
-
-// Parse analyzes given input and returns the AST root node.
-func Parse(input string) (result *ast.Program, err error) {
-	// recover error
-	defer errRecover(&err)
-
-	parser := new(input)
-
-	// parse
-	result = parser.parseProgram()
-
-	// check last token
-	token := parser.shift()
-	if token.Kind != lexer.TokenEOF {
-		// Parsing ended before EOF
-		errToken(token, "Syntax error")
-	}
-
-	// fix whitespaces
-	processWhitespaces(result)
-
-	// named returned values
-	return
-}
-
-// errRecover recovers parsing panic
-func errRecover(errp *error) {
-	e := recover()
-	if e != nil {
-		switch err := e.(type) {
-		case runtime.Error:
-			panic(e)
-		case error:
-			*errp = err
-		default:
-			panic(e)
-		}
-	}
-}
-
-// errPanic panics
-func errPanic(err error, line int) {
-	panic(fmt.Errorf("Parse error on line %d:\n%s", line, err))
-}
-
-// errNode panics with given node infos
-func errNode(node ast.Node, msg string) {
-	errPanic(fmt.Errorf("%s\nNode: %s", msg, node), node.Location().Line)
-}
-
-// errToken panics with given Token infos
-func errToken(tok *lexer.Token, msg string) {
-	errPanic(fmt.Errorf("%s\nToken: %s", msg, tok), tok.Line)
-}
-
-// errExpected panics because of an unexpected Token kind
-func errExpected(expect lexer.TokenKind, tok *lexer.Token) {
-	errPanic(fmt.Errorf("Expecting %s, got: '%s'", expect, tok), tok.Line)
-}
-
-// program : statement*
-func (p *parser) parseProgram() *ast.Program {
-	result := ast.NewProgram(p.next().Pos, p.next().Line)
-
-	for p.isStatement() {
-		result.AddStatement(p.parseStatement())
-	}
-
-	return result
-}
-
-// statement : mustache | block | rawBlock | partial | content | COMMENT
-func (p *parser) parseStatement() ast.Node {
-	var result ast.Node
-
-	tok := p.next()
-
-	switch tok.Kind {
-	case lexer.TokenOpen, lexer.TokenOpenUnescaped:
-		// mustache
-		result = p.parseMustache()
-	case lexer.TokenOpenBlock:
-		// block
-		result = p.parseBlock()
-	case lexer.TokenOpenInverse:
-		// block
-		result = p.parseInverse()
-	case lexer.TokenOpenRawBlock:
-		// rawBlock
-		result = p.parseRawBlock()
-	case lexer.TokenOpenPartial:
-		// partial
-		result = p.parsePartial()
-	case lexer.TokenContent:
-		// content
-		result = p.parseContent()
-	case lexer.TokenComment:
-		// COMMENT
-		result = p.parseComment()
-	}
-
-	return result
-}
-
-// isStatement returns true if next token starts a statement
-func (p *parser) isStatement() bool {
-	if !p.have(1) {
-		return false
-	}
-
-	switch p.next().Kind {
-	case lexer.TokenOpen, lexer.TokenOpenUnescaped, lexer.TokenOpenBlock,
-		lexer.TokenOpenInverse, lexer.TokenOpenRawBlock, lexer.TokenOpenPartial,
-		lexer.TokenContent, lexer.TokenComment:
-		return true
-	}
-
-	return false
-}
-
-// content : CONTENT
-func (p *parser) parseContent() *ast.ContentStatement {
-	// CONTENT
-	tok := p.shift()
-	if tok.Kind != lexer.TokenContent {
-		// @todo This check can be removed if content is optional in a raw block
-		errExpected(lexer.TokenContent, tok)
-	}
-
-	return ast.NewContentStatement(tok.Pos, tok.Line, tok.Val)
-}
-
-// COMMENT
-func (p *parser) parseComment() *ast.CommentStatement {
-	// COMMENT
-	tok := p.shift()
-
-	value := rOpenComment.ReplaceAllString(tok.Val, "")
-	value = rCloseComment.ReplaceAllString(value, "")
-
-	result := ast.NewCommentStatement(tok.Pos, tok.Line, value)
-	result.Strip = ast.NewStripForStr(tok.Val)
-
-	return result
-}
-
-// param* hash?
-func (p *parser) parseExpressionParamsHash() ([]ast.Node, *ast.Hash) {
-	var params []ast.Node
-	var hash *ast.Hash
-
-	// params*
-	if p.isParam() {
-		params = p.parseParams()
-	}
-
-	// hash?
-	if p.isHashSegment() {
-		hash = p.parseHash()
-	}
-
-	return params, hash
-}
-
-// helperName param* hash?
-func (p *parser) parseExpression(tok *lexer.Token) *ast.Expression {
-	result := ast.NewExpression(tok.Pos, tok.Line)
-
-	// helperName
-	result.Path = p.parseHelperName()
-
-	// param* hash?
-	result.Params, result.Hash = p.parseExpressionParamsHash()
-
-	return result
-}
-
-// rawBlock : openRawBlock content endRawBlock
-// openRawBlock : OPEN_RAW_BLOCK helperName param* hash? CLOSE_RAW_BLOCK
-// endRawBlock : OPEN_END_RAW_BLOCK helperName CLOSE_RAW_BLOCK
-func (p *parser) parseRawBlock() *ast.BlockStatement {
-	// OPEN_RAW_BLOCK
-	tok := p.shift()
-
-	result := ast.NewBlockStatement(tok.Pos, tok.Line)
-
-	// helperName param* hash?
-	result.Expression = p.parseExpression(tok)
-
-	openName := result.Expression.Canonical()
-
-	// CLOSE_RAW_BLOCK
-	tok = p.shift()
-	if tok.Kind != lexer.TokenCloseRawBlock {
-		errExpected(lexer.TokenCloseRawBlock, tok)
-	}
-
-	// content
-	// @todo Is content mandatory in a raw block ?
-	content := p.parseContent()
-
-	program := ast.NewProgram(tok.Pos, tok.Line)
-	program.AddStatement(content)
-
-	result.Program = program
-
-	// OPEN_END_RAW_BLOCK
-	tok = p.shift()
-	if tok.Kind != lexer.TokenOpenEndRawBlock {
-		// should never happen as it is caught by lexer
-		errExpected(lexer.TokenOpenEndRawBlock, tok)
-	}
-
-	// helperName
-	endID := p.parseHelperName()
-
-	closeName, ok := ast.HelperNameStr(endID)
-	if !ok {
-		errNode(endID, "Erroneous closing expression")
-	}
-
-	if openName != closeName {
-		errNode(endID, fmt.Sprintf("%s doesn't match %s", openName, closeName))
-	}
-
-	// CLOSE_RAW_BLOCK
-	tok = p.shift()
-	if tok.Kind != lexer.TokenCloseRawBlock {
-		errExpected(lexer.TokenCloseRawBlock, tok)
-	}
-
-	return result
-}
-
-// block : openBlock program inverseChain? closeBlock
-func (p *parser) parseBlock() *ast.BlockStatement {
-	// openBlock
-	result, blockParams := p.parseOpenBlock()
-
-	// program
-	program := p.parseProgram()
-	program.BlockParams = blockParams
-	result.Program = program
-
-	// inverseChain?
-	if p.isInverseChain() {
-		result.Inverse = p.parseInverseChain()
-	}
-
-	// closeBlock
-	p.parseCloseBlock(result)
-
-	setBlockInverseStrip(result)
-
-	return result
-}
-
-// setBlockInverseStrip is called when parsing `block` (openBlock | openInverse) and `inverseChain`
-//
-// TODO: This was totally cargo culted ! CHECK THAT !
-//
-// cf. prepareBlock() in:
-//   https://github.com/wycats/handlebars.js/blob/master/lib/handlebars/compiler/helper.js
-func setBlockInverseStrip(block *ast.BlockStatement) {
-	if block.Inverse == nil {
-		return
-	}
-
-	if block.Inverse.Chained {
-		b, _ := block.Inverse.Body[0].(*ast.BlockStatement)
-		b.CloseStrip = block.CloseStrip
-	}
-
-	block.InverseStrip = block.Inverse.Strip
-}
-
-// block : openInverse program inverseAndProgram? closeBlock
-func (p *parser) parseInverse() *ast.BlockStatement {
-	// openInverse
-	result, blockParams := p.parseOpenBlock()
-
-	// program
-	program := p.parseProgram()
-
-	program.BlockParams = blockParams
-	result.Inverse = program
-
-	// inverseAndProgram?
-	if p.isInverse() {
-		result.Program = p.parseInverseAndProgram()
-	}
-
-	// closeBlock
-	p.parseCloseBlock(result)
-
-	setBlockInverseStrip(result)
-
-	return result
-}
-
-// helperName param* hash? blockParams?
-func (p *parser) parseOpenBlockExpression(tok *lexer.Token) (*ast.BlockStatement, []string) {
-	var blockParams []string
-
-	result := ast.NewBlockStatement(tok.Pos, tok.Line)
-
-	// helperName param* hash?
-	result.Expression = p.parseExpression(tok)
-
-	// blockParams?
-	if p.isBlockParams() {
-		blockParams = p.parseBlockParams()
-	}
-
-	// named returned values
-	return result, blockParams
-}
-
-// inverseChain : openInverseChain program inverseChain?
-//              | inverseAndProgram
-func (p *parser) parseInverseChain() *ast.Program {
-	if p.isInverse() {
-		// inverseAndProgram
-		return p.parseInverseAndProgram()
-	}
-
-	result := ast.NewProgram(p.next().Pos, p.next().Line)
-
-	// openInverseChain
-	block, blockParams := p.parseOpenBlock()
-
-	// program
-	program := p.parseProgram()
-
-	program.BlockParams = blockParams
-	block.Program = program
-
-	// inverseChain?
-	if p.isInverseChain() {
-		block.Inverse = p.parseInverseChain()
-	}
-
-	setBlockInverseStrip(block)
-
-	result.Chained = true
-	result.AddStatement(block)
-
-	return result
-}
-
-// Returns true if current token starts an inverse chain
-func (p *parser) isInverseChain() bool {
-	return p.isOpenInverseChain() || p.isInverse()
-}
-
-// inverseAndProgram : INVERSE program
-func (p *parser) parseInverseAndProgram() *ast.Program {
-	// INVERSE
-	tok := p.shift()
-
-	// program
-	result := p.parseProgram()
-	result.Strip = ast.NewStripForStr(tok.Val)
-
-	return result
-}
-
-// openBlock : OPEN_BLOCK helperName param* hash? blockParams? CLOSE
-// openInverse : OPEN_INVERSE helperName param* hash? blockParams? CLOSE
-// openInverseChain: OPEN_INVERSE_CHAIN helperName param* hash? blockParams? CLOSE
-func (p *parser) parseOpenBlock() (*ast.BlockStatement, []string) {
-	// OPEN_BLOCK | OPEN_INVERSE | OPEN_INVERSE_CHAIN
-	tok := p.shift()
-
-	// helperName param* hash? blockParams?
-	result, blockParams := p.parseOpenBlockExpression(tok)
-
-	// CLOSE
-	tokClose := p.shift()
-	if tokClose.Kind != lexer.TokenClose {
-		errExpected(lexer.TokenClose, tokClose)
-	}
-
-	result.OpenStrip = ast.NewStrip(tok.Val, tokClose.Val)
-
-	// named returned values
-	return result, blockParams
-}
-
-// closeBlock : OPEN_ENDBLOCK helperName CLOSE
-func (p *parser) parseCloseBlock(block *ast.BlockStatement) {
-	// OPEN_ENDBLOCK
-	tok := p.shift()
-	if tok.Kind != lexer.TokenOpenEndBlock {
-		errExpected(lexer.TokenOpenEndBlock, tok)
-	}
-
-	// helperName
-	endID := p.parseHelperName()
-
-	closeName, ok := ast.HelperNameStr(endID)
-	if !ok {
-		errNode(endID, "Erroneous closing expression")
-	}
-
-	openName := block.Expression.Canonical()
-	if openName != closeName {
-		errNode(endID, fmt.Sprintf("%s doesn't match %s", openName, closeName))
-	}
-
-	// CLOSE
-	tokClose := p.shift()
-	if tokClose.Kind != lexer.TokenClose {
-		errExpected(lexer.TokenClose, tokClose)
-	}
-
-	block.CloseStrip = ast.NewStrip(tok.Val, tokClose.Val)
-}
-
-// mustache : OPEN helperName param* hash? CLOSE
-//          | OPEN_UNESCAPED helperName param* hash? CLOSE_UNESCAPED
-func (p *parser) parseMustache() *ast.MustacheStatement {
-	// OPEN | OPEN_UNESCAPED
-	tok := p.shift()
-
-	closeToken := lexer.TokenClose
-	if tok.Kind == lexer.TokenOpenUnescaped {
-		closeToken = lexer.TokenCloseUnescaped
-	}
-
-	unescaped := false
-	if (tok.Kind == lexer.TokenOpenUnescaped) || (rOpenAmp.MatchString(tok.Val)) {
-		unescaped = true
-	}
-
-	result := ast.NewMustacheStatement(tok.Pos, tok.Line, unescaped)
-
-	// helperName param* hash?
-	result.Expression = p.parseExpression(tok)
-
-	// CLOSE | CLOSE_UNESCAPED
-	tokClose := p.shift()
-	if tokClose.Kind != closeToken {
-		errExpected(closeToken, tokClose)
-	}
-
-	result.Strip = ast.NewStrip(tok.Val, tokClose.Val)
-
-	return result
-}
-
-// partial : OPEN_PARTIAL partialName param* hash? CLOSE
-func (p *parser) parsePartial() *ast.PartialStatement {
-	// OPEN_PARTIAL
-	tok := p.shift()
-
-	result := ast.NewPartialStatement(tok.Pos, tok.Line)
-
-	// partialName
-	result.Name = p.parsePartialName()
-
-	// param* hash?
-	result.Params, result.Hash = p.parseExpressionParamsHash()
-
-	// CLOSE
-	tokClose := p.shift()
-	if tokClose.Kind != lexer.TokenClose {
-		errExpected(lexer.TokenClose, tokClose)
-	}
-
-	result.Strip = ast.NewStrip(tok.Val, tokClose.Val)
-
-	return result
-}
-
-// helperName | sexpr
-func (p *parser) parseHelperNameOrSexpr() ast.Node {
-	if p.isSexpr() {
-		// sexpr
-		return p.parseSexpr()
-	}
-
-	// helperName
-	return p.parseHelperName()
-}
-
-// param : helperName | sexpr
-func (p *parser) parseParam() ast.Node {
-	return p.parseHelperNameOrSexpr()
-}
-
-// Returns true if next tokens represent a `param`
-func (p *parser) isParam() bool {
-	return (p.isSexpr() || p.isHelperName()) && !p.isHashSegment()
-}
-
-// param*
-func (p *parser) parseParams() []ast.Node {
-	var result []ast.Node
-
-	for p.isParam() {
-		result = append(result, p.parseParam())
-	}
-
-	return result
-}
-
-// sexpr : OPEN_SEXPR helperName param* hash? CLOSE_SEXPR
-func (p *parser) parseSexpr() *ast.SubExpression {
-	// OPEN_SEXPR
-	tok := p.shift()
-
-	result := ast.NewSubExpression(tok.Pos, tok.Line)
-
-	// helperName param* hash?
-	result.Expression = p.parseExpression(tok)
-
-	// CLOSE_SEXPR
-	tok = p.shift()
-	if tok.Kind != lexer.TokenCloseSexpr {
-		errExpected(lexer.TokenCloseSexpr, tok)
-	}
-
-	return result
-}
-
-// hash : hashSegment+
-func (p *parser) parseHash() *ast.Hash {
-	var pairs []*ast.HashPair
-
-	for p.isHashSegment() {
-		pairs = append(pairs, p.parseHashSegment())
-	}
-
-	firstLoc := pairs[0].Location()
-
-	result := ast.NewHash(firstLoc.Pos, firstLoc.Line)
-	result.Pairs = pairs
-
-	return result
-}
-
-// returns true if next tokens represent a `hashSegment`
-func (p *parser) isHashSegment() bool {
-	return p.have(2) && (p.next().Kind == lexer.TokenID) && (p.nextAt(1).Kind == lexer.TokenEquals)
-}
-
-// hashSegment : ID EQUALS param
-func (p *parser) parseHashSegment() *ast.HashPair {
-	// ID
-	tok := p.shift()
-
-	// EQUALS
-	p.shift()
-
-	// param
-	param := p.parseParam()
-
-	result := ast.NewHashPair(tok.Pos, tok.Line)
-	result.Key = tok.Val
-	result.Val = param
-
-	return result
-}
-
-// blockParams : OPEN_BLOCK_PARAMS ID+ CLOSE_BLOCK_PARAMS
-func (p *parser) parseBlockParams() []string {
-	var result []string
-
-	// OPEN_BLOCK_PARAMS
-	tok := p.shift()
-
-	// ID+
-	for p.isID() {
-		result = append(result, p.shift().Val)
-	}
-
-	if len(result) == 0 {
-		errExpected(lexer.TokenID, p.next())
-	}
-
-	// CLOSE_BLOCK_PARAMS
-	tok = p.shift()
-	if tok.Kind != lexer.TokenCloseBlockParams {
-		errExpected(lexer.TokenCloseBlockParams, tok)
-	}
-
-	return result
-}
-
-// helperName : path | dataName | STRING | NUMBER | BOOLEAN | UNDEFINED | NULL
-func (p *parser) parseHelperName() ast.Node {
-	var result ast.Node
-
-	tok := p.next()
-
-	switch tok.Kind {
-	case lexer.TokenBoolean:
-		// BOOLEAN
-		p.shift()
-		result = ast.NewBooleanLiteral(tok.Pos, tok.Line, (tok.Val == "true"), tok.Val)
-	case lexer.TokenNumber:
-		// NUMBER
-		p.shift()
-
-		val, isInt := parseNumber(tok)
-		result = ast.NewNumberLiteral(tok.Pos, tok.Line, val, isInt, tok.Val)
-	case lexer.TokenString:
-		// STRING
-		p.shift()
-		result = ast.NewStringLiteral(tok.Pos, tok.Line, tok.Val)
-	case lexer.TokenData:
-		// dataName
-		result = p.parseDataName()
-	default:
-		// path
-		result = p.parsePath(false)
-	}
-
-	return result
-}
-
-// parseNumber parses a number
-func parseNumber(tok *lexer.Token) (result float64, isInt bool) {
-	var valInt int
-	var err error
-
-	valInt, err = strconv.Atoi(tok.Val)
-	if err == nil {
-		isInt = true
-
-		result = float64(valInt)
-	} else {
-		isInt = false
-
-		result, err = strconv.ParseFloat(tok.Val, 64)
-		if err != nil {
-			errToken(tok, fmt.Sprintf("Failed to parse number: %s", tok.Val))
-		}
-	}
-
-	// named returned values
-	return
-}
-
-// Returns true if next tokens represent a `helperName`
-func (p *parser) isHelperName() bool {
-	switch p.next().Kind {
-	case lexer.TokenBoolean, lexer.TokenNumber, lexer.TokenString, lexer.TokenData, lexer.TokenID:
-		return true
-	}
-
-	return false
-}
-
-// partialName : helperName | sexpr
-func (p *parser) parsePartialName() ast.Node {
-	return p.parseHelperNameOrSexpr()
-}
-
-// dataName : DATA pathSegments
-func (p *parser) parseDataName() *ast.PathExpression {
-	// DATA
-	p.shift()
-
-	// pathSegments
-	return p.parsePath(true)
-}
-
-// path : pathSegments
-// pathSegments : pathSegments SEP ID
-//              | ID
-func (p *parser) parsePath(data bool) *ast.PathExpression {
-	var tok *lexer.Token
-
-	// ID
-	tok = p.shift()
-	if tok.Kind != lexer.TokenID {
-		errExpected(lexer.TokenID, tok)
-	}
-
-	result := ast.NewPathExpression(tok.Pos, tok.Line, data)
-	result.Part(tok.Val)
-
-	for p.isPathSep() {
-		// SEP
-		tok = p.shift()
-		result.Sep(tok.Val)
-
-		// ID
-		tok = p.shift()
-		if tok.Kind != lexer.TokenID {
-			errExpected(lexer.TokenID, tok)
-		}
-
-		result.Part(tok.Val)
-
-		if len(result.Parts) > 0 {
-			switch tok.Val {
-			case "..", ".", "this":
-				errToken(tok, "Invalid path: "+result.Original)
-			}
-		}
-	}
-
-	return result
-}
-
-// Ensures there is a token to parse at the given index
-func (p *parser) ensure(index int) {
-	if p.lexOver {
-		// nothing more to grab
-		return
-	}
-
-	nb := index + 1
-
-	for len(p.tokens) < nb {
-		// fetch next token
-		tok := p.lex.NextToken()
-
-		// queue it
-		p.tokens = append(p.tokens, &tok)
-
-		if (tok.Kind == lexer.TokenEOF) || (tok.Kind == lexer.TokenError) {
-			p.lexOver = true
-			break
-		}
-	}
-}
-
-// have returns true if there are at least the given number of tokens left to consume
-func (p *parser) have(nb int) bool {
-	p.ensure(nb - 1)
-
-	return len(p.tokens) >= nb
-}
-
-// nextAt returns next token at given index, without consuming it
-func (p *parser) nextAt(index int) *lexer.Token {
-	p.ensure(index)
-
-	return p.tokens[index]
-}
-
-// next returns next token without consuming it
-func (p *parser) next() *lexer.Token {
-	return p.nextAt(0)
-}
-
-// shift returns the next token and removes it from the tokens buffer
-//
-// Panics if next token is `TokenError`
-func (p *parser) shift() *lexer.Token {
-	var result *lexer.Token
-
-	p.ensure(0)
-
-	result, p.tokens = p.tokens[0], p.tokens[1:]
-
-	// check error token
-	if result.Kind == lexer.TokenError {
-		errToken(result, "Lexer error")
-	}
-
-	return result
-}
-
-// isToken returns true if next token is of given type
-func (p *parser) isToken(kind lexer.TokenKind) bool {
-	return p.have(1) && p.next().Kind == kind
-}
-
-// isSexpr returns true if next token starts a sexpr
-func (p *parser) isSexpr() bool {
-	return p.isToken(lexer.TokenOpenSexpr)
-}
-
-// isPathSep returns true if next token is a path separator
-func (p *parser) isPathSep() bool {
-	return p.isToken(lexer.TokenSep)
-}
-
-// isID returns true if next token is an ID
-func (p *parser) isID() bool {
-	return p.isToken(lexer.TokenID)
-}
-
-// isBlockParams returns true if next token starts a block params
-func (p *parser) isBlockParams() bool {
-	return p.isToken(lexer.TokenOpenBlockParams)
-}
-
-// isInverse returns true if next token starts an INVERSE sequence
-func (p *parser) isInverse() bool {
-	return p.isToken(lexer.TokenInverse)
-}
-
-// isOpenInverseChain returns true if next token is OPEN_INVERSE_CHAIN
-func (p *parser) isOpenInverseChain() bool {
-	return p.isToken(lexer.TokenOpenInverseChain)
-}
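The number-literal handling above (`parseNumber`, used by `parseHelperName`) tries an integer parse first and only falls back to a 64-bit float. A standalone sketch of that strategy; the helper name below is illustrative, not part of the library:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseNumberLiteral mirrors the parser's integer-first strategy:
// try a plain integer, otherwise fall back to a 64-bit float.
func parseNumberLiteral(s string) (val float64, isInt bool, err error) {
	if i, aerr := strconv.Atoi(s); aerr == nil {
		return float64(i), true, nil
	}
	f, ferr := strconv.ParseFloat(s, 64)
	if ferr != nil {
		return 0, false, fmt.Errorf("failed to parse number: %s", s)
	}
	return f, false, nil
}

func main() {
	fmt.Println(parseNumberLiteral("42"))   // 42 true <nil>
	fmt.Println(parseNumberLiteral("3.14")) // 3.14 false <nil>
}
```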

+ 0 - 360
vendor/github.com/aymerick/raymond/parser/whitespace.go

@@ -1,360 +0,0 @@
-package parser
-
-import (
-	"regexp"
-
-	"github.com/aymerick/raymond/ast"
-)
-
-// whitespaceVisitor walks through the AST to perform whitespace control
-//
-// The logic was shamelessly borrowed from:
-//   https://github.com/wycats/handlebars.js/blob/master/lib/handlebars/compiler/whitespace-control.js
-type whitespaceVisitor struct {
-	isRootSeen bool
-}
-
-var (
-	rTrimLeft         = regexp.MustCompile(`^[ \t]*\r?\n?`)
-	rTrimLeftMultiple = regexp.MustCompile(`^\s+`)
-
-	rTrimRight         = regexp.MustCompile(`[ \t]+$`)
-	rTrimRightMultiple = regexp.MustCompile(`\s+$`)
-
-	rPrevWhitespace      = regexp.MustCompile(`\r?\n\s*?$`)
-	rPrevWhitespaceStart = regexp.MustCompile(`(^|\r?\n)\s*?$`)
-
-	rNextWhitespace    = regexp.MustCompile(`^\s*?\r?\n`)
-	rNextWhitespaceEnd = regexp.MustCompile(`^\s*?(\r?\n|$)`)
-
-	rPartialIndent = regexp.MustCompile(`([ \t]+$)`)
-)
-
-// newWhitespaceVisitor instantiates a new whitespaceVisitor
-func newWhitespaceVisitor() *whitespaceVisitor {
-	return &whitespaceVisitor{}
-}
-
-// processWhitespaces performs whitespace control on given AST
-//
-// WARNING: It must be called only once on AST.
-func processWhitespaces(node ast.Node) {
-	node.Accept(newWhitespaceVisitor())
-}
-
-func omitRightFirst(body []ast.Node, multiple bool) {
-	omitRight(body, -1, multiple)
-}
-
-func omitRight(body []ast.Node, i int, multiple bool) {
-	if i+1 >= len(body) {
-		return
-	}
-
-	current := body[i+1]
-
-	node, ok := current.(*ast.ContentStatement)
-	if !ok {
-		return
-	}
-
-	if !multiple && node.RightStripped {
-		return
-	}
-
-	original := node.Value
-
-	r := rTrimLeft
-	if multiple {
-		r = rTrimLeftMultiple
-	}
-
-	node.Value = r.ReplaceAllString(node.Value, "")
-
-	node.RightStripped = (original != node.Value)
-}
-
-func omitLeftLast(body []ast.Node, multiple bool) {
-	omitLeft(body, len(body), multiple)
-}
-
-func omitLeft(body []ast.Node, i int, multiple bool) bool {
-	if i-1 < 0 {
-		return false
-	}
-
-	current := body[i-1]
-
-	node, ok := current.(*ast.ContentStatement)
-	if !ok {
-		return false
-	}
-
-	if !multiple && node.LeftStripped {
-		return false
-	}
-
-	original := node.Value
-
-	r := rTrimRight
-	if multiple {
-		r = rTrimRightMultiple
-	}
-
-	node.Value = r.ReplaceAllString(node.Value, "")
-
-	node.LeftStripped = (original != node.Value)
-
-	return node.LeftStripped
-}
-
-func isPrevWhitespace(body []ast.Node) bool {
-	return isPrevWhitespaceProgram(body, len(body), false)
-}
-
-func isPrevWhitespaceProgram(body []ast.Node, i int, isRoot bool) bool {
-	if i < 1 {
-		return isRoot
-	}
-
-	prev := body[i-1]
-
-	if node, ok := prev.(*ast.ContentStatement); ok {
-		if (node.Value == "") && node.RightStripped {
-			// already stripped, so it may be an empty string not caught by the regexp
-			return true
-		}
-
-		r := rPrevWhitespaceStart
-		if (i > 1) || !isRoot {
-			r = rPrevWhitespace
-		}
-
-		return r.MatchString(node.Value)
-	}
-
-	return false
-}
-
-func isNextWhitespace(body []ast.Node) bool {
-	return isNextWhitespaceProgram(body, -1, false)
-}
-
-func isNextWhitespaceProgram(body []ast.Node, i int, isRoot bool) bool {
-	if i+1 >= len(body) {
-		return isRoot
-	}
-
-	next := body[i+1]
-
-	if node, ok := next.(*ast.ContentStatement); ok {
-		if (node.Value == "") && node.LeftStripped {
-			// already stripped, so it may be an empty string not caught by the regexp
-			return true
-		}
-
-		r := rNextWhitespaceEnd
-		if (i+2 > len(body)) || !isRoot {
-			r = rNextWhitespace
-		}
-
-		return r.MatchString(node.Value)
-	}
-
-	return false
-}
-
-//
-// Visitor interface
-//
-
-func (v *whitespaceVisitor) VisitProgram(program *ast.Program) interface{} {
-	isRoot := !v.isRootSeen
-	v.isRootSeen = true
-
-	body := program.Body
-	for i, current := range body {
-		strip, _ := current.Accept(v).(*ast.Strip)
-		if strip == nil {
-			continue
-		}
-
-		_isPrevWhitespace := isPrevWhitespaceProgram(body, i, isRoot)
-		_isNextWhitespace := isNextWhitespaceProgram(body, i, isRoot)
-
-		openStandalone := strip.OpenStandalone && _isPrevWhitespace
-		closeStandalone := strip.CloseStandalone && _isNextWhitespace
-		inlineStandalone := strip.InlineStandalone && _isPrevWhitespace && _isNextWhitespace
-
-		if strip.Close {
-			omitRight(body, i, true)
-		}
-
-		if strip.Open && (i > 0) {
-			omitLeft(body, i, true)
-		}
-
-		if inlineStandalone {
-			omitRight(body, i, false)
-
-			if omitLeft(body, i, false) {
-				// If we are on a standalone node, save the indent info for partials
-				if partial, ok := current.(*ast.PartialStatement); ok {
-					// Pull out the whitespace from the final line
-					if i > 0 {
-						if prevContent, ok := body[i-1].(*ast.ContentStatement); ok {
-							partial.Indent = rPartialIndent.FindString(prevContent.Original)
-						}
-					}
-				}
-			}
-		}
-
-		if b, ok := current.(*ast.BlockStatement); ok {
-			if openStandalone {
-				prog := b.Program
-				if prog == nil {
-					prog = b.Inverse
-				}
-
-				omitRightFirst(prog.Body, false)
-
-				// Strip out the previous content node if it's whitespace only
-				omitLeft(body, i, false)
-			}
-
-			if closeStandalone {
-				prog := b.Inverse
-				if prog == nil {
-					prog = b.Program
-				}
-
-				// Always strip the next node
-				omitRight(body, i, false)
-
-				omitLeftLast(prog.Body, false)
-			}
-
-		}
-	}
-
-	return nil
-}
-
-func (v *whitespaceVisitor) VisitBlock(block *ast.BlockStatement) interface{} {
-	if block.Program != nil {
-		block.Program.Accept(v)
-	}
-
-	if block.Inverse != nil {
-		block.Inverse.Accept(v)
-	}
-
-	program := block.Program
-	inverse := block.Inverse
-
-	if program == nil {
-		program = inverse
-		inverse = nil
-	}
-
-	firstInverse := inverse
-	lastInverse := inverse
-
-	if (inverse != nil) && inverse.Chained {
-		b, _ := inverse.Body[0].(*ast.BlockStatement)
-		firstInverse = b.Program
-
-		for lastInverse.Chained {
-			b, _ := lastInverse.Body[len(lastInverse.Body)-1].(*ast.BlockStatement)
-			lastInverse = b.Program
-		}
-	}
-
-	closeProg := firstInverse
-	if closeProg == nil {
-		closeProg = program
-	}
-
-	strip := &ast.Strip{
-		Open:  (block.OpenStrip != nil) && block.OpenStrip.Open,
-		Close: (block.CloseStrip != nil) && block.CloseStrip.Close,
-
-		OpenStandalone:  isNextWhitespace(program.Body),
-		CloseStandalone: isPrevWhitespace(closeProg.Body),
-	}
-
-	if (block.OpenStrip != nil) && block.OpenStrip.Close {
-		omitRightFirst(program.Body, true)
-	}
-
-	if inverse != nil {
-		if block.InverseStrip != nil {
-			inverseStrip := block.InverseStrip
-
-			if inverseStrip.Open {
-				omitLeftLast(program.Body, true)
-			}
-
-			if inverseStrip.Close {
-				omitRightFirst(firstInverse.Body, true)
-			}
-		}
-
-		if (block.CloseStrip != nil) && block.CloseStrip.Open {
-			omitLeftLast(lastInverse.Body, true)
-		}
-
-		// Find standalone else statements
-		if isPrevWhitespace(program.Body) && isNextWhitespace(firstInverse.Body) {
-			omitLeftLast(program.Body, false)
-
-			omitRightFirst(firstInverse.Body, false)
-		}
-	} else if (block.CloseStrip != nil) && block.CloseStrip.Open {
-		omitLeftLast(program.Body, true)
-	}
-
-	return strip
-}
-
-func (v *whitespaceVisitor) VisitMustache(mustache *ast.MustacheStatement) interface{} {
-	return mustache.Strip
-}
-
-func _inlineStandalone(strip *ast.Strip) interface{} {
-	return &ast.Strip{
-		Open:             strip.Open,
-		Close:            strip.Close,
-		InlineStandalone: true,
-	}
-}
-
-func (v *whitespaceVisitor) VisitPartial(node *ast.PartialStatement) interface{} {
-	strip := node.Strip
-	if strip == nil {
-		strip = &ast.Strip{}
-	}
-
-	return _inlineStandalone(strip)
-}
-
-func (v *whitespaceVisitor) VisitComment(node *ast.CommentStatement) interface{} {
-	strip := node.Strip
-	if strip == nil {
-		strip = &ast.Strip{}
-	}
-
-	return _inlineStandalone(strip)
-}
-
-// NOOP
-func (v *whitespaceVisitor) VisitContent(node *ast.ContentStatement) interface{}    { return nil }
-func (v *whitespaceVisitor) VisitExpression(node *ast.Expression) interface{}       { return nil }
-func (v *whitespaceVisitor) VisitSubExpression(node *ast.SubExpression) interface{} { return nil }
-func (v *whitespaceVisitor) VisitPath(node *ast.PathExpression) interface{}         { return nil }
-func (v *whitespaceVisitor) VisitString(node *ast.StringLiteral) interface{}        { return nil }
-func (v *whitespaceVisitor) VisitBoolean(node *ast.BooleanLiteral) interface{}      { return nil }
-func (v *whitespaceVisitor) VisitNumber(node *ast.NumberLiteral) interface{}        { return nil }
-func (v *whitespaceVisitor) VisitHash(node *ast.Hash) interface{}                   { return nil }
-func (v *whitespaceVisitor) VisitHashPair(node *ast.HashPair) interface{}           { return nil }
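The stripping above is driven by the trim regexes declared at the top of the file. A small sketch of what `rTrimLeft` and `rTrimRight` remove from a content node's value; the input string is illustrative:

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	// Same patterns as the whitespace visitor: spaces/tabs up to one newline
	// on the left, trailing spaces/tabs on the right.
	rTrimLeft  = regexp.MustCompile(`^[ \t]*\r?\n?`)
	rTrimRight = regexp.MustCompile(`[ \t]+$`)
)

func main() {
	content := "   \nHello, world!   "
	stripped := rTrimLeft.ReplaceAllString(content, "")
	stripped = rTrimRight.ReplaceAllString(stripped, "")
	fmt.Printf("%q\n", stripped) // "Hello, world!"
}
```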

+ 0 - 101
vendor/github.com/aymerick/raymond/partial.go

@@ -1,101 +0,0 @@
-package raymond
-
-import (
-	"fmt"
-	"sync"
-)
-
-// partial represents a partial template
-type partial struct {
-	name   string
-	source string
-	tpl    *Template
-}
-
-// partials stores all global partials
-var partials map[string]*partial
-
-// protects global partials
-var partialsMutex sync.RWMutex
-
-func init() {
-	partials = make(map[string]*partial)
-}
-
-// newPartial instantiates a new partial
-func newPartial(name string, source string, tpl *Template) *partial {
-	return &partial{
-		name:   name,
-		source: source,
-		tpl:    tpl,
-	}
-}
-
-// RegisterPartial registers a global partial. That partial will be available to all templates.
-func RegisterPartial(name string, source string) {
-	partialsMutex.Lock()
-	defer partialsMutex.Unlock()
-
-	if partials[name] != nil {
-		panic(fmt.Errorf("Partial already registered: %s", name))
-	}
-
-	partials[name] = newPartial(name, source, nil)
-}
-
-// RegisterPartials registers several global partials. Those partials will be available to all templates.
-func RegisterPartials(partials map[string]string) {
-	for name, p := range partials {
-		RegisterPartial(name, p)
-	}
-}
-
-// RegisterPartialTemplate registers a global partial with given parsed template. That partial will be available to all templates.
-func RegisterPartialTemplate(name string, tpl *Template) {
-	partialsMutex.Lock()
-	defer partialsMutex.Unlock()
-
-	if partials[name] != nil {
-		panic(fmt.Errorf("Partial already registered: %s", name))
-	}
-
-	partials[name] = newPartial(name, "", tpl)
-}
-
-// RemovePartial removes the partial registered under the given name. The partial will not be available globally anymore. This does not affect partials registered on a specific template.
-func RemovePartial(name string) {
-	partialsMutex.Lock()
-	defer partialsMutex.Unlock()
-
-	delete(partials, name)
-}
-
-// RemoveAllPartials removes all globally registered partials. This does not affect partials registered on a specific template.
-func RemoveAllPartials() {
-	partialsMutex.Lock()
-	defer partialsMutex.Unlock()
-
-	partials = make(map[string]*partial)
-}
-
-// findPartial finds a registered global partial
-func findPartial(name string) *partial {
-	partialsMutex.RLock()
-	defer partialsMutex.RUnlock()
-
-	return partials[name]
-}
-
-// template returns parsed partial template
-func (p *partial) template() (*Template, error) {
-	if p.tpl == nil {
-		var err error
-
-		p.tpl, err = Parse(p.source)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return p.tpl, nil
-}
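The global registry above backs the handlebars `{{> name}}` include syntax. A minimal usage sketch, assuming the upstream `github.com/aymerick/raymond` import path; the partial name and context keys are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aymerick/raymond"
)

func main() {
	// Register a global partial once; it becomes visible to all templates.
	raymond.RegisterPartial("link", `<a href="{{url}}">{{text}}</a>`)

	out, err := raymond.Render(`<nav>{{> link}}</nav>`, map[string]string{
		"url":  "https://example.com",
		"text": "Home",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // <nav><a href="https://example.com">Home</a></nav>
}
```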

+ 0 - 28
vendor/github.com/aymerick/raymond/raymond.go

@@ -1,28 +0,0 @@
-// Package raymond provides handlebars evaluation
-package raymond
-
-// Render parses a template and evaluates it with given context
-//
-// Note that this function call is not optimal as your template is parsed every time you call it. You should use the Parse() function instead.
-func Render(source string, ctx interface{}) (string, error) {
-	// parse template
-	tpl, err := Parse(source)
-	if err != nil {
-		return "", err
-	}
-
-	// renders template
-	str, err := tpl.Exec(ctx)
-	if err != nil {
-		return "", err
-	}
-
-	return str, nil
-}
-
-// MustRender parses a template and evaluates it with given context. It panics on error.
-//
-// Note that this function call is not optimal as your template is parsed every time you call it. You should use the Parse() function instead.
-func MustRender(source string, ctx interface{}) string {
-	return MustParse(source).MustExec(ctx)
-}
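As the comments note, `Render` and `MustRender` re-parse the source on every call; when the same template is rendered repeatedly, parsing once and reusing the `*Template` is cheaper. A sketch, assuming the upstream import path:

```go
package main

import (
	"fmt"

	"github.com/aymerick/raymond"
)

func main() {
	// One-off rendering: convenient, but the source is parsed on every call.
	fmt.Println(raymond.MustRender("Hello {{name}}!", map[string]string{"name": "World"}))

	// Repeated rendering: parse once, execute many times.
	tpl := raymond.MustParse("Hello {{name}}!")
	for _, name := range []string{"Alice", "Bob"} {
		fmt.Println(tpl.MustExec(map[string]string{"name": name}))
	}
}
```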

Binary
vendor/github.com/aymerick/raymond/raymond.png


+ 0 - 84
vendor/github.com/aymerick/raymond/string.go

@@ -1,84 +0,0 @@
-package raymond
-
-import (
-	"fmt"
-	"reflect"
-	"strconv"
-)
-
-// SafeString represents a string that must not be escaped.
-//
-// A SafeString can be returned by helpers to disable escaping.
-type SafeString string
-
-// isSafeString returns true if argument is a SafeString
-func isSafeString(value interface{}) bool {
-	if _, ok := value.(SafeString); ok {
-		return true
-	}
-	return false
-}
-
-// Str returns string representation of any basic type value.
-func Str(value interface{}) string {
-	return strValue(reflect.ValueOf(value))
-}
-
-// strValue returns string representation of a reflect.Value
-func strValue(value reflect.Value) string {
-	result := ""
-
-	ival, ok := printableValue(value)
-	if !ok {
-		panic(fmt.Errorf("Can't print value: %q", value))
-	}
-
-	val := reflect.ValueOf(ival)
-
-	switch val.Kind() {
-	case reflect.Array, reflect.Slice:
-		for i := 0; i < val.Len(); i++ {
-			result += strValue(val.Index(i))
-		}
-	case reflect.Bool:
-		result = "false"
-		if val.Bool() {
-			result = "true"
-		}
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		result = fmt.Sprintf("%d", ival)
-	case reflect.Float32, reflect.Float64:
-		result = strconv.FormatFloat(val.Float(), 'f', -1, 64)
-	case reflect.Invalid:
-		result = ""
-	default:
-		result = fmt.Sprintf("%s", ival)
-	}
-
-	return result
-}
-
-// printableValue returns the, possibly indirected, interface value inside v that
-// is best for a call to formatted printer.
-//
-// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
-func printableValue(v reflect.Value) (interface{}, bool) {
-	if v.Kind() == reflect.Ptr {
-		v, _ = indirect(v) // fmt.Fprint handles nil.
-	}
-	if !v.IsValid() {
-		return "", true
-	}
-
-	if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
-		if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
-			v = v.Addr()
-		} else {
-			switch v.Kind() {
-			case reflect.Chan, reflect.Func:
-				return nil, false
-			}
-		}
-	}
-	return v.Interface(), true
-}
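`SafeString` is how a helper opts out of HTML escaping: a plain string returned by a helper is escaped, a `SafeString` is emitted verbatim. A minimal sketch, assuming the upstream import path; the `bold` helper name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/aymerick/raymond"
)

func main() {
	// Returning SafeString tells the engine not to escape the helper output.
	// (Real helpers should escape untrusted text before wrapping it.)
	raymond.RegisterHelper("bold", func(text string) raymond.SafeString {
		return raymond.SafeString("<strong>" + text + "</strong>")
	})

	out := raymond.MustRender(`{{bold title}}`, map[string]string{"title": "R & D"})
	fmt.Println(out) // <strong>R & D</strong>
}
```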

+ 0 - 248
vendor/github.com/aymerick/raymond/template.go

@@ -1,248 +0,0 @@
-package raymond
-
-import (
-	"fmt"
-	"io/ioutil"
-	"reflect"
-	"runtime"
-	"sync"
-
-	"github.com/aymerick/raymond/ast"
-	"github.com/aymerick/raymond/parser"
-)
-
-// Template represents a handlebars template.
-type Template struct {
-	source   string
-	program  *ast.Program
-	helpers  map[string]reflect.Value
-	partials map[string]*partial
-	mutex    sync.RWMutex // protects helpers and partials
-}
-
-// newTemplate instantiates a new template without parsing it
-func newTemplate(source string) *Template {
-	return &Template{
-		source:   source,
-		helpers:  make(map[string]reflect.Value),
-		partials: make(map[string]*partial),
-	}
-}
-
-// Parse instantiates a template by parsing the given source.
-func Parse(source string) (*Template, error) {
-	tpl := newTemplate(source)
-
-	// parse template
-	if err := tpl.parse(); err != nil {
-		return nil, err
-	}
-
-	return tpl, nil
-}
-
-// MustParse instantiates a template by parsing the given source. It panics on error.
-func MustParse(source string) *Template {
-	result, err := Parse(source)
-	if err != nil {
-		panic(err)
-	}
-	return result
-}
-
-// ParseFile reads given file and returns parsed template.
-func ParseFile(filePath string) (*Template, error) {
-	b, err := ioutil.ReadFile(filePath)
-	if err != nil {
-		return nil, err
-	}
-
-	return Parse(string(b))
-}
-
-// parse parses the template
-//
-// It can be called several times; the parsing will be done only once.
-func (tpl *Template) parse() error {
-	if tpl.program == nil {
-		var err error
-
-		tpl.program, err = parser.Parse(tpl.source)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Clone returns a copy of that template.
-func (tpl *Template) Clone() *Template {
-	result := newTemplate(tpl.source)
-
-	result.program = tpl.program
-
-	tpl.mutex.RLock()
-	defer tpl.mutex.RUnlock()
-
-	for name, helper := range tpl.helpers {
-		result.RegisterHelper(name, helper.Interface())
-	}
-
-	for name, partial := range tpl.partials {
-		result.addPartial(name, partial.source, partial.tpl)
-	}
-
-	return result
-}
-
-func (tpl *Template) findHelper(name string) reflect.Value {
-	tpl.mutex.RLock()
-	defer tpl.mutex.RUnlock()
-
-	return tpl.helpers[name]
-}
-
-// RegisterHelper registers a helper for that template.
-func (tpl *Template) RegisterHelper(name string, helper interface{}) {
-	tpl.mutex.Lock()
-	defer tpl.mutex.Unlock()
-
-	if tpl.helpers[name] != zero {
-		panic(fmt.Sprintf("Helper %s already registered", name))
-	}
-
-	val := reflect.ValueOf(helper)
-	ensureValidHelper(name, val)
-
-	tpl.helpers[name] = val
-}
-
-// RegisterHelpers registers several helpers for that template.
-func (tpl *Template) RegisterHelpers(helpers map[string]interface{}) {
-	for name, helper := range helpers {
-		tpl.RegisterHelper(name, helper)
-	}
-}
-
-func (tpl *Template) addPartial(name string, source string, template *Template) {
-	tpl.mutex.Lock()
-	defer tpl.mutex.Unlock()
-
-	if tpl.partials[name] != nil {
-		panic(fmt.Sprintf("Partial %s already registered", name))
-	}
-
-	tpl.partials[name] = newPartial(name, source, template)
-}
-
-func (tpl *Template) findPartial(name string) *partial {
-	tpl.mutex.RLock()
-	defer tpl.mutex.RUnlock()
-
-	return tpl.partials[name]
-}
-
-// RegisterPartial registers a partial for that template.
-func (tpl *Template) RegisterPartial(name string, source string) {
-	tpl.addPartial(name, source, nil)
-}
-
-// RegisterPartials registers several partials for that template.
-func (tpl *Template) RegisterPartials(partials map[string]string) {
-	for name, partial := range partials {
-		tpl.RegisterPartial(name, partial)
-	}
-}
-
-// RegisterPartialFile reads given file and registers its content as a partial with given name.
-func (tpl *Template) RegisterPartialFile(filePath string, name string) error {
-	b, err := ioutil.ReadFile(filePath)
-	if err != nil {
-		return err
-	}
-
-	tpl.RegisterPartial(name, string(b))
-
-	return nil
-}
-
-// RegisterPartialFiles reads several files and registers them as partials; the filename base is used as the partial name.
-func (tpl *Template) RegisterPartialFiles(filePaths ...string) error {
-	if len(filePaths) == 0 {
-		return nil
-	}
-
-	for _, filePath := range filePaths {
-		name := fileBase(filePath)
-
-		if err := tpl.RegisterPartialFile(filePath, name); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// RegisterPartialTemplate registers an already parsed partial for that template.
-func (tpl *Template) RegisterPartialTemplate(name string, template *Template) {
-	tpl.addPartial(name, "", template)
-}
-
-// Exec evaluates template with given context.
-func (tpl *Template) Exec(ctx interface{}) (result string, err error) {
-	return tpl.ExecWith(ctx, nil)
-}
-
-// MustExec evaluates template with given context. It panics on error.
-func (tpl *Template) MustExec(ctx interface{}) string {
-	result, err := tpl.Exec(ctx)
-	if err != nil {
-		panic(err)
-	}
-	return result
-}
-
-// ExecWith evaluates template with given context and private data frame.
-func (tpl *Template) ExecWith(ctx interface{}, privData *DataFrame) (result string, err error) {
-	defer errRecover(&err)
-
-	// parses template if necessary
-	err = tpl.parse()
-	if err != nil {
-		return
-	}
-
-	// setup visitor
-	v := newEvalVisitor(tpl, ctx, privData)
-
-	// visit AST
-	result, _ = tpl.program.Accept(v).(string)
-
-	// named return values
-	return
-}
-
-// errRecover recovers evaluation panic
-func errRecover(errp *error) {
-	e := recover()
-	if e != nil {
-		switch err := e.(type) {
-		case runtime.Error:
-			panic(e)
-		case error:
-			*errp = err
-		default:
-			panic(e)
-		}
-	}
-}
-
-// PrintAST returns string representation of parsed template.
-func (tpl *Template) PrintAST() string {
-	if err := tpl.parse(); err != nil {
-		return fmt.Sprintf("PARSER ERROR: %s", err)
-	}
-
-	return ast.Print(tpl.program)
-}
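Putting the `Template` methods above together: parse once, attach template-local helpers and partials, then execute. A sketch, assuming the upstream import path; helper and partial names are illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aymerick/raymond"
)

func main() {
	tpl := raymond.MustParse(`{{upper greeting}} {{> who}}`)

	// Helpers and partials registered on a template are local to it.
	tpl.RegisterHelper("upper", func(s string) string {
		return strings.ToUpper(s)
	})
	tpl.RegisterPartial("who", `{{name}}`)

	out := tpl.MustExec(map[string]string{"greeting": "hello", "name": "World"})
	fmt.Println(out) // HELLO World
}
```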

+ 0 - 85
vendor/github.com/aymerick/raymond/utils.go

@@ -1,85 +0,0 @@
-package raymond
-
-import (
-	"path"
-	"reflect"
-)
-
-// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
-// We indirect through pointers and empty interfaces (only) because
-// non-empty interfaces have methods we might need.
-//
-// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
-	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
-		if v.IsNil() {
-			return v, true
-		}
-		if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
-			break
-		}
-	}
-	return v, false
-}
-
-// IsTrue returns true if obj is a truthy value.
-func IsTrue(obj interface{}) bool {
-	truth, ok := isTrueValue(reflect.ValueOf(obj))
-	if !ok {
-		return false
-	}
-	return truth
-}
-
-// isTrueValue reports whether the value is 'true', in the sense of not the zero of its type,
-// and whether the value has a meaningful truth value
-//
-// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
-func isTrueValue(val reflect.Value) (truth, ok bool) {
-	if !val.IsValid() {
-		// Something like var x interface{}, never set. It's a form of nil.
-		return false, true
-	}
-	switch val.Kind() {
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
-		truth = val.Len() > 0
-	case reflect.Bool:
-		truth = val.Bool()
-	case reflect.Complex64, reflect.Complex128:
-		truth = val.Complex() != 0
-	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
-		truth = !val.IsNil()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		truth = val.Int() != 0
-	case reflect.Float32, reflect.Float64:
-		truth = val.Float() != 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		truth = val.Uint() != 0
-	case reflect.Struct:
-		truth = true // Struct values are always true.
-	default:
-		return
-	}
-	return truth, true
-}
-
-// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
-//
-// NOTE: borrowed from https://github.com/golang/go/tree/master/src/text/template/exec.go
-func canBeNil(typ reflect.Type) bool {
-	switch typ.Kind() {
-	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return true
-	}
-	return false
-}
-
-// fileBase returns base file name
-//
-// example: /foo/bar/baz.png => baz
-func fileBase(filePath string) string {
-	fileName := path.Base(filePath)
-	fileExt := path.Ext(filePath)
-
-	return fileName[:len(fileName)-len(fileExt)]
-}
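`IsTrue` mirrors the truthiness rules of `text/template`: zero numbers, empty strings and empty collections are falsy, while structs are always truthy. A quick sketch, assuming the upstream import path:

```go
package main

import (
	"fmt"

	"github.com/aymerick/raymond"
)

func main() {
	fmt.Println(raymond.IsTrue(""))          // false: empty string
	fmt.Println(raymond.IsTrue(0))           // false: zero number
	fmt.Println(raymond.IsTrue([]int{}))     // false: empty slice
	fmt.Println(raymond.IsTrue("hi"))        // true
	fmt.Println(raymond.IsTrue([]int{1, 2})) // true
	fmt.Println(raymond.IsTrue(struct{}{}))  // true: structs are always true
}
```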

+ 0 - 7
vendor/github.com/eknkc/amber/.travis.yml

@@ -1,7 +0,0 @@
-language: go
-
-go:
- - tip
-
-script:
- - go test -v ./...

+ 0 - 9
vendor/github.com/eknkc/amber/LICENSE

@@ -1,9 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2012 Ekin Koc ekin@eknkc.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 0 - 442
vendor/github.com/eknkc/amber/README.md

@@ -1,442 +0,0 @@
-# amber [![GoDoc](https://godoc.org/github.com/golang/gddo?status.svg)](http://godoc.org/github.com/eknkc/amber) [![Build Status](https://travis-ci.org/eknkc/amber.svg?branch=master)](https://travis-ci.org/eknkc/amber)
-
-## Notice
-> While Amber is perfectly fine and stable to use, I've been working on a direct Pug.js port for Go. It is somewhat hacky at the moment but take a look at [Pug.go](https://github.com/eknkc/pug) if you are looking for a [Pug.js](https://github.com/pugjs/pug) compatible Go template engine.
-
-### Usage
-```go
-import "github.com/eknkc/amber"
-```
-
-Amber is an elegant templating engine for the Go Programming Language.
-It is inspired by HAML and Jade.
-
-### Tags
-
-A tag is simply a word:
-
-    html
-
-is converted to
-
-```html
-<html></html>
-```
-
-It is possible to add ID and CLASS attributes to tags:
-
-    div#main
-    span.time
-
-are converted to
-
-```html
-<div id="main"></div>
-<span class="time"></span>
-```
-
-Any arbitrary attribute name / value pair can be added this way:
-
-    a[href="http://www.google.com"]
-
-You can mix multiple attributes together
-
-    a#someid[href="/"][title="Main Page"].main.link Click Link
-
-gets converted to
-
-```html
-<a id="someid" class="main link" href="/" title="Main Page">Click Link</a>
-```
-
-It is also possible to define these attributes within the block of a tag
-
-    a
-        #someid
-        [href="/"]
-        [title="Main Page"]
-        .main
-        .link
-        | Click Link
-
-### Doctypes
-
-To add a doctype, use `!!!` or `doctype` keywords:
-
-    !!! transitional
-    // <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
-or use `doctype`
-
-    doctype 5
-    // <!DOCTYPE html>
-
-Available options: `5`, `default`, `xml`, `transitional`, `strict`, `frameset`, `1.1`, `basic`, `mobile`
-
-### Tag Content
-
-For single line tag text, you can just append the text after tag name:
-
-    p Testing!
-
-would yield
-
-    <p>Testing!</p>
-
-For multi line tag text, or nested tags, use indentation:
-
-    html
-        head
-            title Page Title
-        body
-            div#content
-                p
-                    | This is a long page content
-                    | These lines are all part of the parent p
-
-                    a[href="/"] Go To Main Page
-
-### Data
-
-Input template data can be reached by key names directly. For example, assuming the template has been
-executed with the following JSON data:
-
-```json
-{
-  "Name": "Ekin",
-  "LastName": "Koc",
-  "Repositories": [
-    "amber",
-    "dateformat"
-  ],
-  "Avatar": "/images/ekin.jpg",
-  "Friends": 17
-}
-```
-
-It is possible to interpolate fields using `#{}`
-
-    p Welcome #{Name}!
-
-would print
-
-```html
-<p>Welcome Ekin!</p>
-```
-
-Attributes can have field names as well
-
-    a[title=Name][href="/ekin.koc"]
-
-would print
-
-```html
-<a title="Ekin" href="/ekin.koc"></a>
-```
-
-### Expressions
-
-Amber can expand basic expressions. For example, it is possible to concatenate strings with + operator:
-
-    p Welcome #{Name + " " + LastName}
-
-Arithmetic expressions are also supported:
-
-    p You need #{50 - Friends} more friends to reach 50!
-
-Expressions can be used within attributes
-
-    img[alt=Name + " " + LastName][src=Avatar]
-
-### Variables
-
-It is possible to define dynamic variables within templates;
-all variables must start with a $ character and can be assigned as in the following example:
-
-    div
-        $fullname = Name + " " + LastName
-        p Welcome #{$fullname}
-
-If you need to access the supplied data itself (i.e. the object containing the Name, LastName, etc. fields) you can use the `$` variable
-
-    p $.Name
-
-### Conditions
-
-For conditional blocks, it is possible to use `if <expression>`
-
-    div
-        if Friends > 10
-            p You have more than 10 friends
-        else if Friends > 5
-            p You have more than 5 friends
-        else
-            p You need more friends
-
-Again, it is possible to use arithmetic and boolean operators
-
-    div
-        if Name == "Ekin" && LastName == "Koc"
-            p Hey! I know you..
-
-There is a special syntax for conditional attributes. Only block attributes can have conditions:
-
-    div
-        .hasfriends ? Friends > 0
-
-This would yield a div with `hasfriends` class only if the `Friends > 0` condition holds. It is
-perfectly fine to use the same method for other types of attributes:
-
-    div
-        #foo ? Name == "Ekin"
-        [bar=baz] ? len(Repositories) > 0
-
-### Iterations
-
-It is possible to iterate over arrays and maps using `each`:
-
-    each $repo in Repositories
-        p #{$repo}
-
-would print
-
-    p amber
-    p dateformat
-
-It is also possible to iterate over values and indexes at the same time
-
-    each $i, $repo in Repositories
-        p
-            .even ? $i % 2 == 0
-            .odd ? $i % 2 == 1
-
-### Mixins
-
-Mixins (reusable template blocks that accept arguments) can be defined:
-
-    mixin surprise
-        span Surprise!
-    mixin link($href, $title, $text)
-        a[href=$href][title=$title] #{$text}
-
-and then called multiple times within a template (or even within another mixin definition):
-
-    div
-    	+surprise
-    	+surprise
-        +link("http://google.com", "Google", "Check out Google")
-
-Template data, variables, expressions, etc., can all be passed as arguments:
-
-    +link(GoogleUrl, $googleTitle, "Check out " + $googleTitle)
-
-### Imports
-
-A template can import other templates using `import`:
-
-    a.amber
-        p this is template a
-
-    b.amber
-        p this is template b
-
-    c.amber
-        div
-            import a
-            import b
-
-gets compiled to
-
-    div
-        p this is template a
-        p this is template b
-
-### Inheritance
-
-A template can inherit from other templates. To inherit another template, the `extends` keyword should be used.
-The parent template can define several named blocks and the child template can modify those blocks.
-
-    master.amber
-        !!! 5
-        html
-            head
-                block meta
-                    meta[name="description"][content="This is a great website"]
-
-                title
-                    block title
-                        | Default title
-            body
-                block content
-
-    subpage.amber
-        extends master
-
-        block title
-            | Some sub page!
-
-        block append meta
-            // This will be added after the description meta tag. It is also possible
-            // to prepend something to an existing block
-            meta[name="keywords"][content="foo bar"]
-
-        block content
-            div#main
-                p Some content here
-
-### License
-(The MIT License)
-
-Copyright (c) 2012 Ekin Koc <ekin@eknkc.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-## Usage
-
-```go
-var DefaultOptions = Options{true, false}
-var DefaultDirOptions = DirOptions{".amber", true}
-```
-
-#### func  Compile
-
-```go
-func Compile(input string, options Options) (*template.Template, error)
-```
-Parses and compiles the supplied amber template string. Returns corresponding Go
-Template (html/templates) instance. Necessary runtime functions will be injected
-and the template will be ready to be executed.
-
-#### func  CompileFile
-
-```go
-func CompileFile(filename string, options Options) (*template.Template, error)
-```
-Parses and compiles the contents of supplied filename. Returns corresponding Go
-Template (html/templates) instance. Necessary runtime functions will be injected
-and the template will be ready to be executed.
-
-#### func  CompileDir
-```go
-func CompileDir(dirname string, dopt DirOptions, opt Options) (map[string]*template.Template, error)
-```
-Parses and compiles the contents of a supplied directory name. Returns a mapping of template name (extension stripped) to corresponding Go Template (html/template) instance. Necessary runtime functions will be injected and the template will be ready to be executed.
-
-If there are templates in subdirectories, their key in the map will be their path relative to `dirname`. For example:
-```
-templates/
-   |-- index.amber
-   |-- layouts/
-         |-- base.amber
-```
-```go
-templates, err := amber.CompileDir("templates/", amber.DefaultDirOptions, amber.DefaultOptions)
-templates["index"] // index.amber Go Template
-templates["layouts/base"] // base.amber Go Template
-```
-By default, the search will be recursive and will match only files ending in ".amber". If recursive is turned off, it will only search the top level of the directory. Specified extension must start with a period.
-
-#### type Compiler
-
-```go
-type Compiler struct {
-	// Compiler options
-	Options
-}
-```
-
-Compiler is the main interface of Amber Template Engine. In order to use an
-Amber template, it is required to create a Compiler and compile an Amber source
-to native Go template.
-
-    compiler := amber.New()
-    // Parse the input file
-    err := compiler.ParseFile("./input.amber")
-    if err == nil {
-    	// Compile input file to Go template
-    	tpl, err := compiler.Compile()
-    	if err == nil {
-    		// Check built in html/template documentation for further details
-    		tpl.Execute(os.Stdout, somedata)
-    	}
-    }
-
-#### func  New
-
-```go
-func New() *Compiler
-```
-Create and initialize a new Compiler
-
-#### func (*Compiler) Compile
-
-```go
-func (c *Compiler) Compile() (*template.Template, error)
-```
-Compile amber and create a Go Template (html/templates) instance. Necessary
-runtime functions will be injected and the template will be ready to be
-executed.
-
-#### func (*Compiler) CompileString
-
-```go
-func (c *Compiler) CompileString() (string, error)
-```
-Compile the template and return the Go Template source. You would not be using this
-unless debugging / checking the output. Please use the Compile method to obtain a
-template instance directly.
-
-#### func (*Compiler) CompileWriter
-
-```go
-func (c *Compiler) CompileWriter(out io.Writer) (err error)
-```
-Compile amber and write the Go Template source into the given io.Writer instance. You
-would not be using this unless debugging / checking the output. Please use the
-Compile method to obtain a template instance directly.
-
-#### func (*Compiler) Parse
-
-```go
-func (c *Compiler) Parse(input string) (err error)
-```
-Parse given raw amber template string.
-
-#### func (*Compiler) ParseFile
-
-```go
-func (c *Compiler) ParseFile(filename string) (err error)
-```
-Parse the amber template file in given path
-
-#### type Options
-
-```go
-type Options struct {
-	// Setting if pretty printing is enabled.
-	// Pretty printing ensures that the output html is properly indented and in human readable form.
-	// If disabled, produced HTML is compact. This might be more suitable in production environments.
-	// Default: true
-	PrettyPrint bool
-	// Setting if line number emitting is enabled
-	// In this form, Amber emits line number comments in the output template. It is usable in debugging environments.
-	// Default: false
-	LineNumbers bool
-}
-```
-
-#### type DirOptions
-
-```go
-// Used to provide options to directory compilation
-type DirOptions struct {
-	// File extension to match for compilation
-	Ext string
-	// Whether or not to walk subdirectories
-	Recursive bool
-}
-```
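A minimal end-to-end sketch of the API documented above: compile an `.amber` file and execute the resulting `html/template` instance (the file name and data keys are illustrative):

```go
package main

import (
	"os"

	"github.com/eknkc/amber"
)

func main() {
	// CompileFile returns a ready-to-execute html/template instance.
	tpl, err := amber.CompileFile("index.amber", amber.DefaultOptions)
	if err != nil {
		panic(err)
	}

	data := map[string]interface{}{
		"Name":    "Ekin",
		"Friends": 17,
	}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```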

+ 0 - 844
vendor/github.com/eknkc/amber/compiler.go

@@ -1,844 +0,0 @@
-package amber
-
-import (
-	"bytes"
-	"container/list"
-	"errors"
-	"fmt"
-	"go/ast"
-	gp "go/parser"
-	gt "go/token"
-	"html/template"
-	"io"
-	"net/http"
-	"os"
-	"path/filepath"
-	"reflect"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-
-	"github.com/eknkc/amber/parser"
-)
-
-var builtinFunctions = [...]string{
-	"len",
-	"print",
-	"printf",
-	"println",
-	"urlquery",
-	"js",
-	"json",
-	"index",
-	"html",
-	"unescaped",
-}
-
-const (
-	dollar = "__DOLLAR__"
-)
-
-// Compiler is the main interface of Amber Template Engine.
-// In order to use an Amber template, it is required to create a Compiler and
-// compile an Amber source to native Go template.
-//	compiler := amber.New()
-// 	// Parse the input file
-//	err := compiler.ParseFile("./input.amber")
-//	if err == nil {
-//		// Compile input file to Go template
-//		tpl, err := compiler.Compile()
-//		if err == nil {
-//			// Check built in html/template documentation for further details
-//			tpl.Execute(os.Stdout, somedata)
-//		}
-//	}
-type Compiler struct {
-	// Compiler options
-	Options
-	filename     string
-	node         parser.Node
-	indentLevel  int
-	newline      bool
-	buffer       *bytes.Buffer
-	tempvarIndex int
-	mixins       map[string]*parser.Mixin
-}
-
-// New creates and initialize a new Compiler.
-func New() *Compiler {
-	compiler := new(Compiler)
-	compiler.filename = ""
-	compiler.tempvarIndex = 0
-	compiler.PrettyPrint = true
-	compiler.Options = DefaultOptions
-	compiler.mixins = make(map[string]*parser.Mixin)
-
-	return compiler
-}
-
-// Options defines template output behavior.
-type Options struct {
-	// Setting if pretty printing is enabled.
-	// Pretty printing ensures that the output html is properly indented and in human readable form.
-	// If disabled, produced HTML is compact. This might be more suitable in production environments.
-	// Default: true
-	PrettyPrint bool
-	// Setting if line number emitting is enabled
-	// In this form, Amber emits line number comments in the output template. It is usable in debugging environments.
-	// Default: false
-	LineNumbers bool
-	// Setting the virtual filesystem to use
-	// If set, will attempt to use a virtual filesystem provided instead of os.
-	// Default: nil
-	VirtualFilesystem http.FileSystem
-}
-
-// DirOptions is used to provide options to directory compilation.
-type DirOptions struct {
-	// File extension to match for compilation
-	Ext string
-	// Whether or not to walk subdirectories
-	Recursive bool
-}
-
-// DefaultOptions sets pretty-printing to true and line numbering to false.
-var DefaultOptions = Options{true, false, nil}
-
-// DefaultDirOptions sets expected file extension to ".amber" and recursive search for templates within a directory to true.
-var DefaultDirOptions = DirOptions{".amber", true}
-
-// Compile parses and compiles the supplied amber template string. Returns corresponding Go Template (html/templates) instance.
-// Necessary runtime functions will be injected and the template will be ready to be executed.
-func Compile(input string, options Options) (*template.Template, error) {
-	comp := New()
-	comp.Options = options
-
-	err := comp.Parse(input)
-	if err != nil {
-		return nil, err
-	}
-
-	return comp.Compile()
-}
-
-// CompileData parses and compiles the supplied amber template []byte.
-// Returns corresponding Go Template (html/templates) instance.
-// Necessary runtime functions will be injected and the template will be ready to be executed.
-func CompileData(input []byte, filename string, options Options) (*template.Template, error) {
-	comp := New()
-	comp.Options = options
-
-	err := comp.ParseData(input, filename)
-	if err != nil {
-		return nil, err
-	}
-
-	return comp.Compile()
-}
-
-// MustCompile is the same as Compile, except the input is assumed error free. If not, it panics.
-func MustCompile(input string, options Options) *template.Template {
-	t, err := Compile(input, options)
-	if err != nil {
-		panic(err)
-	}
-	return t
-}
-
-// CompileFile parses and compiles the contents of supplied filename. Returns corresponding Go Template (html/templates) instance.
-// Necessary runtime functions will be injected and the template will be ready to be executed.
-func CompileFile(filename string, options Options) (*template.Template, error) {
-	comp := New()
-	comp.Options = options
-
-	err := comp.ParseFile(filename)
-	if err != nil {
-		return nil, err
-	}
-
-	return comp.Compile()
-}
-
-// MustCompileFile is the same as CompileFile, except the input is assumed error free. If not, it panics.
-func MustCompileFile(filename string, options Options) *template.Template {
-	t, err := CompileFile(filename, options)
-	if err != nil {
-		panic(err)
-	}
-	return t
-}
-
-// CompileDir parses and compiles the contents of a supplied directory path, with options.
-// Returns a map of a template identifier (key) to a Go Template instance.
-// Ex: if the dirname="templates/" had a file "index.amber" the key would be "index"
-// If option for recursive is True, this parses every file of relevant extension
-// in all subdirectories. The key then is the path e.g: "layouts/layout"
-func CompileDir(dirname string, dopt DirOptions, opt Options) (map[string]*template.Template, error) {
-	dir, err := os.Open(dirname)
-	if err != nil && opt.VirtualFilesystem != nil {
-		vdir, err := opt.VirtualFilesystem.Open(dirname)
-		if err != nil {
-			return nil, err
-		}
-		dir = vdir.(*os.File)
-	} else if err != nil {
-		return nil, err
-	}
-	defer dir.Close()
-
-	files, err := dir.Readdir(0)
-	if err != nil {
-		return nil, err
-	}
-
-	compiled := make(map[string]*template.Template)
-	for _, file := range files {
-		// filename is for example "index.amber"
-		filename := file.Name()
-		fileext := filepath.Ext(filename)
-
-		// If recursive is true and there's a subdirectory, recurse
-		if dopt.Recursive && file.IsDir() {
-			dirpath := filepath.Join(dirname, filename)
-			subcompiled, err := CompileDir(dirpath, dopt, opt)
-			if err != nil {
-				return nil, err
-			}
-			// Copy templates from subdirectory into parent template mapping
-			for k, v := range subcompiled {
-				// Concat with parent directory name for unique paths
-				key := filepath.Join(filename, k)
-				compiled[key] = v
-			}
-		} else if fileext == dopt.Ext {
-			// Otherwise compile the file and add to mapping
-			fullpath := filepath.Join(dirname, filename)
-			tmpl, err := CompileFile(fullpath, opt)
-			if err != nil {
-				return nil, err
-			}
-			// Strip extension
-			key := filename[0 : len(filename)-len(fileext)]
-			compiled[key] = tmpl
-		}
-	}
-
-	return compiled, nil
-}
-
-// MustCompileDir is the same as CompileDir, except the input is assumed error free. If not, it panics.
-func MustCompileDir(dirname string, dopt DirOptions, opt Options) map[string]*template.Template {
-	m, err := CompileDir(dirname, dopt, opt)
-	if err != nil {
-		panic(err)
-	}
-	return m
-}
-
-// Parse given raw amber template string.
-func (c *Compiler) Parse(input string) (err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = errors.New(r.(string))
-		}
-	}()
-
-	parser, err := parser.StringParser(input)
-
-	if err != nil {
-		return
-	}
-
-	c.node = parser.Parse()
-	return
-}
-
-// ParseData parses the given raw amber template bytes, along with the filename they belong to.
-func (c *Compiler) ParseData(input []byte, filename string) (err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = errors.New(r.(string))
-		}
-	}()
-
-	parser, err := parser.ByteParser(input)
-	parser.SetFilename(filename)
-	if c.VirtualFilesystem != nil {
-		parser.SetVirtualFilesystem(c.VirtualFilesystem)
-	}
-
-	if err != nil {
-		return
-	}
-
-	c.node = parser.Parse()
-	return
-}
-
-// ParseFile parses the amber template file in given path.
-func (c *Compiler) ParseFile(filename string) (err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = errors.New(r.(string))
-		}
-	}()
-
-	p, err := parser.FileParser(filename)
-	if err != nil && c.VirtualFilesystem != nil {
-		p, err = parser.VirtualFileParser(filename, c.VirtualFilesystem)
-	}
-	if err != nil {
-		return
-	}
-
-	c.node = p.Parse()
-	c.filename = filename
-	return
-}
-
-// Compile amber and create a Go Template (html/templates) instance.
-// Necessary runtime functions will be injected and the template will be ready to be executed.
-func (c *Compiler) Compile() (*template.Template, error) {
-	return c.CompileWithName(filepath.Base(c.filename))
-}
-
-// CompileWithName is the same as Compile, but allows to specify a name for the template.
-func (c *Compiler) CompileWithName(name string) (*template.Template, error) {
-	return c.CompileWithTemplate(template.New(name))
-}
-
-// CompileWithTemplate is the same as Compile but allows to specify a template.
-func (c *Compiler) CompileWithTemplate(t *template.Template) (*template.Template, error) {
-	data, err := c.CompileString()
-
-	if err != nil {
-		return nil, err
-	}
-
-	tpl, err := t.Funcs(FuncMap).Parse(data)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return tpl, nil
-}
-
-// CompileWriter compiles amber and writes the Go Template source into given io.Writer instance.
-// You would not be using this unless debugging / checking the output. Please use Compile
-// method to obtain a template instance directly.
-func (c *Compiler) CompileWriter(out io.Writer) (err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = errors.New(r.(string))
-		}
-	}()
-
-	c.buffer = new(bytes.Buffer)
-	c.visit(c.node)
-
-	if c.buffer.Len() > 0 {
-		c.write("\n")
-	}
-
-	_, err = c.buffer.WriteTo(out)
-	return
-}
-
-// CompileString compiles the template and returns the Go Template source.
-// You would not be using this unless debugging / checking the output. Please use Compile
-// method to obtain a template instance directly.
-func (c *Compiler) CompileString() (string, error) {
-	var buf bytes.Buffer
-
-	if err := c.CompileWriter(&buf); err != nil {
-		return "", err
-	}
-
-	result := buf.String()
-
-	return result, nil
-}
-
-func (c *Compiler) visit(node parser.Node) {
-	defer func() {
-		if r := recover(); r != nil {
-			if rs, ok := r.(string); ok && strings.HasPrefix(rs, "Amber Error") {
-				panic(r)
-			}
-
-			pos := node.Pos()
-
-			if len(pos.Filename) > 0 {
-				panic(fmt.Sprintf("Amber Error in <%s>: %v - Line: %d, Column: %d, Length: %d", pos.Filename, r, pos.LineNum, pos.ColNum, pos.TokenLength))
-			} else {
-				panic(fmt.Sprintf("Amber Error: %v - Line: %d, Column: %d, Length: %d", r, pos.LineNum, pos.ColNum, pos.TokenLength))
-			}
-		}
-	}()
-
-	switch node.(type) {
-	case *parser.Block:
-		c.visitBlock(node.(*parser.Block))
-	case *parser.Doctype:
-		c.visitDoctype(node.(*parser.Doctype))
-	case *parser.Comment:
-		c.visitComment(node.(*parser.Comment))
-	case *parser.Tag:
-		c.visitTag(node.(*parser.Tag))
-	case *parser.Text:
-		c.visitText(node.(*parser.Text))
-	case *parser.Condition:
-		c.visitCondition(node.(*parser.Condition))
-	case *parser.Each:
-		c.visitEach(node.(*parser.Each))
-	case *parser.Assignment:
-		c.visitAssignment(node.(*parser.Assignment))
-	case *parser.Mixin:
-		c.visitMixin(node.(*parser.Mixin))
-	case *parser.MixinCall:
-		c.visitMixinCall(node.(*parser.MixinCall))
-	}
-}
-
-func (c *Compiler) write(value string) {
-	c.buffer.WriteString(value)
-}
-
-func (c *Compiler) indent(offset int, newline bool) {
-	if !c.PrettyPrint {
-		return
-	}
-
-	if newline && c.buffer.Len() > 0 {
-		c.write("\n")
-	}
-
-	for i := 0; i < c.indentLevel+offset; i++ {
-		c.write("\t")
-	}
-}
-
-func (c *Compiler) tempvar() string {
-	c.tempvarIndex++
-	return "$__amber_" + strconv.Itoa(c.tempvarIndex)
-}
-
-func (c *Compiler) escape(input string) string {
-	return strings.Replace(strings.Replace(input, `\`, `\\`, -1), `"`, `\"`, -1)
-}
-
-func (c *Compiler) visitBlock(block *parser.Block) {
-	for _, node := range block.Children {
-		if _, ok := node.(*parser.Text); !block.CanInline() && ok {
-			c.indent(0, true)
-		}
-
-		c.visit(node)
-	}
-}
-
-func (c *Compiler) visitDoctype(doctype *parser.Doctype) {
-	c.write(doctype.String())
-}
-
-func (c *Compiler) visitComment(comment *parser.Comment) {
-	if comment.Silent {
-		return
-	}
-
-	c.indent(0, false)
-
-	if comment.Block == nil {
-		c.write(`{{unescaped "<!-- ` + c.escape(comment.Value) + ` -->"}}`)
-	} else {
-		c.write(`<!-- ` + comment.Value)
-		c.visitBlock(comment.Block)
-		c.write(` -->`)
-	}
-}
-
-func (c *Compiler) visitCondition(condition *parser.Condition) {
-	c.write(`{{if ` + c.visitRawInterpolation(condition.Expression) + `}}`)
-	c.visitBlock(condition.Positive)
-	if condition.Negative != nil {
-		c.write(`{{else}}`)
-		c.visitBlock(condition.Negative)
-	}
-	c.write(`{{end}}`)
-}
-
-func (c *Compiler) visitEach(each *parser.Each) {
-	if each.Block == nil {
-		return
-	}
-
-	if len(each.Y) == 0 {
-		c.write(`{{range ` + each.X + ` := ` + c.visitRawInterpolation(each.Expression) + `}}`)
-	} else {
-		c.write(`{{range ` + each.X + `, ` + each.Y + ` := ` + c.visitRawInterpolation(each.Expression) + `}}`)
-	}
-	c.visitBlock(each.Block)
-	c.write(`{{end}}`)
-}
-
-func (c *Compiler) visitAssignment(assgn *parser.Assignment) {
-	c.write(`{{` + assgn.X + ` := ` + c.visitRawInterpolation(assgn.Expression) + `}}`)
-}
-
-func (c *Compiler) visitTag(tag *parser.Tag) {
-	type attrib struct {
-		name      string
-		value     func() string
-		condition string
-	}
-
-	attribs := make(map[string]*attrib)
-
-	for _, item := range tag.Attributes {
-		attritem := item
-		attr := new(attrib)
-		attr.name = item.Name
-
-		attr.value = func() string {
-			if !attritem.IsRaw {
-				return c.visitInterpolation(attritem.Value)
-			} else if attritem.Value == "" {
-				return ""
-			} else {
-				return attritem.Value
-			}
-		}
-
-		if len(attritem.Condition) != 0 {
-			attr.condition = c.visitRawInterpolation(attritem.Condition)
-		}
-
-		if attr.name == "class" && attribs["class"] != nil {
-			prevclass := attribs["class"]
-			prevvalue := prevclass.value
-
-			prevclass.value = func() string {
-				aval := attr.value()
-
-				if len(attr.condition) > 0 {
-					aval = `{{if ` + attr.condition + `}}` + aval + `{{end}}`
-				}
-
-				if len(prevclass.condition) > 0 {
-					return `{{if ` + prevclass.condition + `}}` + prevvalue() + `{{end}} ` + aval
-				}
-
-				return prevvalue() + " " + aval
-			}
-		} else {
-			attribs[attritem.Name] = attr
-		}
-	}
-
-	keys := make([]string, 0, len(attribs))
-	for key := range attribs {
-		keys = append(keys, key)
-	}
-	sort.Strings(keys)
-
-	c.indent(0, true)
-	c.write("<" + tag.Name)
-
-	for _, name := range keys {
-		value := attribs[name]
-
-		if len(value.condition) > 0 {
-			c.write(`{{if ` + value.condition + `}}`)
-		}
-
-		val := value.value()
-
-		if val == "" {
-			c.write(` ` + name)
-		} else {
-			c.write(` ` + name + `="` + val + `"`)
-		}
-
-		if len(value.condition) > 0 {
-			c.write(`{{end}}`)
-		}
-	}
-
-	if tag.IsSelfClosing() {
-		c.write(` />`)
-	} else {
-		c.write(`>`)
-
-		if tag.Block != nil {
-			if !tag.Block.CanInline() {
-				c.indentLevel++
-			}
-
-			c.visitBlock(tag.Block)
-
-			if !tag.Block.CanInline() {
-				c.indentLevel--
-				c.indent(0, true)
-			}
-		}
-
-		c.write(`</` + tag.Name + `>`)
-	}
-}
-
-var textInterpolateRegexp = regexp.MustCompile(`#\{(.*?)\}`)
-var textEscapeRegexp = regexp.MustCompile(`\{\{(.*?)\}\}`)
-
-func (c *Compiler) visitText(txt *parser.Text) {
-	value := textEscapeRegexp.ReplaceAllStringFunc(txt.Value, func(value string) string {
-		return `{{"{{"}}` + value[2:len(value)-2] + `{{"}}"}}`
-	})
-
-	value = textInterpolateRegexp.ReplaceAllStringFunc(value, func(value string) string {
-		return c.visitInterpolation(value[2 : len(value)-1])
-	})
-
-	lines := strings.Split(value, "\n")
-	for i := 0; i < len(lines); i++ {
-		c.write(lines[i])
-
-		if i < len(lines)-1 {
-			c.write("\n")
-			c.indent(0, false)
-		}
-	}
-}
-
-func (c *Compiler) visitInterpolation(value string) string {
-	return `{{` + c.visitRawInterpolation(value) + `}}`
-}
-
-func (c *Compiler) visitRawInterpolation(value string) string {
-	if value == "" {
-		value = "\"\""
-	}
-
-	value = strings.Replace(value, "$", dollar, -1)
-	expr, err := gp.ParseExpr(value)
-	if err != nil {
-		panic("Unable to parse expression.")
-	}
-	value = strings.Replace(c.visitExpression(expr), dollar, "$", -1)
-	return value
-}
-
-func (c *Compiler) visitExpression(outerexpr ast.Expr) string {
-	stack := list.New()
-
-	pop := func() string {
-		if stack.Front() == nil {
-			return ""
-		}
-
-		val := stack.Front().Value.(string)
-		stack.Remove(stack.Front())
-		return val
-	}
-
-	var exec func(ast.Expr)
-
-	exec = func(expr ast.Expr) {
-		switch expr := expr.(type) {
-		case *ast.BinaryExpr:
-			{
-				be := expr
-
-				exec(be.Y)
-				exec(be.X)
-
-				negate := false
-				name := c.tempvar()
-				c.write(`{{` + name + ` := `)
-
-				switch be.Op {
-				case gt.ADD:
-					c.write("__amber_add ")
-				case gt.SUB:
-					c.write("__amber_sub ")
-				case gt.MUL:
-					c.write("__amber_mul ")
-				case gt.QUO:
-					c.write("__amber_quo ")
-				case gt.REM:
-					c.write("__amber_rem ")
-				case gt.LAND:
-					c.write("and ")
-				case gt.LOR:
-					c.write("or ")
-				case gt.EQL:
-					c.write("__amber_eql ")
-				case gt.NEQ:
-					c.write("__amber_eql ")
-					negate = true
-				case gt.LSS:
-					c.write("__amber_lss ")
-				case gt.GTR:
-					c.write("__amber_gtr ")
-				case gt.LEQ:
-					c.write("__amber_gtr ")
-					negate = true
-				case gt.GEQ:
-					c.write("__amber_lss ")
-					negate = true
-				default:
-					panic("Unexpected operator!")
-				}
-
-				c.write(pop() + ` ` + pop() + `}}`)
-
-				if !negate {
-					stack.PushFront(name)
-				} else {
-					negname := c.tempvar()
-					c.write(`{{` + negname + ` := not ` + name + `}}`)
-					stack.PushFront(negname)
-				}
-			}
-		case *ast.UnaryExpr:
-			{
-				ue := expr
-
-				exec(ue.X)
-
-				name := c.tempvar()
-				c.write(`{{` + name + ` := `)
-
-				switch ue.Op {
-				case gt.SUB:
-					c.write("__amber_minus ")
-				case gt.ADD:
-					c.write("__amber_plus ")
-				case gt.NOT:
-					c.write("not ")
-				default:
-					panic("Unexpected operator!")
-				}
-
-				c.write(pop() + `}}`)
-				stack.PushFront(name)
-			}
-		case *ast.ParenExpr:
-			exec(expr.X)
-		case *ast.BasicLit:
-			stack.PushFront(strings.Replace(expr.Value, dollar, "$", -1))
-		case *ast.Ident:
-			name := expr.Name
-			if len(name) >= len(dollar) && name[:len(dollar)] == dollar {
-				if name == dollar {
-					stack.PushFront(`.`)
-				} else {
-					stack.PushFront(`$` + expr.Name[len(dollar):])
-				}
-			} else {
-				stack.PushFront(`.` + expr.Name)
-			}
-		case *ast.SelectorExpr:
-			se := expr
-			exec(se.X)
-			x := pop()
-
-			if x == "." {
-				x = ""
-			}
-
-			name := c.tempvar()
-			c.write(`{{` + name + ` := ` + x + `.` + se.Sel.Name + `}}`)
-			stack.PushFront(name)
-		case *ast.CallExpr:
-			ce := expr
-
-			for i := len(ce.Args) - 1; i >= 0; i-- {
-				exec(ce.Args[i])
-			}
-
-			name := c.tempvar()
-			builtin := false
-
-			if ident, ok := ce.Fun.(*ast.Ident); ok {
-				for _, fname := range builtinFunctions {
-					if fname == ident.Name {
-						builtin = true
-						break
-					}
-				}
-				for fname := range FuncMap {
-					if fname == ident.Name {
-						builtin = true
-						break
-					}
-				}
-			}
-
-			if builtin {
-				stack.PushFront(ce.Fun.(*ast.Ident).Name)
-				c.write(`{{` + name + ` := ` + pop())
-			} else if se, ok := ce.Fun.(*ast.SelectorExpr); ok {
-				exec(se.X)
-				x := pop()
-
-				if x == "." {
-					x = ""
-				}
-				stack.PushFront(se.Sel.Name)
-				c.write(`{{` + name + ` := ` + x + `.` + pop())
-			} else {
-				exec(ce.Fun)
-				c.write(`{{` + name + ` := call ` + pop())
-			}
-
-			for i := 0; i < len(ce.Args); i++ {
-				c.write(` `)
-				c.write(pop())
-			}
-
-			c.write(`}}`)
-
-			stack.PushFront(name)
-		default:
-			panic("Unable to parse expression. Unsupported: " + reflect.TypeOf(expr).String())
-		}
-	}
-
-	exec(outerexpr)
-	return pop()
-}
-
-func (c *Compiler) visitMixin(mixin *parser.Mixin) {
-	c.mixins[mixin.Name] = mixin
-}
-
-func (c *Compiler) visitMixinCall(mixinCall *parser.MixinCall) {
-	mixin := c.mixins[mixinCall.Name]
-
-	switch {
-	case mixin == nil:
-		panic(fmt.Sprintf("unknown mixin %q", mixinCall.Name))
-
-	case len(mixinCall.Args) < len(mixin.Args):
-		panic(fmt.Sprintf(
-			"not enough arguments in call to mixin %q (have: %d, want: %d)",
-			mixinCall.Name,
-			len(mixinCall.Args),
-			len(mixin.Args),
-		))
-	case len(mixinCall.Args) > len(mixin.Args):
-		panic(fmt.Sprintf(
-			"too many arguments in call to mixin %q (have: %d, want: %d)",
-			mixinCall.Name,
-			len(mixinCall.Args),
-			len(mixin.Args),
-		))
-	}
-
-	for i, arg := range mixin.Args {
-		c.write(fmt.Sprintf(`{{%s := %s}}`, arg, c.visitRawInterpolation(mixinCall.Args[i])))
-	}
-	c.visitBlock(mixin.Block)
-}

+ 0 - 257
vendor/github.com/eknkc/amber/doc.go

@@ -1,257 +0,0 @@
-/*
-Package amber is an elegant templating engine for the Go programming language.
-It is inspired by HAML and Jade.
-
-Tags
-
-A tag is simply a word:
-
-    html
-
-is converted to
-
-    <html></html>
-
-It is possible to add ID and CLASS attributes to tags:
-
-    div#main
-    span.time
-
-are converted to
-
-    <div id="main"></div>
-    <span class="time"></span>
-
-Any arbitrary attribute name / value pair can be added this way:
-
-    a[href="http://www.google.com"]
-
-You can mix multiple attributes together
-
-    a#someid[href="/"][title="Main Page"].main.link Click Link
-
-gets converted to
-
-    <a id="someid" class="main link" href="/" title="Main Page">Click Link</a>
-
-It is also possible to define these attributes within the block of a tag
-
-    a
-        #someid
-        [href="/"]
-        [title="Main Page"]
-        .main
-        .link
-        | Click Link
-
-Doctypes
-
-To add a doctype, use `!!!` or `doctype` keywords:
-
-    !!! transitional
-    // <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
-or use `doctype`
-
-    doctype 5
-    // <!DOCTYPE html>
-
-Available options: `5`, `default`, `xml`, `transitional`, `strict`, `frameset`, `1.1`, `basic`, `mobile`
-
-Tag Content
-
-For single line tag text, you can just append the text after tag name:
-
-    p Testing!
-
-would yield
-
-    <p>Testing!</p>
-
-For multi line tag text, or nested tags, use indentation:
-
-    html
-        head
-            title Page Title
-        body
-            div#content
-                p
-                    | This is a long page content
-                    | These lines are all part of the parent p
-
-                    a[href="/"] Go To Main Page
-
-Data
-
-Input template data can be reached by key names directly. For example, assuming the template has been
-executed with the following JSON data:
-
-    {
-        "Name": "Ekin",
-        "LastName": "Koc",
-        "Repositories": [
-            "amber",
-            "dateformat"
-        ],
-        "Avatar": "/images/ekin.jpg",
-        "Friends": 17
-    }
-
-It is possible to interpolate fields using `#{}`
-
-    p Welcome #{Name}!
-
-would print
-
-    <p>Welcome Ekin!</p>
-
-Attributes can have field names as well
-
-    a[title=Name][href="/ekin.koc"]
-
-would print
-
-    <a title="Ekin" href="/ekin.koc"></a>
-
-Expressions
-
-Amber can expand basic expressions. For example, it is possible to concatenate strings with the + operator:
-
-    p Welcome #{Name + " " + LastName}
-
-Arithmetic expressions are also supported:
-
-    p You need #{50 - Friends} more friends to reach 50!
-
-Expressions can be used within attributes
-
-    img[alt=Name + " " + LastName][src=Avatar]
-
-Variables
-
-It is possible to define dynamic variables within templates;
-all variables must start with a $ character and can be assigned as in the following example:
-
-    div
-        $fullname = Name + " " + LastName
-        p Welcome #{$fullname}
-
-If you need to access the supplied data itself (i.e. the object containing the Name, LastName etc. fields) you can use the `$` variable
-
-    p $.Name
-
-Conditions
-
-For conditional blocks, it is possible to use `if <expression>`
-
-    div
-        if Friends > 10
-            p You have more than 10 friends
-        else if Friends > 5
-            p You have more than 5 friends
-        else
-            p You need more friends
-
-Again, it is possible to use arithmetic and boolean operators
-
-    div
-        if Name == "Ekin" && LastName == "Koc"
-            p Hey! I know you..
-
-There is a special syntax for conditional attributes. Only block attributes can have conditions;
-
-    div
-        .hasfriends ? Friends > 0
-
-This would yield a div with `hasfriends` class only if the `Friends > 0` condition holds. It is
-perfectly fine to use the same method for other types of attributes:
-
-    div
-        #foo ? Name == "Ekin"
-        [bar=baz] ? len(Repositories) > 0
-
-Iterations
-
-It is possible to iterate over arrays and maps using `each`:
-
-    each $repo in Repositories
-        p #{$repo}
-
-would print
-
-    p amber
-    p dateformat
-
-It is also possible to iterate over values and indexes at the same time
-
-    each $i, $repo in Repositories
-        p
-            .even ? $i % 2 == 0
-            .odd ? $i % 2 == 1
-
-Includes
-
-A template can include other templates using `include`:
-
-    a.amber
-        p this is template a
-
-    b.amber
-        p this is template b
-
-    c.amber
-        div
-            include a
-            include b
-
-gets compiled to
-
-    div
-        p this is template a
-        p this is template b
-
-Inheritance
-
-A template can inherit other templates. In order to inherit another template, the `extends` keyword should be used.
-The parent template can define several named blocks and the child template can modify those blocks.
-
-    master.amber
-        !!! 5
-        html
-            head
-                block meta
-                    meta[name="description"][content="This is a great website"]
-
-                title
-                    block title
-                        | Default title
-            body
-                block content
-
-    subpage.amber
-        extends master
-
-        block title
-            | Some sub page!
-
-        block append meta
-            // This will be added after the description meta tag. It is also possible
-            // to prepend something to an existing block
-            meta[name="keywords"][content="foo bar"]
-
-        block content
-            div#main
-                p Some content here
-
-License
-(The MIT License)
-
-Copyright (c) 2012 Ekin Koc <ekin@eknkc.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-package amber
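For context, the removed package documentation above is a how-to for the template syntax; rendering one of those snippets from Go is a thin wrapper around html/template. The sketch below is illustrative only: it assumes the package's exported Compile function and DefaultOptions value as documented upstream, and the template string and data map are made up for the example.

    package main

    import (
    	"os"

    	"github.com/eknkc/amber"
    )

    func main() {
    	// Compile an Amber source string into a standard *template.Template (html/template).
    	// DefaultOptions is assumed to exist in the package; adjust Options as needed.
    	tpl, err := amber.Compile(`p Welcome #{Name}!`, amber.DefaultOptions)
    	if err != nil {
    		panic(err)
    	}

    	// Execute it like any html/template; data fields are reachable by name,
    	// matching the "Data" section above.
    	data := map[string]interface{}{"Name": "Ekin"}
    	if err := tpl.Execute(os.Stdout, data); err != nil {
    		panic(err)
    	}
    }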

+ 0 - 285
vendor/github.com/eknkc/amber/parser/nodes.go

@@ -1,285 +0,0 @@
-package parser
-
-import (
-	"regexp"
-	"strings"
-)
-
-var selfClosingTags = [...]string{
-	"meta",
-	"img",
-	"link",
-	"input",
-	"source",
-	"area",
-	"base",
-	"col",
-	"br",
-	"hr",
-}
-
-var doctypes = map[string]string{
-	"5":            `<!DOCTYPE html>`,
-	"default":      `<!DOCTYPE html>`,
-	"xml":          `<?xml version="1.0" encoding="utf-8" ?>`,
-	"transitional": `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">`,
-	"strict":       `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">`,
-	"frameset":     `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">`,
-	"1.1":          `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">`,
-	"basic":        `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">`,
-	"mobile":       `<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">`,
-}
-
-type Node interface {
-	Pos() SourcePosition
-}
-
-type SourcePosition struct {
-	LineNum     int
-	ColNum      int
-	TokenLength int
-	Filename    string
-}
-
-func (s *SourcePosition) Pos() SourcePosition {
-	return *s
-}
-
-type Doctype struct {
-	SourcePosition
-	Value string
-}
-
-func newDoctype(value string) *Doctype {
-	dt := new(Doctype)
-	dt.Value = value
-	return dt
-}
-
-func (d *Doctype) String() string {
-	if defined := doctypes[d.Value]; len(defined) != 0 {
-		return defined
-	}
-
-	return `<!DOCTYPE ` + d.Value + `>`
-}
-
-type Comment struct {
-	SourcePosition
-	Value  string
-	Block  *Block
-	Silent bool
-}
-
-func newComment(value string) *Comment {
-	dt := new(Comment)
-	dt.Value = value
-	dt.Block = nil
-	dt.Silent = false
-	return dt
-}
-
-type Text struct {
-	SourcePosition
-	Value string
-	Raw   bool
-}
-
-func newText(value string, raw bool) *Text {
-	dt := new(Text)
-	dt.Value = value
-	dt.Raw = raw
-	return dt
-}
-
-type Block struct {
-	SourcePosition
-	Children []Node
-}
-
-func newBlock() *Block {
-	block := new(Block)
-	block.Children = make([]Node, 0)
-	return block
-}
-
-func (b *Block) push(node Node) {
-	b.Children = append(b.Children, node)
-}
-
-func (b *Block) pushFront(node Node) {
-	b.Children = append([]Node{node}, b.Children...)
-}
-
-func (b *Block) CanInline() bool {
-	if len(b.Children) == 0 {
-		return true
-	}
-
-	allText := true
-
-	for _, child := range b.Children {
-		if txt, ok := child.(*Text); !ok || txt.Raw {
-			allText = false
-			break
-		}
-	}
-
-	return allText
-}
-
-const (
-	NamedBlockDefault = iota
-	NamedBlockAppend
-	NamedBlockPrepend
-)
-
-type NamedBlock struct {
-	Block
-	Name     string
-	Modifier int
-}
-
-func newNamedBlock(name string) *NamedBlock {
-	bb := new(NamedBlock)
-	bb.Name = name
-	bb.Block.Children = make([]Node, 0)
-	bb.Modifier = NamedBlockDefault
-	return bb
-}
-
-type Attribute struct {
-	SourcePosition
-	Name      string
-	Value     string
-	IsRaw     bool
-	Condition string
-}
-
-type Tag struct {
-	SourcePosition
-	Block          *Block
-	Name           string
-	IsInterpolated bool
-	Attributes     []Attribute
-}
-
-func newTag(name string) *Tag {
-	tag := new(Tag)
-	tag.Block = nil
-	tag.Name = name
-	tag.Attributes = make([]Attribute, 0)
-	tag.IsInterpolated = false
-	return tag
-
-}
-
-func (t *Tag) IsSelfClosing() bool {
-	for _, tag := range selfClosingTags {
-		if tag == t.Name {
-			return true
-		}
-	}
-
-	return false
-}
-
-func (t *Tag) IsRawText() bool {
-	return t.Name == "style" || t.Name == "script"
-}
-
-type Condition struct {
-	SourcePosition
-	Positive   *Block
-	Negative   *Block
-	Expression string
-}
-
-func newCondition(exp string) *Condition {
-	cond := new(Condition)
-	cond.Expression = exp
-	return cond
-}
-
-type Each struct {
-	SourcePosition
-	X          string
-	Y          string
-	Expression string
-	Block      *Block
-}
-
-func newEach(exp string) *Each {
-	each := new(Each)
-	each.Expression = exp
-	return each
-}
-
-type Assignment struct {
-	SourcePosition
-	X          string
-	Expression string
-}
-
-func newAssignment(x, expression string) *Assignment {
-	assgn := new(Assignment)
-	assgn.X = x
-	assgn.Expression = expression
-	return assgn
-}
-
-type Mixin struct {
-	SourcePosition
-	Block *Block
-	Name  string
-	Args  []string
-}
-
-func newMixin(name, args string) *Mixin {
-	mixin := new(Mixin)
-	mixin.Name = name
-
-	delExp := regexp.MustCompile(`,\s`)
-	mixin.Args = delExp.Split(args, -1)
-
-	for i := 0; i < len(mixin.Args); i++ {
-		mixin.Args[i] = strings.TrimSpace(mixin.Args[i])
-		if mixin.Args[i] == "" {
-			mixin.Args = append(mixin.Args[:i], mixin.Args[i+1:]...)
-			i--
-		}
-	}
-
-	return mixin
-}
-
-type MixinCall struct {
-	SourcePosition
-	Name string
-	Args []string
-}
-
-func newMixinCall(name, args string) *MixinCall {
-	mixinCall := new(MixinCall)
-	mixinCall.Name = name
-
-	if args != "" {
-		const t = "%s"
-		quoteExp := regexp.MustCompile(`"(.*?)"`)
-		delExp := regexp.MustCompile(`,\s`)
-
-		quotes := quoteExp.FindAllString(args, -1)
-		replaced := quoteExp.ReplaceAllString(args, t)
-		mixinCall.Args = delExp.Split(replaced, -1)
-
-		qi := 0
-		for i, arg := range mixinCall.Args {
-			if arg == t {
-				mixinCall.Args[i] = quotes[qi]
-				qi++
-			}
-		}
-	}
-
-	return mixinCall
-}

+ 0 - 482
vendor/github.com/eknkc/amber/parser/parser.go

@@ -1,482 +0,0 @@
-package parser
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"path/filepath"
-	"strings"
-)
-
-type Parser struct {
-	scanner      *scanner
-	filename     string
-	fs           http.FileSystem
-	currenttoken *token
-	namedBlocks  map[string]*NamedBlock
-	parent       *Parser
-	result       *Block
-}
-
-func newParser(rdr io.Reader) *Parser {
-	p := new(Parser)
-	p.scanner = newScanner(rdr)
-	p.namedBlocks = make(map[string]*NamedBlock)
-	return p
-}
-
-func StringParser(input string) (*Parser, error) {
-	return newParser(bytes.NewReader([]byte(input))), nil
-}
-
-func ByteParser(input []byte) (*Parser, error) {
-	return newParser(bytes.NewReader(input)), nil
-}
-
-func (p *Parser) SetFilename(filename string) {
-	p.filename = filename
-}
-
-func (p *Parser) SetVirtualFilesystem(fs http.FileSystem) {
-	p.fs = fs
-}
-
-func FileParser(filename string) (*Parser, error) {
-	data, err := ioutil.ReadFile(filename)
-
-	if err != nil {
-		return nil, err
-	}
-
-	parser := newParser(bytes.NewReader(data))
-	parser.filename = filename
-	return parser, nil
-}
-
-func VirtualFileParser(filename string, fs http.FileSystem) (*Parser, error) {
-	file, err := fs.Open(filename)
-	if err != nil {
-		return nil, err
-	}
-
-	data, err := ioutil.ReadAll(file)
-	if err != nil {
-		return nil, err
-	}
-
-	parser := newParser(bytes.NewReader(data))
-	parser.filename = filename
-	parser.fs = fs
-	return parser, nil
-}
-
-func (p *Parser) Parse() *Block {
-	if p.result != nil {
-		return p.result
-	}
-
-	defer func() {
-		if r := recover(); r != nil {
-			if rs, ok := r.(string); ok && strings.HasPrefix(rs, "Amber Error") {
-				panic(r)
-			}
-
-			pos := p.pos()
-
-			if len(pos.Filename) > 0 {
-				panic(fmt.Sprintf("Amber Error in <%s>: %v - Line: %d, Column: %d, Length: %d", pos.Filename, r, pos.LineNum, pos.ColNum, pos.TokenLength))
-			} else {
-				panic(fmt.Sprintf("Amber Error: %v - Line: %d, Column: %d, Length: %d", r, pos.LineNum, pos.ColNum, pos.TokenLength))
-			}
-		}
-	}()
-
-	block := newBlock()
-	p.advance()
-
-	for {
-		if p.currenttoken == nil || p.currenttoken.Kind == tokEOF {
-			break
-		}
-
-		if p.currenttoken.Kind == tokBlank {
-			p.advance()
-			continue
-		}
-
-		block.push(p.parse())
-	}
-
-	if p.parent != nil {
-		p.parent.Parse()
-
-		for _, prev := range p.parent.namedBlocks {
-			ours := p.namedBlocks[prev.Name]
-
-			if ours == nil {
-				// Put a copy of the named block into current context, so that sub-templates can use the block
-				p.namedBlocks[prev.Name] = prev
-				continue
-			}
-
-			top := findTopmostParentWithNamedBlock(p, prev.Name)
-			nb := top.namedBlocks[prev.Name]
-			switch ours.Modifier {
-			case NamedBlockAppend:
-				for i := 0; i < len(ours.Children); i++ {
-					nb.push(ours.Children[i])
-				}
-			case NamedBlockPrepend:
-				for i := len(ours.Children) - 1; i >= 0; i-- {
-					nb.pushFront(ours.Children[i])
-				}
-			default:
-				nb.Children = ours.Children
-			}
-		}
-
-		block = p.parent.result
-	}
-
-	p.result = block
-	return block
-}
-
-func (p *Parser) pos() SourcePosition {
-	pos := p.scanner.Pos()
-	pos.Filename = p.filename
-	return pos
-}
-
-func (p *Parser) parseRelativeFile(filename string) *Parser {
-	if len(p.filename) == 0 {
-		panic("Unable to import or extend " + filename + " in a non filesystem based parser.")
-	}
-
-	filename = filepath.Join(filepath.Dir(p.filename), filename)
-
-	if strings.IndexRune(filepath.Base(filename), '.') < 0 {
-		filename = filename + ".amber"
-	}
-
-	parser, err := FileParser(filename)
-	if err != nil && p.fs != nil {
-		parser, err = VirtualFileParser(filename, p.fs)
-	}
-	if err != nil {
-		panic("Unable to read " + filename + ", Error: " + string(err.Error()))
-	}
-
-	return parser
-}
-
-func (p *Parser) parse() Node {
-	switch p.currenttoken.Kind {
-	case tokDoctype:
-		return p.parseDoctype()
-	case tokComment:
-		return p.parseComment()
-	case tokText:
-		return p.parseText()
-	case tokIf:
-		return p.parseIf()
-	case tokEach:
-		return p.parseEach()
-	case tokImport:
-		return p.parseImport()
-	case tokTag:
-		return p.parseTag()
-	case tokAssignment:
-		return p.parseAssignment()
-	case tokNamedBlock:
-		return p.parseNamedBlock()
-	case tokExtends:
-		return p.parseExtends()
-	case tokIndent:
-		return p.parseBlock(nil)
-	case tokMixin:
-		return p.parseMixin()
-	case tokMixinCall:
-		return p.parseMixinCall()
-	}
-
-	panic(fmt.Sprintf("Unexpected token: %d", p.currenttoken.Kind))
-}
-
-func (p *Parser) expect(typ rune) *token {
-	if p.currenttoken.Kind != typ {
-		panic("Unexpected token!")
-	}
-	curtok := p.currenttoken
-	p.advance()
-	return curtok
-}
-
-func (p *Parser) advance() {
-	p.currenttoken = p.scanner.Next()
-}
-
-func (p *Parser) parseExtends() *Block {
-	if p.parent != nil {
-		panic("Unable to extend multiple parent templates.")
-	}
-
-	tok := p.expect(tokExtends)
-	parser := p.parseRelativeFile(tok.Value)
-	parser.Parse()
-	p.parent = parser
-	return newBlock()
-}
-
-func (p *Parser) parseBlock(parent Node) *Block {
-	p.expect(tokIndent)
-	block := newBlock()
-	block.SourcePosition = p.pos()
-
-	for {
-		if p.currenttoken == nil || p.currenttoken.Kind == tokEOF || p.currenttoken.Kind == tokOutdent {
-			break
-		}
-
-		if p.currenttoken.Kind == tokBlank {
-			p.advance()
-			continue
-		}
-
-		if p.currenttoken.Kind == tokId ||
-			p.currenttoken.Kind == tokClassName ||
-			p.currenttoken.Kind == tokAttribute {
-
-			if tag, ok := parent.(*Tag); ok {
-				attr := p.expect(p.currenttoken.Kind)
-				cond := attr.Data["Condition"]
-
-				switch attr.Kind {
-				case tokId:
-					tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "id", attr.Value, true, cond})
-				case tokClassName:
-					tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "class", attr.Value, true, cond})
-				case tokAttribute:
-					tag.Attributes = append(tag.Attributes, Attribute{p.pos(), attr.Value, attr.Data["Content"], attr.Data["Mode"] == "raw", cond})
-				}
-
-				continue
-			} else {
-				panic("Conditional attributes must be placed immediately within a parent tag.")
-			}
-		}
-
-		block.push(p.parse())
-	}
-
-	p.expect(tokOutdent)
-
-	return block
-}
-
-func (p *Parser) parseIf() *Condition {
-	tok := p.expect(tokIf)
-	cnd := newCondition(tok.Value)
-	cnd.SourcePosition = p.pos()
-
-readmore:
-	switch p.currenttoken.Kind {
-	case tokIndent:
-		cnd.Positive = p.parseBlock(cnd)
-		goto readmore
-	case tokElse:
-		p.expect(tokElse)
-		if p.currenttoken.Kind == tokIf {
-			cnd.Negative = newBlock()
-			cnd.Negative.push(p.parseIf())
-		} else if p.currenttoken.Kind == tokIndent {
-			cnd.Negative = p.parseBlock(cnd)
-		} else {
-			panic("Unexpected token!")
-		}
-		goto readmore
-	}
-
-	return cnd
-}
-
-func (p *Parser) parseEach() *Each {
-	tok := p.expect(tokEach)
-	ech := newEach(tok.Value)
-	ech.SourcePosition = p.pos()
-	ech.X = tok.Data["X"]
-	ech.Y = tok.Data["Y"]
-
-	if p.currenttoken.Kind == tokIndent {
-		ech.Block = p.parseBlock(ech)
-	}
-
-	return ech
-}
-
-func (p *Parser) parseImport() *Block {
-	tok := p.expect(tokImport)
-	node := p.parseRelativeFile(tok.Value).Parse()
-	node.SourcePosition = p.pos()
-	return node
-}
-
-func (p *Parser) parseNamedBlock() *Block {
-	tok := p.expect(tokNamedBlock)
-
-	if p.namedBlocks[tok.Value] != nil {
-		panic("Multiple definitions of named blocks are not permitted. Block " + tok.Value + " has been re defined.")
-	}
-
-	block := newNamedBlock(tok.Value)
-	block.SourcePosition = p.pos()
-
-	if tok.Data["Modifier"] == "append" {
-		block.Modifier = NamedBlockAppend
-	} else if tok.Data["Modifier"] == "prepend" {
-		block.Modifier = NamedBlockPrepend
-	}
-
-	if p.currenttoken.Kind == tokIndent {
-		block.Block = *(p.parseBlock(nil))
-	}
-
-	p.namedBlocks[block.Name] = block
-
-	if block.Modifier == NamedBlockDefault {
-		return &block.Block
-	}
-
-	return newBlock()
-}
-
-func (p *Parser) parseDoctype() *Doctype {
-	tok := p.expect(tokDoctype)
-	node := newDoctype(tok.Value)
-	node.SourcePosition = p.pos()
-	return node
-}
-
-func (p *Parser) parseComment() *Comment {
-	tok := p.expect(tokComment)
-	cmnt := newComment(tok.Value)
-	cmnt.SourcePosition = p.pos()
-	cmnt.Silent = tok.Data["Mode"] == "silent"
-
-	if p.currenttoken.Kind == tokIndent {
-		cmnt.Block = p.parseBlock(cmnt)
-	}
-
-	return cmnt
-}
-
-func (p *Parser) parseText() *Text {
-	tok := p.expect(tokText)
-	node := newText(tok.Value, tok.Data["Mode"] == "raw")
-	node.SourcePosition = p.pos()
-	return node
-}
-
-func (p *Parser) parseAssignment() *Assignment {
-	tok := p.expect(tokAssignment)
-	node := newAssignment(tok.Data["X"], tok.Value)
-	node.SourcePosition = p.pos()
-	return node
-}
-
-func (p *Parser) parseTag() *Tag {
-	tok := p.expect(tokTag)
-	tag := newTag(tok.Value)
-	tag.SourcePosition = p.pos()
-
-	ensureBlock := func() {
-		if tag.Block == nil {
-			tag.Block = newBlock()
-		}
-	}
-
-readmore:
-	switch p.currenttoken.Kind {
-	case tokIndent:
-		if tag.IsRawText() {
-			p.scanner.readRaw = true
-		}
-
-		block := p.parseBlock(tag)
-		if tag.Block == nil {
-			tag.Block = block
-		} else {
-			for _, c := range block.Children {
-				tag.Block.push(c)
-			}
-		}
-	case tokId:
-		id := p.expect(tokId)
-		if len(id.Data["Condition"]) > 0 {
-			panic("Conditional attributes must be placed in a block within a tag.")
-		}
-		tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "id", id.Value, true, ""})
-		goto readmore
-	case tokClassName:
-		cls := p.expect(tokClassName)
-		if len(cls.Data["Condition"]) > 0 {
-			panic("Conditional attributes must be placed in a block within a tag.")
-		}
-		tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "class", cls.Value, true, ""})
-		goto readmore
-	case tokAttribute:
-		attr := p.expect(tokAttribute)
-		if len(attr.Data["Condition"]) > 0 {
-			panic("Conditional attributes must be placed in a block within a tag.")
-		}
-		tag.Attributes = append(tag.Attributes, Attribute{p.pos(), attr.Value, attr.Data["Content"], attr.Data["Mode"] == "raw", ""})
-		goto readmore
-	case tokText:
-		if p.currenttoken.Data["Mode"] != "piped" {
-			ensureBlock()
-			tag.Block.pushFront(p.parseText())
-			goto readmore
-		}
-	}
-
-	return tag
-}
-
-func (p *Parser) parseMixin() *Mixin {
-	tok := p.expect(tokMixin)
-	mixin := newMixin(tok.Value, tok.Data["Args"])
-	mixin.SourcePosition = p.pos()
-
-	if p.currenttoken.Kind == tokIndent {
-		mixin.Block = p.parseBlock(mixin)
-	}
-
-	return mixin
-}
-
-func (p *Parser) parseMixinCall() *MixinCall {
-	tok := p.expect(tokMixinCall)
-	mixinCall := newMixinCall(tok.Value, tok.Data["Args"])
-	mixinCall.SourcePosition = p.pos()
-	return mixinCall
-}
-
-func findTopmostParentWithNamedBlock(p *Parser, name string) *Parser {
-	top := p
-
-	for {
-		if top.namedBlocks[name] == nil {
-			return nil
-		}
-		if top.parent == nil {
-			return top
-		}
-		if top.parent.namedBlocks[name] != nil {
-			top = top.parent
-		} else {
-			return top
-		}
-	}
-}

+ 0 - 501
vendor/github.com/eknkc/amber/parser/scanner.go

@@ -1,501 +0,0 @@
-package parser
-
-import (
-	"bufio"
-	"container/list"
-	"fmt"
-	"io"
-	"regexp"
-)
-
-const (
-	tokEOF = -(iota + 1)
-	tokDoctype
-	tokComment
-	tokIndent
-	tokOutdent
-	tokBlank
-	tokId
-	tokClassName
-	tokTag
-	tokText
-	tokAttribute
-	tokIf
-	tokElse
-	tokEach
-	tokAssignment
-	tokImport
-	tokNamedBlock
-	tokExtends
-	tokMixin
-	tokMixinCall
-)
-
-const (
-	scnNewLine = iota
-	scnLine
-	scnEOF
-)
-
-type scanner struct {
-	reader      *bufio.Reader
-	indentStack *list.List
-	stash       *list.List
-
-	state  int32
-	buffer string
-
-	line          int
-	col           int
-	lastTokenLine int
-	lastTokenCol  int
-	lastTokenSize int
-
-	readRaw bool
-}
-
-type token struct {
-	Kind  rune
-	Value string
-	Data  map[string]string
-}
-
-func newScanner(r io.Reader) *scanner {
-	s := new(scanner)
-	s.reader = bufio.NewReader(r)
-	s.indentStack = list.New()
-	s.stash = list.New()
-	s.state = scnNewLine
-	s.line = -1
-	s.col = 0
-
-	return s
-}
-
-func (s *scanner) Pos() SourcePosition {
-	return SourcePosition{s.lastTokenLine + 1, s.lastTokenCol + 1, s.lastTokenSize, ""}
-}
-
-// Next returns the next token found in the buffer
-func (s *scanner) Next() *token {
-	if s.readRaw {
-		s.readRaw = false
-		return s.NextRaw()
-	}
-
-	s.ensureBuffer()
-
-	if stashed := s.stash.Front(); stashed != nil {
-		tok := stashed.Value.(*token)
-		s.stash.Remove(stashed)
-		return tok
-	}
-
-	switch s.state {
-	case scnEOF:
-		if outdent := s.indentStack.Back(); outdent != nil {
-			s.indentStack.Remove(outdent)
-			return &token{tokOutdent, "", nil}
-		}
-
-		return &token{tokEOF, "", nil}
-	case scnNewLine:
-		s.state = scnLine
-
-		if tok := s.scanIndent(); tok != nil {
-			return tok
-		}
-
-		return s.Next()
-	case scnLine:
-		if tok := s.scanMixin(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanMixinCall(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanDoctype(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanCondition(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanEach(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanImport(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanExtends(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanBlock(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanAssignment(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanTag(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanId(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanClassName(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanAttribute(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanComment(); tok != nil {
-			return tok
-		}
-
-		if tok := s.scanText(); tok != nil {
-			return tok
-		}
-	}
-
-	return nil
-}
-
-func (s *scanner) NextRaw() *token {
-	result := ""
-	level := 0
-
-	for {
-		s.ensureBuffer()
-
-		switch s.state {
-		case scnEOF:
-			return &token{tokText, result, map[string]string{"Mode": "raw"}}
-		case scnNewLine:
-			s.state = scnLine
-
-			if tok := s.scanIndent(); tok != nil {
-				if tok.Kind == tokIndent {
-					level++
-				} else if tok.Kind == tokOutdent {
-					level--
-				} else {
-					result = result + "\n"
-					continue
-				}
-
-				if level < 0 {
-					s.stash.PushBack(&token{tokOutdent, "", nil})
-
-					if len(result) > 0 && result[len(result)-1] == '\n' {
-						result = result[:len(result)-1]
-					}
-
-					return &token{tokText, result, map[string]string{"Mode": "raw"}}
-				}
-			}
-		case scnLine:
-			if len(result) > 0 {
-				result = result + "\n"
-			}
-			for i := 0; i < level; i++ {
-				result += "\t"
-			}
-			result = result + s.buffer
-			s.consume(len(s.buffer))
-		}
-	}
-
-	return nil
-}
-
-var rgxIndent = regexp.MustCompile(`^(\s+)`)
-
-func (s *scanner) scanIndent() *token {
-	if len(s.buffer) == 0 {
-		return &token{tokBlank, "", nil}
-	}
-
-	var head *list.Element
-	for head = s.indentStack.Front(); head != nil; head = head.Next() {
-		value := head.Value.(*regexp.Regexp)
-
-		if match := value.FindString(s.buffer); len(match) != 0 {
-			s.consume(len(match))
-		} else {
-			break
-		}
-	}
-
-	newIndent := rgxIndent.FindString(s.buffer)
-
-	if len(newIndent) != 0 && head == nil {
-		s.indentStack.PushBack(regexp.MustCompile(regexp.QuoteMeta(newIndent)))
-		s.consume(len(newIndent))
-		return &token{tokIndent, newIndent, nil}
-	}
-
-	if len(newIndent) == 0 && head != nil {
-		for head != nil {
-			next := head.Next()
-			s.indentStack.Remove(head)
-			if next == nil {
-				return &token{tokOutdent, "", nil}
-			} else {
-				s.stash.PushBack(&token{tokOutdent, "", nil})
-			}
-			head = next
-		}
-	}
-
-	if len(newIndent) != 0 && head != nil {
-		panic("Mismatching indentation. Please use a coherent indent schema.")
-	}
-
-	return nil
-}
-
-var rgxDoctype = regexp.MustCompile(`^(!!!|doctype)\s*(.*)`)
-
-func (s *scanner) scanDoctype() *token {
-	if sm := rgxDoctype.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		if len(sm[2]) == 0 {
-			sm[2] = "html"
-		}
-
-		s.consume(len(sm[0]))
-		return &token{tokDoctype, sm[2], nil}
-	}
-
-	return nil
-}
-
-var rgxIf = regexp.MustCompile(`^if\s+(.+)$`)
-var rgxElse = regexp.MustCompile(`^else\s*`)
-
-func (s *scanner) scanCondition() *token {
-	if sm := rgxIf.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokIf, sm[1], nil}
-	}
-
-	if sm := rgxElse.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokElse, "", nil}
-	}
-
-	return nil
-}
-
-var rgxEach = regexp.MustCompile(`^each\s+(\$[\w0-9\-_]*)(?:\s*,\s*(\$[\w0-9\-_]*))?\s+in\s+(.+)$`)
-
-func (s *scanner) scanEach() *token {
-	if sm := rgxEach.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokEach, sm[3], map[string]string{"X": sm[1], "Y": sm[2]}}
-	}
-
-	return nil
-}
-
-var rgxAssignment = regexp.MustCompile(`^(\$[\w0-9\-_]*)?\s*=\s*(.+)$`)
-
-func (s *scanner) scanAssignment() *token {
-	if sm := rgxAssignment.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokAssignment, sm[2], map[string]string{"X": sm[1]}}
-	}
-
-	return nil
-}
-
-var rgxComment = regexp.MustCompile(`^\/\/(-)?\s*(.*)$`)
-
-func (s *scanner) scanComment() *token {
-	if sm := rgxComment.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		mode := "embed"
-		if len(sm[1]) != 0 {
-			mode = "silent"
-		}
-
-		s.consume(len(sm[0]))
-		return &token{tokComment, sm[2], map[string]string{"Mode": mode}}
-	}
-
-	return nil
-}
-
-var rgxId = regexp.MustCompile(`^#([\w-]+)(?:\s*\?\s*(.*)$)?`)
-
-func (s *scanner) scanId() *token {
-	if sm := rgxId.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokId, sm[1], map[string]string{"Condition": sm[2]}}
-	}
-
-	return nil
-}
-
-var rgxClassName = regexp.MustCompile(`^\.([\w-]+)(?:\s*\?\s*(.*)$)?`)
-
-func (s *scanner) scanClassName() *token {
-	if sm := rgxClassName.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokClassName, sm[1], map[string]string{"Condition": sm[2]}}
-	}
-
-	return nil
-}
-
-var rgxAttribute = regexp.MustCompile(`^\[([\w\-:@\.]+)\s*(?:=\s*(\"([^\"\\]*)\"|([^\]]+)))?\](?:\s*\?\s*(.*)$)?`)
-
-func (s *scanner) scanAttribute() *token {
-	if sm := rgxAttribute.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-
-		if len(sm[3]) != 0 || sm[2] == "" {
-			return &token{tokAttribute, sm[1], map[string]string{"Content": sm[3], "Mode": "raw", "Condition": sm[5]}}
-		}
-
-		return &token{tokAttribute, sm[1], map[string]string{"Content": sm[4], "Mode": "expression", "Condition": sm[5]}}
-	}
-
-	return nil
-}
-
-var rgxImport = regexp.MustCompile(`^import\s+([0-9a-zA-Z_\-\. \/]*)$`)
-
-func (s *scanner) scanImport() *token {
-	if sm := rgxImport.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokImport, sm[1], nil}
-	}
-
-	return nil
-}
-
-var rgxExtends = regexp.MustCompile(`^extends\s+([0-9a-zA-Z_\-\. \/]*)$`)
-
-func (s *scanner) scanExtends() *token {
-	if sm := rgxExtends.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokExtends, sm[1], nil}
-	}
-
-	return nil
-}
-
-var rgxBlock = regexp.MustCompile(`^block\s+(?:(append|prepend)\s+)?([0-9a-zA-Z_\-\. \/]*)$`)
-
-func (s *scanner) scanBlock() *token {
-	if sm := rgxBlock.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokNamedBlock, sm[2], map[string]string{"Modifier": sm[1]}}
-	}
-
-	return nil
-}
-
-var rgxTag = regexp.MustCompile(`^(\w[-:\w]*)`)
-
-func (s *scanner) scanTag() *token {
-	if sm := rgxTag.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokTag, sm[1], nil}
-	}
-
-	return nil
-}
-
-var rgxMixin = regexp.MustCompile(`^mixin ([a-zA-Z_-]+\w*)(\(((\$\w*(,\s)?)*)\))?$`)
-
-func (s *scanner) scanMixin() *token {
-	if sm := rgxMixin.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokMixin, sm[1], map[string]string{"Args": sm[3]}}
-	}
-
-	return nil
-}
-
-var rgxMixinCall = regexp.MustCompile(`^\+([A-Za-z_-]+\w*)(\((.+(,\s)?)*\))?$`)
-
-func (s *scanner) scanMixinCall() *token {
-	if sm := rgxMixinCall.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-		return &token{tokMixinCall, sm[1], map[string]string{"Args": sm[3]}}
-	}
-
-	return nil
-}
-
-var rgxText = regexp.MustCompile(`^(\|)? ?(.*)$`)
-
-func (s *scanner) scanText() *token {
-	if sm := rgxText.FindStringSubmatch(s.buffer); len(sm) != 0 {
-		s.consume(len(sm[0]))
-
-		mode := "inline"
-		if sm[1] == "|" {
-			mode = "piped"
-		}
-
-		return &token{tokText, sm[2], map[string]string{"Mode": mode}}
-	}
-
-	return nil
-}
-
-// consume moves the position forward and removes the given number of characters from the beginning of s.buffer
-func (s *scanner) consume(runes int) {
-	if len(s.buffer) < runes {
-		panic(fmt.Sprintf("Unable to consume %d runes from buffer.", runes))
-	}
-
-	s.lastTokenLine = s.line
-	s.lastTokenCol = s.col
-	s.lastTokenSize = runes
-
-	s.buffer = s.buffer[runes:]
-	s.col += runes
-}
-
-// ensureBuffer reads the next line into s.buffer when the buffer is empty
-func (s *scanner) ensureBuffer() {
-	if len(s.buffer) > 0 {
-		return
-	}
-
-	buf, err := s.reader.ReadString('\n')
-
-	if err != nil && err != io.EOF {
-		panic(err)
-	} else if err != nil && len(buf) == 0 {
-		s.state = scnEOF
-	} else {
-		// endline "LF only" or "\n" use Unix, Linux, modern MacOS X, FreeBSD, BeOS, RISC OS
-		if buf[len(buf)-1] == '\n' {
-			buf = buf[:len(buf)-1]
-		}
-		// endline "CR+LF" or "\r\n" use internet protocols, DEC RT-11, Windows, CP/M, MS-DOS, OS/2, Symbian OS
-		if len(buf) > 0 && buf[len(buf)-1] == '\r' {
-			buf = buf[:len(buf)-1]
-		}
-
-		s.state = scnNewLine
-		s.buffer = buf
-		s.line += 1
-		s.col = 0
-	}
-}

+ 0 - 287
vendor/github.com/eknkc/amber/runtime.go

@@ -1,287 +0,0 @@
-package amber
-
-import (
-	"encoding/json"
-	"fmt"
-	"html/template"
-	"reflect"
-)
-
-var FuncMap = template.FuncMap{
-	"__amber_add":   runtime_add,
-	"__amber_sub":   runtime_sub,
-	"__amber_mul":   runtime_mul,
-	"__amber_quo":   runtime_quo,
-	"__amber_rem":   runtime_rem,
-	"__amber_minus": runtime_minus,
-	"__amber_plus":  runtime_plus,
-	"__amber_eql":   runtime_eql,
-	"__amber_gtr":   runtime_gtr,
-	"__amber_lss":   runtime_lss,
-
-	"json":      runtime_json,
-	"unescaped": runtime_unescaped,
-}
-
-func runtime_add(x, y interface{}) interface{} {
-	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Int() + vy.Int()
-			case reflect.Float32, reflect.Float64:
-				return float64(vx.Int()) + vy.Float()
-			case reflect.String:
-				return fmt.Sprintf("%d%s", vx.Int(), vy.String())
-			}
-		}
-	case reflect.Float32, reflect.Float64:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Float() + float64(vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return vx.Float() + vy.Float()
-			case reflect.String:
-				return fmt.Sprintf("%f%s", vx.Float(), vy.String())
-			}
-		}
-	case reflect.String:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return fmt.Sprintf("%s%d", vx.String(), vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return fmt.Sprintf("%s%f", vx.String(), vy.Float())
-			case reflect.String:
-				return fmt.Sprintf("%s%s", vx.String(), vy.String())
-			}
-		}
-	}
-
-	return "<nil>"
-}
-
-func runtime_sub(x, y interface{}) interface{} {
-	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Int() - vy.Int()
-			case reflect.Float32, reflect.Float64:
-				return float64(vx.Int()) - vy.Float()
-			}
-		}
-	case reflect.Float32, reflect.Float64:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Float() - float64(vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return vx.Float() - vy.Float()
-			}
-		}
-	}
-
-	return "<nil>"
-}
-
-func runtime_mul(x, y interface{}) interface{} {
-	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Int() * vy.Int()
-			case reflect.Float32, reflect.Float64:
-				return float64(vx.Int()) * vy.Float()
-			}
-		}
-	case reflect.Float32, reflect.Float64:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Float() * float64(vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return vx.Float() * vy.Float()
-			}
-		}
-	}
-
-	return "<nil>"
-}
-
-func runtime_quo(x, y interface{}) interface{} {
-	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Int() / vy.Int()
-			case reflect.Float32, reflect.Float64:
-				return float64(vx.Int()) / vy.Float()
-			}
-		}
-	case reflect.Float32, reflect.Float64:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Float() / float64(vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return vx.Float() / vy.Float()
-			}
-		}
-	}
-
-	return "<nil>"
-}
-
-func runtime_rem(x, y interface{}) interface{} {
-	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Int() % vy.Int()
-			}
-		}
-	}
-
-	return "<nil>"
-}
-
-func runtime_minus(x interface{}) interface{} {
-	vx := reflect.ValueOf(x)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		return -vx.Int()
-	case reflect.Float32, reflect.Float64:
-		return -vx.Float()
-	}
-
-	return "<nil>"
-}
-
-func runtime_plus(x interface{}) interface{} {
-	vx := reflect.ValueOf(x)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		return +vx.Int()
-	case reflect.Float32, reflect.Float64:
-		return +vx.Float()
-	}
-
-	return "<nil>"
-}
-
-func runtime_eql(x, y interface{}) bool {
-	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Int() == vy.Int()
-			case reflect.Float32, reflect.Float64:
-				return float64(vx.Int()) == vy.Float()
-			case reflect.String:
-				return fmt.Sprintf("%d", vx.Int()) == vy.String()
-			}
-		}
-	case reflect.Float32, reflect.Float64:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Float() == float64(vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return vx.Float() == vy.Float()
-			case reflect.String:
-				return fmt.Sprintf("%f", vx.Float()) == vy.String()
-			}
-		}
-	case reflect.String:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.String() == fmt.Sprintf("%d", vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return vx.String() == fmt.Sprintf("%f", vy.Float())
-			case reflect.String:
-				return vx.String() == vy.String()
-			}
-		}
-	case reflect.Bool:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Bool() && vy.Int() != 0
-			case reflect.Bool:
-				return vx.Bool() == vy.Bool()
-			}
-		}
-	}
-
-	return false
-}
-
-func runtime_lss(x, y interface{}) bool {
-	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
-	switch vx.Kind() {
-	case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Int() < vy.Int()
-			case reflect.Float32, reflect.Float64:
-				return float64(vx.Int()) < vy.Float()
-			case reflect.String:
-				return fmt.Sprintf("%d", vx.Int()) < vy.String()
-			}
-		}
-	case reflect.Float32, reflect.Float64:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.Float() < float64(vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return vx.Float() < vy.Float()
-			case reflect.String:
-				return fmt.Sprintf("%f", vx.Float()) < vy.String()
-			}
-		}
-	case reflect.String:
-		{
-			switch vy.Kind() {
-			case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8:
-				return vx.String() < fmt.Sprintf("%d", vy.Int())
-			case reflect.Float32, reflect.Float64:
-				return vx.String() < fmt.Sprintf("%f", vy.Float())
-			case reflect.String:
-				return vx.String() < vy.String()
-			}
-		}
-	}
-
-	return false
-}
-
-func runtime_gtr(x, y interface{}) bool {
-	return !runtime_lss(x, y) && !runtime_eql(x, y)
-}
-
-func runtime_json(x interface{}) (res string, err error) {
-	bres, err := json.Marshal(x)
-	res = string(bres)
-	return
-}
-
-func runtime_unescaped(x string) interface{} {
-	return template.HTML(x)
-}

+ 0 - 8
vendor/github.com/gobwas/glob/.gitignore

@@ -1,8 +0,0 @@
-glob.iml
-.idea
-*.cpu
-*.mem
-*.test
-*.dot
-*.png
-*.svg

+ 0 - 9
vendor/github.com/gobwas/glob/.travis.yml

@@ -1,9 +0,0 @@
-sudo: false
-
-language: go
-
-go:
-  - 1.5.3
-
-script:
-  - go test -v ./...

+ 0 - 21
vendor/github.com/gobwas/glob/LICENSE

@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Sergey Kamardin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.

+ 0 - 26
vendor/github.com/gobwas/glob/bench.sh

@@ -1,26 +0,0 @@
-#! /bin/bash
-
-bench() {
-    filename="/tmp/$1-$2.bench"
-    if test -e "${filename}";
-    then
-        echo "Already exists ${filename}"
-    else
-        backup=`git rev-parse --abbrev-ref HEAD`
-        git checkout $1
-        echo -n "Creating ${filename}... "
-        go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem
-        echo "OK"
-        git checkout ${backup}
-        sleep 5
-    fi
-}
-
-
-to=$1
-current=`git rev-parse --abbrev-ref HEAD`
-
-bench ${to} $2
-bench ${current} $2
-
-benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench"

+ 0 - 525
vendor/github.com/gobwas/glob/compiler/compiler.go

@@ -1,525 +0,0 @@
-package compiler
-
-// TODO: use a constructor with all matchers, and make their structs private
-// TODO: glue multiple Text nodes together (e.g. after QuoteMeta)
-
-import (
-	"fmt"
-	"reflect"
-
-	"github.com/gobwas/glob/match"
-	"github.com/gobwas/glob/syntax/ast"
-	"github.com/gobwas/glob/util/runes"
-)
-
-func optimizeMatcher(matcher match.Matcher) match.Matcher {
-	switch m := matcher.(type) {
-
-	case match.Any:
-		if len(m.Separators) == 0 {
-			return match.NewSuper()
-		}
-
-	case match.AnyOf:
-		if len(m.Matchers) == 1 {
-			return m.Matchers[0]
-		}
-
-		return m
-
-	case match.List:
-		if !m.Not && len(m.List) == 1 {
-			return match.NewText(string(m.List))
-		}
-
-		return m
-
-	case match.BTree:
-		m.Left = optimizeMatcher(m.Left)
-		m.Right = optimizeMatcher(m.Right)
-
-		r, ok := m.Value.(match.Text)
-		if !ok {
-			return m
-		}
-
-		var (
-			leftNil  = m.Left == nil
-			rightNil = m.Right == nil
-		)
-		if leftNil && rightNil {
-			return match.NewText(r.Str)
-		}
-
-		_, leftSuper := m.Left.(match.Super)
-		lp, leftPrefix := m.Left.(match.Prefix)
-		la, leftAny := m.Left.(match.Any)
-
-		_, rightSuper := m.Right.(match.Super)
-		rs, rightSuffix := m.Right.(match.Suffix)
-		ra, rightAny := m.Right.(match.Any)
-
-		switch {
-		case leftSuper && rightSuper:
-			return match.NewContains(r.Str, false)
-
-		case leftSuper && rightNil:
-			return match.NewSuffix(r.Str)
-
-		case rightSuper && leftNil:
-			return match.NewPrefix(r.Str)
-
-		case leftNil && rightSuffix:
-			return match.NewPrefixSuffix(r.Str, rs.Suffix)
-
-		case rightNil && leftPrefix:
-			return match.NewPrefixSuffix(lp.Prefix, r.Str)
-
-		case rightNil && leftAny:
-			return match.NewSuffixAny(r.Str, la.Separators)
-
-		case leftNil && rightAny:
-			return match.NewPrefixAny(r.Str, ra.Separators)
-		}
-
-		return m
-	}
-
-	return matcher
-}
-
-func compileMatchers(matchers []match.Matcher) (match.Matcher, error) {
-	if len(matchers) == 0 {
-		return nil, fmt.Errorf("compile error: need at least one matcher")
-	}
-	if len(matchers) == 1 {
-		return matchers[0], nil
-	}
-	if m := glueMatchers(matchers); m != nil {
-		return m, nil
-	}
-
-	idx := -1
-	maxLen := -1
-	var val match.Matcher
-	for i, matcher := range matchers {
-		if l := matcher.Len(); l != -1 && l >= maxLen {
-			maxLen = l
-			idx = i
-			val = matcher
-		}
-	}
-
-	if val == nil { // no matcher with a static length was found
-		r, err := compileMatchers(matchers[1:])
-		if err != nil {
-			return nil, err
-		}
-		return match.NewBTree(matchers[0], nil, r), nil
-	}
-
-	left := matchers[:idx]
-	var right []match.Matcher
-	if len(matchers) > idx+1 {
-		right = matchers[idx+1:]
-	}
-
-	var l, r match.Matcher
-	var err error
-	if len(left) > 0 {
-		l, err = compileMatchers(left)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	if len(right) > 0 {
-		r, err = compileMatchers(right)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return match.NewBTree(val, l, r), nil
-}
-
-func glueMatchers(matchers []match.Matcher) match.Matcher {
-	if m := glueMatchersAsEvery(matchers); m != nil {
-		return m
-	}
-	if m := glueMatchersAsRow(matchers); m != nil {
-		return m
-	}
-	return nil
-}
-
-func glueMatchersAsRow(matchers []match.Matcher) match.Matcher {
-	if len(matchers) <= 1 {
-		return nil
-	}
-
-	var (
-		c []match.Matcher
-		l int
-	)
-	for _, matcher := range matchers {
-		if ml := matcher.Len(); ml == -1 {
-			return nil
-		} else {
-			c = append(c, matcher)
-			l += ml
-		}
-	}
-	return match.NewRow(l, c...)
-}
-
-func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher {
-	if len(matchers) <= 1 {
-		return nil
-	}
-
-	var (
-		hasAny    bool
-		hasSuper  bool
-		hasSingle bool
-		min       int
-		separator []rune
-	)
-
-	for i, matcher := range matchers {
-		var sep []rune
-
-		switch m := matcher.(type) {
-		case match.Super:
-			sep = []rune{}
-			hasSuper = true
-
-		case match.Any:
-			sep = m.Separators
-			hasAny = true
-
-		case match.Single:
-			sep = m.Separators
-			hasSingle = true
-			min++
-
-		case match.List:
-			if !m.Not {
-				return nil
-			}
-			sep = m.List
-			hasSingle = true
-			min++
-
-		default:
-			return nil
-		}
-
-		// initialize
-		if i == 0 {
-			separator = sep
-		}
-
-		if runes.Equal(sep, separator) {
-			continue
-		}
-
-		return nil
-	}
-
-	if hasSuper && !hasAny && !hasSingle {
-		return match.NewSuper()
-	}
-
-	if hasAny && !hasSuper && !hasSingle {
-		return match.NewAny(separator)
-	}
-
-	if (hasAny || hasSuper) && min > 0 && len(separator) == 0 {
-		return match.NewMin(min)
-	}
-
-	every := match.NewEveryOf()
-
-	if min > 0 {
-		every.Add(match.NewMin(min))
-
-		if !hasAny && !hasSuper {
-			every.Add(match.NewMax(min))
-		}
-	}
-
-	if len(separator) > 0 {
-		every.Add(match.NewContains(string(separator), true))
-	}
-
-	return every
-}
-
-func minimizeMatchers(matchers []match.Matcher) []match.Matcher {
-	var done match.Matcher
-	var left, right, count int
-
-	for l := 0; l < len(matchers); l++ {
-		for r := len(matchers); r > l; r-- {
-			if glued := glueMatchers(matchers[l:r]); glued != nil {
-				var swap bool
-
-				if done == nil {
-					swap = true
-				} else {
-					cl, gl := done.Len(), glued.Len()
-					swap = cl > -1 && gl > -1 && gl > cl
-					swap = swap || count < r-l
-				}
-
-				if swap {
-					done = glued
-					left = l
-					right = r
-					count = r - l
-				}
-			}
-		}
-	}
-
-	if done == nil {
-		return matchers
-	}
-
-	next := append(append([]match.Matcher{}, matchers[:left]...), done)
-	if right < len(matchers) {
-		next = append(next, matchers[right:]...)
-	}
-
-	if len(next) == len(matchers) {
-		return next
-	}
-
-	return minimizeMatchers(next)
-}
-
-// minimizeTree tries to apply some heuristics to minimize the number of nodes in the given tree
-func minimizeTree(tree *ast.Node) *ast.Node {
-	switch tree.Kind {
-	case ast.KindAnyOf:
-		return minimizeTreeAnyOf(tree)
-	default:
-		return nil
-	}
-}
-
-// minimizeTreeAnyOf tries to find common children of a given AnyOf node.
-// It searches for common children from the left and from the right;
-// if any common children are found, it returns a new, optimized AST tree,
-// otherwise it returns nil.
-func minimizeTreeAnyOf(tree *ast.Node) *ast.Node {
-	if !areOfSameKind(tree.Children, ast.KindPattern) {
-		return nil
-	}
-
-	commonLeft, commonRight := commonChildren(tree.Children)
-	commonLeftCount, commonRightCount := len(commonLeft), len(commonRight)
-	if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts
-		return nil
-	}
-
-	var result []*ast.Node
-	if commonLeftCount > 0 {
-		result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...))
-	}
-
-	var anyOf []*ast.Node
-	for _, child := range tree.Children {
-		reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount]
-		var node *ast.Node
-		if len(reuse) == 0 {
-			// this pattern is completely reduced by the commonLeft and commonRight patterns,
-			// so it becomes nothing
-			node = ast.NewNode(ast.KindNothing, nil)
-		} else {
-			node = ast.NewNode(ast.KindPattern, nil, reuse...)
-		}
-		anyOf = appendIfUnique(anyOf, node)
-	}
-	switch {
-	case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing:
-		result = append(result, anyOf[0])
-	case len(anyOf) > 1:
-		result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...))
-	}
-
-	if commonRightCount > 0 {
-		result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...))
-	}
-
-	return ast.NewNode(ast.KindPattern, nil, result...)
-}
-
-func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) {
-	if len(nodes) <= 1 {
-		return
-	}
-
-	// find node that has least number of children
-	idx := leastChildren(nodes)
-	if idx == -1 {
-		return
-	}
-	tree := nodes[idx]
-	treeLength := len(tree.Children)
-
-	// allocate the maximum possible size for the commonRight slice
-	// so that elements can be inserted in reverse order (from end to start)
-	// without sorting
-	commonRight = make([]*ast.Node, treeLength)
-	lastRight := treeLength // will use this to get results as commonRight[lastRight:]
-
-	var (
-		breakLeft   bool
-		breakRight  bool
-		commonTotal int
-	)
-	for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 {
-		treeLeft := tree.Children[i]
-		treeRight := tree.Children[j]
-
-		for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ {
-			// skip the node with the fewest children
-			if k == idx {
-				continue
-			}
-
-			restLeft := nodes[k].Children[i]
-			restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength]
-
-			breakLeft = breakLeft || !treeLeft.Equal(restLeft)
-
-			// stop searching for common right parts if the left part already overlaps
-			breakRight = breakRight || (!breakLeft && j <= i)
-			breakRight = breakRight || !treeRight.Equal(restRight)
-		}
-
-		if !breakLeft {
-			commonTotal++
-			commonLeft = append(commonLeft, treeLeft)
-		}
-		if !breakRight {
-			commonTotal++
-			lastRight = j
-			commonRight[j] = treeRight
-		}
-	}
-
-	commonRight = commonRight[lastRight:]
-
-	return
-}
-
-func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node {
-	for _, n := range target {
-		if reflect.DeepEqual(n, val) {
-			return target
-		}
-	}
-	return append(target, val)
-}
-
-func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool {
-	for _, n := range nodes {
-		if n.Kind != kind {
-			return false
-		}
-	}
-	return true
-}
-
-func leastChildren(nodes []*ast.Node) int {
-	min := -1
-	idx := -1
-	for i, n := range nodes {
-		if idx == -1 || (len(n.Children) < min) {
-			min = len(n.Children)
-			idx = i
-		}
-	}
-	return idx
-}
-
-func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) {
-	var matchers []match.Matcher
-	for _, desc := range tree.Children {
-		m, err := compile(desc, sep)
-		if err != nil {
-			return nil, err
-		}
-		matchers = append(matchers, optimizeMatcher(m))
-	}
-	return matchers, nil
-}
-
-func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) {
-	switch tree.Kind {
-	case ast.KindAnyOf:
-		// todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go)
-		if n := minimizeTree(tree); n != nil {
-			return compile(n, sep)
-		}
-		matchers, err := compileTreeChildren(tree, sep)
-		if err != nil {
-			return nil, err
-		}
-		return match.NewAnyOf(matchers...), nil
-
-	case ast.KindPattern:
-		if len(tree.Children) == 0 {
-			return match.NewNothing(), nil
-		}
-		matchers, err := compileTreeChildren(tree, sep)
-		if err != nil {
-			return nil, err
-		}
-		m, err = compileMatchers(minimizeMatchers(matchers))
-		if err != nil {
-			return nil, err
-		}
-
-	case ast.KindAny:
-		m = match.NewAny(sep)
-
-	case ast.KindSuper:
-		m = match.NewSuper()
-
-	case ast.KindSingle:
-		m = match.NewSingle(sep)
-
-	case ast.KindNothing:
-		m = match.NewNothing()
-
-	case ast.KindList:
-		l := tree.Value.(ast.List)
-		m = match.NewList([]rune(l.Chars), l.Not)
-
-	case ast.KindRange:
-		r := tree.Value.(ast.Range)
-		m = match.NewRange(r.Lo, r.Hi, r.Not)
-
-	case ast.KindText:
-		t := tree.Value.(ast.Text)
-		m = match.NewText(t.Text)
-
-	default:
-		return nil, fmt.Errorf("could not compile tree: unknown node type")
-	}
-
-	return optimizeMatcher(m), nil
-}
-
-func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) {
-	m, err := compile(tree, sep)
-	if err != nil {
-		return nil, err
-	}
-
-	return m, nil
-}

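For reference on what the removed compiler did: minimizeTreeAnyOf hoists AST nodes shared by all alternatives out of an AnyOf, so a pattern like `{abc*def,abc?def}` is compiled as if it were written `abc{*,?}def`. A minimal sketch through the public API, assuming the upstream github.com/gobwas/glob module is fetched instead of the removed vendor copy:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// Both alternatives share the leading Text node "abc" and the trailing
	// Text node "def", so the AnyOf minimization hoists them out and only
	// the middle wildcard differs per branch.
	g := glob.MustCompile("{abc*def,abc?def}")

	fmt.Println(g.Match("abcXdef"))  // true: matches both branches
	fmt.Println(g.Match("abcXYdef")) // true: matches the `*` branch
	fmt.Println(g.Match("abXdef"))   // false: the shared prefix "abc" is missing
}
```
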
+ 0 - 80
vendor/github.com/gobwas/glob/glob.go

@@ -1,80 +0,0 @@
-package glob
-
-import (
-	"github.com/gobwas/glob/compiler"
-	"github.com/gobwas/glob/syntax"
-)
-
-// Glob represents compiled glob pattern.
-type Glob interface {
-	Match(string) bool
-}
-
-// Compile creates a Glob for the given pattern, treating any runes passed after the pattern as separators.
-// The pattern syntax is:
-//
-//    pattern:
-//        { term }
-//
-//    term:
-//        `*`         matches any sequence of non-separator characters
-//        `**`        matches any sequence of characters
-//        `?`         matches any single non-separator character
-//        `[` [ `!` ] { character-range } `]`
-//                    character class (must be non-empty)
-//        `{` pattern-list `}`
-//                    pattern alternatives
-//        c           matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
-//        `\` c       matches character c
-//
-//    character-range:
-//        c           matches character c (c != `\\`, `-`, `]`)
-//        `\` c       matches character c
-//        lo `-` hi   matches character c for lo <= c <= hi
-//
-//    pattern-list:
-//        pattern { `,` pattern }
-//                    comma-separated (without spaces) patterns
-//
-func Compile(pattern string, separators ...rune) (Glob, error) {
-	ast, err := syntax.Parse(pattern)
-	if err != nil {
-		return nil, err
-	}
-
-	matcher, err := compiler.Compile(ast, separators)
-	if err != nil {
-		return nil, err
-	}
-
-	return matcher, nil
-}
-
-// MustCompile is the same as Compile, except that if Compile returns error, this will panic
-func MustCompile(pattern string, separators ...rune) Glob {
-	g, err := Compile(pattern, separators...)
-	if err != nil {
-		panic(err)
-	}
-
-	return g
-}
-
-// QuoteMeta returns a string that quotes all glob pattern meta characters
-// inside the argument text; for example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`.
-func QuoteMeta(s string) string {
-	b := make([]byte, 2*len(s))
-
-	// a byte loop is correct because all meta characters are ASCII
-	j := 0
-	for i := 0; i < len(s); i++ {
-		if syntax.Special(s[i]) {
-			b[j] = '\\'
-			j++
-		}
-		b[j] = s[i]
-		j++
-	}
-
-	return string(b[0:j])
-}

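The doc comment above says QuoteMeta escapes every glob meta character; a short sketch (same assumption: the upstream module replaces the removed vendor copy) showing a quoted pattern matching itself literally:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// QuoteMeta escapes `{`, `*` and `}`, so the compiled glob matches
	// the original text literally instead of treating it as a pattern.
	quoted := glob.QuoteMeta("{foo*}")
	fmt.Println(quoted) // \{foo\*\}

	g := glob.MustCompile(quoted)
	fmt.Println(g.Match("{foo*}")) // true
	fmt.Println(g.Match("foobar")) // false
}
```
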
+ 0 - 45
vendor/github.com/gobwas/glob/match/any.go

@@ -1,45 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"github.com/gobwas/glob/util/strings"
-)
-
-type Any struct {
-	Separators []rune
-}
-
-func NewAny(s []rune) Any {
-	return Any{s}
-}
-
-func (self Any) Match(s string) bool {
-	return strings.IndexAnyRunes(s, self.Separators) == -1
-}
-
-func (self Any) Index(s string) (int, []int) {
-	found := strings.IndexAnyRunes(s, self.Separators)
-	switch found {
-	case -1:
-	case 0:
-		return 0, segments0
-	default:
-		s = s[:found]
-	}
-
-	segments := acquireSegments(len(s))
-	for i := range s {
-		segments = append(segments, i)
-	}
-	segments = append(segments, len(s))
-
-	return 0, segments
-}
-
-func (self Any) Len() int {
-	return lenNo
-}
-
-func (self Any) String() string {
-	return fmt.Sprintf("<any:![%s]>", string(self.Separators))
-}

+ 0 - 82
vendor/github.com/gobwas/glob/match/any_of.go

@@ -1,82 +0,0 @@
-package match
-
-import "fmt"
-
-type AnyOf struct {
-	Matchers Matchers
-}
-
-func NewAnyOf(m ...Matcher) AnyOf {
-	return AnyOf{Matchers(m)}
-}
-
-func (self *AnyOf) Add(m Matcher) error {
-	self.Matchers = append(self.Matchers, m)
-	return nil
-}
-
-func (self AnyOf) Match(s string) bool {
-	for _, m := range self.Matchers {
-		if m.Match(s) {
-			return true
-		}
-	}
-
-	return false
-}
-
-func (self AnyOf) Index(s string) (int, []int) {
-	index := -1
-
-	segments := acquireSegments(len(s))
-	for _, m := range self.Matchers {
-		idx, seg := m.Index(s)
-		if idx == -1 {
-			continue
-		}
-
-		if index == -1 || idx < index {
-			index = idx
-			segments = append(segments[:0], seg...)
-			continue
-		}
-
-		if idx > index {
-			continue
-		}
-
-		// here idx == index
-		segments = appendMerge(segments, seg)
-	}
-
-	if index == -1 {
-		releaseSegments(segments)
-		return -1, nil
-	}
-
-	return index, segments
-}
-
-func (self AnyOf) Len() (l int) {
-	l = -1
-	for _, m := range self.Matchers {
-		ml := m.Len()
-		switch {
-		case l == -1:
-			l = ml
-			continue
-
-		case ml == -1:
-			return -1
-
-		case l != ml:
-			return -1
-		}
-	}
-
-	return
-}
-
-func (self AnyOf) String() string {
-	return fmt.Sprintf("<any_of:[%s]>", self.Matchers)
-}

+ 0 - 146
vendor/github.com/gobwas/glob/match/btree.go

@@ -1,146 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"unicode/utf8"
-)
-
-type BTree struct {
-	Value            Matcher
-	Left             Matcher
-	Right            Matcher
-	ValueLengthRunes int
-	LeftLengthRunes  int
-	RightLengthRunes int
-	LengthRunes      int
-}
-
-func NewBTree(Value, Left, Right Matcher) (tree BTree) {
-	tree.Value = Value
-	tree.Left = Left
-	tree.Right = Right
-
-	lenOk := true
-	if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 {
-		lenOk = false
-	}
-
-	if Left != nil {
-		if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 {
-			lenOk = false
-		}
-	}
-
-	if Right != nil {
-		if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 {
-			lenOk = false
-		}
-	}
-
-	if lenOk {
-		tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes
-	} else {
-		tree.LengthRunes = -1
-	}
-
-	return tree
-}
-
-func (self BTree) Len() int {
-	return self.LengthRunes
-}
-
-// todo?
-func (self BTree) Index(s string) (int, []int) {
-	return -1, nil
-}
-
-func (self BTree) Match(s string) bool {
-	inputLen := len(s)
-
-	// self.LengthRunes, self.LeftLengthRunes and self.RightLengthRunes hold the length in runes of each part.
-	// Here we work with byte lengths for better performance,
-	// but the checks still hold, because a 1-rune string is at least 1 byte long.
-	if self.LengthRunes != -1 && self.LengthRunes > inputLen {
-		return false
-	}
-
-	// try to cut off unnecessary parts
-	// using the known lengths of the left and right parts
-	var offset, limit int
-	if self.LeftLengthRunes >= 0 {
-		offset = self.LeftLengthRunes
-	}
-	if self.RightLengthRunes >= 0 {
-		limit = inputLen - self.RightLengthRunes
-	} else {
-		limit = inputLen
-	}
-
-	for offset < limit {
-		// search for matching part in substring
-		index, segments := self.Value.Index(s[offset:limit])
-		if index == -1 {
-			releaseSegments(segments)
-			return false
-		}
-
-		l := s[:offset+index]
-		var left bool
-		if self.Left != nil {
-			left = self.Left.Match(l)
-		} else {
-			left = l == ""
-		}
-
-		if left {
-			for i := len(segments) - 1; i >= 0; i-- {
-				length := segments[i]
-
-				var right bool
-				var r string
-				// if there is no string for the right branch
-				if inputLen <= offset+index+length {
-					r = ""
-				} else {
-					r = s[offset+index+length:]
-				}
-
-				if self.Right != nil {
-					right = self.Right.Match(r)
-				} else {
-					right = r == ""
-				}
-
-				if right {
-					releaseSegments(segments)
-					return true
-				}
-			}
-		}
-
-		_, step := utf8.DecodeRuneInString(s[offset+index:])
-		offset += index + step
-
-		releaseSegments(segments)
-	}
-
-	return false
-}
-
-func (self BTree) String() string {
-	const n string = "<nil>"
-	var l, r string
-	if self.Left == nil {
-		l = n
-	} else {
-		l = self.Left.String()
-	}
-	if self.Right == nil {
-		r = n
-	} else {
-		r = self.Right.String()
-	}
-
-	return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r)
-}

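BTree.Match anchors on the fixed middle Value, then checks the Left and Right matchers against what remains on each side. A hand-built sketch of the kind of tree the compiler produces for `*cat*` (illustrative only; the real compiler assembles an equivalent tree via compileMatchers):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob/match"
)

func main() {
	// Hand-built tree for `*cat*`: a fixed Text value in the middle with
	// unbounded Super matchers as the left and right branches.
	tree := match.NewBTree(match.NewText("cat"), match.NewSuper(), match.NewSuper())

	fmt.Println(tree.Match("my cat has very bright eyes")) // true
	fmt.Println(tree.Match("my dog has very bright eyes")) // false
}
```
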
+ 0 - 58
vendor/github.com/gobwas/glob/match/contains.go

@@ -1,58 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-)
-
-type Contains struct {
-	Needle string
-	Not    bool
-}
-
-func NewContains(needle string, not bool) Contains {
-	return Contains{needle, not}
-}
-
-func (self Contains) Match(s string) bool {
-	return strings.Contains(s, self.Needle) != self.Not
-}
-
-func (self Contains) Index(s string) (int, []int) {
-	var offset int
-
-	idx := strings.Index(s, self.Needle)
-
-	if !self.Not {
-		if idx == -1 {
-			return -1, nil
-		}
-
-		offset = idx + len(self.Needle)
-		if len(s) <= offset {
-			return 0, []int{offset}
-		}
-		s = s[offset:]
-	} else if idx != -1 {
-		s = s[:idx]
-	}
-
-	segments := acquireSegments(len(s) + 1)
-	for i := range s {
-		segments = append(segments, offset+i)
-	}
-
-	return 0, append(segments, offset+len(s))
-}
-
-func (self Contains) Len() int {
-	return lenNo
-}
-
-func (self Contains) String() string {
-	var not string
-	if self.Not {
-		not = "!"
-	}
-	return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle)
-}

+ 0 - 99
vendor/github.com/gobwas/glob/match/every_of.go

@@ -1,99 +0,0 @@
-package match
-
-import (
-	"fmt"
-)
-
-type EveryOf struct {
-	Matchers Matchers
-}
-
-func NewEveryOf(m ...Matcher) EveryOf {
-	return EveryOf{Matchers(m)}
-}
-
-func (self *EveryOf) Add(m Matcher) error {
-	self.Matchers = append(self.Matchers, m)
-	return nil
-}
-
-func (self EveryOf) Len() (l int) {
-	for _, m := range self.Matchers {
-		if ml := m.Len(); l > 0 {
-			l += ml
-		} else {
-			return -1
-		}
-	}
-
-	return
-}
-
-func (self EveryOf) Index(s string) (int, []int) {
-	var index int
-	var offset int
-
-	// allocate the segment buffers with cap len(s),
-	// since that is the maximum possible number of output segment values
-	next := acquireSegments(len(s))
-	current := acquireSegments(len(s))
-
-	sub := s
-	for i, m := range self.Matchers {
-		idx, seg := m.Index(sub)
-		if idx == -1 {
-			releaseSegments(next)
-			releaseSegments(current)
-			return -1, nil
-		}
-
-		if i == 0 {
-			// append into current instead of assigning `current = seg`,
-			// because seg is a slice of a reusable buffer
-			// and could be overwritten on the next iteration
-			current = append(current, seg...)
-		} else {
-			// clear the next
-			next = next[:0]
-
-			delta := index - (idx + offset)
-			for _, ex := range current {
-				for _, n := range seg {
-					if ex+delta == n {
-						next = append(next, n)
-					}
-				}
-			}
-
-			if len(next) == 0 {
-				releaseSegments(next)
-				releaseSegments(current)
-				return -1, nil
-			}
-
-			current = append(current[:0], next...)
-		}
-
-		index = idx + offset
-		sub = s[index:]
-		offset += idx
-	}
-
-	releaseSegments(next)
-
-	return index, current
-}
-
-func (self EveryOf) Match(s string) bool {
-	for _, m := range self.Matchers {
-		if !m.Match(s) {
-			return false
-		}
-	}
-
-	return true
-}
-
-func (self EveryOf) String() string {
-	return fmt.Sprintf("<every_of:[%s]>", self.Matchers)
-}

+ 0 - 49
vendor/github.com/gobwas/glob/match/list.go

@@ -1,49 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"github.com/gobwas/glob/util/runes"
-	"unicode/utf8"
-)
-
-type List struct {
-	List []rune
-	Not  bool
-}
-
-func NewList(list []rune, not bool) List {
-	return List{list, not}
-}
-
-func (self List) Match(s string) bool {
-	r, w := utf8.DecodeRuneInString(s)
-	if len(s) > w {
-		return false
-	}
-
-	inList := runes.IndexRune(self.List, r) != -1
-	return inList == !self.Not
-}
-
-func (self List) Len() int {
-	return lenOne
-}
-
-func (self List) Index(s string) (int, []int) {
-	for i, r := range s {
-		if self.Not == (runes.IndexRune(self.List, r) == -1) {
-			return i, segmentsByRuneLength[utf8.RuneLen(r)]
-		}
-	}
-
-	return -1, nil
-}
-
-func (self List) String() string {
-	var not string
-	if self.Not {
-		not = "!"
-	}
-
-	return fmt.Sprintf("<list:%s[%s]>", not, string(self.List))
-}

+ 0 - 81
vendor/github.com/gobwas/glob/match/match.go

@@ -1,81 +0,0 @@
-package match
-
-// todo common table of rune's length
-
-import (
-	"fmt"
-	"strings"
-)
-
-const lenOne = 1
-const lenZero = 0
-const lenNo = -1
-
-type Matcher interface {
-	Match(string) bool
-	Index(string) (int, []int)
-	Len() int
-	String() string
-}
-
-type Matchers []Matcher
-
-func (m Matchers) String() string {
-	var s []string
-	for _, matcher := range m {
-		s = append(s, fmt.Sprint(matcher))
-	}
-
-	return fmt.Sprintf("%s", strings.Join(s, ","))
-}
-
-// appendMerge merges and sorts given already SORTED and UNIQUE segments.
-func appendMerge(target, sub []int) []int {
-	lt, ls := len(target), len(sub)
-	out := make([]int, 0, lt+ls)
-
-	for x, y := 0, 0; x < lt || y < ls; {
-		if x >= lt {
-			out = append(out, sub[y:]...)
-			break
-		}
-
-		if y >= ls {
-			out = append(out, target[x:]...)
-			break
-		}
-
-		xValue := target[x]
-		yValue := sub[y]
-
-		switch {
-
-		case xValue == yValue:
-			out = append(out, xValue)
-			x++
-			y++
-
-		case xValue < yValue:
-			out = append(out, xValue)
-			x++
-
-		case yValue < xValue:
-			out = append(out, yValue)
-			y++
-
-		}
-	}
-
-	target = append(target[:0], out...)
-
-	return target
-}
-
-func reverseSegments(input []int) {
-	l := len(input)
-	m := l / 2
-
-	for i := 0; i < m; i++ {
-		input[i], input[l-i-1] = input[l-i-1], input[i]
-	}
-}

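appendMerge above is a two-pointer merge of already sorted, duplicate-free segment slices. A standalone illustration of the same merge; mergeSorted is a hypothetical helper written here for clarity, not part of the package:

```go
package main

import "fmt"

// mergeSorted mirrors appendMerge above: it merges two sorted,
// duplicate-free int slices into one sorted, duplicate-free slice.
func mergeSorted(a, b []int) []int {
	out := make([]int, 0, len(a)+len(b))
	for x, y := 0, 0; x < len(a) || y < len(b); {
		switch {
		case x >= len(a):
			out = append(out, b[y])
			y++
		case y >= len(b):
			out = append(out, a[x])
			x++
		case a[x] == b[y]:
			out = append(out, a[x])
			x++
			y++
		case a[x] < b[y]:
			out = append(out, a[x])
			x++
		default:
			out = append(out, b[y])
			y++
		}
	}
	return out
}

func main() {
	fmt.Println(mergeSorted([]int{1, 3, 5}, []int{2, 3, 6})) // [1 2 3 5 6]
}
```
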
+ 0 - 49
vendor/github.com/gobwas/glob/match/max.go

@@ -1,49 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"unicode/utf8"
-)
-
-type Max struct {
-	Limit int
-}
-
-func NewMax(l int) Max {
-	return Max{l}
-}
-
-func (self Max) Match(s string) bool {
-	var l int
-	for range s {
-		l += 1
-		if l > self.Limit {
-			return false
-		}
-	}
-
-	return true
-}
-
-func (self Max) Index(s string) (int, []int) {
-	segments := acquireSegments(self.Limit + 1)
-	segments = append(segments, 0)
-	var count int
-	for i, r := range s {
-		count++
-		if count > self.Limit {
-			break
-		}
-		segments = append(segments, i+utf8.RuneLen(r))
-	}
-
-	return 0, segments
-}
-
-func (self Max) Len() int {
-	return lenNo
-}
-
-func (self Max) String() string {
-	return fmt.Sprintf("<max:%d>", self.Limit)
-}

+ 0 - 57
vendor/github.com/gobwas/glob/match/min.go

@@ -1,57 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"unicode/utf8"
-)
-
-type Min struct {
-	Limit int
-}
-
-func NewMin(l int) Min {
-	return Min{l}
-}
-
-func (self Min) Match(s string) bool {
-	var l int
-	for range s {
-		l += 1
-		if l >= self.Limit {
-			return true
-		}
-	}
-
-	return false
-}
-
-func (self Min) Index(s string) (int, []int) {
-	var count int
-
-	c := len(s) - self.Limit + 1
-	if c <= 0 {
-		return -1, nil
-	}
-
-	segments := acquireSegments(c)
-	for i, r := range s {
-		count++
-		if count >= self.Limit {
-			segments = append(segments, i+utf8.RuneLen(r))
-		}
-	}
-
-	if len(segments) == 0 {
-		return -1, nil
-	}
-
-	return 0, segments
-}
-
-func (self Min) Len() int {
-	return lenNo
-}
-
-func (self Min) String() string {
-	return fmt.Sprintf("<min:%d>", self.Limit)
-}

+ 0 - 27
vendor/github.com/gobwas/glob/match/nothing.go

@@ -1,27 +0,0 @@
-package match
-
-import (
-	"fmt"
-)
-
-type Nothing struct{}
-
-func NewNothing() Nothing {
-	return Nothing{}
-}
-
-func (self Nothing) Match(s string) bool {
-	return len(s) == 0
-}
-
-func (self Nothing) Index(s string) (int, []int) {
-	return 0, segments0
-}
-
-func (self Nothing) Len() int {
-	return lenZero
-}
-
-func (self Nothing) String() string {
-	return fmt.Sprintf("<nothing>")
-}

+ 0 - 50
vendor/github.com/gobwas/glob/match/prefix.go

@@ -1,50 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-	"unicode/utf8"
-)
-
-type Prefix struct {
-	Prefix string
-}
-
-func NewPrefix(p string) Prefix {
-	return Prefix{p}
-}
-
-func (self Prefix) Index(s string) (int, []int) {
-	idx := strings.Index(s, self.Prefix)
-	if idx == -1 {
-		return -1, nil
-	}
-
-	length := len(self.Prefix)
-	var sub string
-	if len(s) > idx+length {
-		sub = s[idx+length:]
-	} else {
-		sub = ""
-	}
-
-	segments := acquireSegments(len(sub) + 1)
-	segments = append(segments, length)
-	for i, r := range sub {
-		segments = append(segments, length+i+utf8.RuneLen(r))
-	}
-
-	return idx, segments
-}
-
-func (self Prefix) Len() int {
-	return lenNo
-}
-
-func (self Prefix) Match(s string) bool {
-	return strings.HasPrefix(s, self.Prefix)
-}
-
-func (self Prefix) String() string {
-	return fmt.Sprintf("<prefix:%s>", self.Prefix)
-}

+ 0 - 55
vendor/github.com/gobwas/glob/match/prefix_any.go

@@ -1,55 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-	"unicode/utf8"
-
-	sutil "github.com/gobwas/glob/util/strings"
-)
-
-type PrefixAny struct {
-	Prefix     string
-	Separators []rune
-}
-
-func NewPrefixAny(s string, sep []rune) PrefixAny {
-	return PrefixAny{s, sep}
-}
-
-func (self PrefixAny) Index(s string) (int, []int) {
-	idx := strings.Index(s, self.Prefix)
-	if idx == -1 {
-		return -1, nil
-	}
-
-	n := len(self.Prefix)
-	sub := s[idx+n:]
-	i := sutil.IndexAnyRunes(sub, self.Separators)
-	if i > -1 {
-		sub = sub[:i]
-	}
-
-	seg := acquireSegments(len(sub) + 1)
-	seg = append(seg, n)
-	for i, r := range sub {
-		seg = append(seg, n+i+utf8.RuneLen(r))
-	}
-
-	return idx, seg
-}
-
-func (self PrefixAny) Len() int {
-	return lenNo
-}
-
-func (self PrefixAny) Match(s string) bool {
-	if !strings.HasPrefix(s, self.Prefix) {
-		return false
-	}
-	return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1
-}
-
-func (self PrefixAny) String() string {
-	return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators))
-}

+ 0 - 62
vendor/github.com/gobwas/glob/match/prefix_suffix.go

@@ -1,62 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-)
-
-type PrefixSuffix struct {
-	Prefix, Suffix string
-}
-
-func NewPrefixSuffix(p, s string) PrefixSuffix {
-	return PrefixSuffix{p, s}
-}
-
-func (self PrefixSuffix) Index(s string) (int, []int) {
-	prefixIdx := strings.Index(s, self.Prefix)
-	if prefixIdx == -1 {
-		return -1, nil
-	}
-
-	suffixLen := len(self.Suffix)
-	if suffixLen <= 0 {
-		return prefixIdx, []int{len(s) - prefixIdx}
-	}
-
-	if (len(s) - prefixIdx) <= 0 {
-		return -1, nil
-	}
-
-	segments := acquireSegments(len(s) - prefixIdx)
-	for sub := s[prefixIdx:]; ; {
-		suffixIdx := strings.LastIndex(sub, self.Suffix)
-		if suffixIdx == -1 {
-			break
-		}
-
-		segments = append(segments, suffixIdx+suffixLen)
-		sub = sub[:suffixIdx]
-	}
-
-	if len(segments) == 0 {
-		releaseSegments(segments)
-		return -1, nil
-	}
-
-	reverseSegments(segments)
-
-	return prefixIdx, segments
-}
-
-func (self PrefixSuffix) Len() int {
-	return lenNo
-}
-
-func (self PrefixSuffix) Match(s string) bool {
-	return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix)
-}
-
-func (self PrefixSuffix) String() string {
-	return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix)
-}

+ 0 - 48
vendor/github.com/gobwas/glob/match/range.go

@@ -1,48 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"unicode/utf8"
-)
-
-type Range struct {
-	Lo, Hi rune
-	Not    bool
-}
-
-func NewRange(lo, hi rune, not bool) Range {
-	return Range{lo, hi, not}
-}
-
-func (self Range) Len() int {
-	return lenOne
-}
-
-func (self Range) Match(s string) bool {
-	r, w := utf8.DecodeRuneInString(s)
-	if len(s) > w {
-		return false
-	}
-
-	inRange := r >= self.Lo && r <= self.Hi
-
-	return inRange == !self.Not
-}
-
-func (self Range) Index(s string) (int, []int) {
-	for i, r := range s {
-		if self.Not != (r >= self.Lo && r <= self.Hi) {
-			return i, segmentsByRuneLength[utf8.RuneLen(r)]
-		}
-	}
-
-	return -1, nil
-}
-
-func (self Range) String() string {
-	var not string
-	if self.Not {
-		not = "!"
-	}
-	return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi))
-}

+ 0 - 77
vendor/github.com/gobwas/glob/match/row.go

@@ -1,77 +0,0 @@
-package match
-
-import (
-	"fmt"
-)
-
-type Row struct {
-	Matchers    Matchers
-	RunesLength int
-	Segments    []int
-}
-
-func NewRow(len int, m ...Matcher) Row {
-	return Row{
-		Matchers:    Matchers(m),
-		RunesLength: len,
-		Segments:    []int{len},
-	}
-}
-
-func (self Row) matchAll(s string) bool {
-	var idx int
-	for _, m := range self.Matchers {
-		length := m.Len()
-
-		var next, i int
-		for next = range s[idx:] {
-			i++
-			if i == length {
-				break
-			}
-		}
-
-		if i < length || !m.Match(s[idx:idx+next+1]) {
-			return false
-		}
-
-		idx += next + 1
-	}
-
-	return true
-}
-
-func (self Row) lenOk(s string) bool {
-	var i int
-	for range s {
-		i++
-		if i > self.RunesLength {
-			return false
-		}
-	}
-	return self.RunesLength == i
-}
-
-func (self Row) Match(s string) bool {
-	return self.lenOk(s) && self.matchAll(s)
-}
-
-func (self Row) Len() (l int) {
-	return self.RunesLength
-}
-
-func (self Row) Index(s string) (int, []int) {
-	for i := range s {
-		if len(s[i:]) < self.RunesLength {
-			break
-		}
-		if self.matchAll(s[i:]) {
-			return i, self.Segments
-		}
-	}
-	return -1, nil
-}
-
-func (self Row) String() string {
-	return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers)
-}

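Row chains fixed-length matchers and checks the total rune length up front. Built by hand (a sketch using the constructors from the removed files), the pattern `[abc]at` corresponds to a 3-rune row of a List and a Text:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob/match"
)

func main() {
	// A 3-rune row: one character out of "abc", followed by the text "at".
	row := match.NewRow(
		3,
		match.NewList([]rune("abc"), false),
		match.NewText("at"),
	)

	fmt.Println(row.Match("cat"))  // true
	fmt.Println(row.Match("fat"))  // false: 'f' is not in the list
	fmt.Println(row.Match("cats")) // false: wrong rune length
}
```
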
+ 0 - 91
vendor/github.com/gobwas/glob/match/segments.go

@@ -1,91 +0,0 @@
-package match
-
-import (
-	"sync"
-)
-
-type SomePool interface {
-	Get() []int
-	Put([]int)
-}
-
-var segmentsPools [1024]sync.Pool
-
-func toPowerOfTwo(v int) int {
-	v--
-	v |= v >> 1
-	v |= v >> 2
-	v |= v >> 4
-	v |= v >> 8
-	v |= v >> 16
-	v++
-
-	return v
-}
-
-const (
-	cacheFrom             = 16
-	cacheToAndHigher      = 1024
-	cacheFromIndex        = 15
-	cacheToAndHigherIndex = 1023
-)
-
-var (
-	segments0 = []int{0}
-	segments1 = []int{1}
-	segments2 = []int{2}
-	segments3 = []int{3}
-	segments4 = []int{4}
-)
-
-var segmentsByRuneLength [5][]int = [5][]int{
-	0: segments0,
-	1: segments1,
-	2: segments2,
-	3: segments3,
-	4: segments4,
-}
-
-func init() {
-	for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
-		func(i int) {
-			segmentsPools[i-1] = sync.Pool{New: func() interface{} {
-				return make([]int, 0, i)
-			}}
-		}(i)
-	}
-}
-
-func getTableIndex(c int) int {
-	p := toPowerOfTwo(c)
-	switch {
-	case p >= cacheToAndHigher:
-		return cacheToAndHigherIndex
-	case p <= cacheFrom:
-		return cacheFromIndex
-	default:
-		return p - 1
-	}
-}
-
-func acquireSegments(c int) []int {
-	// making a []int with capacity smaller than cacheFrom
-	// is faster than acquiring one from the pool
-	if c < cacheFrom {
-		return make([]int, 0, c)
-	}
-
-	return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
-}
-
-func releaseSegments(s []int) {
-	c := cap(s)
-
-	// slices with capacity smaller than cacheFrom are cheaper to
-	// reallocate than to pool, so they are simply dropped here
-	if c < cacheFrom {
-		return
-	}
-
-	segmentsPools[getTableIndex(c)].Put(s)
-}

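The pool table above buckets slice capacities by rounding up to the next power of two. A standalone sketch of the same bit trick; nextPowerOfTwo simply mirrors the unexported toPowerOfTwo for illustration:

```go
package main

import "fmt"

// nextPowerOfTwo mirrors the unexported toPowerOfTwo above: it propagates
// the highest set bit into every lower position, then adds one, which
// rounds v up to the next power of two.
func nextPowerOfTwo(v int) int {
	v--
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16
	v++
	return v
}

func main() {
	fmt.Println(nextPowerOfTwo(100))  // 128
	fmt.Println(nextPowerOfTwo(1024)) // 1024
}
```
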
+ 0 - 43
vendor/github.com/gobwas/glob/match/single.go

@@ -1,43 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"github.com/gobwas/glob/util/runes"
-	"unicode/utf8"
-)
-
-// Single represents `?`: any single non-separator character
-type Single struct {
-	Separators []rune
-}
-
-func NewSingle(s []rune) Single {
-	return Single{s}
-}
-
-func (self Single) Match(s string) bool {
-	r, w := utf8.DecodeRuneInString(s)
-	if len(s) > w {
-		return false
-	}
-
-	return runes.IndexRune(self.Separators, r) == -1
-}
-
-func (self Single) Len() int {
-	return lenOne
-}
-
-func (self Single) Index(s string) (int, []int) {
-	for i, r := range s {
-		if runes.IndexRune(self.Separators, r) == -1 {
-			return i, segmentsByRuneLength[utf8.RuneLen(r)]
-		}
-	}
-
-	return -1, nil
-}
-
-func (self Single) String() string {
-	return fmt.Sprintf("<single:![%s]>", string(self.Separators))
-}

+ 0 - 35
vendor/github.com/gobwas/glob/match/suffix.go

@@ -1,35 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-)
-
-type Suffix struct {
-	Suffix string
-}
-
-func NewSuffix(s string) Suffix {
-	return Suffix{s}
-}
-
-func (self Suffix) Len() int {
-	return lenNo
-}
-
-func (self Suffix) Match(s string) bool {
-	return strings.HasSuffix(s, self.Suffix)
-}
-
-func (self Suffix) Index(s string) (int, []int) {
-	idx := strings.Index(s, self.Suffix)
-	if idx == -1 {
-		return -1, nil
-	}
-
-	return 0, []int{idx + len(self.Suffix)}
-}
-
-func (self Suffix) String() string {
-	return fmt.Sprintf("<suffix:%s>", self.Suffix)
-}

+ 0 - 43
vendor/github.com/gobwas/glob/match/suffix_any.go

@@ -1,43 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-
-	sutil "github.com/gobwas/glob/util/strings"
-)
-
-type SuffixAny struct {
-	Suffix     string
-	Separators []rune
-}
-
-func NewSuffixAny(s string, sep []rune) SuffixAny {
-	return SuffixAny{s, sep}
-}
-
-func (self SuffixAny) Index(s string) (int, []int) {
-	idx := strings.Index(s, self.Suffix)
-	if idx == -1 {
-		return -1, nil
-	}
-
-	i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1
-
-	return i, []int{idx + len(self.Suffix) - i}
-}
-
-func (self SuffixAny) Len() int {
-	return lenNo
-}
-
-func (self SuffixAny) Match(s string) bool {
-	if !strings.HasSuffix(s, self.Suffix) {
-		return false
-	}
-	return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1
-}
-
-func (self SuffixAny) String() string {
-	return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix)
-}

+ 0 - 33
vendor/github.com/gobwas/glob/match/super.go

@@ -1,33 +0,0 @@
-package match
-
-import (
-	"fmt"
-)
-
-type Super struct{}
-
-func NewSuper() Super {
-	return Super{}
-}
-
-func (self Super) Match(s string) bool {
-	return true
-}
-
-func (self Super) Len() int {
-	return lenNo
-}
-
-func (self Super) Index(s string) (int, []int) {
-	segments := acquireSegments(len(s) + 1)
-	for i := range s {
-		segments = append(segments, i)
-	}
-	segments = append(segments, len(s))
-
-	return 0, segments
-}
-
-func (self Super) String() string {
-	return fmt.Sprintf("<super>")
-}

+ 0 - 45
vendor/github.com/gobwas/glob/match/text.go

@@ -1,45 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-	"unicode/utf8"
-)
-
-// Text represents a raw (literal) string to match
-type Text struct {
-	Str         string
-	RunesLength int
-	BytesLength int
-	Segments    []int
-}
-
-func NewText(s string) Text {
-	return Text{
-		Str:         s,
-		RunesLength: utf8.RuneCountInString(s),
-		BytesLength: len(s),
-		Segments:    []int{len(s)},
-	}
-}
-
-func (self Text) Match(s string) bool {
-	return self.Str == s
-}
-
-func (self Text) Len() int {
-	return self.RunesLength
-}
-
-func (self Text) Index(s string) (int, []int) {
-	index := strings.Index(s, self.Str)
-	if index == -1 {
-		return -1, nil
-	}
-
-	return index, self.Segments
-}
-
-func (self Text) String() string {
-	return fmt.Sprintf("<text:`%v`>", self.Str)
-}

+ 0 - 148
vendor/github.com/gobwas/glob/readme.md

@@ -1,148 +0,0 @@
-# glob.[go](https://golang.org)
-
-[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url]
-
-> Go Globbing Library.
-
-## Install
-
-```shell
-    go get github.com/gobwas/glob
-```
-
-## Example
-
-```go
-
-package main
-
-import "github.com/gobwas/glob"
-
-func main() {
-    var g glob.Glob
-    
-    // create simple glob
-    g = glob.MustCompile("*.github.com")
-    g.Match("api.github.com") // true
-    
-    // quote meta characters and then create simple glob 
-    g = glob.MustCompile(glob.QuoteMeta("*.github.com"))
-    g.Match("*.github.com") // true
-    
-    // create new glob with set of delimiters as ["."]
-    g = glob.MustCompile("api.*.com", '.')
-    g.Match("api.github.com") // true
-    g.Match("api.gi.hub.com") // false
-    
-    // create new glob with set of delimiters as ["."]
-    // but now with super wildcard
-    g = glob.MustCompile("api.**.com", '.')
-    g.Match("api.github.com") // true
-    g.Match("api.gi.hub.com") // true
-        
-    // create glob with single symbol wildcard
-    g = glob.MustCompile("?at")
-    g.Match("cat") // true
-    g.Match("fat") // true
-    g.Match("at") // false
-    
-    // create glob with single symbol wildcard and delimiters ['f']
-    g = glob.MustCompile("?at", 'f')
-    g.Match("cat") // true
-    g.Match("fat") // false
-    g.Match("at") // false 
-    
-    // create glob with character-list matchers 
-    g = glob.MustCompile("[abc]at")
-    g.Match("cat") // true
-    g.Match("bat") // true
-    g.Match("fat") // false
-    g.Match("at") // false
-    
-    // create glob with character-list matchers 
-    g = glob.MustCompile("[!abc]at")
-    g.Match("cat") // false
-    g.Match("bat") // false
-    g.Match("fat") // true
-    g.Match("at") // false 
-    
-    // create glob with character-range matchers 
-    g = glob.MustCompile("[a-c]at")
-    g.Match("cat") // true
-    g.Match("bat") // true
-    g.Match("fat") // false
-    g.Match("at") // false
-    
-    // create glob with character-range matchers 
-    g = glob.MustCompile("[!a-c]at")
-    g.Match("cat") // false
-    g.Match("bat") // false
-    g.Match("fat") // true
-    g.Match("at") // false 
-    
-    // create glob with pattern-alternatives list 
-    g = glob.MustCompile("{cat,bat,[fr]at}")
-    g.Match("cat") // true
-    g.Match("bat") // true
-    g.Match("fat") // true
-    g.Match("rat") // true
-    g.Match("at") // false 
-    g.Match("zat") // false 
-}
-
-```
-
-## Performance
-
-This library is designed for compile-once patterns: compilation may take some time, but
-matching strings afterwards is much faster than re-parsing the pattern on every call.
-
-If you do not reuse the compiled `glob.Glob` object and instead run `g := glob.MustCompile(pattern); g.Match(...)` every time, your code will be much slower.
-
-Run `go test -bench=.` from source root to see the benchmarks:
-
-Pattern | Fixture | Match | Speed (ns/op)
---------|---------|-------|--------------
-`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432
-`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199
-`https://*.google.*` | `https://account.google.com` | `true` | 96
-`https://*.google.*` | `https://google.com` | `false` | 66
-`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163
-`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197
-`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22
-`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24
-`abc*` | `abcdef` | `true` | 8.15
-`abc*` | `af` | `false` | 5.68
-`*def` | `abcdef` | `true` | 8.84
-`*def` | `af` | `false` | 5.74
-`ab*ef` | `abcdef` | `true` | 15.2
-`ab*ef` | `af` | `false` | 10.4
-
-The same things with `regexp` package:
-
-Pattern | Fixture | Match | Speed (ns/op)
---------|---------|-------|--------------
-`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553
-`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383
-`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205
-`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767
-`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435
-`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674
-`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039
-`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272
-`^abc.*$` | `abcdef` | `true` | 237
-`^abc.*$` | `af` | `false` | 100
-`^.*def$` | `abcdef` | `true` | 464
-`^.*def$` | `af` | `false` | 265
-`^ab.*ef$` | `abcdef` | `true` | 375
-`^ab.*ef$` | `af` | `false` | 145
-
-[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg
-[godoc-url]: https://godoc.org/github.com/gobwas/glob
-[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master
-[travis-url]: https://travis-ci.org/gobwas/glob
-
-## Syntax
-
-Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm),
-except that `**` (the super-asterisk) is not sensitive to separators.

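The README's Performance section recommends compiling a pattern once and reusing it. A minimal compile-once sketch (again assuming the module is pulled from upstream rather than from the removed vendor directory):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

// hostGlob is compiled once, at package initialization, and reused for
// every Match call, which is the usage the README recommends.
var hostGlob = glob.MustCompile("*.github.com", '.')

func main() {
	fmt.Println(hostGlob.Match("api.github.com")) // true
	fmt.Println(hostGlob.Match("github.com"))     // false: `*` cannot cross the '.' separator
}
```
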
+ 0 - 122
vendor/github.com/gobwas/glob/syntax/ast/ast.go

@@ -1,122 +0,0 @@
-package ast
-
-import (
-	"bytes"
-	"fmt"
-)
-
-type Node struct {
-	Parent   *Node
-	Children []*Node
-	Value    interface{}
-	Kind     Kind
-}
-
-func NewNode(k Kind, v interface{}, ch ...*Node) *Node {
-	n := &Node{
-		Kind:  k,
-		Value: v,
-	}
-	for _, c := range ch {
-		Insert(n, c)
-	}
-	return n
-}
-
-func (a *Node) Equal(b *Node) bool {
-	if a.Kind != b.Kind {
-		return false
-	}
-	if a.Value != b.Value {
-		return false
-	}
-	if len(a.Children) != len(b.Children) {
-		return false
-	}
-	for i, c := range a.Children {
-		if !c.Equal(b.Children[i]) {
-			return false
-		}
-	}
-	return true
-}
-
-func (a *Node) String() string {
-	var buf bytes.Buffer
-	buf.WriteString(a.Kind.String())
-	if a.Value != nil {
-		buf.WriteString(" =")
-		buf.WriteString(fmt.Sprintf("%v", a.Value))
-	}
-	if len(a.Children) > 0 {
-		buf.WriteString(" [")
-		for i, c := range a.Children {
-			if i > 0 {
-				buf.WriteString(", ")
-			}
-			buf.WriteString(c.String())
-		}
-		buf.WriteString("]")
-	}
-	return buf.String()
-}
-
-func Insert(parent *Node, children ...*Node) {
-	parent.Children = append(parent.Children, children...)
-	for _, ch := range children {
-		ch.Parent = parent
-	}
-}
-
-type List struct {
-	Not   bool
-	Chars string
-}
-
-type Range struct {
-	Not    bool
-	Lo, Hi rune
-}
-
-type Text struct {
-	Text string
-}
-
-type Kind int
-
-const (
-	KindNothing Kind = iota
-	KindPattern
-	KindList
-	KindRange
-	KindText
-	KindAny
-	KindSuper
-	KindSingle
-	KindAnyOf
-)
-
-func (k Kind) String() string {
-	switch k {
-	case KindNothing:
-		return "Nothing"
-	case KindPattern:
-		return "Pattern"
-	case KindList:
-		return "List"
-	case KindRange:
-		return "Range"
-	case KindText:
-		return "Text"
-	case KindAny:
-		return "Any"
-	case KindSuper:
-		return "Super"
-	case KindSingle:
-		return "Single"
-	case KindAnyOf:
-		return "AnyOf"
-	default:
-		return ""
-	}
-}

+ 0 - 157
vendor/github.com/gobwas/glob/syntax/ast/parser.go

@@ -1,157 +0,0 @@
-package ast
-
-import (
-	"errors"
-	"fmt"
-	"github.com/gobwas/glob/syntax/lexer"
-	"unicode/utf8"
-)
-
-type Lexer interface {
-	Next() lexer.Token
-}
-
-type parseFn func(*Node, Lexer) (parseFn, *Node, error)
-
-func Parse(lexer Lexer) (*Node, error) {
-	var parser parseFn
-
-	root := NewNode(KindPattern, nil)
-
-	var (
-		tree *Node
-		err  error
-	)
-	for parser, tree = parserMain, root; parser != nil; {
-		parser, tree, err = parser(tree, lexer)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return root, nil
-}
-
-func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) {
-	for {
-		token := lex.Next()
-		switch token.Type {
-		case lexer.EOF:
-			return nil, tree, nil
-
-		case lexer.Error:
-			return nil, tree, errors.New(token.Raw)
-
-		case lexer.Text:
-			Insert(tree, NewNode(KindText, Text{token.Raw}))
-			return parserMain, tree, nil
-
-		case lexer.Any:
-			Insert(tree, NewNode(KindAny, nil))
-			return parserMain, tree, nil
-
-		case lexer.Super:
-			Insert(tree, NewNode(KindSuper, nil))
-			return parserMain, tree, nil
-
-		case lexer.Single:
-			Insert(tree, NewNode(KindSingle, nil))
-			return parserMain, tree, nil
-
-		case lexer.RangeOpen:
-			return parserRange, tree, nil
-
-		case lexer.TermsOpen:
-			a := NewNode(KindAnyOf, nil)
-			Insert(tree, a)
-
-			p := NewNode(KindPattern, nil)
-			Insert(a, p)
-
-			return parserMain, p, nil
-
-		case lexer.Separator:
-			p := NewNode(KindPattern, nil)
-			Insert(tree.Parent, p)
-
-			return parserMain, p, nil
-
-		case lexer.TermsClose:
-			return parserMain, tree.Parent.Parent, nil
-
-		default:
-			return nil, tree, fmt.Errorf("unexpected token: %s", token)
-		}
-	}
-	return nil, tree, fmt.Errorf("unknown error")
-}
-
-func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) {
-	var (
-		not   bool
-		lo    rune
-		hi    rune
-		chars string
-	)
-	for {
-		token := lex.Next()
-		switch token.Type {
-		case lexer.EOF:
-			return nil, tree, errors.New("unexpected end")
-
-		case lexer.Error:
-			return nil, tree, errors.New(token.Raw)
-
-		case lexer.Not:
-			not = true
-
-		case lexer.RangeLo:
-			r, w := utf8.DecodeRuneInString(token.Raw)
-			if len(token.Raw) > w {
-				return nil, tree, fmt.Errorf("unexpected length of lo character")
-			}
-			lo = r
-
-		case lexer.RangeBetween:
-			//
-
-		case lexer.RangeHi:
-			r, w := utf8.DecodeRuneInString(token.Raw)
-			if len(token.Raw) > w {
-				return nil, tree, fmt.Errorf("unexpected length of hi character")
-			}
-
-			hi = r
-
-			if hi < lo {
-				return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo))
-			}
-
-		case lexer.Text:
-			chars = token.Raw
-
-		case lexer.RangeClose:
-			isRange := lo != 0 && hi != 0
-			isChars := chars != ""
-
-			if isChars == isRange {
-				return nil, tree, fmt.Errorf("could not parse range")
-			}
-
-			if isRange {
-				Insert(tree, NewNode(KindRange, Range{
-					Lo:  lo,
-					Hi:  hi,
-					Not: not,
-				}))
-			} else {
-				Insert(tree, NewNode(KindList, List{
-					Chars: chars,
-					Not:   not,
-				}))
-			}
-
-			return parserMain, tree, nil
-		}
-	}
-}

+ 0 - 273
vendor/github.com/gobwas/glob/syntax/lexer/lexer.go

@@ -1,273 +0,0 @@
-package lexer
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/gobwas/glob/util/runes"
-	"unicode/utf8"
-)
-
-const (
-	char_any           = '*'
-	char_comma         = ','
-	char_single        = '?'
-	char_escape        = '\\'
-	char_range_open    = '['
-	char_range_close   = ']'
-	char_terms_open    = '{'
-	char_terms_close   = '}'
-	char_range_not     = '!'
-	char_range_between = '-'
-)
-
-var specials = []byte{
-	char_any,
-	char_single,
-	char_escape,
-	char_range_open,
-	char_range_close,
-	char_terms_open,
-	char_terms_close,
-}
-
-func Special(c byte) bool {
-	return bytes.IndexByte(specials, c) != -1
-}
-
-type tokens []Token
-
-func (i *tokens) shift() (ret Token) {
-	ret = (*i)[0]
-	copy(*i, (*i)[1:])
-	*i = (*i)[:len(*i)-1]
-	return
-}
-
-func (i *tokens) push(v Token) {
-	*i = append(*i, v)
-}
-
-func (i *tokens) empty() bool {
-	return len(*i) == 0
-}
-
-var eof rune = 0
-
-type lexer struct {
-	data string
-	pos  int
-	err  error
-
-	tokens     tokens
-	termsLevel int
-
-	lastRune     rune
-	lastRuneSize int
-	hasRune      bool
-}
-
-func NewLexer(source string) *lexer {
-	l := &lexer{
-		data:   source,
-		tokens: tokens(make([]Token, 0, 4)),
-	}
-	return l
-}
-
-func (l *lexer) Next() Token {
-	if l.err != nil {
-		return Token{Error, l.err.Error()}
-	}
-	if !l.tokens.empty() {
-		return l.tokens.shift()
-	}
-
-	l.fetchItem()
-	return l.Next()
-}
-
-func (l *lexer) peek() (r rune, w int) {
-	if l.pos == len(l.data) {
-		return eof, 0
-	}
-
-	r, w = utf8.DecodeRuneInString(l.data[l.pos:])
-	if r == utf8.RuneError {
-		l.errorf("could not read rune")
-		r = eof
-		w = 0
-	}
-
-	return
-}
-
-func (l *lexer) read() rune {
-	if l.hasRune {
-		l.hasRune = false
-		l.seek(l.lastRuneSize)
-		return l.lastRune
-	}
-
-	r, s := l.peek()
-	l.seek(s)
-
-	l.lastRune = r
-	l.lastRuneSize = s
-
-	return r
-}
-
-func (l *lexer) seek(w int) {
-	l.pos += w
-}
-
-func (l *lexer) unread() {
-	if l.hasRune {
-		l.errorf("could not unread rune")
-		return
-	}
-	l.seek(-l.lastRuneSize)
-	l.hasRune = true
-}
-
-func (l *lexer) errorf(f string, v ...interface{}) {
-	l.err = fmt.Errorf(f, v...)
-}
-
-func (l *lexer) inTerms() bool {
-	return l.termsLevel > 0
-}
-
-func (l *lexer) termsEnter() {
-	l.termsLevel++
-}
-
-func (l *lexer) termsLeave() {
-	l.termsLevel--
-}
-
-var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open}
-var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma)
-
-func (l *lexer) fetchItem() {
-	r := l.read()
-	switch {
-	case r == eof:
-		l.tokens.push(Token{EOF, ""})
-
-	case r == char_terms_open:
-		l.termsEnter()
-		l.tokens.push(Token{TermsOpen, string(r)})
-
-	case r == char_comma && l.inTerms():
-		l.tokens.push(Token{Separator, string(r)})
-
-	case r == char_terms_close && l.inTerms():
-		l.tokens.push(Token{TermsClose, string(r)})
-		l.termsLeave()
-
-	case r == char_range_open:
-		l.tokens.push(Token{RangeOpen, string(r)})
-		l.fetchRange()
-
-	case r == char_single:
-		l.tokens.push(Token{Single, string(r)})
-
-	case r == char_any:
-		if l.read() == char_any {
-			l.tokens.push(Token{Super, string(r) + string(r)})
-		} else {
-			l.unread()
-			l.tokens.push(Token{Any, string(r)})
-		}
-
-	default:
-		l.unread()
-
-		var breakers []rune
-		if l.inTerms() {
-			breakers = inTermsBreakers
-		} else {
-			breakers = inTextBreakers
-		}
-		l.fetchText(breakers)
-	}
-}
-
-func (l *lexer) fetchRange() {
-	var wantHi bool
-	var wantClose bool
-	var seenNot bool
-	for {
-		r := l.read()
-		if r == eof {
-			l.errorf("unexpected end of input")
-			return
-		}
-
-		if wantClose {
-			if r != char_range_close {
-				l.errorf("expected close range character")
-			} else {
-				l.tokens.push(Token{RangeClose, string(r)})
-			}
-			return
-		}
-
-		if wantHi {
-			l.tokens.push(Token{RangeHi, string(r)})
-			wantClose = true
-			continue
-		}
-
-		if !seenNot && r == char_range_not {
-			l.tokens.push(Token{Not, string(r)})
-			seenNot = true
-			continue
-		}
-
-		if n, w := l.peek(); n == char_range_between {
-			l.seek(w)
-			l.tokens.push(Token{RangeLo, string(r)})
-			l.tokens.push(Token{RangeBetween, string(n)})
-			wantHi = true
-			continue
-		}
-
-		l.unread() // unread first peek and fetch as text
-		l.fetchText([]rune{char_range_close})
-		wantClose = true
-	}
-}
-
-func (l *lexer) fetchText(breakers []rune) {
-	var data []rune
-	var escaped bool
-
-reading:
-	for {
-		r := l.read()
-		if r == eof {
-			break
-		}
-
-		if !escaped {
-			if r == char_escape {
-				escaped = true
-				continue
-			}
-
-			if runes.IndexRune(breakers, r) != -1 {
-				l.unread()
-				break reading
-			}
-		}
-
-		escaped = false
-		data = append(data, r)
-	}
-
-	if len(data) > 0 {
-		l.tokens.push(Token{Text, string(data)})
-	}
-}

+ 0 - 88
vendor/github.com/gobwas/glob/syntax/lexer/token.go

@@ -1,88 +0,0 @@
-package lexer
-
-import "fmt"
-
-type TokenType int
-
-const (
-	EOF TokenType = iota
-	Error
-	Text
-	Char
-	Any
-	Super
-	Single
-	Not
-	Separator
-	RangeOpen
-	RangeClose
-	RangeLo
-	RangeHi
-	RangeBetween
-	TermsOpen
-	TermsClose
-)
-
-func (tt TokenType) String() string {
-	switch tt {
-	case EOF:
-		return "eof"
-
-	case Error:
-		return "error"
-
-	case Text:
-		return "text"
-
-	case Char:
-		return "char"
-
-	case Any:
-		return "any"
-
-	case Super:
-		return "super"
-
-	case Single:
-		return "single"
-
-	case Not:
-		return "not"
-
-	case Separator:
-		return "separator"
-
-	case RangeOpen:
-		return "range_open"
-
-	case RangeClose:
-		return "range_close"
-
-	case RangeLo:
-		return "range_lo"
-
-	case RangeHi:
-		return "range_hi"
-
-	case RangeBetween:
-		return "range_between"
-
-	case TermsOpen:
-		return "terms_open"
-
-	case TermsClose:
-		return "terms_close"
-
-	default:
-		return "undef"
-	}
-}
-
-type Token struct {
-	Type TokenType
-	Raw  string
-}
-
-func (t Token) String() string {
-	return fmt.Sprintf("%v<%q>", t.Type, t.Raw)
-}

+ 0 - 14
vendor/github.com/gobwas/glob/syntax/syntax.go

@@ -1,14 +0,0 @@
-package syntax
-
-import (
-	"github.com/gobwas/glob/syntax/ast"
-	"github.com/gobwas/glob/syntax/lexer"
-)
-
-func Parse(s string) (*ast.Node, error) {
-	return ast.Parse(lexer.NewLexer(s))
-}
-
-func Special(b byte) bool {
-	return lexer.Special(b)
-}

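syntax.Parse is a thin wrapper that feeds the lexer into the AST parser. A sketch that dumps the resulting tree, relying only on Node.String from the removed ast package:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob/syntax"
)

func main() {
	node, err := syntax.Parse("a*[b-c]{d,e}")
	if err != nil {
		panic(err)
	}
	// Prints the nested Pattern/Text/Any/Range/AnyOf structure of the pattern.
	fmt.Println(node)
}
```
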
+ 0 - 154
vendor/github.com/gobwas/glob/util/runes/runes.go

@@ -1,154 +0,0 @@
-package runes
-
-func Index(s, needle []rune) int {
-	ls, ln := len(s), len(needle)
-
-	switch {
-	case ln == 0:
-		return 0
-	case ln == 1:
-		return IndexRune(s, needle[0])
-	case ln == ls:
-		if Equal(s, needle) {
-			return 0
-		}
-		return -1
-	case ln > ls:
-		return -1
-	}
-
-head:
-	for i := 0; i < ls && ls-i >= ln; i++ {
-		for y := 0; y < ln; y++ {
-			if s[i+y] != needle[y] {
-				continue head
-			}
-		}
-
-		return i
-	}
-
-	return -1
-}
-
-func LastIndex(s, needle []rune) int {
-	ls, ln := len(s), len(needle)
-
-	switch {
-	case ln == 0:
-		if ls == 0 {
-			return 0
-		}
-		return ls
-	case ln == 1:
-		return IndexLastRune(s, needle[0])
-	case ln == ls:
-		if Equal(s, needle) {
-			return 0
-		}
-		return -1
-	case ln > ls:
-		return -1
-	}
-
-head:
-	for i := ls - 1; i >= 0 && i >= ln; i-- {
-		for y := ln - 1; y >= 0; y-- {
-			if s[i-(ln-y-1)] != needle[y] {
-				continue head
-			}
-		}
-
-		return i - ln + 1
-	}
-
-	return -1
-}
-
-// IndexAny returns the index of the first instance of any Unicode code point
-// from chars in s, or -1 if no Unicode code point from chars is present in s.
-func IndexAny(s, chars []rune) int {
-	if len(chars) > 0 {
-		for i, c := range s {
-			for _, m := range chars {
-				if c == m {
-					return i
-				}
-			}
-		}
-	}
-	return -1
-}
-
-func Contains(s, needle []rune) bool {
-	return Index(s, needle) >= 0
-}
-
-func Max(s []rune) (max rune) {
-	for _, r := range s {
-		if r > max {
-			max = r
-		}
-	}
-
-	return
-}
-
-func Min(s []rune) rune {
-	min := rune(-1)
-	for _, r := range s {
-		if min == -1 {
-			min = r
-			continue
-		}
-
-		if r < min {
-			min = r
-		}
-	}
-
-	return min
-}
-
-func IndexRune(s []rune, r rune) int {
-	for i, c := range s {
-		if c == r {
-			return i
-		}
-	}
-	return -1
-}
-
-func IndexLastRune(s []rune, r rune) int {
-	for i := len(s) - 1; i >= 0; i-- {
-		if s[i] == r {
-			return i
-		}
-	}
-
-	return -1
-}
-
-func Equal(a, b []rune) bool {
-	if len(a) == len(b) {
-		for i := 0; i < len(a); i++ {
-			if a[i] != b[i] {
-				return false
-			}
-		}
-
-		return true
-	}
-
-	return false
-}
-
-// HasPrefix tests whether the string s begins with prefix.
-func HasPrefix(s, prefix []rune) bool {
-	return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
-}
-
-// HasSuffix tests whether the string s ends with suffix.
-func HasSuffix(s, suffix []rune) bool {
-	return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
-}

+ 0 - 39
vendor/github.com/gobwas/glob/util/strings/strings.go

@@ -1,39 +0,0 @@
-package strings
-
-import (
-	"strings"
-	"unicode/utf8"
-)
-
-func IndexAnyRunes(s string, rs []rune) int {
-	for _, r := range rs {
-		if i := strings.IndexRune(s, r); i != -1 {
-			return i
-		}
-	}
-
-	return -1
-}
-
-func LastIndexAnyRunes(s string, rs []rune) int {
-	for _, r := range rs {
-		i := -1
-		if 0 <= r && r < utf8.RuneSelf {
-			i = strings.LastIndexByte(s, byte(r))
-		} else {
-			sub := s
-			offset := 0
-			for len(sub) > 0 {
-				// search the remaining suffix and remember the absolute
-				// index of the last occurrence found so far
-				j := strings.IndexRune(sub, r)
-				if j == -1 {
-					break
-				}
-				i = offset + j
-				_, w := utf8.DecodeRuneInString(sub[j:])
-				offset += j + w
-				sub = sub[j+w:]
-			}
-		}
-		if i != -1 {
-			return i
-		}
-	}
-	return -1
-}

+ 0 - 27
vendor/github.com/google/go-querystring/LICENSE

@@ -1,27 +0,0 @@
-Copyright (c) 2013 Google. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Some files were not shown because too many files changed in this diff