lex.go

// Copyright 2016 José Santos <henrique_1609@me.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jet

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

// item represents a token or text string returned from the scanner.
type item struct {
	typ itemType // The type of this item.
	pos Pos      // The starting position, in bytes, of this item in the input string.
	val string   // The value of this item.
}
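
// String renders the item for debugging and error messages: EOF and error
// items get special forms, keywords are wrapped in angle brackets, and long
// values are truncated.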
func (i item) String() string {
	switch {
	case i.typ == itemEOF:
		return "EOF"
	case i.typ == itemError:
		return i.val
	case i.typ > itemKeyword:
		return fmt.Sprintf("<%s>", i.val)
	case len(i.val) > 10:
		return fmt.Sprintf("%.10q...", i.val)
	}
	return fmt.Sprintf("%q", i.val)
}

// itemType identifies the type of lex items.
type itemType int

const (
	itemError        itemType = iota // error occurred; value is text of error
	itemBool                         // boolean constant
	itemChar                         // printable ASCII character; grab bag for comma etc.
	itemCharConstant                 // character constant
	itemComplex                      // complex constant (1+2i); imaginary is just a number
	itemEOF
	itemField      // alphanumeric identifier starting with '.'
	itemIdentifier // alphanumeric identifier not starting with '.'
	itemLeftDelim  // left action delimiter
	itemLeftParen  // '(' inside action
	itemNumber     // simple number, including imaginary
	itemPipe       // pipe symbol
	itemRawString  // raw quoted string (includes quotes)
	itemRightDelim // right action delimiter
	itemRightParen // ')' inside action
	itemSpace      // run of spaces separating arguments
	itemString     // quoted string (includes quotes)
	itemText       // plain text
	itemAssign
	itemEquals
	itemNotEquals
	itemGreat
	itemGreatEquals
	itemLess
	itemLessEquals
	itemComma
	itemSemicolon
	itemAdd
	itemMinus
	itemMul
	itemDiv
	itemMod
	itemColon
	itemTernary
	itemLeftBrackets
	itemRightBrackets
	itemUnderscore

	// Keywords appear after all the rest.
	itemKeyword // used only to delimit the keywords
	itemExtends
	itemImport
	itemInclude
	itemBlock
	itemEnd
	itemYield
	itemContent
	itemIf
	itemElse
	itemRange
	itemTry
	itemCatch
	itemReturn
	itemAnd
	itemOr
	itemNot
	itemNil
	itemMSG
	itemTrans
)
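
// key maps keyword strings to their item types.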
var key = map[string]itemType{
	"extends": itemExtends,
	"import":  itemImport,
	"include": itemInclude,
	"block":   itemBlock,
	"end":     itemEnd,
	"yield":   itemYield,
	"content": itemContent,
	"if":      itemIf,
	"else":    itemElse,
	"range":   itemRange,
	"try":     itemTry,
	"catch":   itemCatch,
	"return":  itemReturn,
	"and":     itemAnd,
	"or":      itemOr,
	"not":     itemNot,
	"nil":     itemNil,
	"msg":     itemMSG,
	"trans":   itemTrans,
}

const eof = -1
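
// Default action delimiters, comment markers, and whitespace trim markers.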
const (
	defaultLeftDelim  = "{{"
	defaultRightDelim = "}}"
	leftComment       = "{*"
	rightComment      = "*}"
	leftTrimMarker    = "- "
	rightTrimMarker   = " -"
	trimMarkerLen     = Pos(len(leftTrimMarker))
)

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn

// lexer holds the state of the scanner.
type lexer struct {
	name           string    // the name of the input; used only for error reports
	input          string    // the string being scanned
	state          stateFn   // the next lexing function to enter
	pos            Pos       // current position in the input
	start          Pos       // start position of this item
	width          Pos       // width of last rune read from input
	lastPos        Pos       // position of most recent item returned by nextItem
	items          chan item // channel of scanned items
	parenDepth     int       // nesting depth of ( ) exprs
	lastType       itemType
	leftDelim      string
	rightDelim     string
	trimRightDelim string
}
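
// setDelimiters overrides the default action delimiters ("{{" and "}}").
// An empty argument leaves the corresponding delimiter unchanged.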
func (l *lexer) setDelimiters(leftDelim, rightDelim string) {
	if leftDelim != "" {
		l.leftDelim = leftDelim
	}
	if rightDelim != "" {
		l.rightDelim = rightDelim
	}
}

// next returns the next rune in the input.
func (l *lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		l.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.width = Pos(w)
	l.pos += l.width
	return r
}

// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}

// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
	l.pos -= l.width
}

// emit passes an item back to the client.
func (l *lexer) emit(t itemType) {
	l.lastType = t
	l.items <- item{t, l.start, l.input[l.start:l.pos]}
	l.start = l.pos
}

// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
	l.start = l.pos
}

// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
	if strings.IndexRune(valid, l.next()) >= 0 {
		return true
	}
	l.backup()
	return false
}

// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
	for strings.IndexRune(valid, l.next()) >= 0 {
	}
	l.backup()
}

// lineNumber reports which line we're on, based on the position of
// the previous item returned by nextItem. Doing it this way
// means we don't have to worry about peek double counting.
func (l *lexer) lineNumber() int {
	return 1 + strings.Count(l.input[:l.lastPos], "\n")
}

// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
	return nil
}

// nextItem returns the next item from the input.
// Called by the parser, not in the lexing goroutine.
func (l *lexer) nextItem() item {
	item := <-l.items
	l.lastPos = item.pos
	return item
}

// drain drains the output so the lexing goroutine will exit.
// Called by the parser, not in the lexing goroutine.
func (l *lexer) drain() {
	for range l.items {
	}
}

// lex creates a new scanner for the input string.
func lex(name, input string, run bool) *lexer {
	l := &lexer{
		name:           name,
		input:          input,
		items:          make(chan item),
		leftDelim:      defaultLeftDelim,
		rightDelim:     defaultRightDelim,
		trimRightDelim: rightTrimMarker + defaultRightDelim,
	}
	if run {
		l.run()
	}
	return l
}

// run runs the state machine for the lexer.
func (l *lexer) run() {
	go func() {
		for l.state = lexText; l.state != nil; {
			l.state = l.state(l)
		}
		close(l.items)
	}()
}
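
// A minimal usage sketch (illustrative only; "example.jet" and src are
// placeholder names, not part of this file): the parser constructs the lexer
// with run=true and pulls tokens with nextItem until it sees itemEOF or
// itemError.
//
//	l := lex("example.jet", src, true)
//	for it := l.nextItem(); it.typ != itemEOF && it.typ != itemError; it = l.nextItem() {
//		// handle the token ...
//	}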

// state functions
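
// lexText scans plain text until a left action delimiter or a left comment
// marker is found, honoring trim markers when emitting the text item.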
func lexText(l *lexer) stateFn {
	for {
		// without breaking the API, this seems like a reasonable workaround to correctly parse comments
		i := strings.IndexByte(l.input[l.pos:], l.leftDelim[0])  // index of suspected left delimiter
		ic := strings.IndexByte(l.input[l.pos:], leftComment[0]) // index of suspected left comment marker
		if ic > -1 && ic < i { // use whichever is lower for future lexing
			i = ic
		}
		// if no token is found, skip till the end of template
		if i == -1 {
			l.pos = Pos(len(l.input))
			break
		} else {
			l.pos += Pos(i)
			if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
				ld := Pos(len(l.leftDelim))
				trimLength := Pos(0)
				if strings.HasPrefix(l.input[l.pos+ld:], leftTrimMarker) {
					trimLength = rightTrimLength(l.input[l.start:l.pos])
				}
				l.pos -= trimLength
				if l.pos > l.start {
					l.emit(itemText)
				}
				l.pos += trimLength
				l.ignore()
				return lexLeftDelim
			}
			if strings.HasPrefix(l.input[l.pos:], leftComment) {
				if l.pos > l.start {
					l.emit(itemText)
				}
				return lexComment
			}
		}
		if l.next() == eof {
			break
		}
	}
	// Correctly reached EOF.
	if l.pos > l.start {
		l.emit(itemText)
	}
	l.emit(itemEOF)
	return nil
}
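
// lexLeftDelim scans the left delimiter, which is known to be present,
// together with an optional trim marker.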
func lexLeftDelim(l *lexer) stateFn {
	l.pos += Pos(len(l.leftDelim))
	l.emit(itemLeftDelim)
	trimSpace := strings.HasPrefix(l.input[l.pos:], leftTrimMarker)
	if trimSpace {
		l.pos += trimMarkerLen
		l.ignore()
	}
	l.parenDepth = 0
	return lexInsideAction
}

// lexComment scans a comment. The left comment marker is known to be present.
func lexComment(l *lexer) stateFn {
	l.pos += Pos(len(leftComment))
	i := strings.Index(l.input[l.pos:], rightComment)
	if i < 0 {
		return l.errorf("unclosed comment")
	}
	l.pos += Pos(i + len(rightComment))
	l.ignore()
	return lexText
}

// lexRightDelim scans the right delimiter, which is known to be present.
func lexRightDelim(l *lexer) stateFn {
	trimSpace := strings.HasPrefix(l.input[l.pos:], rightTrimMarker)
	if trimSpace {
		l.pos += trimMarkerLen
		l.ignore()
	}
	l.pos += Pos(len(l.rightDelim))
	l.emit(itemRightDelim)
	if trimSpace {
		l.pos += leftTrimLength(l.input[l.pos:])
		l.ignore()
	}
	return lexText
}

// lexInsideAction scans the elements inside action delimiters.
func lexInsideAction(l *lexer) stateFn {
	// Either number, quoted string, or identifier.
	// Spaces separate arguments; runs of spaces turn into itemSpace.
	// Pipe symbols separate and are emitted.
	delim, _ := l.atRightDelim()
	if delim {
		if l.parenDepth == 0 {
			return lexRightDelim
		}
		return l.errorf("unclosed left parenthesis")
	}
	switch r := l.next(); {
	case r == eof:
		return l.errorf("unclosed action")
	case isSpace(r):
		return lexSpace
	case r == ',':
		l.emit(itemComma)
	case r == ';':
		l.emit(itemSemicolon)
	case r == '*':
		l.emit(itemMul)
	case r == '/':
		l.emit(itemDiv)
	case r == '%':
		l.emit(itemMod)
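	// '-' and '+' may be either binary operators or the sign of a numeric
	// literal; the previous token type (lastType) decides: a sign is only
	// possible when the previous token could not have ended an operand.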
	case r == '-':
		if r := l.peek(); '0' <= r && r <= '9' &&
			itemAdd != l.lastType &&
			itemMinus != l.lastType &&
			itemNumber != l.lastType &&
			itemIdentifier != l.lastType &&
			itemString != l.lastType &&
			itemRawString != l.lastType &&
			itemCharConstant != l.lastType &&
			itemBool != l.lastType &&
			itemField != l.lastType &&
			itemChar != l.lastType &&
			itemTrans != l.lastType {
			l.backup()
			return lexNumber
		}
		l.emit(itemMinus)
	case r == '+':
		if r := l.peek(); '0' <= r && r <= '9' &&
			itemAdd != l.lastType &&
			itemMinus != l.lastType &&
			itemNumber != l.lastType &&
			itemIdentifier != l.lastType &&
			itemString != l.lastType &&
			itemRawString != l.lastType &&
			itemCharConstant != l.lastType &&
			itemBool != l.lastType &&
			itemField != l.lastType &&
			itemChar != l.lastType &&
			itemTrans != l.lastType {
			l.backup()
			return lexNumber
		}
		l.emit(itemAdd)
	case r == '?':
		l.emit(itemTernary)
	case r == '&':
		if l.next() == '&' {
			l.emit(itemAnd)
		} else {
			l.backup()
		}
	case r == '<':
		if l.next() == '=' {
			l.emit(itemLessEquals)
		} else {
			l.backup()
			l.emit(itemLess)
		}
	case r == '>':
		if l.next() == '=' {
			l.emit(itemGreatEquals)
		} else {
			l.backup()
			l.emit(itemGreat)
		}
	case r == '!':
		if l.next() == '=' {
			l.emit(itemNotEquals)
		} else {
			l.backup()
			l.emit(itemNot)
		}
	case r == '=':
		if l.next() == '=' {
			l.emit(itemEquals)
		} else {
			l.backup()
			l.emit(itemAssign)
		}
	case r == ':':
		if l.next() == '=' {
			l.emit(itemAssign)
		} else {
			l.backup()
			l.emit(itemColon)
		}
	case r == '|':
		if l.next() == '|' {
			l.emit(itemOr)
		} else {
			l.backup()
			l.emit(itemPipe)
		}
	case r == '"':
		return lexQuote
	case r == '`':
		return lexRawQuote
	case r == '\'':
		return lexChar
	case r == '.':
		// special look-ahead for ".field" so we don't break l.backup().
		if l.pos < Pos(len(l.input)) {
			r := l.input[l.pos]
			if r < '0' || '9' < r {
				return lexField
			}
		}
		fallthrough // '.' can start a number.
	case '0' <= r && r <= '9':
		l.backup()
		return lexNumber
	case r == '_':
		if !isAlphaNumeric(l.peek()) {
			l.emit(itemUnderscore)
			return lexInsideAction
		}
		fallthrough // no space? must be the start of an identifier
	case isAlphaNumeric(r):
		l.backup()
		return lexIdentifier
	case r == '[':
		l.emit(itemLeftBrackets)
	case r == ']':
		l.emit(itemRightBrackets)
	case r == '(':
		l.emit(itemLeftParen)
		l.parenDepth++
	case r == ')':
		l.emit(itemRightParen)
		l.parenDepth--
		if l.parenDepth < 0 {
			return l.errorf("unexpected right paren %#U", r)
		}
	case r <= unicode.MaxASCII && unicode.IsPrint(r):
		l.emit(itemChar)
	default:
		return l.errorf("unrecognized character in action: %#U", r)
	}
	return lexInsideAction
}

// lexSpace scans a run of space characters.
// One space has already been seen.
func lexSpace(l *lexer) stateFn {
	var numSpaces int
	for isSpace(l.peek()) {
		numSpaces++
		l.next()
	}
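	// If the run of spaces ends immediately before a trim-marked right
	// delimiter, the final space belongs to the trim marker rather than to an
	// itemSpace.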
	if strings.HasPrefix(l.input[l.pos-1:], l.trimRightDelim) {
		l.backup()
		if numSpaces == 1 {
			return lexRightDelim
		}
	}
	l.emit(itemSpace)
	return lexInsideAction
}

// lexIdentifier scans an alphanumeric.
func lexIdentifier(l *lexer) stateFn {
Loop:
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r):
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			if !l.atTerminator() {
				return l.errorf("bad character %#U", r)
			}
			switch {
			case key[word] > itemKeyword:
				l.emit(key[word])
			case word[0] == '.':
				l.emit(itemField)
			case word == "true", word == "false":
				l.emit(itemBool)
			default:
				l.emit(itemIdentifier)
			}
			break Loop
		}
	}
	return lexInsideAction
}

// lexField scans a field: .Alphanumeric.
// The . has been scanned.
func lexField(l *lexer) stateFn {
	if l.atTerminator() {
		// Nothing interesting follows -> "." or "$".
		l.emit(itemIdentifier)
		return lexInsideAction
	}
	var r rune
	for {
		r = l.next()
		if !isAlphaNumeric(r) {
			l.backup()
			break
		}
	}
	if !l.atTerminator() {
		return l.errorf("bad character %#U", r)
	}
	l.emit(itemField)
	return lexInsideAction
}

// atTerminator reports whether the input is at a valid termination character
// to appear after an identifier. Breaks .X.Y into two pieces. Also catches
// cases like "$x+2" not being acceptable without a space, in case we decide
// one day to implement arithmetic.
func (l *lexer) atTerminator() bool {
	r := l.peek()
	if isSpace(r) {
		return true
	}
	switch r {
	case eof, '.', ',', '|', ':', ')', '=', '(', ';', '?', '[', ']', '+', '-', '/', '%', '*', '&', '!', '<', '>':
		return true
	}
	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
	// succeed but should fail) but only in extremely rare cases caused by willfully
	// bad choice of delimiter.
	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
		return true
	}
	return false
}

// lexChar scans a character constant. The initial quote is already
// scanned. Syntax checking is done by the parser.
func lexChar(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated character constant")
		case '\'':
			break Loop
		}
	}
	l.emit(itemCharConstant)
	return lexInsideAction
}

// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
// and "089" - but when it's wrong the input is invalid and the parser (via
// strconv) will notice.
func lexNumber(l *lexer) stateFn {
	if !l.scanNumber() {
		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
	}
	l.emit(itemNumber)
	return lexInsideAction
}
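
// scanNumber consumes as much of a numeric literal as it can (optional sign,
// hex prefix, fraction, exponent, imaginary suffix) and reports whether the
// text looks like a number; precise validation is left to the parser.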
func (l *lexer) scanNumber() bool {
	// Optional leading sign.
	l.accept("+-")
	// Is it hex?
	digits := "0123456789"
	if l.accept("0") && l.accept("xX") {
		digits = "0123456789abcdefABCDEF"
	}
	l.acceptRun(digits)
	if l.accept(".") {
		l.acceptRun(digits)
	}
	if l.accept("eE") {
		l.accept("+-")
		l.acceptRun("0123456789")
	}
	// Is it imaginary?
	l.accept("i")
	// Next thing mustn't be alphanumeric.
	if isAlphaNumeric(l.peek()) {
		l.next()
		return false
	}
	return true
}

// lexQuote scans a quoted string.
func lexQuote(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated quoted string")
		case '"':
			break Loop
		}
	}
	l.emit(itemString)
	return lexInsideAction
}

// lexRawQuote scans a raw quoted string.
func lexRawQuote(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case eof:
			return l.errorf("unterminated raw quoted string")
		case '`':
			break Loop
		}
	}
	l.emit(itemRawString)
	return lexInsideAction
}

// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
	return r == ' ' || r == '\t' || r == '\r' || r == '\n'
}

// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}

// rightTrimLength returns the length of the spaces at the end of the string.
func rightTrimLength(s string) Pos {
	return Pos(len(s) - len(strings.TrimRightFunc(s, isSpace)))
}

// leftTrimLength returns the length of the spaces at the beginning of the string.
func leftTrimLength(s string) Pos {
	return Pos(len(s) - len(strings.TrimLeftFunc(s, isSpace)))
}

// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
	if strings.HasPrefix(l.input[l.pos:], l.trimRightDelim) { // With trim marker.
		return true, true
	}
	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
		return true, false
	}
	return false, false
}