// Copyright 2017 Gerasimos Maropoulos. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lexer

import (
	"github.com/kataras/iris/core/router/macro/interpreter/token"
)

// Lexer scans a macro expression, e.g. "{id:int}", and produces
// tokens for the macro interpreter's parser.
type Lexer struct {
	input   string
	pos     int  // current position in input (points to the current char)
	readPos int  // current reading position in input (after the current char)
	ch      byte // current char under examination
}

// New returns a new Lexer for the given source input.
func New(src string) *Lexer {
	l := &Lexer{
		input: src,
	}
	// step to the first character in order to be ready
	l.readChar()
	return l
}
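
// A minimal usage sketch (illustrative only): construct a Lexer and
// drain it with NextToken, defined below, until token.EOF.
//
//	l := New("{id:int}")
//	for t := l.NextToken(); t.Type != token.EOF; t = l.NextToken() {
//		fmt.Printf("%v %q\n", t.Type, t.Literal)
//	}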

// readChar advances the lexer by one character; it sets
// l.ch to 0 (EOF) when the input is exhausted.
func (l *Lexer) readChar() {
	if l.readPos >= len(l.input) {
		l.ch = 0
	} else {
		l.ch = l.input[l.readPos]
	}
	l.pos = l.readPos
	l.readPos++
}
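
// To illustrate the two cursors (a hand-derived example): after New("ab"),
// l.pos == 0, l.readPos == 1 and l.ch == 'a'; one more readChar gives
// l.pos == 1, l.ch == 'b', and a third sets l.ch = 0, signaling EOF.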

const (
	Begin = '{' // token.LBRACE
	End   = '}' // token.RBRACE
)

// resolveTokenType maps a single character to its token type.
func resolveTokenType(ch byte) token.TokenType {
	switch ch {
	case Begin:
		return token.LBRACE
	case End:
		return token.RBRACE
	// Let's keep it simple: no evaluation for logical operators,
	// we are not making a new programming language here.
	// ||
	// case '|':
	// 	if l.peekChar() == '|' {
	// 		ch := l.ch
	// 		l.readChar()
	// 		t = token.Token{Type: token.OR, Literal: string(ch) + string(l.ch)}
	// 	}
	// ==
	case ':':
		return token.COLON
	case '(':
		return token.LPAREN
	case ')':
		return token.RPAREN
	case ',':
		return token.COMMA
	// literals
	case 0:
		return token.EOF
	default:
		return token.IDENT
	}
}

// NextToken consumes and returns the next token of the input.
func (l *Lexer) NextToken() (t token.Token) {
	l.skipWhitespace()
	typ := resolveTokenType(l.ch)
	t.Type = typ

	switch typ {
	case token.EOF:
		t.Literal = ""
	case token.IDENT:
		if isLetter(l.ch) {
			// letters
			lit := l.readIdentifier()
			typ := token.LookupIdent(lit)
			t = l.newToken(typ, lit)
			return
		}
		if isDigit(l.ch) {
			// numbers
			lit := l.readNumber()
			t = l.newToken(token.INT, lit)
			return
		}

		t = l.newTokenRune(token.ILLEGAL, l.ch)
	default:
		t = l.newTokenRune(typ, l.ch)
	}
	l.readChar() // set the pos to the next
	return
}
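
// An illustrative trace (hand-derived, not generated output): lexing
// "{id:int}" produces, in order,
//
//	LBRACE "{", IDENT "id", COLON ":", IDENT "int", RBRACE "}", EOF ""
//
// where token.LookupIdent may refine an IDENT into a keyword type, if
// the token package defines one for that literal.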

// NextDynamicToken returns the next token without grammar checks;
// it accepts anything, even spaces, as a func argument.
func (l *Lexer) NextDynamicToken() (t token.Token) {
	// calculate anything, even spaces.

	// numbers
	lit := l.readNumber()
	if lit != "" {
		return l.newToken(token.INT, lit)
	}

	lit = l.readIdentifierFuncArgument()
	return l.newToken(token.IDENT, lit)
}

// readIdentifierFuncArgument skips any otherwise-illegal token while
// inside parentheses; this is what allows a custom regexp to be set
// inside a func.
func (l *Lexer) readIdentifierFuncArgument() string {
	pos := l.pos
	// stop at EOF as well, otherwise an unterminated '(' would loop forever.
	for l.ch != 0 && resolveTokenType(l.ch) != token.RPAREN {
		l.readChar()
	}

	return l.input[pos:l.pos]
}
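
// For instance (illustrative), with the remaining input "^[a-z]+$)" this
// reads and returns "^[a-z]+$", leaving the lexer on the closing ')'
// so the caller consumes the RPAREN normally.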

// PeekNextTokenType returns the type of the next token without consuming it.
// Useful when we want to peek but not continue, e.g. empty param functions 'even()'.
func (l *Lexer) PeekNextTokenType() token.TokenType {
	if len(l.input)-1 > l.pos {
		ch := l.input[l.pos]
		return resolveTokenType(ch)
	}
	return resolveTokenType(0) // EOF
}

func (l *Lexer) newToken(tokenType token.TokenType, lit string) token.Token {
	t := token.Token{
		Type:    tokenType,
		Literal: lit,
		Start:   l.pos,
		End:     l.pos,
	}
	// remember: after a multi-char literal is read, l.pos points one past
	// its last char, and we want Start and End to both be inclusive,
	// so the user can locate the expression by just marking it.
	if l.pos > 1 && len(lit) > 1 {
		t.End = l.pos - 1
		t.Start = t.End - len(lit) + 1
	}

	return t
}
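
// A worked example (hand-derived): lexing "{id}", readIdentifier stops
// on '}' with l.pos == 3, so the branch above yields End = 2 and
// Start = 1, the inclusive byte range of "id" within the input.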

func (l *Lexer) newTokenRune(tokenType token.TokenType, ch byte) token.Token {
	return l.newToken(tokenType, string(ch))
}

func (l *Lexer) skipWhitespace() {
	for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
		l.readChar()
	}
}

func (l *Lexer) readIdentifier() string {
	pos := l.pos
	for isLetter(l.ch) {
		l.readChar()
	}
	return l.input[pos:l.pos]
}

func isLetter(ch byte) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_'
}

func (l *Lexer) readNumber() string {
	pos := l.pos
	for isDigit(l.ch) {
		l.readChar()
	}
	return l.input[pos:l.pos]
}

func isDigit(ch byte) bool {
	return '0' <= ch && ch <= '9'
}