Removed most redundant fully-qualified enum names

nocturn9x 2021-09-14 10:53:02 +02:00
parent 1042093eeb
commit f9ce99b3ff
1 changed file with 57 additions and 57 deletions
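Before the diff, a note on why this refactor compiles: Nim enums declared without the {.pure.} pragma inject their member names into the enclosing scope, so the short and the fully-qualified spellings resolve to the same value. A minimal sketch, assuming a simplified stand-in for the real TokenType (which lives in the token module re-exported at the top of this file):

type
  TokenType = enum
    LeftParen, RightParen, Identifier

# Without {.pure.}, both spellings name the same enum value:
assert LeftParen == TokenType.LeftParen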


@@ -25,64 +25,64 @@ export token # Makes Token available when importing the lexer module

 # Table of all tokens except reserved keywords
 const tokens = to_table({
-    '(': TokenType.LeftParen, ')': TokenType.RightParen,
-    '{': TokenType.LeftBrace, '}': TokenType.RightBrace,
-    '.': TokenType.Dot, ',': TokenType.Comma,
-    '-': TokenType.Minus, '+': TokenType.Plus,
-    ';': TokenType.Semicolon, '*': TokenType.Asterisk,
-    '>': TokenType.GreaterThan, '<': TokenType.LessThan,
-    '=': TokenType.Equal, '~': TokenType.Tilde,
-    '/': TokenType.Slash, '%': TokenType.Percentage,
-    '[': TokenType.LeftBracket, ']': TokenType.RightBracket,
-    ':': TokenType.Colon, '^': TokenType.Caret,
-    '&': TokenType.Ampersand, '|': TokenType.Pipe,
-    '!': TokenType.ExclamationMark})
+    '(': LeftParen, ')': RightParen,
+    '{': LeftBrace, '}': RightBrace,
+    '.': Dot, ',': Comma,
+    '-': Minus, '+': Plus,
+    ';': Semicolon, '*': Asterisk,
+    '>': GreaterThan, '<': LessThan,
+    '=': Equal, '~': Tilde,
+    '/': Slash, '%': Percentage,
+    '[': LeftBracket, ']': RightBracket,
+    ':': Colon, '^': Caret,
+    '&': Ampersand, '|': Pipe,
+    '!': ExclamationMark})

 # Table of all triple-character tokens
-const triple = to_table({"//=": TokenType.InplaceFloorDiv,
-                         "**=": TokenType.InplacePow
+const triple = to_table({"//=": InplaceFloorDiv,
+                         "**=": InplacePow
 })

 # Table of all double-character tokens
-const double = to_table({"**": TokenType.DoubleAsterisk,
-                         ">>": TokenType.RightShift,
-                         "<<": TokenType.LeftShift,
-                         "==": TokenType.DoubleEqual,
-                         "!=": TokenType.NotEqual,
-                         ">=": TokenType.GreaterOrEqual,
-                         "<=": TokenType.LessOrEqual,
-                         "//": TokenType.FloorDiv,
-                         "+=": TokenType.InplaceAdd,
-                         "-=": TokenType.InplaceSub,
-                         "/=": TokenType.InplaceDiv,
-                         "*=": TokenType.InplaceMul,
-                         "^=": TokenType.InplaceXor,
-                         "&=": TokenType.InplaceAnd,
-                         "|=": TokenType.InplaceOr,
-                         "~=": TokenType.InplaceNot,
-                         "%=": TokenType.InplaceMod
+const double = to_table({"**": DoubleAsterisk,
+                         ">>": RightShift,
+                         "<<": LeftShift,
+                         "==": DoubleEqual,
+                         "!=": NotEqual,
+                         ">=": GreaterOrEqual,
+                         "<=": LessOrEqual,
+                         "//": FloorDiv,
+                         "+=": InplaceAdd,
+                         "-=": InplaceSub,
+                         "/=": InplaceDiv,
+                         "*=": InplaceMul,
+                         "^=": InplaceXor,
+                         "&=": InplaceAnd,
+                         "|=": InplaceOr,
+                         "~=": InplaceNot,
+                         "%=": InplaceMod
 })

 # Constant table storing all the reserved keywords (parsed as identifiers)
 const reserved = to_table({
-    "fun": TokenType.Fun, "raise": TokenType.Raise,
-    "if": TokenType.If, "else": TokenType.Else,
-    "for": TokenType.For, "while": TokenType.While,
-    "var": TokenType.Var, "nil": TokenType.NIL,
-    "true": TokenType.True, "false": TokenType.False,
-    "return": TokenType.Return, "break": TokenType.Break,
-    "continue": TokenType.Continue, "inf": TokenType.Inf,
-    "nan": TokenType.NaN, "is": TokenType.Is,
-    "lambda": TokenType.Lambda, "class": TokenType.Class,
-    "async": TokenType.Async, "import": TokenType.Import,
-    "isnot": TokenType.IsNot, "from": TokenType.From,
-    "let": TokenType.Let, "const": TokenType.Const,
-    "assert": TokenType.Assert, "or": TokenType.LogicalOr,
-    "and": TokenType.LogicalAnd, "del": TokenType.Del,
-    "async": TokenType.Async, "await": TokenType.Await,
-    "dynamyc": TokenType.Dynamic, "foreach": TokenType.Foreach,
-    "inf": TokenType.Infinity
+    "fun": Fun, "raise": Raise,
+    "if": If, "else": Else,
+    "for": For, "while": While,
+    "var": Var, "nil": NIL,
+    "true": True, "false": False,
+    "return": Return, "break": Break,
+    "continue": Continue, "inf": TokenType.Inf,
+    "nan": TokenType.NaN, "is": Is,
+    "lambda": Lambda, "class": Class,
+    "async": Async, "import": Import,
+    "isnot": IsNot, "from": From,
+    "let": Let, "const": Const,
+    "assert": Assert, "or": LogicalOr,
+    "and": LogicalAnd, "del": Del,
+    "async": Async, "await": Await,
+    "dynamyc": Dynamic, "foreach": Foreach,
+    "inf": Infinity
 })

 type
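Note that two entries stay fully qualified even after this commit: TokenType.Inf and TokenType.NaN. That is most likely deliberate, since Nim's system module already exposes Inf and NaN as float constants, so the bare names would be ambiguous. The code that consumes the three operator tables is outside this diff; the sketch below is only a hypothetical illustration of the usual longest-match-first dispatch, with matchOperator and the trimmed-down tables invented for the example:

import std/tables

type TokenType = enum
  Asterisk, DoubleAsterisk, InplacePow

# Stand-ins for the tokens/double/triple tables in the hunk above
const singles = {'*': Asterisk}.toTable()
const doubles = {"**": DoubleAsterisk}.toTable()
const triples = {"**=": InplacePow}.toTable()

proc matchOperator(src: string; pos: int): (TokenType, int) =
  ## Longest match wins: try 3-char, then 2-char, then 1-char operators.
  if pos + 2 < src.len and src[pos..pos + 2] in triples:
    return (triples[src[pos..pos + 2]], 3)
  if pos + 1 < src.len and src[pos..pos + 1] in doubles:
    return (doubles[src[pos..pos + 1]], 2)
  (singles[src[pos]], 1)

echo matchOperator("a **= b", 2)  # -> (InplacePow, 3)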
@@ -349,7 +349,7 @@ proc parseString(self: Lexer, delimiter: char, mode: string = "single") =
                 self.error("unexpected EOL while parsing multi-line string literal")
         else:
             discard self.step()
-    self.createToken(TokenType.String)
+    self.createToken(String)

 proc parseBinary(self: Lexer) =
@@ -358,7 +358,7 @@ proc parseBinary(self: Lexer) =
         if not self.check(['0', '1']):
             self.error(&"invalid digit '{self.peek()}' in binary literal")
         discard self.step()
-    self.createToken(TokenType.Binary)
+    self.createToken(Binary)
     # To make our life easier, we pad the binary number in here already
     while (self.tokens[^1].lexeme.len() - 2) mod 8 != 0:
         self.tokens[^1].lexeme = "0b" & "0" & self.tokens[^1].lexeme[2..^1]
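The padding loop in this hunk is straightforward to check in isolation: it keeps prepending a '0' right after the "0b" prefix until the number of digits is a multiple of eight. A worked example of just that loop, assuming the lexeme always carries the "0b" prefix:

var lexeme = "0b101"
while (lexeme.len - 2) mod 8 != 0:
  lexeme = "0b" & "0" & lexeme[2..^1]
assert lexeme == "0b00000101"  # 3 digits padded up to 8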
@@ -371,7 +371,7 @@ proc parseOctal(self: Lexer) =
         if self.peek() notin '0'..'7':
             self.error(&"invalid digit '{self.peek()}' in octal literal")
         discard self.step()
-    self.createToken(TokenType.Octal)
+    self.createToken(Octal)

 proc parseHex(self: Lexer) =
@@ -380,7 +380,7 @@ proc parseHex(self: Lexer) =
         if not self.peek().isDigit() and self.peek().toLowerAscii() notin 'a'..'f':
             self.error(&"invalid hexadecimal literal")
         discard self.step()
-    self.createToken(TokenType.Hex)
+    self.createToken(Hex)

 proc parseNumber(self: Lexer) =
@@ -407,11 +407,11 @@ proc parseNumber(self: Lexer) =
             discard self.step()
             self.parseOctal()
         else:
-            var kind: TokenType = TokenType.Integer
+            var kind: TokenType = Integer
             while isDigit(self.peek()):
                 discard self.step()
             if self.check(['e', 'E']):
-                kind = TokenType.Float
+                kind = Float
                 discard self.step()
                 while self.peek().isDigit():
                     discard self.step()
@@ -420,7 +420,7 @@ proc parseNumber(self: Lexer) =
                 discard self.step()
                 if not isDigit(self.peek()):
                     self.error("invalid float number literal")
-                kind = TokenType.Float
+                kind = Float
                 while isDigit(self.peek()):
                     discard self.step()
                 if self.check(['e', 'E']):
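Taken together, these two parseNumber hunks show the promotion rule: kind starts as Integer and flips to Float once an exponent or a decimal point followed by digits appears. A rough standalone classifier capturing that rule; classify is invented for illustration and is not part of the lexer:

import std/strutils

proc classify(lit: string): string =
  # Exponent or decimal point => Float, otherwise Integer
  if lit.contains({'.', 'e', 'E'}): "Float" else: "Integer"

assert classify("42") == "Integer"
assert classify("4.2") == "Float"
assert classify("2e10") == "Float"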
@@ -442,7 +442,7 @@ proc parseIdentifier(self: Lexer) =
         self.createToken(reserved[text])
     else:
         # Identifier!
-        self.createToken(TokenType.Identifier)
+        self.createToken(Identifier)

 proc next(self: Lexer) =
@@ -516,6 +516,6 @@ proc lex*(self: Lexer, source, file: string): seq[Token] =
     while not self.done():
         self.next()
         self.start = self.current
-    self.tokens.add(Token(kind: TokenType.EndOfFile, lexeme: "",
-                          line: self.line))
+    self.tokens.add(Token(kind: EndOfFile, lexeme: "",
+                          line: self.line))
     return self.tokens
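For context, a hypothetical driver for the proc in this last hunk. Only lex()'s signature and the trailing EndOfFile token are taken from the diff; newLexer() is assumed and does not appear here:

var lexer = newLexer()
for tok in lexer.lex("var x = 2 + 2;", "<stdin>"):
  echo tok.kind, " '", tok.lexeme, "' (line ", tok.line, ")"
# The final token printed is always EndOfFile with an empty lexeme.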