diff python/libs/compiler/lexer.py @ 63:32078200cdd6

Several move actions
author windel
date Sun, 07 Oct 2012 17:04:10 +0200
parents python/ide/compiler/lexer.py@fd7d5069734e
children
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/libs/compiler/lexer.py	Sun Oct 07 17:04:10 2012 +0200
@@ -0,0 +1,76 @@
+"""
+Lexical analyzer: splits the input character stream into tokens.
+"""
+
+import collections
+import re
+from .errors import CompilerException
+
+# Token produced by the lexical analyzer: type, value and source position (row, col):
+Token = collections.namedtuple('Token', 'typ val row col')
+
+keywords = ['and', 'array', 'begin', 'by', 'case', 'const', 'div', 'do',
+   'else', 'elsif', 'end', 'false', 'for', 'if', 'import', 'in', 'is',
+   'mod', 'module', 'nil', 'not', 'of', 'or', 'pointer', 'procedure',
+   'record', 'repeat', 'return', 'then', 'to', 'true', 'type', 'until', 'var',
+   'while', 'asm']
+
+def tokenize(s):
+     """
+       Tokenizer, generates an iterator that
+       returns tokens!
+
+       This GREAT example was taken from python re doc page!
+     """
+     tok_spec = [
+       ('REAL', r'\d+\.\d+'),
+       ('HEXNUMBER', r'0x[\da-fA-F]+'),
+       ('NUMBER', r'\d+'),
+       ('ID', r'[A-Za-z][A-Za-z\d_]*'),
+       ('NEWLINE', r'\n'),
+       ('SKIP', r'[ \t]'),
+       ('COMMENTS', r'\{.*?\}'),   # non-greedy: a comment ends at the first '}'
+       ('LEESTEKEN', r':=|[\.,=:;\-+*\[\]/\(\)]|>=|<=|<>|>|<'),
+       ('STRING', r"'.*?'")
+     ]
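+     # Combine all patterns into one regex with named groups; mo.lastgroup
+     # later reveals which pattern matched.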
+     tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
+     gettok = re.compile(tok_re).match
+     line = 1
+     pos = line_start = 0
+     mo = gettok(s)
+     while mo is not None:
+       typ = mo.lastgroup
+       val = mo.group(typ)
+       if typ == 'NEWLINE':
+         line_start = mo.end()   # start of the next line, so columns stay zero-based
+         line += 1
+       elif typ == 'COMMENTS':
+         pass
+       elif typ != 'SKIP':
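+         # Keywords are scanned as ID and then promoted to their own token type.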
+         if typ == 'ID':
+           if val in keywords:
+             typ = val
+         elif typ == 'LEESTEKEN':
+           typ = val
+         elif typ == 'NUMBER':
+           val = int(val)
+         elif typ == 'HEXNUMBER':
+           val = int(val[2:], 16)
+           typ = 'NUMBER'
+         elif typ == 'REAL':
+           val = float(val)
+         elif typ == 'STRING':
+           val = val[1:-1]
+         yield Token(typ, val, line, mo.start()-line_start)
+       pos = mo.end()
+       mo = gettok(s, pos)
+     if pos != len(s):
+       col = pos - line_start
+       raise CompilerException('Unexpected character {0}'.format(s[pos]), line, col)
+     yield Token('END', '', line, 0)
+
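
A minimal usage sketch for the module above. It assumes the package is importable
as 'compiler' (the file lives under python/libs/compiler/ and uses a relative
import for its errors module, so the actual top-level package name may differ):

    from compiler.lexer import tokenize

    source = "module test; var x: integer; begin x := 0x1F + 2; end test."
    for tok in tokenize(source):
        # Each token is a namedtuple: (typ, val, row, col).
        print(tok.typ, tok.val, tok.row, tok.col)

Keywords come back with typ set to the keyword itself ('module', 'var', ...),
punctuation with typ set to the symbol (':=', ';', ...), hexadecimal literals
become NUMBER tokens holding their integer value, and a successful scan always
ends with an 'END' token.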