changeset 293:6aa721e7b10b

Try to improve build sequence
author Windel Bouwman
date Thu, 28 Nov 2013 20:39:37 +0100
parents 534b94b40aa8
children e89cca5779b0
files kernel/kernel.c3 kernel/make.py kernel/make.sh kernel/memory.c3 kernel/schedule.c3 python/c3/__init__.py python/c3/analyse.py python/c3/builder.py python/c3/lexer.py python/c3/parser.py python/ppci/__init__.py python/ppci/common.py python/ppci/errors.py python/zcc.py test/testc3.py user/ipc.c3
diffstat 16 files changed, 231 insertions(+), 156 deletions(-) [+]
line wrap: on
line diff
--- a/kernel/kernel.c3	Wed Nov 27 08:06:42 2013 +0100
+++ b/kernel/kernel.c3	Thu Nov 28 20:39:37 2013 +0100
@@ -6,7 +6,7 @@
 import arch;
 
 // Main entry point of the kernel:
-func start()
+function void start()
 {
     process.Init();
     memory.Init();
@@ -18,7 +18,7 @@
 }
 
 
-func panic()
+function void panic()
 {
     arch.halt();
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/make.py	Thu Nov 28 20:39:37 2013 +0100
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+import sys
+import os
+sys.path.insert(0, os.path.join('..', 'python'))
+import zcc
+
+arglist = ['memory.c3', 'kernel.c3', 'syscall.c3']
+arglist += ['--target', 'arm']
+arglist += ['--dumpasm']
+arglist += ['--log', 'debug']
+
+args = zcc.parser.parse_args(arglist)
+zcc.main(args)
--- a/kernel/make.sh	Wed Nov 27 08:06:42 2013 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-../python/zcc.py memory.c3 kernel.c3
-
--- a/kernel/memory.c3	Wed Nov 27 08:06:42 2013 +0100
+++ b/kernel/memory.c3	Thu Nov 28 20:39:37 2013 +0100
@@ -1,9 +1,10 @@
 module memory;
+
 import process;
 
-uint8_t* ptr;
+var uint8_t* ptr;
 
-func uint8_t* Alloc(int size)
+function uint8_t* Alloc(int size)
 {
     ptr += size;
     return ptr;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/schedule.c3	Thu Nov 28 20:39:37 2013 +0100
@@ -0,0 +1,13 @@
+
+module schedule;
+
+function void executeNext()
+{
+    process_t *old;
+
+    if (old != current)
+    {
+        execute(current);
+    }
+}
+
--- a/python/c3/__init__.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/c3/__init__.py	Thu Nov 28 20:39:37 2013 +0100
@@ -1,11 +1,8 @@
+""" This is the C3 language front end. """
 
-"""
-This is the C3 language front end.
-"""
-
-# Convenience imports:
 
 from .parser import Parser
+from .lexer import Lexer
 from .analyse import Analyzer, TypeChecker
 from .codegenerator import CodeGenerator
 from .visitor import Visitor, AstPrinter
--- a/python/c3/analyse.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/c3/analyse.py	Thu Nov 28 20:39:37 2013 +0100
@@ -75,10 +75,10 @@
         self.logger.info('Resolving imports for package {}'.format(pkg.name))
         # Handle imports:
         for i in pkg.imports:
-            ip = packageDict[i]
-            if not ip:
+            if i not in packageDict:
                 self.error('Cannot import {}'.format(i))
                 continue
+            ip = packageDict[i]
             pkg.scope.addSymbol(ip)
         FixRefs(self.diag).fixRefs(pkg)
         return self.ok
--- a/python/c3/builder.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/c3/builder.py	Thu Nov 28 20:39:37 2013 +0100
@@ -19,39 +19,38 @@
 
     def checkSource(self, srcs, imps=[]):
         """ Performs syntax and type check. """
-        packages = {}
-        s_pkgs = []
-        for src in srcs:
-            pkg = self.parser.parseSource(src)
-            if not pkg:
-                self.ok = False
-                continue
-            # Store for later use:
-            packages[pkg.name] = pkg
-            s_pkgs.append(pkg)
-        if imps:
-            for src in imps:
+        iter(srcs)
+        iter(imps)
+        def doParse(srcs):
+            for src in srcs:
                 pkg = self.parser.parseSource(src)
-                if not pkg:
+                if pkg:
+                    yield pkg
+                else:
                     self.ok = False
-                    continue
-                # Store for later use:
-                packages[pkg.name] = pkg
+        s_pkgs = set(doParse(srcs))
+
+        i_pkgs = set(doParse(imps))
+        all_pkgs = s_pkgs | i_pkgs
         # Fix scopes:
-        for pkg in packages.values():
-            if not AddScope(self.diag).addScope(pkg):
-                self.ok = False
+        def doF(f, pkgs):
+            for pkg in pkgs:
+                if f(pkg):
+                    yield pkg
+                else:
+                    self.ok = False
+        all_pkgs = set(doF(AddScope(self.diag).addScope, all_pkgs))
         # TODO: fix error handling better
-        for pkg in packages.values():
-            if not self.al.analyzePackage(pkg, packages):
-                self.ok = False
-                continue
-        for pkg in packages.values():
-            if not self.tc.checkPackage(pkg):
-                self.ok = False
-                continue
-        for pkg in s_pkgs:
-            yield pkg
+        def doA(pkgs):
+            packages = {pkg.name: pkg for pkg in pkgs}
+            for pkg in pkgs:
+                if self.al.analyzePackage(pkg, packages):
+                    yield pkg
+                else:
+                    self.ok = False
+        all_pkgs = set(doA(all_pkgs))
+        all_pkgs = set(doF(self.tc.checkPackage, all_pkgs))
+        return all_pkgs & s_pkgs
 
     def build(self, srcs, imps=[]):
         """ Create IR-code from sources """
--- a/python/c3/lexer.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/c3/lexer.py	Thu Nov 28 20:39:37 2013 +0100
@@ -13,75 +13,81 @@
    'struct', 'cast',
    'import', 'module']
 
-def tokenize(input_file):
-    """
-       Tokenizer, generates an iterator that
-       returns tokens!
+
+class Lexer:
+    def __init__(self, diag):
+        self.diag = diag
 
-       Input is a file like object.
+    def tokenize(self, input_file):
+        """
+           Tokenizer, generates an iterator that
+           returns tokens!
+
+           Input is a file like object.
 
-       This GREAT example was taken from python re doc page!
-    """
-    filename = input_file.name if hasattr(input_file, 'name') else ''
-    s = input_file.read()
-    input_file.close()
-    tok_spec = [
-       ('REAL', r'\d+\.\d+'),
-       ('HEXNUMBER', r'0x[\da-fA-F]+'),
-       ('NUMBER', r'\d+'),
-       ('ID', r'[A-Za-z][A-Za-z\d_]*'),
-       ('NEWLINE', r'\n'),
-       ('SKIP', r'[ \t]'),
-       ('COMMENTS', r'//.*'),
-       ('LONGCOMMENTBEGIN', r'\/\*'),
-       ('LONGCOMMENTEND', r'\*\/'),
-       ('LEESTEKEN', r'==|->|<<|>>|[\.,=:;\-+*\[\]/\(\)]|>=|<=|<>|>|<|{|}|&|\^|\|'),
-       ('STRING', r"'.*?'")
-     ]
-    tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
-    gettok = re.compile(tok_re).match
-    line = 1
-    pos = line_start = 0
-    mo = gettok(s)
-    incomment = False
-    while mo is not None:
-        typ = mo.lastgroup
-        val = mo.group(typ)
-        if typ == 'NEWLINE':
-            line_start = pos
-            line += 1
-        elif typ == 'COMMENTS':
-            pass
-        elif typ == 'LONGCOMMENTBEGIN':
-            incomment = True
-        elif typ == 'LONGCOMMENTEND':
-            incomment = False
-        elif typ == 'SKIP':
-            pass
-        elif incomment:
-            pass # Wait until we are not in a comment section
-        else:
-            if typ == 'ID':
-                if val in keywords:
+           This GREAT example was taken from python re doc page!
+        """
+        filename = input_file.name if hasattr(input_file, 'name') else ''
+        s = input_file.read()
+        input_file.close()
+        self.diag.addSource(filename, s)
+        tok_spec = [
+           ('REAL', r'\d+\.\d+'),
+           ('HEXNUMBER', r'0x[\da-fA-F]+'),
+           ('NUMBER', r'\d+'),
+           ('ID', r'[A-Za-z][A-Za-z\d_]*'),
+           ('NEWLINE', r'\n'),
+           ('SKIP', r'[ \t]'),
+           ('COMMENTS', r'//.*'),
+           ('LONGCOMMENTBEGIN', r'\/\*'),
+           ('LONGCOMMENTEND', r'\*\/'),
+           ('LEESTEKEN', r'==|->|<<|>>|[\.,=:;\-+*\[\]/\(\)]|>=|<=|<>|>|<|{|}|&|\^|\|'),
+           ('STRING', r"'.*?'")
+         ]
+        tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
+        gettok = re.compile(tok_re).match
+        line = 1
+        pos = line_start = 0
+        mo = gettok(s)
+        incomment = False
+        while mo is not None:
+            typ = mo.lastgroup
+            val = mo.group(typ)
+            if typ == 'NEWLINE':
+                line_start = pos
+                line += 1
+            elif typ == 'COMMENTS':
+                pass
+            elif typ == 'LONGCOMMENTBEGIN':
+                incomment = True
+            elif typ == 'LONGCOMMENTEND':
+                incomment = False
+            elif typ == 'SKIP':
+                pass
+            elif incomment:
+                pass # Wait until we are not in a comment section
+            else:
+                if typ == 'ID':
+                    if val in keywords:
+                        typ = val
+                elif typ == 'LEESTEKEN':
                     typ = val
-            elif typ == 'LEESTEKEN':
-                typ = val
-            elif typ == 'NUMBER':
-                val = int(val)
-            elif typ == 'HEXNUMBER':
-                val = int(val[2:], 16)
-                typ = 'NUMBER'
-            elif typ == 'REAL':
-                val = float(val)
-            elif typ == 'STRING':
-                val = val[1:-1]
-            loc = SourceLocation(filename, line, mo.start() - line_start, mo.end() - mo.start())
-            yield Token(typ, val, loc)
-        pos = mo.end()
-        mo = gettok(s, pos)
-    if pos != len(s):
-        col = pos - line_start
-        loc = SourceLocation(filename, line, col, 1)
-        raise CompilerError('Unexpected character "{0}"'.format(s[pos]), loc)
-    loc = SourceLocation(filename, line, 0, 0)
-    yield Token('END', '', loc)
+                elif typ == 'NUMBER':
+                    val = int(val)
+                elif typ == 'HEXNUMBER':
+                    val = int(val[2:], 16)
+                    typ = 'NUMBER'
+                elif typ == 'REAL':
+                    val = float(val)
+                elif typ == 'STRING':
+                    val = val[1:-1]
+                loc = SourceLocation(filename, line, mo.start() - line_start, mo.end() - mo.start())
+                yield Token(typ, val, loc)
+            pos = mo.end()
+            mo = gettok(s, pos)
+        if pos != len(s):
+            col = pos - line_start
+            loc = SourceLocation(filename, line, col, 1)
+            raise CompilerError('Unexpected character "{0}"'.format(s[pos]), loc)
+        loc = SourceLocation(filename, line, 0, 0)
+        yield Token('END', '', loc)
--- a/python/c3/parser.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/c3/parser.py	Thu Nov 28 20:39:37 2013 +0100
@@ -1,5 +1,6 @@
 import logging
-from . import astnodes, lexer
+from .lexer import Lexer
+from . import astnodes
 from ppci import CompilerError
 
 
@@ -8,6 +9,7 @@
     def __init__(self, diag):
         self.logger = logging.getLogger('c3')
         self.diag = diag
+        self.lexer = Lexer(diag)
 
     def parseSource(self, source):
         self.logger.info('Parsing source')
@@ -49,7 +51,7 @@
         return t
 
     def initLex(self, source):
-        self.tokens = lexer.tokenize(source)   # Lexical stage
+        self.tokens = self.lexer.tokenize(source)
         self.token = self.tokens.__next__()
 
     def addDeclaration(self, decl):
--- a/python/ppci/__init__.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/ppci/__init__.py	Thu Nov 28 20:39:37 2013 +0100
@@ -11,4 +11,3 @@
 
 from .common import SourceLocation, SourceRange, Token
 from .errors import CompilerError, DiagnosticsManager
-from .errors import printError
--- a/python/ppci/common.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/ppci/common.py	Thu Nov 28 20:39:37 2013 +0100
@@ -23,7 +23,7 @@
         self.length = ln
 
     def __repr__(self):
-        return '{}, {}'.format(self.row, self.col)
+        return '{}, {}, {}'.format(self.filename, self.row, self.col)
 
 
 SourceRange = namedtuple('SourceRange', ['p1', 'p2'])
--- a/python/ppci/errors.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/ppci/errors.py	Thu Nov 28 20:39:37 2013 +0100
@@ -19,43 +19,21 @@
             self.row = self.col = 0
 
     def __repr__(self):
-        if self.row:
-            return '"{0}" at row {1}'.format(self.msg, self.row)
-        else:
-            return '"{0}"'.format(self.msg)
+        return '"{}"'.format(self.msg)
 
-def printError(source, e):
-    def printLine(row, txt):
-        print(str(row)+':'+txt)
-    if e.row == 0 or True:
-            print('Error: {0}'.format(e.msg))
-    else:
-        lines = source.split('\n')
-        ro, co = e.row, e.col
-        prerow = ro - 2
-        if prerow < 1:
-           prerow = 1
-        afterrow = ro + 3
-        if afterrow > len(lines):
-           afterrow = len(lines)
-
-        # print preceding source lines:
-        for r in range(prerow, ro):
-           printLine(r, lines[r-1])
-        # print source line containing error:
-        printLine(ro, lines[ro-1])
-        print(' '*(len(str(ro)+':')+co-1) + '^ Error: {0}'.format(e.msg))
-        # print trailing source line:
-        for r in range(ro+1, afterrow+1):
-          printLine(r, lines[r-1])
 
 class DiagnosticsManager:
     def __init__(self):
         self.diags = []
+        self.sources = {}
         self.logger = logging.getLogger('diagnostics')
 
+    def addSource(self, name, src):
+        self.logger.info('Adding source {}'.format(name))
+        self.sources[name] = src
+
     def addDiag(self, d):
-        self.logger.info(str(d.msg))
+        self.logger.warning(str(d.msg))
         self.diags.append(d)
 
     def error(self, msg, loc):
@@ -63,13 +41,43 @@
 
     def clear(self):
         del self.diags[:]
+        self.sources.clear()
 
-    def printErrors(self, src):
+    def printErrors(self):
         if len(self.diags) > 0:
             print('==============')
             print('{0} Errors'.format(len(self.diags)))
             for d in self.diags:
                 print('==============')
-                printError(src, d)
+                self.printError(d)
             print('==============')
 
+    def printError(self, e):
+        def printLine(row, txt):
+            print(str(row)+':'+txt)
+
+        if not e.loc:
+            print('Error: {0}'.format(e))
+        else:
+            if e.loc.filename not in self.sources:
+                print('Error: {0}'.format(e))
+                return
+            source = self.sources[e.loc.filename]
+            lines = source.split('\n')
+            ro, co = e.row, e.col
+            prerow = ro - 2
+            if prerow < 1:
+               prerow = 1
+            afterrow = ro + 3
+            if afterrow > len(lines):
+               afterrow = len(lines)
+
+            # print preceding source lines:
+            for r in range(prerow, ro):
+               printLine(r, lines[r-1])
+            # print source line containing error:
+            printLine(ro, lines[ro-1])
+            print(' '*(len(str(ro)+':')+co-1) + '^ Error: {0}'.format(e.msg))
+            # print trailing source line:
+            for r in range(ro+1, afterrow+1):
+              printLine(r, lines[r-1])
--- a/python/zcc.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/python/zcc.py	Thu Nov 28 20:39:37 2013 +0100
@@ -82,13 +82,15 @@
     logging.basicConfig(format=logformat, level=args.log)
     src = args.source
     imps = args.imp
+    if not imps:
+        imps = []
     tg = targets[args.target]
     diag = ppci.DiagnosticsManager()
     outs = outstream.TextOutputStream()
 
     res = zcc(src, imps, tg, outs, diag, dumpir=args.dumpir)
     if not res:
-        diag.printErrors(src)
+        diag.printErrors()
         return 1
 
     if args.dumpasm:
--- a/test/testc3.py	Wed Nov 27 08:06:42 2013 +0100
+++ b/test/testc3.py	Thu Nov 28 20:39:37 2013 +0100
@@ -66,29 +66,37 @@
 """
 
 class testLexer(unittest.TestCase):
+    def setUp(self):
+        diag = ppci.DiagnosticsManager()
+        self.l = c3.Lexer(diag)
+
     def testUnexpectedCharacter(self):
         snippet = io.StringIO(""" var s \u6c34 """)
         with self.assertRaises(ppci.CompilerError):
-            list(c3.lexer.tokenize(snippet))
+            list(self.l.tokenize(snippet))
+
+    def check(self, snippet, toks):
+        toks2 = list(tok.typ for tok in self.l.tokenize(io.StringIO(snippet)))
+        self.assertSequenceEqual(toks, toks2)
 
     def testBlockComment(self):
-        snippet = io.StringIO("""
+        snippet = """
           /* Demo */
           var int x = 0;
-        """)
+        """
         toks = ['var', 'ID', 'ID', '=', 'NUMBER', ';', 'END']
-        self.assertSequenceEqual([tok.typ for tok in c3.lexer.tokenize(snippet)], toks)
+        self.check(snippet, toks)
 
     def testBlockCommentMultiLine(self):
-        snippet = io.StringIO("""
+        snippet = """
           /* Demo
           bla1
           bla2
           */
           var int x = 0;
-        """)
+        """
         toks = ['var', 'ID', 'ID', '=', 'NUMBER', ';', 'END']
-        self.assertSequenceEqual([tok.typ for tok in c3.lexer.tokenize(snippet)], toks)
+        self.check(snippet, toks)
 
 
 class testBuilder(unittest.TestCase):
@@ -141,6 +149,12 @@
         """
         self.expectOK([io.StringIO(s) for s in (p1, p2)])
 
+    def testPackageNotExists(self):
+        p1 = """module p1;
+        import p23;
+        """
+        self.expectOK([io.StringIO(p1)])
+
     def testFunctArgs(self):
         snippet = """
          module testargs;
@@ -295,6 +309,17 @@
         """
         self.expectOK(snippet)
 
+    def testStructCall(self):
+        snippet = """
+         module teststruct1;
+         function void t()
+         {
+            var struct {int x, y;} a;
+            a.b.c(9);
+         }
+        """
+        self.expectOK(snippet)
+
     def testPointerType1(self):
         snippet = """
          module testpointer1;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/user/ipc.c3	Thu Nov 28 20:39:37 2013 +0100
@@ -0,0 +1,13 @@
+
+module ipc;
+
+function void SendMessage(Msg *msg)
+{
+    kernelTrap(MSG_SEND, msg);
+}
+
+function void RecvMessage(Msg msg)
+{
+    kernelTrap(MSG_RECV, msg);
+}
+