Browse Source

Simpler handling of tests expected to fail. Added race condition test.

Joeri Exelmans 5 years ago
parent
commit
80d532b9a3

+ 4 - 3
src/sccd/execution/storage.py

@@ -20,8 +20,8 @@ class DirtyStorage:
     # Dirty storage: Values copied to clean storage when rotated.
     self.dirty = list(self.clean)
 
-    self.temp_dirty = Bitmap() # dirty values written by current transition
-    self.round_dirty = Bitmap()
+    self.temp_dirty = Bitmap() # dirty values written by the current transition; these must be read from dirty storage (as RHS values) while the transition's actions are still executing.
+    self.round_dirty = Bitmap() # tracks the values that must be copied to clean storage at the end of the round
 
     # Storage for local scope values. No values ever copied from here to 'clean' storage
     self.temp_stack = [None]*1024
@@ -43,10 +43,11 @@ class DirtyStorage:
       else:
         return self.clean[offset]
 
+  # Called after a block has executed.
   def flush_temp(self):
     race_conditions = self.temp_dirty & self.round_dirty
     if race_conditions:
-        raise Exception("Race condition for variables %s" % str(list(self.storage.scope.name(offset) for offset in race_conditions.items())))
+        raise Exception("Race condition for variables %s" % str(list(self.storage.scope.get_name(offset) for offset in race_conditions.items())))
 
     self.round_dirty |= self.temp_dirty
     self.temp_dirty = Bitmap()

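A note on the bitmap check above: temp_dirty collects the offsets written by the transition currently executing, round_dirty accumulates everything written earlier in the same round, and a non-empty intersection at flush time means two transitions wrote the same variable. A minimal stand-alone sketch of that idea, assuming Bitmap behaves like an integer bitmask (RaceDetector and its methods are hypothetical names, not SCCD API):

    # Hypothetical, simplified stand-in for DirtyStorage's race detection.
    class RaceDetector:
        def __init__(self):
            self.temp_dirty = 0   # offsets written by the transition currently executing
            self.round_dirty = 0  # offsets written earlier in the same round

        def write(self, offset: int):
            self.temp_dirty |= (1 << offset)

        def flush_temp(self):
            # Called after a transition's actions have executed.
            conflicts = self.temp_dirty & self.round_dirty
            if conflicts:
                offsets = [i for i in range(conflicts.bit_length()) if conflicts & (1 << i)]
                raise Exception("Race condition for offsets %s" % offsets)
            self.round_dirty |= self.temp_dirty
            self.temp_dirty = 0

    # Two transitions writing offset 0 in the same round trigger the exception:
    d = RaceDetector()
    d.write(0); d.flush_temp()   # first transition: fine
    d.write(0); d.flush_temp()   # second transition: raises the race condition error
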
+ 1 - 1
src/sccd/parser/statechart_parser.py

@@ -404,7 +404,7 @@ class StatechartParser(TreeParser):
     ext_file = el.get("src")
     statechart = None
     if ext_file is None:
-      statechart = Statechart(tree=None, semantics=Semantics(), scope=Scope("instance", wider_scope=statechart_state.builtin_scope))
+      statechart = Statechart(tree=None, semantics=Semantics(), scope=Scope("instance", parent_scope=statechart_state.builtin_scope))
     elif self.load_external:
       ext_file_path = os.path.join(os.path.dirname(src_file), ext_file)
       self.statecharts.push([])

+ 10 - 10
src/sccd/syntax/scope.py

@@ -14,11 +14,11 @@ class Variable:
 
 # Stateless stuff we know about a scope (= set of variable names)
 class Scope:
-  def __init__(self, name: str, wider_scope: 'Scope'):
+  def __init__(self, name: str, parent_scope: 'Scope'):
     self.name = name
-    self.wider_scope = wider_scope
-    if wider_scope:
-      self.start_offset = wider_scope.start_offset + len(wider_scope.names)
+    self.parent_scope = parent_scope
+    if parent_scope:
+      self.start_offset = parent_scope.start_offset + len(parent_scope.names)
     else:
       self.start_offset = 0
     self.names: Dict[str, Variable] = {}
@@ -28,23 +28,23 @@ class Scope:
     return self.start_offset + len(self.names)
 
   def all(self):
-    if self.wider_scope:
-      return itertools.chain(self.wider_scope.all(), self.names.items())
+    if self.parent_scope:
+      return itertools.chain(self.parent_scope.all(), self.names.items())
     else:
       return self.names.items()
 
-  def name(self, offset):
+  def get_name(self, offset):
     if offset >= self.start_offset:
       return self.variables[offset - self.start_offset]
     else:
-      return self.wider_scope.name(offset)
+      return self.parent_scope.get_name(offset)
 
   def _internal_lookup(self, name: str) -> Optional[Tuple['Scope', Variable]]:
     try:
       return (self, self.names[name])
     except KeyError:
-      if self.wider_scope is not None:
-        return self.wider_scope._internal_lookup(name)
+      if self.parent_scope is not None:
+        return self.parent_scope._internal_lookup(name)
 
   def assert_available(self, name: str):
     found = self._internal_lookup(name)

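The rename from wider_scope to parent_scope does not change the offset scheme: a child scope starts numbering its variables where its parent left off, and lookups fall back along the parent chain. A self-contained sketch of that scheme, under the assumption that this is the relevant behaviour (MiniScope, declare and lookup are illustrative names only, not the library API):

    from typing import Dict, Optional

    class MiniScope:
        # Illustrative only; mirrors the offset/fallback idea of Scope above.
        def __init__(self, name: str, parent_scope: Optional['MiniScope'] = None):
            self.name = name
            self.parent_scope = parent_scope
            self.start_offset = (parent_scope.start_offset + len(parent_scope.names)
                                 if parent_scope else 0)
            self.names: Dict[str, int] = {}  # variable name -> absolute offset

        def declare(self, name: str) -> int:
            offset = self.start_offset + len(self.names)
            self.names[name] = offset
            return offset

        def lookup(self, name: str) -> Optional[int]:
            if name in self.names:
                return self.names[name]
            return self.parent_scope.lookup(name) if self.parent_scope else None

    builtin = MiniScope("builtin")
    assert builtin.declare("in_state") == 0
    globals_ = MiniScope("globals", parent_scope=builtin)
    assert globals_.declare("x") == 1          # numbering continues after the parent
    assert globals_.lookup("in_state") == 0    # lookup falls back to the parent scope
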
+ 3 - 3
src/sccd/syntax/test_scope.py

@@ -6,21 +6,21 @@ class TestScope(unittest.TestCase):
 
   def test_scope(self):
     
-    builtin = Scope("builtin", wider_scope=None)
+    builtin = Scope("builtin", parent_scope=None)
 
     # Lookup LHS value (creating it in the current scope if not found)
 
     variable = builtin.put("in_state", Callable[[List[str]], bool])
     self.assertEqual(variable.offset, 0)
 
-    globals = Scope("globals", wider_scope=builtin)
+    globals = Scope("globals", parent_scope=builtin)
     variable = globals.put("x", int)
     self.assertEqual(variable.offset, 1)
 
     variable = globals.put("in_state", Callable[[List[str]], bool])
     self.assertEqual(variable.offset, 0)
 
-    local = Scope("local", wider_scope=globals)
+    local = Scope("local", parent_scope=globals)
     variable = local.put("x", int)
     self.assertEqual(variable.offset, 1)
 

+ 100 - 87
test/lib/test.py

@@ -2,103 +2,116 @@ import unittest
 from dataclasses import *
 from sccd.model.model import *
 from sccd.controller.controller import *
+from lib.test_parser import *
 
 import threading
 import queue
 
-TestInput = List[InputEvent]
-TestOutput = List[List[Event]]
-
 class Test(unittest.TestCase):
-  def __init__(self, name: str, model: AbstractModel, input: TestInput, output: TestOutput):
+  def __init__(self, src: str):
     super().__init__()
-    self.name = name
-    self.model = model
-    self.input = input
-    self.output = output
+    self.src = src
 
   def __str__(self):
-    return self.name
+    return self.src
 
   def runTest(self):
-    pipe = queue.SimpleQueue()
-    interrupt = queue.SimpleQueue()
-
-    controller = Controller(self.model)
-
-    for i in self.input:
-      controller.add_input(i)
-
-    def controller_thread():
-      try:
-        # Run as-fast-as-possible, always advancing time to the next item in event queue, no sleeping.
-        # The call returns when the event queue is empty and therefore the simulation is finished.
-        controller.run_until(None, pipe, interrupt)
-      except Exception as e:
-        pipe.put(e, block=True, timeout=None)
-        return
-      # Signal end of output
-      pipe.put(None, block=True, timeout=None)
-
-    # start the controller
-    thread = threading.Thread(target=controller_thread)
-    thread.start()
-
-    # check output
-    expected = self.output
-    actual = []
-
-    def fail(msg, kill=False):
-      if kill:
-        interrupt.put(None)
-      thread.join()
-      def repr(output):
-        return '\n'.join("%d: %s" % (i, str(big_step)) for i, big_step in enumerate(output))
-      self.fail(msg + "\n\nActual:\n" + repr(actual) + ("\n(possibly more output, instance killed)" if kill else "") + "\n\nExpected:\n" + repr(expected))
-
-    while True:
-      data = pipe.get(block=True, timeout=None)
-
-      if isinstance(data, Exception):
-        raise data # Exception was caught in Controller thread, throw it here instead.
-
-      elif data is None:
-        # End of output
-        if len(actual) < len(expected):
-          fail("Less output than expected.")
-        else:
+    parser = TestParser()
+    parser.tests.push([])
+    try:
+      statechart = parser.parse(self.src)
+    except Exception as e:
+      print_debug(e)
+      raise
+    test_variants = parser.tests.pop()
+
+    for test in test_variants:
+      print_debug('\n'+test.name)
+      pipe = queue.SimpleQueue()
+      interrupt = queue.SimpleQueue()
+
+      controller = Controller(test.model)
+
+      for i in test.input:
+        controller.add_input(i)
+
+      def controller_thread():
+        try:
+          # Run as-fast-as-possible, always advancing time to the next item in event queue, no sleeping.
+          # The call returns when the event queue is empty and therefore the simulation is finished.
+          controller.run_until(None, pipe, interrupt)
+        except Exception as e:
+          print_debug(e)
+          pipe.put(e, block=True, timeout=None)
           return
+        # Signal end of output
+        pipe.put(None, block=True, timeout=None)
+
+      # start the controller
+      thread = threading.Thread(target=controller_thread)
+      thread.start()
+
+      # check output
+      expected = test.output
+      actual = []
+
+      def fail(msg, kill=False):
+        if kill:
+          interrupt.put(None)
+        thread.join()
+        def repr(output):
+          return '\n'.join("%d: %s" % (i, str(big_step)) for i, big_step in enumerate(output))
+        self.fail('\n'+test.name + '\n'+msg + "\n\nActual:\n" + repr(actual) + ("\n(possibly more output, instance killed)" if kill else "") + "\n\nExpected:\n" + repr(expected))
+
+      while True:
+        data = pipe.get(block=True, timeout=None)
+
+        if isinstance(data, Exception):
+          raise data # Exception was caught in Controller thread, throw it here instead.
+
+        elif data is None:
+          # End of output
+          if len(actual) < len(expected):
+            fail("Less output than expected.")
+          else:
+            break
 
-      else:
-        big_step = data
-        big_step_index = len(actual)
-        actual.append(big_step)
-
-        if len(actual) > len(expected):
-          fail("More output than expected.", kill=True)
-
-        actual_bag = actual[big_step_index]
-        expected_bag = expected[big_step_index]
-
-        if len(actual_bag) != len(expected_bag):
-          fail("Big step %d: output differs." % big_step_index, kill=True)
-
-        # Sort both expected and actual lists of events before comparing.
-        # In theory the set of events at the end of a big step is unordered.
-        key_f = lambda e: "%s.%s"%(e.port, e.name)
-        actual_bag.sort(key=key_f)
-        expected_bag.sort(key=key_f)
-
-        for (act_event, exp_event) in zip(actual_bag, expected_bag):
-          matches = True
-          if exp_event.name != act_event.name :
-            matches = False
-          if exp_event.port != act_event.port :
-            matches = False
-          if len(exp_event.parameters) != len(act_event.parameters) :
-            matches = False
-          for index in range(len(exp_event.parameters)) :
-            if exp_event.parameters[index] !=  act_event.parameters[index]:
-              matches = False
-          if not matches:
+        else:
+          big_step = data
+          big_step_index = len(actual)
+          actual.append(big_step)
+
+          if len(actual) > len(expected):
+            fail("More output than expected.", kill=True)
+
+          actual_bag = actual[big_step_index]
+          expected_bag = expected[big_step_index]
+
+          if len(actual_bag) != len(expected_bag):
             fail("Big step %d: output differs." % big_step_index, kill=True)
+
+          # Sort both expected and actual lists of events before comparing.
+          # In theory the set of events at the end of a big step is unordered.
+          key_f = lambda e: "%s.%s"%(e.port, e.name)
+          actual_bag.sort(key=key_f)
+          expected_bag.sort(key=key_f)
+
+          for (act_event, exp_event) in zip(actual_bag, expected_bag):
+            matches = True
+            if exp_event.name != act_event.name :
+              matches = False
+            if exp_event.port != act_event.port :
+              matches = False
+            if len(exp_event.parameters) != len(act_event.parameters) :
+              matches = False
+            for index in range(len(exp_event.parameters)) :
+              if exp_event.parameters[index] !=  act_event.parameters[index]:
+                matches = False
+            if not matches:
+              fail("Big step %d: output differs." % big_step_index, kill=True)
+
+
+class FailingTest(Test):
+  @unittest.expectedFailure
+  def runTest(self):
+    super().runTest()

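FailingTest replaces the old PseudoSucceededTest/PseudoFailedTest machinery with the standard unittest.expectedFailure decorator: any exception raised while parsing or running a "fail_" test is reported as an expected failure, and a test that passes anyway shows up as an unexpected success. A minimal, self-contained illustration of that pattern (the Example* class names are made up for this note):

    import unittest

    class ExampleTest(unittest.TestCase):
        def runTest(self):
            # Stand-in for parsing and running a model; here it always fails.
            raise Exception("model failed to load")

    class ExampleFailingTest(ExampleTest):
        @unittest.expectedFailure
        def runTest(self):
            super().runTest()

    # The runner reports "expected failure" instead of an error:
    suite = unittest.TestSuite([ExampleFailingTest()])
    unittest.TextTestRunner(verbosity=2).run(suite)
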
+ 10 - 3
test/lib/test_parser.py

@@ -4,6 +4,13 @@ from sccd.parser.expression_parser import *
 from lib.test import *
 from copy import deepcopy
 
+@dataclass
+class TestVariant:
+  name: str
+  model: Any
+  input: list
+  output: list
+
 # Parses <test> element and all its children (including <statechart>)
 class TestParser(StatechartParser):
 
@@ -89,12 +96,12 @@ class TestParser(StatechartParser):
     def variant_description(i, variant) -> str:
       if not variant:
         return ""
-      return " (variant %d: %s)" % (i, ", ".join(str(val) for val in variant.values()))
+      return "Variant %d: %s" % (i, ", ".join(str(val) for val in variant.values()))
 
     # Generate test variants for all semantic wildcards filled in
     tests.extend( 
-      Test(
-        name=src_file + variant_description(i, variant),
+      TestVariant(
+        name=variant_description(i, variant),
         model=SingleInstanceModel(
           globals,
           Statechart(tree=statechart.tree, scope=statechart.scope, semantics=dataclasses.replace(statechart.semantics, **variant))),

+ 5 - 44
test/test.py

@@ -1,34 +1,9 @@
 import argparse
 import unittest
 from lib.os_tools import *
-from lib.test_parser import *
+from lib.test import *
 from sccd.util.debug import *
 
-class PseudoSucceededTest(unittest.TestCase):
-  def __init__(self, name: str, msg):
-    super().__init__()
-    self.name = name
-    self.msg = msg
-
-  def __str__(self):
-    return self.name
-
-  def runTest(self):
-    print_debug(self.msg)
-
-class PseudoFailedTest(unittest.TestCase):
-  def __init__(self, name: str, e: Exception):
-    super().__init__()
-    self.name = name
-    self.e = e
-
-  def __str__(self):
-    return self.name
-
-  def runTest(self):
-    raise self.e
-
-
 if __name__ == '__main__':
     argparser = argparse.ArgumentParser(
         description="Run SCCD tests.",
@@ -49,25 +24,11 @@ if __name__ == '__main__':
     suite = unittest.TestSuite()
 
     for src_file in src_files:
-        parser = TestParser()
         should_fail = os.path.basename(src_file).startswith("fail_")
 
-        try:
-            parser.tests.push([])
-            statechart = parser.parse(src_file)
-            tests = parser.tests.pop()
-
-            if should_fail:
-                suite.addTest(PseudoFailedTest(name=src_file, e=Exception("Unexpectedly succeeded at loading.")))
-            else:
-                for test in tests:
-                    suite.addTest(test)
-
-        except Exception as e:
-            if should_fail:
-                suite.addTest(PseudoSucceededTest(name=src_file, msg=str(e)))
-            else:
-                suite.addTest(PseudoFailedTest(name=src_file, e=e))
-
+        if should_fail:
+            suite.addTest(FailingTest(src_file))
+        else:
+            suite.addTest(Test(src_file))
 
     unittest.TextTestRunner(verbosity=2).run(suite)

+ 39 - 0
test/test_files/semantics/memory_protocol/fail_race_condition.xml

@@ -0,0 +1,39 @@
+<?xml version="1.0" ?>
+<test>
+  <statechart>
+    <semantics
+      big_step_maximality="take_one"
+      input_event_lifeline="whole"
+      assignment_memory_protocol="big_step"/>
+    <datamodel>
+      <var id="x" expr="0"/>
+    </datamodel>
+    <tree>
+      <state>
+        <parallel id="p">
+          <state id="a">
+            <state id="a1">
+              <transition port="in" event="e" target=".">
+                <code> x += 1; </code>
+              </transition>
+            </state>
+          </state>
+
+          <state id="b">
+            <state id="a1">
+              <transition port="in" event="e" target=".">
+                <code> x += 1; </code>
+              </transition>
+            </state>
+          </state>
+        </parallel>
+      </state>
+    </tree>
+  </statechart>
+  <input>
+    <input_event port="in" name="e" time="0 d"/>
+  </input>
+  <output>
+
+  </output>
+</test>