View source code

Port notch filter operations

Arkadiusz Ryś 1 year ago
parent
commit
79c213631d

+ 8 - 67
mocka/routers/notch.py

@@ -7,68 +7,8 @@ import magic
 import requests
 from fastapi import APIRouter, Query, Request, Response
 from fastapi.responses import JSONResponse
-from OMPython import OMCSessionZMQ, ModelicaSystem
-from datetime import datetime
-import struct
-import pyarrow
-from pyarrow import parquet
-
 from mocka.artefact import Artefact
 
-
-def single_simulation_om_python(model_file_path, test_frequency, test_amplitude, sampling_ratio, tolerance, data_cycles, output_stabilisation_time, output):
-    arklog.debug(f"Performing simulation")
-    arklog.debug(f"\t{test_frequency=}")
-    arklog.debug(f"\t{test_amplitude=}")
-    arklog.debug(f"\t{sampling_ratio=}")
-    sampling_frequency = test_frequency * sampling_ratio
-    omc = OMCSessionZMQ()
-    model = ModelicaSystem(str(model_file_path),model_file_path.stem)
-    # model.buildModel("(V_in.v)|(V_out.v)")
-    model.buildModel()
-    model.setSimulationOptions([
-        f"stepSize={1/sampling_frequency}",
-        f"tolerance={tolerance}",
-        f"startTime={output_stabilisation_time}",
-        f"stopTime={output_stabilisation_time + (data_cycles / test_frequency)}",
-    ])
-    model.setParameters([
-        f"Vt={test_amplitude}",
-        f"Ft={test_frequency}",
-    ])
-    model.simulate()
-    samples = model.getSolutions(output)
-    # for fileName in os.listdir():
-    #     if re.search("notchFilter.*", fileName) and fileName != "notchFilter.mo":
-    #         os.remove(fileName)
-    return save_data(samples, test_frequency, test_amplitude, sampling_ratio, model_file_path)
-
-
-def save_data(samples, test_frequency, test_amplitude, sampling_ratio, file_path):
-    time = datetime.timestamp(datetime.now())
-    common_column_metadata = {
-        "test_frequency": struct.pack("d", test_frequency),
-        "test_amplitude": struct.pack("d", test_amplitude),
-        "sampling_ratio": struct.pack("d", sampling_ratio),
-        "time": struct.pack("d", time),
-        "type": b"input"
-    }
-    input_field = pyarrow.field(f"i_{time}", pyarrow.float64(), False, metadata=common_column_metadata)
-    common_column_metadata["type"]=b"output"
-    output_field = pyarrow.field(f"o_{time}", pyarrow.float64(), False, metadata=common_column_metadata)
-    common_column_metadata["type"]=b"time"
-    time_field = pyarrow.field(f"ft_{time}",pyarrow.float64(), False, metadata=common_column_metadata)
-    # TODO In FTGPM terms we don't want to append to existing experiments
-    storage_file_path = Path(__file__).parent.parent.parent / Path("data") / Path(f"{file_path.stem}_results.parquet")
-    if storage_file_path.exists():
-        experiment_data = parquet.read_table(str(storage_file_path))
-        experiment_data = experiment_data.append_column(time_field, [samples[0]]).append_column(output_field, [samples[1]]).append_column(input_field, [samples[2]])
-    else:
-        experiment_data = pyarrow.Table.from_arrays(samples, schema=pyarrow.schema([time_field, input_field, output_field]))
-    parquet.write_table(experiment_data,str(storage_file_path))
-    return storage_file_path
-
-
 class NotchRouter(APIRouter):
     """"""
 
@@ -113,18 +53,19 @@ class NotchRouter(APIRouter):
             arklog.debug(f"{data_cycles=}")
             arklog.debug(f"{output_stabilisation_time=}")
             arklog.debug(f"{output=}")
-            artefact_file_path = single_simulation_om_python(model_file_path, frequency, amplitude, sampling_ratio, tolerance,data_cycles,output_stabilisation_time, output)
-            requests.put(f"http://localhost:5000/files/file/{artefact_file_path.name}", data=artefact_file_path.read_bytes())
-            mime = magic.Magic(mime=True).from_file(artefact_file_path)
-            simulation_artefact = Artefact("reference", f"http://localhost:5000/files/file/{artefact_file_path.name}", artefact_file_path.name, mime)
-            return JSONResponse(status_code=200, content={"ctrl": "ok", "output": {"experiment": simulation_artefact.as_dict()}})
+            # artefact_file_path = single_simulation_om_python(model_file_path, frequency, amplitude, sampling_ratio, tolerance,data_cycles,output_stabilisation_time, output)
+            # requests.put(f"http://localhost:5000/files/file/{artefact_file_path.name}", data=artefact_file_path.read_bytes())
+            # mime = magic.Magic(mime=True).from_file(artefact_file_path)
+            # simulation_artefact = Artefact("reference", f"http://localhost:5000/files/file/{artefact_file_path.name}", artefact_file_path.name, mime)
+            # return JSONResponse(status_code=200, content={"ctrl": "ok", "output": {"experiment": simulation_artefact.as_dict()}})
+            return JSONResponse(status_code=200, content={"ctrl": "ok", "output": {"experiment": {}}})
 
         @self.put("/simulation/")
         async def simulation_put_notch(request: Request, query: str | None = Query(None)) -> Response:
             return await simulation_notch(request, query)
 
-        @self.post("/simulation/")
-        async def simulation_post_notch(request: Request, query: str | None = Query(None)) -> Response:
+        @self.get("/collect/")
+        async def collect_notch(request: Request, query: str | None = Query(None)) -> Response:
             return await simulation_notch(request, query)
 
         @self.get("/validation/")

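The simulation path is stubbed out in this commit, so the new GET /collect/ route currently answers with an empty experiment payload. A hypothetical smoke test; the mount prefix, port, and query payload are assumptions, not taken from this diff:

    import requests

    # "/notch" prefix, port 8000, and the query spec are all assumptions.
    response = requests.get("http://localhost:8000/notch/collect/", params={"query": "<simulation spec>"})
    print(response.json())  # while stubbed: {"ctrl": "ok", "output": {"experiment": {}}}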
+ 101 - 0
mocka/routers/operations/collect.py

@@ -0,0 +1,101 @@
+import os
+
+import numpy
+from pyarrow import Schema, parquet
+from tqdm import tqdm
+
+import data
+import metadata
+import perform
+import process
+
+
+def collect_traces(test_frequency: float,
+                   test_amplitudes: numpy.ndarray[float],
+                   sampling_ratios: numpy.ndarray[float],
+                   systems: list) \
+    -> [list[numpy.ndarray[float]], list[str], list[numpy.ndarray[float]]]:
+    ratios_traces = []
+    legend = []
+    timescale = []
+    for sampling_ratio in tqdm(sampling_ratios, desc="evaluating traces at sampling ratios", leave=False):
+        traces = []
+        for system in tqdm(systems, desc="evaluating traces from systems", leave=False):
+            for test_amplitude in tqdm(test_amplitudes, desc="evaluating traces at amplitudes", leave=False):
+                trace_time, trace = data.query_trace(test_frequency, test_amplitude, sampling_ratio, system)
+                if trace is None:
+                    print("found no trace! (f: " + str(test_frequency) + "Hz, A: " + str(test_amplitude) + "V, R: " +
+                          str(sampling_ratio) + "), performing experimental activity")
+                    trace_time, trace = perform.perform_experimental_activity(system,
+                                                                              numpy.array([test_frequency]),
+                                                                              numpy.array([test_amplitude]),
+                                                                              numpy.array([sampling_ratio]))
+                traces.append(trace)
+                legend.append(
+                    "output: " + system + ": f:" + str(test_frequency) + "Hz, A:" + str(test_amplitude) + "V, R:" + str(
+                        sampling_ratio))
+                legend.append(
+                    "input: " + system + ": f:" + str(test_frequency) + "Hz, A:" + str(test_amplitude) + "V, R:" + str(
+                        sampling_ratio))
+        ratios_traces.append(numpy.array(traces))
+        timescale.append(process.generate_time_data(test_frequency, sampling_ratio))
+    return [ratios_traces, legend, timescale]
+
+
+def collect_poi_traces(test_frequencies: numpy.ndarray[float],
+                       test_amplitudes: numpy.ndarray[float],
+                       sampling_ratios: numpy.ndarray[int],
+                       systems: list[str],
+                       poi: str,
+                       poi_methods: list[str]) \
+    -> [numpy.ndarray[float], list[str]]:
+    traces = []
+    legend = []
+    for system in tqdm(systems, desc="evaluating pois of different systems", leave=False):
+        for sampling_ratio in tqdm(sampling_ratios, desc="evaluating pois at different sampling ratios", leave=False):
+            for test_amplitude in tqdm(test_amplitudes, desc="evaluating pois at different amplitudes", leave=False):
+                method_traces = []
+                for test_frequency in tqdm(test_frequencies, desc="evaluating pois at different frequencies",
+                                           leave=False):
+                    pois: numpy.ndarray[float] = data.query_pois(test_frequency, test_amplitude, sampling_ratio, system,
+                                                                 poi, poi_methods)
+                    if pois is None:
+                        print("found no pois! (f: " + str(test_frequency) + "Hz, A: " + str(test_amplitude) + "V, R: " +
+                              str(sampling_ratio) + "), checking if trace exists")
+                        trace_time, trace = data.query_trace(test_frequency, test_amplitude, sampling_ratio, system)
+                        if trace_time is None:
+                            print("found no trace! (f: " + str(test_frequency) + "Hz, A: " + str(
+                                test_amplitude) + "V, R: " +
+                                  str(sampling_ratio) + "), initializing experimental activity")
+                            trace_time, trace = perform.perform_experimental_activity(system,
+                                                                                      numpy.array([test_frequency]),
+                                                                                      numpy.array([test_amplitude]),
+                                                                                      numpy.array([sampling_ratio]))
+                        pois = process.compute_all_pois(trace, sampling_ratio, test_frequency, test_amplitude,
+                                                        trace_time, system, save=True)
+                    method_traces.append(pois)
+                traces.append(numpy.transpose(method_traces))
+                for poi_method in poi_methods:
+                    legend.append(
+                        system + ": A:" + str(test_amplitude) + ", R:" + str(sampling_ratio) + ", method=" + poi_method)
+    return [numpy.concatenate(traces, axis=0), legend]
+
+
+def collect_trace_metadata(system: str) \
+    -> [numpy.ndarray[float], numpy.ndarray[float], numpy.ndarray[int]]:
+    test_frequencies: list[float] = []
+    test_amplitudes: list[float] = []
+    sampling_ratios: list[int] = []
+    for amplitude_dir in os.listdir(system + "/"):
+        if os.path.isdir(system + "/" + amplitude_dir):
+            for file_name in os.listdir(system + "/" + amplitude_dir + "/"):
+                if not file_name.startswith(data.POI_FILE_PREPEND):
+                    trace_file_name: str = system + "/" + amplitude_dir + "/" + file_name
+                    schema: Schema = parquet.read_schema(trace_file_name)
+                    for data_index in range(0, parquet.read_table(trace_file_name).num_columns, 2):
+                        test_frequency, test_amplitude, sampling_ratio, time = metadata.get_column_metadata(
+                            schema.field(data_index).metadata)
+                        test_frequencies.append(test_frequency)
+                        test_amplitudes.append(test_amplitude)
+                        sampling_ratios.append(sampling_ratio)
+    return [numpy.array(test_frequencies), numpy.array(test_amplitudes), numpy.array(sampling_ratios)]

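A minimal sketch of driving collect_traces directly, assuming the operations modules are importable and traces for these points already exist on disk (otherwise perform.perform_experimental_activity is invoked, which needs the hardware or OpenModelica):

    import numpy
    import collect

    ratios_traces, legend, timescale = collect.collect_traces(
        test_frequency=1000.0,                    # Hz, illustrative
        test_amplitudes=numpy.array([1.0, 2.0]),  # V
        sampling_ratios=numpy.array([200]),       # samples per signal period
        systems=["Experiment"],
    )
    # one entry in ratios_traces/timescale per sampling ratio; legend holds
    # an output and an input label per collected trace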
+ 41 - 0
mocka/routers/operations/context.py

@@ -0,0 +1,41 @@
+# systems
+MODELICA_FILE_NAME: str = "NotchFilter.mo"
+MODELICA_PACKAGE_NAME: str = "NotchFilter"
+SYSTEMS: list = ["Experiment",
+                 "IdealNotchFilterModel",
+                 "NonIdealNotchFilterModel"]
+
+# environment
+INPUT_TYPE: str = "sin"
+INPUT_OFFSET: float = 0.0
+DEVICE_TYPE: str = "AD2"
+DEVICE_SN: str = ""
+DEVICE_MANUFACTURER: str = "Digilent"
+# TEST_FREQUENCY: float
+# TEST_AMPLITUDE: float
+
+# workflow
+OUTPUT_STABILISATION_TIME: float = 1.0
+SOLVER: str = "DASSL"
+PYDWF_VERSION: str = "1.1.19"
+MODELICA_VERSION: str = "1.22.0"
+PYTHON_VERSION: str = "3.11"
+
+# measurement
+DATA_CYCLES: int = 2
+SAMPLING_RATIO: int  # declared only; the ratio is chosen per experimental run
+# SAMPLING_FREQUENCY: float = SAMPLING_RATIO * TEST_FREQUENCY
+DECIMATION_FILTER: str = "average"
+TRIGGER_SOURCE_CHANNEL: int = 1
+TRIGGER_TYPE: str = "edge"
+TRIGGER_CONDITION: str = "rise"
+TRIGGER_LEVEL: float = 0.0
+TRIGGER_POSITION: float = 0.0  # time of first sample relative to trigger time
+TRIGGER_HYSTERESIS: float = 0.01
+CHANNEL_SIGNAL_MAPPING: dict = {0: "output", 1: "input"}
+SOLVER_TOLERANCE: float = 1e-9  # but rounded to 8 decimal digits
+
+# assumptions
+TEMPERATURE: float = 27
+ELECTROMAGNETIC_FIELD: float = 0.0
+GROUND_EFFECT: float = 0.0

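SAMPLING_RATIO is only annotated here; the commented-out SAMPLING_FREQUENCY line records the intended relationship, which the other modules compute per run. A sketch with illustrative numbers:

    test_frequency = 1000.0   # Hz, chosen per experiment
    sampling_ratio = 200      # chosen per experiment; context only declares the name
    sampling_frequency = test_frequency * sampling_ratio  # as in experiment.py and simulate.py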
+ 197 - 0
mocka/routers/operations/data.py

@@ -0,0 +1,197 @@
+import logging
+import os
+import re
+import signal
+import sys
+from datetime import datetime
+from os import path
+import time
+
+import numpy
+import pyarrow
+from pyarrow import parquet, Schema, Field, Table
+from tqdm import tqdm
+
+import context
+import metadata
+
+PARQUET_FILE_EXTENSION: str = ".parquet"
+POI_FILE_PREPEND: str = "poi"
+STOP: bool = False
+
+
+def query_experiment_trace_time(test_frequency: float,
+                                test_amplitude: float,
+                                sampling_ratio: int) \
+        -> float | None:
+    return query_trace_time_from_file(test_frequency, test_amplitude, sampling_ratio, context.SYSTEMS[0] + "/" + str(test_amplitude) + "/" + str(sampling_ratio) + PARQUET_FILE_EXTENSION)
+
+
+def query_simulation_trace_time(test_frequency: float,
+                                test_amplitude: float,
+                                sampling_ratio: int,
+                                model_name: str) \
+        -> float | None:
+    return query_trace_time_from_file(test_frequency, test_amplitude, sampling_ratio, model_name + "/" + str(test_amplitude) + "/" + str(sampling_ratio) + PARQUET_FILE_EXTENSION)
+
+
+def query_poi_trace_time(test_frequency: float,
+                         test_amplitude: float,
+                         sampling_ratio: int,
+                         system: str) \
+        -> float | None:
+    return query_trace_time_from_file(test_frequency, test_amplitude, sampling_ratio,
+                                      system + "/" + str(test_amplitude) + "/" + POI_FILE_PREPEND + str(sampling_ratio) + PARQUET_FILE_EXTENSION, skip=1)
+
+
+def query_trace_time_from_file(test_frequency: float,
+                               test_amplitude: float,
+                               sampling_ratio: int,
+                               file_name: str,
+                               skip: int = 2) \
+        -> float | None:
+    if path.isfile(file_name):
+        schema: Schema = parquet.read_schema(file_name)
+        for field_index in tqdm(range(0, parquet.read_table(file_name).num_columns, skip),
+                                desc="querying trace times in " + file_name, leave=False):
+            field: Field = schema.field(field_index)
+            if metadata.check_metadata_equality(field.metadata, test_frequency, test_amplitude, sampling_ratio):
+                return metadata.get_time_from_field(field)
+    return None
+
+
+def query_trace(test_frequency: float,
+                test_amplitude: float,
+                sampling_ratio: int,
+                system: str) \
+        -> [float, numpy.ndarray[float]]:
+    file_name: str = system + "/" + str(test_amplitude) + "/" + str(sampling_ratio) + PARQUET_FILE_EXTENSION
+    trace_time: float = query_trace_time_from_file(test_frequency, test_amplitude, sampling_ratio, file_name)
+    if trace_time is not None:
+        table: Table = parquet.read_table(file_name)
+        return [trace_time, numpy.array([table.column(metadata.TRACE_COLUMN_NAME_TYPE_PREPEND[b"output"] + str(trace_time)).to_numpy(),
+                                         table.column(metadata.TRACE_COLUMN_NAME_TYPE_PREPEND[b"input"] + str(trace_time)).to_numpy()])]
+    else:
+        return [None, None]
+
+
+def save_experiment_trace(samples: numpy.ndarray[float],
+                          test_frequency: float,
+                          test_amplitude: float,
+                          sampling_ratio: int) \
+        -> float:
+    return save_trace_in_file(samples, test_frequency, test_amplitude, sampling_ratio, context.SYSTEMS[0] + "/" + str(test_amplitude) + "/" + str(sampling_ratio) + PARQUET_FILE_EXTENSION)
+
+
+def save_simulation_trace(samples: numpy.ndarray[float],
+                          test_frequency: float,
+                          test_amplitude: float,
+                          sampling_ratio: int,
+                          model_name: str) \
+        -> float:
+    return save_trace_in_file(samples, test_frequency, test_amplitude, sampling_ratio, model_name + "/" + str(test_amplitude) + "/" + str(sampling_ratio) + PARQUET_FILE_EXTENSION)
+
+
+def save_trace_in_file(samples: numpy.ndarray[float],
+                       test_frequency: float,
+                       test_amplitude: float,
+                       sampling_ratio: int,
+                       file_name: str) \
+        -> float:
+
+    def interrupt_handler(signum, frame):
+        global STOP
+        STOP = True
+        logging.warning("termination signal encountered")
+    signal.signal(signal.SIGINT, interrupt_handler)
+    signal.signal(signal.SIGTERM, interrupt_handler)
+
+    trace_time: float = datetime.timestamp(datetime.now())
+    output_field, input_field = metadata.create_trace_fields(test_frequency, test_amplitude, sampling_ratio, trace_time)
+    data_table: Table
+    if path.isfile(file_name):
+        data_table = parquet.read_table(file_name)
+        data_table = data_table.append_column(output_field, [samples[0]]).append_column(input_field, [samples[1]])
+    else:
+        data_table = pyarrow.Table.from_arrays(samples, schema=pyarrow.schema([output_field, input_field],
+                                                                              metadata=metadata.TRACE_TABLE_METADATA))
+    global STOP
+    if not STOP:
+        upload_start_time: float = time.time()
+        parquet.write_table(data_table, file_name)
+        upload_end_time: float = time.time()
+        logging.info("data upload time:" + str(upload_end_time - upload_start_time))
+    else:
+        delete_modelica_build_files()
+        sys.exit()
+    return trace_time
+
+
+def query_pois(test_frequency: float,
+               test_amplitude: float,
+               sampling_ratio: int,
+               system: str,
+               return_poi: str = None,
+               return_poi_methods: list[str] = None) \
+        -> numpy.ndarray[float] | None:
+    trace_time: float = query_poi_trace_time(test_frequency, test_amplitude, sampling_ratio, system)
+    if trace_time is not None:
+        table: Table = parquet.read_table(system + "/" + str(test_amplitude) + "/" + POI_FILE_PREPEND + str(sampling_ratio) + PARQUET_FILE_EXTENSION)
+        all_pois: numpy.ndarray[float] = table.column(metadata.POI_COLUMN_NAME_PREPEND + str(trace_time)).to_numpy()
+        if return_poi is not None:
+            return select_pois(all_pois, return_poi, return_poi_methods)
+        else:
+            return all_pois
+    else:
+        return None
+
+
+def select_pois(all_pois: numpy.ndarray[float],
+                poi: str,
+                poi_methods: list[str]) \
+        -> numpy.ndarray[float]:
+    required_pois: list = []
+    for poi_method in poi_methods:
+        poi_index: int = metadata.get_poi_index(poi, poi_method)
+        required_pois.append(all_pois.item(poi_index))
+    return numpy.array(required_pois)
+
+
+def save_pois(all_pois: numpy.ndarray[float],
+              test_frequency: float,
+              test_amplitude: float,
+              sampling_ratio: int,
+              trace_time: float,
+              system: str) \
+        -> None:
+
+    def interrupt_handler(signum, frame):
+        global STOP
+        STOP = True
+        logging.warning("termination signal encountered")
+    signal.signal(signal.SIGINT, interrupt_handler)
+    signal.signal(signal.SIGTERM, interrupt_handler)
+
+    poi_metadata: dict = metadata.create_column_metadata(test_frequency, test_amplitude, sampling_ratio, trace_time)
+    poi_field: Field = pyarrow.field(metadata.POI_COLUMN_NAME_PREPEND + str(trace_time), pyarrow.float64(), False, metadata=poi_metadata)
+    file_name: str = system + "/" + str(test_amplitude) + "/" + POI_FILE_PREPEND + str(sampling_ratio) + PARQUET_FILE_EXTENSION
+    data_table: Table
+    if path.isfile(file_name):
+        data_table = parquet.read_table(file_name)
+        data_table = data_table.append_column(poi_field, [all_pois])
+    else:
+        data_table = pyarrow.Table.from_arrays([all_pois], schema=pyarrow.schema([poi_field],
+                                               metadata=metadata.POI_TABLE_METADATA))
+    global STOP
+    if not STOP:
+        parquet.write_table(data_table, file_name)
+    else:
+        delete_modelica_build_files()
+        sys.exit()
+
+
+def delete_modelica_build_files() -> None:
+    logging.info("Cleaning Modelica build files")
+    for file_name in os.listdir():
+        if re.search("NotchFilter.*NotchFilter.*", file_name):
+            os.remove(file_name)

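Traces are stored one column pair per run, keyed by timestamp, with the experiment parameters packed into the column metadata. A round-trip sketch, assuming the Experiment/1.0/ directory already exists on disk:

    import numpy
    import data

    samples = numpy.zeros((2, 400))  # [output, input], DATA_CYCLES * sampling_ratio points each
    trace_time = data.save_experiment_trace(samples, 1000.0, 1.0, 200)
    found_time, trace = data.query_trace(1000.0, 1.0, 200, "Experiment")
    assert found_time == trace_time  # matched via packed testFrequency/testAmplitude/samplingRatio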
+ 113 - 0
mocka/routers/operations/experiment.py

@@ -0,0 +1,113 @@
+import logging
+import time
+
+import numpy
+from pydwf import DwfLibrary, DwfAnalogOutNode, DwfAnalogOutFunction, DwfAnalogInFilter, DwfAcquisitionMode, \
+    DwfTriggerSource, DwfAnalogInTriggerType, DwfTriggerSlope, DwfEnumConfigInfo, DwfState, PyDwfError, AnalogOut, \
+    AnalogIn
+from pydwf.utilities import openDwfDevice
+
+import data
+import context
+
+
+def single_experiment(test_frequency: float,
+                      test_amplitude: float,
+                      sampling_ratio: int) \
+        -> [float, numpy.ndarray[float]]:
+    logging.info("Performing experiment: f:" + str(test_frequency) + ", A:" + str(test_amplitude) + ", R:" + str(sampling_ratio))
+    dwf: DwfLibrary = DwfLibrary()
+    sampling_frequency: float = test_frequency * sampling_ratio
+
+    def maximize_buffer(configuration_parameters):
+        return configuration_parameters[DwfEnumConfigInfo.AnalogInBufferSize]
+
+    try:
+        with openDwfDevice(dwf, serial_number_filter=None, score_func=maximize_buffer) as device:
+            configure_output(device.analogOut, test_frequency, test_amplitude)
+            time.sleep(context.OUTPUT_STABILISATION_TIME)
+            configure_input(device.analogIn, sampling_frequency, DwfAcquisitionMode.Record)
+            samples: numpy.ndarray[float] = record_experiment(device.analogIn, sampling_ratio)
+            trace_time: float = data.save_experiment_trace(samples, test_frequency, test_amplitude, sampling_ratio)
+    except (PyDwfError, RuntimeError) as exception:
+        logging.warning(exception)
+        return single_experiment(test_frequency, test_amplitude, sampling_ratio)
+
+    return trace_time, samples
+
+
+def configure_output(analog_output_device: AnalogOut,
+                     test_frequency: float,
+                     test_amplitude: float) \
+        -> None:
+    ch0: int = 0
+    node = DwfAnalogOutNode.Carrier
+
+    analog_output_device.reset(-1)
+
+    analog_output_device.nodeEnableSet(ch0, node, True)
+    analog_output_device.nodeFunctionSet(ch0, node, DwfAnalogOutFunction.Sine)
+    analog_output_device.nodeFrequencySet(ch0, node, test_frequency)
+    analog_output_device.nodeAmplitudeSet(ch0, node, test_amplitude)
+    analog_output_device.nodeOffsetSet(ch0, node, 0)
+
+    analog_output_device.configure(ch0, 1)
+
+
+def configure_input(analog_input_device: AnalogIn,
+                    sampling_frequency: float,
+                    acquisition_mode: DwfAcquisitionMode) \
+        -> None:
+    ch0: int = 0  # reads filter output
+    ch1: int = 1  # reads filter input
+    channels = (ch0, ch1)
+    analog_input_device.reset()
+
+    for channel in channels:
+        analog_input_device.channelEnableSet(channel, True)
+        analog_input_device.channelFilterSet(channel, DwfAnalogInFilter.Average)
+
+    analog_input_device.acquisitionModeSet(acquisition_mode)
+    analog_input_device.recordLengthSet(-1)
+    analog_input_device.frequencySet(sampling_frequency)
+    configure_input_trigger(analog_input_device, ch1)
+
+
+def configure_input_trigger(analog_input_device: AnalogIn,
+                            channel: int) \
+        -> None:
+    analog_input_device.triggerSourceSet(DwfTriggerSource.DetectorAnalogIn)
+    analog_input_device.triggerChannelSet(channel)
+    analog_input_device.triggerTypeSet(DwfAnalogInTriggerType.Edge)
+    analog_input_device.triggerConditionSet(DwfTriggerSlope.Rise)
+    analog_input_device.triggerPositionSet(0.0)
+    analog_input_device.triggerLevelSet(0.0)
+    analog_input_device.triggerHysteresisSet(0.001)
+
+
+def record_experiment(analog_input_device: AnalogIn,
+                      sampling_ratio: int) \
+        -> numpy.ndarray[float]:
+    num_samples: int = sampling_ratio * context.DATA_CYCLES
+    acquired_samples: int = 0
+    input_samples: list = []
+    output_samples: list = []
+    analog_input_device.configure(True, True)
+    while True:
+        status = analog_input_device.status(True)
+        current_samples, current_samples_lost, current_samples_corrupted = analog_input_device.statusRecord()
+
+        if current_samples != 0:
+            output_samples.append(analog_input_device.statusData(0, current_samples))
+            input_samples.append(analog_input_device.statusData(1, current_samples))
+            logging.info("Acquired "+str(current_samples)+" samples")
+            acquired_samples = acquired_samples + current_samples
+
+        if current_samples_lost + current_samples_corrupted != 0:
+            raise RuntimeError("Samples lost/corrupted: (" + str(current_samples_lost) + "," + str(current_samples_corrupted) + ")")
+
+        if status == DwfState.Done or acquired_samples >= num_samples:
+            break
+
+    return numpy.array([numpy.concatenate(output_samples, axis=None)[:num_samples],
+                        numpy.concatenate(input_samples, axis=None)[:num_samples]])

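A one-shot hardware run, assuming a Digilent device is attached and the pydwf runtime is installed; on PyDwfError or lost/corrupted samples the function retries itself:

    import experiment

    trace_time, samples = experiment.single_experiment(1000.0, 1.0, 200)
    # samples[0] is the filter output (ch0), samples[1] the input (ch1),
    # DATA_CYCLES * sampling_ratio = 400 points each here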
+ 73 - 0
mocka/routers/operations/metadata.py

@@ -0,0 +1,73 @@
+import struct
+
+import pyarrow
+from pyarrow import Field
+
+TRACE_TABLE_METADATA: dict = {b"output": struct.pack("i", 0),
+                              b"input": struct.pack("i", 1)}
+POI_TABLE_METADATA: dict = {b"gain_amplitude": struct.pack("i", 0),
+                            b"gain_curvefit": struct.pack("i", 1),
+                            b"gain_rms": struct.pack("i", 2),
+                            b"phase_amplitude": struct.pack("i", 3),
+                            b"phase_curvefit": struct.pack("i", 4)}
+TRACE_COLUMN_NAME_TYPE_PREPEND: dict = {b"output": "o_",
+                                        b"input": "i_"}
+POI_COLUMN_NAME_PREPEND: str = "poi_"
+COMMON_COLUMN_METADATA: list = ["testFrequency", "testAmplitude", "samplingRatio", "time"]
+TRACE_COLUMN_METADATA: list = [b"type"]
+
+
+def check_metadata_equality(metadata: dict,
+                            test_frequency: float,
+                            test_amplitude: float,
+                            sampling_ratio: int) \
+        -> bool:
+    return (struct.unpack("d", metadata[bytes(COMMON_COLUMN_METADATA[0], "utf-8")])[0] == test_frequency and
+            struct.unpack("d", metadata[bytes(COMMON_COLUMN_METADATA[1], "utf-8")])[0] == test_amplitude and
+            struct.unpack("i", metadata[bytes(COMMON_COLUMN_METADATA[2], "utf-8")])[0] == sampling_ratio)
+
+
+def create_column_metadata(test_frequency: float,
+                           test_amplitude: float,
+                           sampling_ratio: int,
+                           time: float) \
+        -> dict:
+    return {COMMON_COLUMN_METADATA[0]: struct.pack("d", test_frequency),
+            COMMON_COLUMN_METADATA[1]: struct.pack("d", test_amplitude),
+            COMMON_COLUMN_METADATA[2]: struct.pack("i", sampling_ratio),
+            COMMON_COLUMN_METADATA[3]: struct.pack("d", time)}
+
+
+def create_trace_fields(test_frequency: float,
+                        test_amplitude: float,
+                        sampling_ratio: int,
+                        time: float) \
+        -> list[Field]:
+    fields = []
+    column_metadata: dict = create_column_metadata(test_frequency, test_amplitude, sampling_ratio, time)
+    for column_type in TRACE_TABLE_METADATA.keys():
+        column_metadata["type"] = column_type
+        fields.append(pyarrow.field(TRACE_COLUMN_NAME_TYPE_PREPEND[column_type] + str(time),
+                                    pyarrow.float64(), False, metadata=column_metadata))
+    return fields
+
+
+def get_column_metadata(metadata: dict) \
+        -> [float, float, int, float]:
+    return [(struct.unpack("d", metadata[bytes(COMMON_COLUMN_METADATA[0], "utf-8")]))[0],
+            (struct.unpack("d", metadata[bytes(COMMON_COLUMN_METADATA[1], "utf-8")]))[0],
+            int((struct.unpack("i", metadata[bytes(COMMON_COLUMN_METADATA[2], "utf-8")]))[0]),
+            (struct.unpack("d", metadata[bytes(COMMON_COLUMN_METADATA[3], "utf-8")]))[0]]
+
+
+def get_poi_index(poi: str,
+                  poi_method: str) \
+        -> int:
+    return int(struct.unpack("i", POI_TABLE_METADATA[bytes(poi + "_" + poi_method, "utf-8")])[0])
+
+
+def get_time_from_field(field: Field) -> float:
+    return struct.unpack("d", field.metadata[bytes(COMMON_COLUMN_METADATA[3], "utf-8")])[0]

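The column metadata is packed binary, so values must be recovered with struct.unpack; note that pyarrow hands field metadata back with bytes keys, which is why get_column_metadata indexes with bytes(...). A round-trip sketch with illustrative values:

    import struct
    import metadata

    column_meta = metadata.create_column_metadata(1000.0, 1.0, 200, 1700000000.0)
    assert struct.unpack("d", column_meta["testFrequency"])[0] == 1000.0
    # emulate the bytes keys pyarrow returns after a schema read:
    f, a, r, t = metadata.get_column_metadata({k.encode(): v for k, v in column_meta.items()})
    assert (f, a, r) == (1000.0, 1.0, 200)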
+ 74 - 0
mocka/routers/operations/perform.py

@@ -0,0 +1,74 @@
+import logging
+
+import numpy
+from OMPython import OMCSessionZMQ, ModelicaSystem
+import itertools
+
+import data
+import experiment
+import context
+import simulate
+
+
+def perform_experimental_activity(system: str,
+                                  test_frequencies: numpy.ndarray[float],
+                                  test_amplitudes: numpy.ndarray[float],
+                                  sampling_ratios: numpy.ndarray[int],
+                                  use_old_data: bool = True) \
+        -> [float, numpy.ndarray[float]]:
+
+    logging.basicConfig(filename=system+".log", level=logging.INFO,
+                        format="%(asctime)s:%(levelname)s:%(message)s", datefmt="%m/%d/%Y %I:%M:%S %p", force=True)
+    experiment_frame = numpy.array(list(itertools.product(test_amplitudes, sampling_ratios, test_frequencies)))
+    if system == context.SYSTEMS[0]:
+        return perform_experiments(experiment_frame, use_old_data)
+    else:
+        return perform_simulations(system, experiment_frame, use_old_data)
+
+
+def perform_experiments(experiment_frames: numpy.ndarray[float],
+                        use_old_data: bool) \
+        -> [float, numpy.ndarray[float]]:
+    for experiment_frame in experiment_frames:
+        if data.query_experiment_trace_time(experiment_frame[2], experiment_frame[0], int(experiment_frame[1])) is None or not use_old_data:
+            trace_time, samples = experiment.single_experiment(experiment_frame[2], experiment_frame[0], int(experiment_frame[1]))
+            if len(experiment_frames) == 1:
+                return trace_time, samples
+        else:
+            logging.info("Found data: f:" + str(experiment_frame[2]) + ", A:" + str(experiment_frame[0]) + ", R:" + str(experiment_frame[1]))
+
+
+def perform_simulations(model_name: str,
+                        experiment_frames: numpy.ndarray[float],
+                        use_old_data: bool) \
+        -> [float, numpy.ndarray[float]]:
+    omc = OMCSessionZMQ()
+    model = ModelicaSystem(context.MODELICA_FILE_NAME, context.MODELICA_PACKAGE_NAME + "." + model_name)
+    model.buildModel("(V_in.v)|(V_out.v)")
+    for experiment_frame in experiment_frames:
+        if data.query_simulation_trace_time(experiment_frame[2], experiment_frame[0], int(experiment_frame[1]), model_name) is None or not use_old_data:
+            trace_time, samples = simulate.single_simulation(model, experiment_frame[2], experiment_frame[0], int(experiment_frame[1]), model_name)
+            if len(experiment_frames) == 1:
+                data.delete_modelica_build_files()
+                return trace_time, samples
+        else:
+            logging.info("Found data: f:" + str(experiment_frame[2]) + ", A:" + str(experiment_frame[0]) + ", R:" + str(experiment_frame[1]))
+    data.delete_modelica_build_files()
+
+
+# if __name__ == "__main__":
+    # perform_experimental_activity(context.SYSTEMS[0], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([200]))
+    # perform_experimental_activity(context.SYSTEMS[1], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([200]))
+    # perform_experimental_activity(context.SYSTEMS[2], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([200]))
+    # perform_experimental_activity(context.SYSTEMS[0], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([400]))
+    # perform_experimental_activity(context.SYSTEMS[1], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([400]))
+    # perform_experimental_activity(context.SYSTEMS[2], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([400]))
+    # perform_experimental_activity(context.SYSTEMS[0], numpy.arange(500, 4e5, 500), numpy.arange(1.5, 5, 0.5), numpy.array([600]))
+    # perform_experimental_activity(context.SYSTEMS[1], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([600]))
+    # perform_experimental_activity(context.SYSTEMS[2], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([600]))
+    # perform_experimental_activity(context.SYSTEMS[0], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([800]))
+    # perform_experimental_activity(context.SYSTEMS[1], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([800]))
+    # perform_experimental_activity(context.SYSTEMS[2], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([800]))
+    # perform_experimental_activity(context.SYSTEMS[0], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([1000]))
+    # perform_experimental_activity(context.SYSTEMS[1], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([1000]))
+    # perform_experimental_activity(context.SYSTEMS[2], numpy.arange(500, 4e5, 500), numpy.arange(0.5, 5, 0.5), numpy.array([1000]))

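perform_experimental_activity fans the Cartesian product of amplitudes, ratios, and frequencies out to either the hardware or a built Modelica model. A single-point sketch against the physical setup (requires the device; results land in the parquet files managed by data.py):

    import numpy
    import context
    import perform

    perform.perform_experimental_activity(context.SYSTEMS[0],     # "Experiment"
                                          numpy.array([1000.0]),  # Hz
                                          numpy.array([1.0]),     # V
                                          numpy.array([200]))     # sampling ratio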
+ 143 - 0
mocka/routers/operations/plot.py

@@ -0,0 +1,143 @@
+import matplotlib
+from matplotlib import pyplot
+from mpl_toolkits import mplot3d
+import numpy
+import enquiries
+
+import collect
+import context
+import process
+
+matplotlib.use("TkAgg")
+
+DATA_OPTIONS = ["experiment trace",  # line plot # fixed test frequency # varying amplitudes, systems
+                "property of interest",  # line plot # fixed frequency range # varying amplitudes, systems
+                "concrete experiment specification",  # scatter plot # all existing trace metadata
+                "concrete validity frame"]  # scatter plot # all existing poi data
+
+
+def plot_concrete_frame(test_frequencies_list: list[numpy.ndarray[float]],
+                        test_amplitudes_list: list[numpy.ndarray[float]],
+                        sampling_ratios_list: list[numpy.ndarray[float]],
+                        legend: list[str]) \
+        -> None:
+    figure = pyplot.figure()
+    axes = pyplot.axes(projection="3d")
+    for index in range(0, len(test_frequencies_list), 1):
+        axes.scatter3D(test_frequencies_list[index], test_amplitudes_list[index], sampling_ratios_list[index],
+                       label=legend[index])
+    axes.legend()
+    axes.set_xlabel("Frequency (Hz)")
+    axes.set_ylabel("Amplitude (V)")
+    axes.set_zlabel("Sampling Ratio (n)")
+    axes.grid(True)
+    pyplot.show()
+
+
+def plot_pois(x_data: numpy.ndarray[float],
+              y_data: numpy.ndarray[float],
+              legend: list[str],
+              y_label: str) \
+        -> None:
+    make_plot(x_data, y_data, legend, y_label=y_label, x_label="Frequency (Hz)")
+
+
+def plot_traces(x_data: list[numpy.ndarray[float]],
+                y_data: list[numpy.ndarray[float]],
+                legend: list[str]) \
+        -> None:
+    figure, axis = pyplot.subplots()
+    for ratios_index in range(len(y_data)):
+        for signal_index in range(len(y_data[ratios_index])):
+            for signal_type in [0, 1]:
+                axis.plot(x_data[ratios_index], y_data[ratios_index][signal_index][signal_type])
+    pyplot.xlabel("Time (s)")
+    pyplot.ylabel("Voltage (V)")
+    pyplot.legend(legend)
+    pyplot.show()
+
+
+def make_plot(x_data: numpy.ndarray[float],
+              y_data: numpy.ndarray[float],
+              legend: list[str],
+              y_label: str,
+              x_label: str) \
+        -> None:
+    figure, axis = pyplot.subplots()
+    for index in range(len(y_data)):
+        axis.plot(x_data, y_data[index])
+    pyplot.xlabel(x_label)
+    pyplot.ylabel(y_label)
+    pyplot.legend(legend)
+    pyplot.show()
+
+
+def user_choice(question: str,
+                options: list,
+                multi: bool = False) \
+        -> str | list[str]:
+    response = enquiries.choose(question, options, multi=multi)
+    if multi:
+        for option in response:
+            print(question + ": " + option)
+    else:
+        print(question + ": " + response)
+    return response
+
+
+def user_choice_amplitude() -> numpy.ndarray[float]:
+    minimum_amplitude: float = float(input("Minimum signal amplitude (V): "))
+    maximum_amplitude: float = float(input("Maximum signal amplitude (V): "))
+    amplitude_resolution: float = float(input("Resolution of signal amplitude (V): "))
+    return numpy.arange(minimum_amplitude, maximum_amplitude + amplitude_resolution, amplitude_resolution)
+
+
+def user_choice_sampling_ratio() -> numpy.ndarray[int]:
+    minimum_sampling_ratio: int = int(input("Minimum sampling ratio (int): "))
+    maximum_sampling_ratio: int = int(input("Maximum sampling ratio (int): "))
+    sampling_ratio_resolution: int = int(input("Resolution of sampling ratio (int): "))
+    return numpy.arange(minimum_sampling_ratio, maximum_sampling_ratio + sampling_ratio_resolution, sampling_ratio_resolution)
+
+
+if __name__ == "__main__":
+    datatype: str = user_choice("plot data type?", DATA_OPTIONS)
+    systems: list[str] = user_choice("data from which system/s?", context.SYSTEMS, multi=True)
+    if datatype == DATA_OPTIONS[0]:
+        test_frequency: float = float(input("Input signal frequency (Hz): "))
+        test_amplitudes: numpy.ndarray[float] = user_choice_amplitude()
+        sampling_ratios: numpy.ndarray[int] = user_choice_sampling_ratio()
+        traces, legend, timescales = collect.collect_traces(test_frequency, test_amplitudes, sampling_ratios, systems)
+        plot_traces(timescales, traces, legend)
+    elif datatype == DATA_OPTIONS[1]:
+        minimum_frequency: float = float(input("Minimum signal frequency (Hz): "))
+        maximum_frequency: float = float(input("Maximum signal frequency (Hz): "))
+        frequency_resolution: float = float(input("Resolution of signal frequency (Hz): "))
+        test_frequencies: numpy.ndarray[float] = numpy.arange(minimum_frequency,
+                                                              maximum_frequency + frequency_resolution,
+                                                              frequency_resolution)
+        test_amplitudes: numpy.ndarray[float] = user_choice_amplitude()
+        sampling_ratios: numpy.ndarray[int] = user_choice_sampling_ratio()
+        poi: str = user_choice("Property of interest?", process.POIS)
+        poi_methods: list[str] = user_choice("PoI calculation method?", process.POI_METHODS[poi], multi=True)
+        pois, legend = collect.collect_poi_traces(test_frequencies, test_amplitudes, sampling_ratios, systems, poi,
+                                                  poi_methods)
+        y_label: str
+        if poi == process.POIS[0]:
+            pois = 20 * numpy.log10(pois)  # decibels for a voltage-ratio gain
+            y_label = "Gain (dB)"
+        else:
+            y_label = "Phase Difference (deg)"
+        plot_pois(test_frequencies, pois, legend, y_label=y_label)
+    elif datatype == DATA_OPTIONS[2]:
+        test_frequencies_list: list[numpy.ndarray[float]] = []
+        test_amplitudes_list: list[numpy.ndarray[float]] = []
+        sampling_ratios_list: list[numpy.ndarray[int]] = []
+        legend: list[str] = []
+        for system in systems:
+            test_frequencies, test_amplitudes, sampling_ratios = collect.collect_trace_metadata(system)
+            test_frequencies_list.append(test_frequencies)
+            test_amplitudes_list.append(test_amplitudes)
+            sampling_ratios_list.append(sampling_ratios)
+            legend.append(system)
+        plot_concrete_frame(test_frequencies_list, test_amplitudes_list, sampling_ratios_list, legend)
+#    elif datatype == DATA_OPTIONS[3]:

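The __main__ flow is prompt-driven via enquiries, but the plotting helpers can also be called directly. A non-interactive sketch with made-up data:

    import numpy
    import plot

    frequencies = numpy.array([500.0, 1000.0, 2000.0])
    gains_db = 20 * numpy.log10(numpy.array([[0.9, 0.1, 0.8]]))  # one trace, illustrative
    plot.plot_pois(frequencies, gains_db,
                   legend=["Experiment: A:1.0, R:200, method=rms"], y_label="Gain (dB)")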
+ 155 - 0
mocka/routers/operations/process.py

@@ -0,0 +1,155 @@
+import math
+
+import numpy
+from scipy.optimize import curve_fit
+
+import context
+import data
+
+POIS: list = ["gain", "phaseDiff"]
+POI_METHODS: dict = {POIS[0]: ["amplitude", "curvefit", "rms"],
+                     POIS[1]: ["amplitude", "curvefit"]}
+
+
+def compute_poi(poi: str,
+                poi_method: str,
+                output_data: numpy.ndarray[float],
+                input_data: numpy.ndarray[float],
+                sampling_ratio: int,
+                test_frequency: float) \
+        -> float:
+    if poi == POIS[0]:
+        return compute_gain(poi_method, output_data, input_data, sampling_ratio, test_frequency)
+    elif poi == POIS[1]:
+        return compute_phasediff(poi_method, output_data, sampling_ratio, test_frequency)
+    else:
+        raise NotImplementedError("The requested poi is not implemented!")
+
+
+# def compute_pois(output_data: numpy.ndarray[float],
+#                  input_data: numpy.ndarray[float],
+#                  sampling_ratio: int,
+#                  test_frequency: float,
+#                  return_poi: str,
+#                  return_poi_methods: list[str]) \
+#         -> numpy.ndarray[float]:
+#     pois = []
+#     for poi_method in return_poi_methods:
+#         pois.append(compute_poi(return_poi, poi_method, output_data, input_data, sampling_ratio, test_frequency))
+#     return numpy.array(pois)
+
+
+def compute_all_pois(samples: numpy.ndarray[float],
+                     sampling_ratio: int,
+                     test_frequency: float,
+                     test_amplitude: float = None,
+                     time: float = None,
+                     system: str = None,
+                     save: bool = True) \
+        -> numpy.ndarray[float]:
+    all_pois: list[float] = []
+    for poi in POIS:
+        for poi_method in POI_METHODS[poi]:
+            all_pois.append(compute_poi(poi, poi_method, samples[0], samples[1], sampling_ratio, test_frequency))
+    if save:
+        data.save_pois(numpy.array(all_pois), test_frequency, test_amplitude, sampling_ratio, time, system)
+    return numpy.array(all_pois)
+
+
+# Gain
+
+def compute_gain(gain_method: str,
+                 output_data: numpy.ndarray[float],
+                 input_data: numpy.ndarray[float],
+                 sampling_ratio: int = None,
+                 test_frequency: float = None) \
+        -> float:
+    if gain_method == POI_METHODS[POIS[0]][0]:
+        return compute_amplitude_gain(output_data, input_data)
+    elif gain_method == POI_METHODS[POIS[0]][1]:
+        return compute_curve_fit_gain(output_data, input_data, sampling_ratio, test_frequency)
+    elif gain_method == POI_METHODS[POIS[0]][2]:
+        return compute_rms_gain(output_data, input_data)
+    else:
+        raise NotImplementedError("The requested gain calculation method is not implemented!")
+
+
+def compute_amplitude_gain(output_data: numpy.ndarray[float],
+                           input_data: numpy.ndarray[float]) \
+        -> float:
+    return numpy.max(abs(output_data)) / numpy.max(abs(input_data))
+
+
+def compute_curve_fit_gain(output_data: numpy.ndarray[float],
+                           input_data: numpy.ndarray[float],
+                           sampling_ratio: int,
+                           test_frequency: float) \
+        -> float:
+    def sin_func(x: float, a: float, phi: float) -> float:
+        return a * numpy.sin(2 * math.pi * (test_frequency * x + phi))
+
+    time_data = generate_time_data(test_frequency, sampling_ratio)
+    output_params: numpy.ndarray[float] = curve_fit(f=sin_func, xdata=time_data, ydata=output_data)[0]
+    input_params: numpy.ndarray[float] = curve_fit(f=sin_func, xdata=time_data, ydata=input_data)[0]
+    return abs(output_params.item(0) / input_params.item(0))
+
+
+def compute_rms_gain(output_data: numpy.ndarray[float],
+                     input_data: numpy.ndarray[float]) \
+        -> float:
+    return math.sqrt(numpy.square(output_data).mean() / numpy.square(input_data).mean())
+
+
+# Phase Difference
+def compute_phasediff(phasediff_method: str,
+                      output_data: numpy.ndarray[float],
+                      sampling_ratio: int,
+                      test_frequency: float = None) \
+        -> float:
+    if phasediff_method == POI_METHODS[POIS[1]][0]:
+        return compute_amplitude_phasediff(output_data, sampling_ratio)
+    elif phasediff_method == POI_METHODS[POIS[1]][1]:
+        return compute_curvefit_phasediff(output_data, sampling_ratio, test_frequency)
+    else:
+        raise NotImplementedError("The requested phase difference calculation method is not implemented!")
+
+
+def compute_amplitude_phasediff(output_data: numpy.ndarray[float],
+                                sampling_ratio: int) \
+        -> float:
+    output_index: int = numpy.argmax(output_data[:int(sampling_ratio)])
+    if output_index < 3 * sampling_ratio / 4:
+        return ((output_index / sampling_ratio) * 360) - 90
+    else:
+        return (360 * (output_index / sampling_ratio)) - (360 + 90)
+
+
+def compute_curvefit_phasediff(output_data: numpy.ndarray[float],
+                               sampling_ratio: int,
+                               test_frequency: float) \
+        -> float:
+    def sin_func(x, a, phi):
+        return a * numpy.sin(2 * math.pi * (test_frequency * x + phi))
+
+    output_params = curve_fit(f=sin_func, xdata=generate_time_data(test_frequency, sampling_ratio),
+                              ydata=output_data, bounds=[[-numpy.inf, -0.5], [numpy.inf, 0.5]])[0]
+    return output_params.item(1) * 360
+
+
+def generate_time_data(test_frequency: float,
+                       sampling_ratio: int) \
+        -> numpy.ndarray[float]:
+    time_step: float = 1 / (test_frequency * sampling_ratio)
+    return numpy.arange(context.OUTPUT_STABILISATION_TIME,
+                        context.OUTPUT_STABILISATION_TIME + (context.DATA_CYCLES * sampling_ratio * time_step),
+                        time_step)[:context.DATA_CYCLES*sampling_ratio]
+
+
+# def generate_poi_table_metadata() -> dict:
+#     poi_table_metadata: dict = {}
+#     method_index = 0
+#     for poi in POIS:
+#         for poi_method in POI_METHODS[poi]:
+#             poi_table_metadata[bytes(poi + "_" + poi_method, "utf-8")] = struct.pack("i", method_index)
+#             method_index += 1
+#     return poi_table_metadata

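A synthetic sanity check for the three gain estimators: with an output that is exactly half the input amplitude, each method should report roughly 0.5 (the time base must come from generate_time_data so the curve fit lines up):

    import numpy
    import process

    f, ratio = 1000.0, 200
    t = process.generate_time_data(f, ratio)
    input_data = 1.0 * numpy.sin(2 * numpy.pi * f * t)
    output_data = 0.5 * numpy.sin(2 * numpy.pi * f * t)
    for method in process.POI_METHODS["gain"]:
        print(method, round(process.compute_gain(method, output_data, input_data, ratio, f), 3))
    # amplitude, curvefit, and rms should each print ~0.5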
+ 56 - 0
mocka/routers/operations/regenerate.py

@@ -0,0 +1,56 @@
+import os
+import struct
+
+import enquiries
+import numpy
+import pyarrow
+from pyarrow import parquet, Field
+from tqdm import tqdm
+
+import context
+import data
+import metadata
+import process
+
+
+def regenerate_all_poi_files() \
+        -> None:
+    regenerate_poi_files(context.SYSTEMS)
+
+
+def regenerate_poi_files(systems: list[str]) \
+        -> None:
+    for system in tqdm(systems, desc="regenerating poi files of systems", leave=False):
+        regenerate_poi_file(system)
+
+
+def regenerate_poi_file(system: str):
+    for amplitude_dir in os.listdir(system+"/"):
+        if os.path.isdir(system+"/"+amplitude_dir):
+            for file_name in os.listdir(system+"/"+amplitude_dir+"/"):
+                if not file_name.startswith(data.POI_FILE_PREPEND):
+                    trace_file_name: str = system + "/" + amplitude_dir + "/" + file_name
+                    schema = parquet.read_schema(trace_file_name)
+                    table = parquet.read_table(trace_file_name)
+                    output_index = struct.unpack("i", metadata.TRACE_TABLE_METADATA[b"output"])[0]
+                    input_index = struct.unpack("i", metadata.TRACE_TABLE_METADATA[b"input"])[0]
+                    poi_table = None
+                    for data_index in tqdm(range(0, table.num_columns, 2), desc="generating pois from " + system, leave=False):
+                        output_data = table.column(data_index + output_index).to_numpy()
+                        input_data = table.column(data_index + input_index).to_numpy()
+                        poi_metadata = schema.field(data_index).metadata
+                        poi_metadata.pop(b"type")
+                        test_frequency, test_amplitude, sampling_ratio, time = metadata.get_column_metadata(poi_metadata)
+                        poi_field: Field = pyarrow.field(metadata.POI_COLUMN_NAME_PREPEND + str(time), pyarrow.float64(), False, metadata=poi_metadata)
+                        poi: numpy.ndarray[float] = process.compute_all_pois(numpy.array([output_data, input_data]), sampling_ratio, test_frequency, save=False)
+                        if poi_table is None:
+                            poi_table = pyarrow.Table.from_arrays([poi], schema=pyarrow.schema([poi_field],
+                                                                  metadata=metadata.POI_TABLE_METADATA))
+                        else:
+                            poi_table = poi_table.append_column(poi_field, [poi])
+                    parquet.write_table(poi_table, system + "/" + amplitude_dir + "/" + data.POI_FILE_PREPEND + file_name)
+
+
+if __name__ == "__main__":
+    systems = enquiries.choose("re-generate PoIs from?", context.SYSTEMS, multi=True)
+    regenerate_poi_files(systems)

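For scripted use the enquiries prompt can be bypassed; a minimal sketch:

    import regenerate

    # system names as in context.SYSTEMS; directory layout as in collect.py
    regenerate.regenerate_poi_files(["IdealNotchFilterModel"])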
+ 52 - 0
mocka/routers/operations/simulate.py

@@ -0,0 +1,52 @@
+import logging
+
+import numpy
+from OMPython import ModelicaSystem
+
+import data
+import context
+from data import delete_modelica_build_files
+
+
+def single_simulation(model: ModelicaSystem,
+                      test_frequency: float,
+                      test_amplitude: float,
+                      sampling_ratio: int,
+                      model_name: str) \
+        -> [float, numpy.ndarray[float]]:
+    logging.info("Performing simulation: f:" + str(test_frequency) + ", A:" + str(test_amplitude) + ", R:" + str(sampling_ratio))
+    sampling_frequency: float = test_frequency * sampling_ratio
+    delete_files: bool = False
+    if model is None:
+#        omc = OMCSessionZMQ()
+        model = ModelicaSystem("NotchFilter.mo", "NotchFilter." + model_name)
+        model.buildModel("(V_in.v)|(V_out.v)")
+        delete_files = True
+    model.setSimulationOptions(["stepSize=" + str(1/sampling_frequency),
+                                "tolerance=1e-9",
+                                "startTime=0",
+                                "stopTime=" + str((2 * context.DATA_CYCLES) / test_frequency)])
+    model.setParameters(["Vt=" + str(test_amplitude),
+                         "Ft=" + str(test_frequency)])
+
+    model.simulate()
+    samples: numpy.ndarray[float] = numpy.array(model.getSolutions(["V_out.v", "V_in.v"]))
+    sample_count = context.DATA_CYCLES * sampling_ratio
+    samples = numpy.array([samples[0][-sample_count:],
+                           samples[1][-sample_count:]]).round(8)
+    if delete_files:
+        delete_modelica_build_files()
+    trace_time: float = data.save_simulation_trace(samples, test_frequency, test_amplitude, sampling_ratio, model_name)
+    return trace_time, samples
+
+# from pydelica import Session
+# def singleSimulationPydelica(testFrequency, testAmplitude, samplingRatio):
+#     with Session() as session:
+#         session.build_model("notchFilter.mo","notchFilter")
+#         session.set_parameter("Vt", testAmplitude)
+#         session.set_parameter("Ft", testFrequency)
+#         session.set_time_range(start_time=0, stop_time = outputStabilisationTime+(dataCycles/testFrequency))
+#         session.set_variable_filter(filter_str="(V_in.v)|(V_out.v)")
+#         session.simulate()
+#     samples = numpy.transpose(session.get_solutions()["notchFilter"].to_numpy())
+#     return samples

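Passing model=None makes single_simulation build the Modelica model itself and clean up the build artefacts afterwards; with OpenModelica and NotchFilter.mo available, a one-off run looks like:

    import simulate

    trace_time, samples = simulate.single_simulation(None, 1000.0, 1.0, 200, "IdealNotchFilterModel")
    # samples[0] is V_out.v, samples[1] is V_in.v, trimmed to DATA_CYCLES * sampling_ratio points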
+ 7 - 3
requirements.txt

@@ -9,8 +9,13 @@ starlette         ~= 0.27.0
 python-magic      ~= 0.4.27
 opencv-python     ~= 4.8.0.76 # Octiva
 uvicorn[standard] ~= 0.23.2
-#pydelica ~= 0.4.5 # VaFL Notch
-ompython ~= 3.4.0 # VaFL Notch
+tqdm              ~= 4.66.1 # VaFL Notch
+pydwf             ~= 1.1.19 # VaFL Notch
+scipy             ~= 1.11.4 # VaFL Notch
+numpy             ~= 1.26.2 # VaFL Notch
+ompython          ~= 3.4.0  # VaFL Notch
+enquiries         ~= 0.2.0  # VaFL Notch
+matplotlib        ~= 3.8.2  # VaFL Notch
 # Test
 httpx  ~= 0.25.0
 pytest ~= 7.4.0
@@ -22,7 +27,6 @@ pip      ~= 23.2.1
 flit     ~= 3.9.0
 twine    ~= 4.0.2
 vermin   ~= 1.5.1
-numpy    ~= 1.26.0
 invoke   ~= 2.2.0
 jinja2   ~= 3.1.2
 flake8   ~= 6.1.0