from subprocess import call
import time


def run_real(samples):
    """Run the real integration test `samples` times and record each run's
    wall-clock duration, one per line, in model/results_real.

    Each sample invokes the fast power-window pytest selection under pypy
    in a subprocess, so the timing includes interpreter startup overhead.
    """
    with open("model/results_real", 'w') as f:
        for _ in range(samples):
            start = time.time()
            call(["pypy", "-m", "pytest",
                  "integration/test_powerwindow.py", "-k", "fast", "-x"])
            # Sample the clock once: the original called time.time() twice,
            # so the printed duration and the recorded one disagreed.
            elapsed = time.time() - start
            print("Time to execute: " + str(elapsed))
            f.write(str(elapsed))
            f.write("\n")
            f.flush()  # keep results on disk even if a later run hangs
def run_simulation(latency):
    """Run one model simulation for the given latency configuration.

    `latency` is a dict of latency parameters (e.g. {"mvk2mvs_latency": 0.5})
    passed straight to model.simulate. Returns simulate's result — a
    (sim, exe) pair, judging by the callers; TODO confirm against model.py.
    """
    import sys
    sys.path.append("model")
    from model import simulate

    print("SIMULATE for " + str(latency))
    outcome = simulate(latency)
    print(outcome)
    return outcome
def benchmark(parallel, to_run):
    """Run run_simulation over every configuration in `to_run`.

    parallel: if true, fan the work out over a 4-worker process pool.
    to_run:   list of latency-configuration dicts.

    Returns a concrete list of results. (The original returned a lazy
    `map` iterator on Python 3; callers exhausted it with `print(results)`
    before zipping, so the result files ended up empty.)
    """
    if parallel:
        from multiprocessing import Pool
        pool = Pool(processes=4)
        try:
            results = pool.map(run_simulation, to_run)
        finally:
            # Release the worker processes (the original never closed the pool).
            pool.close()
            pool.join()
    else:
        results = list(map(run_simulation, to_run))
    return results
def benchmark_mvs(parallel, latency_range):
    """Sweep the mvk2mvs_latency parameter over `latency_range` and write
    one "config sim exe" line per configuration to model/results_mvs_latency.
    """
    to_run = [{"mvk2mvs_latency": i} for i in latency_range]
    # Materialize before printing: on Python 3 a lazy map returned by
    # benchmark() would be exhausted by print(), leaving zip() nothing.
    results = list(benchmark(parallel, to_run))
    print(results)
    with open("model/results_mvs_latency", 'w') as f:
        for config, (sim, exe) in zip(to_run, results):
            f.write("%s %s %s\n" % (config, sim, exe))
            f.flush()  # persist each line immediately; runs can be long
def benchmark_mvi(parallel, latency_range):
    """Sweep the mvi2mvk_latency parameter over `latency_range` and write
    one "config sim exe" line per configuration to model/results_mvi_latency.
    """
    to_run = [{"mvi2mvk_latency": i} for i in latency_range]
    # Materialize before printing: on Python 3 a lazy map returned by
    # benchmark() would be exhausted by print(), leaving zip() nothing.
    results = list(benchmark(parallel, to_run))
    print(results)
    with open("model/results_mvi_latency", 'w') as f:
        for config, (sim, exe) in zip(to_run, results):
            f.write("%s %s %s\n" % (config, sim, exe))
            f.flush()  # persist each line immediately; runs can be long
if __name__ == "__main__":
    # Small smoke-test sweep; the commented-out calls below are the
    # full benchmark configuration.
    benchmark_mvs(parallel=False, latency_range=[0.0, 0.5])
    benchmark_mvi(parallel=False, latency_range=[0.0, 0.5])
    run_real(samples=2)
    # benchmark_mvs(parallel=False, latency_range=[0.0, 0.0017, 0.012, 0.02, 0.1, 0.5])
    # benchmark_mvi(parallel=False, latency_range=[0.0, 0.0017, 0.012, 0.02, 0.1, 0.5])
    # run_real(samples=10)