123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170 |
- import csv
- import os
- import sys
- import matplotlib.pyplot as plt
- from matplotlib.backends.backend_pdf import PdfPages
- import argparse
def read_data(filepath):
    """Parse a semicolon-delimited CSV file into a dict of float columns.

    Returns a mapping from column name to the list of that column's
    values converted to float. An empty-named column (produced by a
    trailing delimiter in the header) is ignored.
    """
    with open(filepath, 'r') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=';')
        columns = {name: [] for name in reader.fieldnames if name != ""}
        for row in reader:
            for name, values in columns.items():
                values.append(float(row[name]))
    return columns
def selectTime(raw_data, maxTime):
    """Trim every column of raw_data, in place, to the entries whose
    "time" value is strictly below maxTime.

    All columns are cut at the index of the first time sample that is
    >= maxTime; if no such sample exists the data is left untouched.
    Returns the (mutated) raw_data dict for convenience.
    """
    times = raw_data["time"]
    # First index whose time is not below the cut-off (len() if none).
    cut = next((i for i, t in enumerate(times) if t >= maxTime), len(times))

    if cut < len(times):
        for values in raw_data.values():
            del values[cut:]

    return raw_data
def get_all_csvs(results_folder):
    """Recursively collect CSV files under results_folder.

    Returns a list of (filename, full_path) tuples for every file whose
    name ends in ".csv".
    """
    results = []
    # os.walk accepts the str path directly; the previous
    # fsencode/fsdecode round-trip through bytes added nothing.
    for root, _dirs, files in os.walk(results_folder):
        for filename in files:
            if filename.endswith(".csv"):
                results.append((filename, os.path.join(root, filename)))
    return results
def plot_results(results_folder, analyticalSolutionDir):
    """Create one PDF of plots per CSV file found under results_folder.

    Each non-"time" column is drawn as a subplot against "time", and the
    PDF is written next to the source CSV. If analyticalSolutionDir is
    non-empty and contains a CSV with the same filename, its matching
    columns are overlaid (trimmed to the simulated time span) with an
    "_anl" suffix in the legend.
    """
    for (filename, filepath) in get_all_csvs(results_folder):
        # Read all data onto a dictionary
        print("Reading data from file",filename,"... ", end="", flush=True)
        raw_data = read_data(filepath)
        print("Done.")

        if analyticalSolutionDir != "":
            analytical_sol_file = os.path.join(analyticalSolutionDir, filename)

            if os.path.exists(analytical_sol_file):
                print("Reading analytical solution of file ",filename,"... ", end="", flush=True)
                analytical_data_raw = read_data(analytical_sol_file)
                print("Done.")

                print("Columns read: ", analytical_data_raw.keys())

                # Drop analytical samples past the last simulated time so
                # both curves cover the same x-range.
                print("Trimming data to time ",raw_data["time"][-1],"... ", end="", flush=True)
                solution_raw = selectTime(analytical_data_raw, raw_data["time"][-1])
                print("Done.")
            else:
                print("No analytical solution for file ",filename,": " + analytical_sol_file)
                solution_raw = None
        else:
            solution_raw = None

        # Create a group of plots per filename, all saved into one PDF.
        num_trajectories = len(raw_data.keys()) - 1  # Exclude time
        print("Creating ",num_trajectories," plots... ", end="", flush=True)
        plot_num = 1
        plot_file_name = filepath.replace(".csv",".pdf")
        pp = PdfPages(plot_file_name)
        for col in raw_data.keys():
            if col != "time":
                print("Plotting col " + col + "...")
                plt.subplot(num_trajectories, 1, plot_num)
                plt.xlabel("time")
                plt.plot(raw_data["time"], raw_data[col], '-', label=col)
                # PEP 8: compare to None with "is not", not "!=".
                if solution_raw is not None and col in solution_raw:
                    plt.plot(solution_raw["time"], solution_raw[col], '-', label=col+"_anl")
                plt.legend()
                plot_num += 1
        pp.savefig()
        pp.close()
        plt.clf()

        print("Done.")
def smallest_identifier_string(strs):
    """Return the longest common prefix shared by all strings in strs.

    An empty input list (or any pair of strings with no common start)
    yields the empty string.
    """
    shared = []
    # zip(*strs) walks the strings column by column and stops at the
    # shortest one; a column with more than one distinct char ends the prefix.
    for chars in zip(*strs):
        if len(set(chars)) != 1:
            break
        shared.append(chars[0])
    return "".join(shared)
-
def plot_merge_results(results_folder):
    """Merge same-named CSVs from different subfolders into one PDF each.

    All CSV files under results_folder that share a filename (e.g. every
    fmu1.csv) are plotted into a single PDF, stored next to the first
    file of the group. Curves are labelled with the part of each file's
    path that is unique within the group.
    """
    # Get all files (and their path) ending in csv.
    all_csv_files = get_all_csvs(results_folder)

    # Group them by filename. Eg., all fmu1.csv files should be together.
    csv_files_grouped = {}
    for (filename, filepath) in all_csv_files:
        # setdefault replaces the manual "if key not in dict" dance.
        csv_files_grouped.setdefault(filename, []).append(filepath)

    print("Files grouped.")

    # Create plot for each group
    for (filename, files) in csv_files_grouped.items():
        plot_filepath = files[0]
        plot_file_name = plot_filepath.replace(".csv",".pdf")
        pp = PdfPages(plot_file_name)
        # The common path prefix is stripped to label each curve.
        group_id = smallest_identifier_string(files)

        for filepath in files:
            file_identifier = filepath[len(group_id):]
            raw_data = read_data(filepath)
            print("Read data from file",filename,".")

            num_trajectories = len(raw_data.keys()) - 1  # Exclude time

            # Assumes that each csv column is in the same order on all csvs. This is a reasonable assumption...
            plot_num = 1
            for col in raw_data.keys():
                if col != "time":
                    print("Plotting col " + col + "...")
                    plt.subplot(num_trajectories, 1, plot_num)
                    plt.xlabel("time")
                    plt.plot(raw_data["time"], raw_data[col], '-', label=file_identifier+"."+col)
                    plt.legend()
                    plot_num += 1
        pp.savefig()
        pp.close()
        plt.clf()

        print("Done.")
if __name__ == '__main__':
    # Command-line entry point: plot per-file PDFs, or merged PDFs with --merge.
    parser = argparse.ArgumentParser(description='HintCO plotting utility')
    parser.add_argument('resultsdir', help='directory where csv files of the co-simulation are stored.')
    parser.add_argument('--solutionsdir', help='directory where csv files of the analytical solution are stored.')
    parser.add_argument('--merge', action="store_true", help='Instead of creating a plot per folder, merge all plots into a single folder. This requires a fixed folder structure')

    args = parser.parse_args()

    csv_dir = args.resultsdir
    solutions_dir = args.solutionsdir or ""
    print("results=", csv_dir)
    print("analyticalSolutionDir=", solutions_dir)
    print("merge=", args.merge)
    if args.merge:
        plot_merge_results(csv_dir)
    else:
        plot_results(csv_dir, solutions_dir)
|