
Commit

Update to v1.1.0
mgeisslinger committed Jan 17, 2023
1 parent de9d27f commit 402ef0f
Showing 31 changed files with 1,984 additions and 733 deletions.
README.md: 8 changes (5 additions & 3 deletions)
@@ -69,7 +69,7 @@ By default logs are saved and can be analyzed afterwards by running:
## How to reproduce results
-The idea and basic principles of this algorithm are presented in Geisslinger et al. 2022<sup>1</sup>. The following describes how the results from the paper can be reproduced. To evaluate the planning algorithm on multiple scenarios execute:
+The idea and basic principles of this algorithm are presented in Geisslinger et al. 2023<sup>1</sup>. The following describes how the results from the paper can be reproduced. To evaluate the planning algorithm on multiple scenarios execute:
* `python planner/Frenet/plannertools/evaluatefrenet.py`
@@ -81,16 +81,18 @@ To evaluate with the according config settings of [ethical](/planner/Frenet/conf
To evaluate on all 2000 scenarios, make sure to have at least 200 GB space left on your device for saving the log files. For better runtime, we recommend using [multiprocessing](/planner/Frenet/plannertools/evaluatefrenet.py#L46) and a [GPU](planner/Frenet/configs/prediction.json#L4) for the prediction network. Evaluating all scenarios in 10 parallel threads with a GPU takes around 48 hours. Results and logfiles for each run are stored in `planner/Frenet/results`.
-Standard evaluation metrics such as cummulated harm on all scenarios are provided within the results (e.g. `results/eval/harm.json`). `planner/Frenet/analyze_tools/analyze_risk_dist.py` helps to extract risk values out of multiple logfiles. Boxplots with risk distribtuions as in Geisslinger et al. 2022<sup>1</sup> can be generated using `planner/Frenet/plot_tools/boxplots_risks.py`.
+Standard evaluation metrics such as cumulated harm on all scenarios are provided within the results (e.g. `results/eval/harm.json`). `planner/Frenet/analyze_tools/analyze_risk_dist.py` helps to extract risk values out of multiple logfiles. Boxplots with risk distributions as in Geisslinger et al. 2023<sup>1</sup> can be generated using `planner/Frenet/plot_tools/boxplots_risks.py`.
## References
-1. Geisslinger, M., Poszler, F., Lienkamp, M. An Ethical Trajectory Planning Algorithm for Autonomous Vehicles *(under review)*
+1. Geisslinger, M., Poszler, F., Lienkamp, M. An Ethical Trajectory Planning Algorithm for Autonomous Vehicles. 2023
## Contributions
* Maximilian Geisslinger (Main Contributor, [[email protected]](mailto:[email protected]?subject=[GitHub]%20Ethical%20Trajectory%20Planning))
* Rainer Trauth (Computing Performance)
* Florian Pfab (Master Thesis: *Motion Planning with Risk Assessment for Automated Vehicles*)
* Simon Sagmeister (Master Thesis: *Neural Networks: Real-time Capable Trajectory Planning through Supervised Learning*)
* Tobias Geissenberger (Bachelor Thesis: *Harm Prediction for Risk-Aware Motion Planning of Automated Vehicles*)
* Clemens Krispler (Bachelor Thesis: *Motion Planning for Autonomous Vehicles: Developing a Principle of Responsibility for Ethical Decision-Making*)
* Zhi Zheng (Semester Thesis: *Parallelization of a Planning Algorithm in the Field of Autonomous Driving* supervised by Rainer Trauth)
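The harm metrics mentioned in the README changes above can be inspected directly. Below is a minimal sketch, assuming `results/eval/harm.json` is a flat JSON object mapping scenario names to numeric harm values; the actual file layout is not shown in this diff, so the key handling may need adjusting:

```python
# Minimal sketch (assumed layout): aggregate the cumulated harm metric
# that the evaluation writes to results/eval/harm.json.
import json

with open("planner/Frenet/results/eval/harm.json") as fp:
    harm = json.load(fp)  # assumed: {scenario_name: harm_value, ...}

total = sum(float(v) for v in harm.values())
print(f"Cumulated harm over {len(harm)} scenarios: {total:.4f}")
```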
planner/Frenet/analyze_tools/analyze_correlations.py: 181 changes (133 additions & 48 deletions)
@@ -11,6 +11,8 @@
import multiprocessing
import progressbar
import json
+import argparse
+import traceback


def all_equal(iterable):
@@ -26,15 +28,18 @@ def all_equal(iterable):
    return next(g, True) and not next(g, False)


-logdir = "./planner/Frenet/results/logs"

corr_mat_list = []
+long_mat_list = []
+lat_mat_list = []
+dist_mat_list = []
corr_dict = {}
+long_dict = {}
+lat_dict = {}
+dist_dict = {}
scenario_list = []
key_list = []

cpu_count = 60  # multiprocessing.cpu_count()
-log_file_list = os.listdir(logdir)


def eval_func(logfile):
@@ -48,12 +53,14 @@
"""
try:
frenet_log = FrenetLogVisualizer(
os.path.join(logdir, logfile), visualize=False, verbose=False
logfile, visualize=False, verbose=False
)
corr_mat, keys = frenet_log.correlation_matrix(plot=False)
long_mat, lat_mat, dist_mat = frenet_log.distance_matrix(plot=False)

return corr_mat, keys, logfile
return corr_mat, long_mat, lat_mat, dist_mat, keys, logfile
except Exception:
traceback.print_exc()
return None


@@ -64,46 +71,124 @@ def process_return_dict(return_list):
        return_list ([type]): [description]
    """
    if return_list is not None:
-        corr_mat_list.append(return_list[0])
-        key_list.append(return_list[1])
-        scenario_list.append(return_list[2])


-with progressbar.ProgressBar(max_value=len(log_file_list)).start() as pbar:
-    with multiprocessing.Pool(processes=cpu_count) as pool:
-        for return_list in pool.imap_unordered(eval_func, log_file_list):
-            process_return_dict(return_list)
-            pbar.update(pbar.value + 1)
-
-
-if not all_equal(key_list):
-    print(
-        "Error: Keys are ambiguous, but must be the same. Make sure to run with the same settings."
-    )
-
-for i in range(len(key_list[0])):
-    for j in range(i + 1, len(key_list[0])):
-        corr_dict[str(key_list[0][i]) + "<->" + str(key_list[0][j])] = [
-            corr_mat[i, j] for corr_mat in corr_mat_list
-        ]
-
-for key in corr_dict:
-    corr_dict[key] = [x for x in corr_dict[key] if x == x]
-
-with open("./planner/Frenet/results/corr_dict.json", "w") as fp:
-    json.dump(corr_dict, fp)
-
-with open("./planner/Frenet/results/scen_list.txt", "w") as fp2:
-    json.dump(scenario_list, fp2)
-
-clean_corr_dict = {k: corr_dict[k] for k in corr_dict if not isnan(sum(corr_dict[k]))}
-
-fig, ax = plt.subplots()
-ax.boxplot(clean_corr_dict.values())
-ax.set_xticklabels(clean_corr_dict.keys(), rotation=90)
-
-plt.tight_layout()
-plt.savefig("./planner/Frenet/results/correlations.pdf")
-
-
-print("Done.")
+        # Filter nans
+        if (return_list[0] == return_list[0]).all():
+            corr_mat_list.append(return_list[0])
+            long_mat_list.append(return_list[1])
+            lat_mat_list.append(return_list[2])
+            dist_mat_list.append(return_list[3])
+            key_list.append(return_list[4])
+            scenario_list.append(return_list[5].split("/")[-1])


+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--logdir", type=str, default="./planner/Frenet/results/logs")
+    args = parser.parse_args()
+
+    log_file_list = os.listdir(args.logdir)
+    log_file_list = [os.path.join(args.logdir, l) for l in log_file_list]
+    result_dir = os.path.dirname(args.logdir)
+
+    with progressbar.ProgressBar(max_value=len(log_file_list)).start() as pbar:
+        with multiprocessing.Pool(processes=cpu_count) as pool:
+            for return_list in pool.imap_unordered(eval_func, log_file_list):
+                process_return_dict(return_list)
+                pbar.update(pbar.value + 1)
+
+    if not all_equal(key_list):
+        print(
+            "Error: Keys are ambiguous, but must be the same. Make sure to run with the same settings."
+        )
+        longest_len = max([len(i) for i in key_list])
+        new_key_list = []
+        new_scenario_list = []
+        new_corr_mat_list = []
+        for key, scen, corr in zip(key_list, scenario_list, corr_mat_list):
+            if len(key) == longest_len:
+                new_key_list.append(key)
+                new_scenario_list.append(scen)
+                new_corr_mat_list.append(corr)
+            else:
+                print(f"I had to remove {scen} with keys {key}")
+
+        print(key_list)
+
+    for i in range(len(key_list[0])):
+        for j in range(i + 1, len(key_list[0])):
+            corr_dict[str(key_list[0][i]) + "<->" + str(key_list[0][j])] = [
+                corr_mat[i, j] for corr_mat in corr_mat_list
+            ]
+            long_dict[str(key_list[0][i]) + "<->" + str(key_list[0][j])] = [
+                long_mat[i, j] for long_mat in long_mat_list
+            ]
+            lat_dict[str(key_list[0][i]) + "<->" + str(key_list[0][j])] = [
+                lat_mat[i, j] for lat_mat in lat_mat_list
+            ]
+            dist_dict[str(key_list[0][i]) + "<->" + str(key_list[0][j])] = [
+                dist_mat[i, j] for dist_mat in dist_mat_list
+            ]
+
+    corr_dict_scenes = {}
+    long_dict_scenes = {}
+    lat_dict_scenes = {}
+    dist_dict_scenes = {}
+
+    for key in corr_dict:
+        corr_dict_scenes[key] = {}
+        long_dict_scenes[key] = {}
+        lat_dict_scenes[key] = {}
+        dist_dict_scenes[key] = {}
+
+        for idx, val in enumerate(corr_dict[key]):
+            corr_dict_scenes[key][scenario_list[idx]] = corr_dict[key][idx]
+            long_dict_scenes[key][scenario_list[idx]] = long_dict[key][idx]
+            lat_dict_scenes[key][scenario_list[idx]] = lat_dict[key][idx]
+            dist_dict_scenes[key][scenario_list[idx]] = dist_dict[key][idx]
+
+        # corr_dict_scenes[key].sort()
+        corr_dict_scenes[key] = dict(sorted(corr_dict_scenes[key].items(), key=lambda item: item[1]))
+        long_dict_scenes[key] = dict(sorted(long_dict_scenes[key].items(), key=lambda item: item[1], reverse=True))
+        lat_dict_scenes[key] = dict(sorted(lat_dict_scenes[key].items(), key=lambda item: item[1], reverse=True))
+        dist_dict_scenes[key] = dict(sorted(dist_dict_scenes[key].items(), key=lambda item: item[1], reverse=True))
+
+    if len(corr_mat_list) != len(scenario_list):
+        print(f"Warning: Scenario list ({len(scenario_list)}) does not have the same length as corr_dict ({len(corr_mat_list)})")
+
+    with open(os.path.join(result_dir, "corr_dict.json"), "w") as fp:
+        json.dump(corr_dict, fp)
+
+    with open(os.path.join(result_dir, "long_dict.json"), "w") as fp:
+        json.dump(long_dict, fp)
+
+    with open(os.path.join(result_dir, "lat_dict.json"), "w") as fp:
+        json.dump(lat_dict, fp)
+
+    with open(os.path.join(result_dir, "dist_dict.json"), "w") as fp:
+        json.dump(dist_dict, fp)
+
+    with open(os.path.join(result_dir, "scen_list.txt"), "w") as fp:
+        json.dump(scenario_list, fp)
+
+    with open(os.path.join(result_dir, "corr_dict_scenes.json"), "w") as fp:
+        json.dump(corr_dict_scenes, fp)
+
+    with open(os.path.join(result_dir, "long_dict_scenes.json"), "w") as fp:
+        json.dump(long_dict_scenes, fp)
+
+    with open(os.path.join(result_dir, "lat_dict_scenes.json"), "w") as fp:
+        json.dump(lat_dict_scenes, fp)
+
+    with open(os.path.join(result_dir, "dist_dict_scenes.json"), "w") as fp:
+        json.dump(dist_dict_scenes, fp)
+
+    clean_corr_dict = {k: corr_dict[k] for k in corr_dict if not isnan(sum(corr_dict[k]))}
+
+    fig, ax = plt.subplots()
+    ax.boxplot(clean_corr_dict.values())
+    ax.set_xticklabels(clean_corr_dict.keys(), rotation=90)
+
+    plt.tight_layout()
+    plt.savefig(os.path.join(result_dir, "correlations.pdf"))
+
+    print("Done.")
