forked from nathaniel-hudson/SUMO-FedRL
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtrain.py
35 lines (30 loc) · 1.4 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
"""
For this document, we will set up a basic RL pipeline using our SinglePolicySumoEnv environment.
Refer to this recent and similar SumoRL tool that has an example for MARL using RLlib:
https://github.com/LucasAlegre/sumo-rl/blob/master/experiments/a3c_4x4grid.py
Ray RLlib agent training example:
https://github.com/ray-project/ray/blob/master/rllib/examples/custom_train_fn.py
"""
from seal.trainer.fed_agent import FedPolicyTrainer
from seal.trainer.multi_agent import MultiPolicyTrainer
from seal.trainer.single_agent import SinglePolicyTrainer
from os.path import join
if __name__ == "__main__":
    # Training configuration: episode count is kept at 1 for a quick run
    # (bump to 100 for a full experiment); fed_step is the federated
    # aggregation interval passed only to the federated trainer.
    n_episodes = 1  # 100
    fed_step = 5

    # SUMO network files to train against, from most to least complex.
    net_files = [
        join("configs", "complex_inter", "complex_inter.net.xml"),
        join("configs", "single_inter", "single_inter.net.xml"),
        join("configs", "two_inter", "two_inter.net.xml"),
    ]

    # (label, trainer class, extra constructor kwargs) — run in this order
    # for every (net_file, ranked) combination.
    trainer_specs = [
        ("FedPolicyTrainer", FedPolicyTrainer, {"fed_step": fed_step}),
        ("MultiPolicyTrainer", MultiPolicyTrainer, {}),
        ("SinglePolicyTrainer", SinglePolicyTrainer, {}),
    ]

    for net_file in net_files:
        for ranked in (True, False):
            for label, trainer_cls, extra_kwargs in trainer_specs:
                print(f">> Training with `{label}`!")
                trainer = trainer_cls(
                    net_file=net_file, ranked=ranked, **extra_kwargs)
                trainer.train(n_episodes)