droneProject

Train_PPO.py

Variables
- ppo_path = os.path.join('Training', 'Saved Models', f'Drone_PPO_Model_{datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")}')
- log_path = os.path.join('Training', 'Logs')
- int episodes = 100
- env = DroneRobot()
- debugMode
- int timesteps = env.steps_per_episode * episodes
- model = PPO('MlpPolicy', env, verbose=1, tensorboard_log=log_path, device='cpu', ent_coef=10, learning_rate=0.0001)
- total_timesteps
| Train_PPO.debugMode |
Definition at line 17 of file Train_PPO.py.
| Train_PPO.env = DroneRobot() |
Definition at line 16 of file Train_PPO.py.
| int Train_PPO.episodes = 100 |
Definition at line 13 of file Train_PPO.py.
| Train_PPO.log_path = os.path.join('Training', 'Logs') |
Definition at line 10 of file Train_PPO.py.
| Train_PPO.model = PPO('MlpPolicy', env, verbose=1, tensorboard_log=log_path, device='cpu', ent_coef=10, learning_rate=0.0001) |
Definition at line 27 of file Train_PPO.py.
| Train_PPO.ppo_path = os.path.join('Training', 'Saved Models', f'Drone_PPO_Model_{datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")}') |
Definition at line 8 of file Train_PPO.py.
| int Train_PPO.timesteps = env.steps_per_episode * episodes |
Definition at line 21 of file Train_PPO.py.
| Train_PPO.total_timesteps |
Definition at line 36 of file Train_PPO.py.