-
Notifications
You must be signed in to change notification settings - Fork 1
/
example.py
64 lines (52 loc) · 2.09 KB
/
example.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
from tensorforce import Agent, Environment
def main(num_train_episodes=100, num_eval_episodes=100):
    """Train a DQN agent on CartPole via the act-experience-update interface.

    Episodes are run with independent ``agent.act()`` calls (so the agent does
    no internal bookkeeping), the recorded transitions are fed back explicitly
    via ``agent.experience()``, and ``agent.update()`` performs each learning
    step. Afterwards the agent is evaluated deterministically and the mean
    evaluation return is printed.

    Args:
        num_train_episodes: Number of training episodes (default 100,
            matching the original hard-coded value).
        num_eval_episodes: Number of deterministic evaluation episodes
            (default 100, matching the original hard-coded value).
    """
    environment = Environment.create(environment='benchmarks/configs/cartpole.json')
    agent = Agent.create(agent='benchmarks/configs/dqn.json', environment=environment)

    # Training phase: one recorded episode + one update per iteration.
    for episode in range(num_train_episodes):
        sum_rewards = _run_training_episode(agent, environment)
        print('Episode {}: {}'.format(episode, sum_rewards))

    # Evaluation phase: deterministic policy, no learning.
    mean_return = _evaluate(agent, environment, num_eval_episodes)
    print('Mean evaluation return:', mean_return)

    # Release resources held by the agent and environment.
    agent.close()
    environment.close()


def _run_training_episode(agent, environment):
    """Run one episode with independent acts and agent.initial_internals(),
    feed the recorded experience to the agent, and perform one update.

    Returns:
        The episode's summed reward (float).
    """
    episode_states = []
    episode_internals = []
    episode_actions = []
    episode_terminal = []
    episode_reward = []

    states = environment.reset()
    internals = agent.initial_internals()
    terminal = False
    sum_rewards = 0.0
    while not terminal:
        episode_states.append(states)
        episode_internals.append(internals)
        # independent=True: the agent does not track this act internally,
        # so the transition must be fed back explicitly via experience().
        actions, internals = agent.act(states=states, internals=internals, independent=True)
        episode_actions.append(actions)
        states, terminal, reward = environment.execute(actions=actions)
        episode_terminal.append(terminal)
        episode_reward.append(reward)
        sum_rewards += reward

    # Feed the recorded experience to the agent, then perform an update step.
    agent.experience(
        states=episode_states, internals=episode_internals, actions=episode_actions,
        terminal=episode_terminal, reward=episode_reward
    )
    agent.update()
    return sum_rewards


def _evaluate(agent, environment, num_episodes):
    """Run deterministic evaluation episodes without learning.

    Returns:
        The mean per-episode return over ``num_episodes`` episodes.
    """
    sum_rewards = 0.0
    for _ in range(num_episodes):
        states = environment.reset()
        internals = agent.initial_internals()
        terminal = False
        while not terminal:
            # deterministic=True picks the greedy action for evaluation.
            actions, internals = agent.act(
                states=states, internals=internals, independent=True, deterministic=True
            )
            states, terminal, reward = environment.execute(actions=actions)
            sum_rewards += reward
    return sum_rewards / num_episodes
# Run the example only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()