
Solve warnings for unwrapped env
alexpalms committed Oct 1, 2023
1 parent 06270b5 commit 3aee236
Showing 2 changed files with 7 additions and 7 deletions.
2 changes: 1 addition & 1 deletion diambra/arena/wrappers/arena_wrappers.py
@@ -159,7 +159,7 @@ def env_wrapping(env, wrappers_settings: WrappersSettings):
  ### Observation space wrappers(s)
  if wrappers_settings.frame_shape[2] == 1:
      if env.observation_space["frame"].shape[2] == 1:
-         env.logger.warning("Warning: skipping grayscaling as the frame is already single channel.")
+         env.unwrapped.logger.warning("Warning: skipping grayscaling as the frame is already single channel.")
      else:
          # Greyscaling frame to h x w x 1
          env = GrayscaleFrame(env)
12 changes: 6 additions & 6 deletions tests/env_exec_interface.py
@@ -51,7 +51,7 @@ def env_exec(settings, options_list, wrappers_settings, episode_recording_settin
  actions = env.get_no_op_action()

  if settings.action_space == SpaceTypes.DISCRETE:
-     move_action, att_action = discrete_to_multi_discrete_action(actions, env.n_actions[0])
+     move_action, att_action = discrete_to_multi_discrete_action(actions, env.unwrapped.n_actions[0])
  else:
      move_action, att_action = actions[0], actions[1]

@@ -64,7 +64,7 @@ def env_exec(settings, options_list, wrappers_settings, episode_recording_settin

  for idx in range(settings.n_players):
      if settings.action_space[idx] == SpaceTypes.DISCRETE:
-         move_action, att_action = discrete_to_multi_discrete_action(actions["agent_{}".format(idx)], env.n_actions[0])
+         move_action, att_action = discrete_to_multi_discrete_action(actions["agent_{}".format(idx)], env.unwrapped.n_actions[0])
      else:
          move_action, att_action = actions["agent_{}".format(idx)][0], actions["agent_{}".format(idx)][1]

@@ -121,15 +121,15 @@ def env_exec(settings, options_list, wrappers_settings, episode_recording_settin
  if len(cumulative_ep_rew_all) != max_num_ep:
      raise RuntimeError("Not run all episodes")

- if env.env_settings.continue_game <= 0.0 and env.env_settings.n_players == 1:
-     max_continue = int(-env.env_settings.continue_game)
+ if env.unwrapped.env_settings.continue_game <= 0.0 and env.unwrapped.env_settings.n_players == 1:
+     max_continue = int(-env.unwrapped.env_settings.continue_game)
  else:
      max_continue = 0

- if env.env_settings.game_id == "tektagt":
+ if env.unwrapped.env_settings.game_id == "tektagt":
      max_continue = (max_continue + 1) * 0.7 - 1

- round_max_reward = env.max_delta_health / env.reward_normalization_value
+ round_max_reward = env.unwrapped.max_delta_health / env.unwrapped.reward_normalization_value
  if (no_action is True and (np.mean(cumulative_ep_rew_all) > -(max_continue + 1) * round_max_reward * n_rounds + 0.001)):
      message = "NoAction policy and average reward different than {} ({})".format(
          -(max_continue + 1) * round_max_reward * n_rounds, np.mean(cumulative_ep_rew_all))
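
The pattern is the same in both files: custom environment attributes (logger, n_actions, env_settings, max_delta_health, reward_normalization_value) are now read through env.unwrapped instead of through the wrapper stack. Below is a minimal sketch, not taken from this repository, of why that silences the warning; it assumes a Gymnasium-style setup, and ToyEnv with its n_actions attribute is purely illustrative.

# Minimal sketch (illustrative only) of the warning this commit removes.
# Assumes gymnasium >= 0.29, where Wrapper.__getattr__ emits a deprecation-style
# warning whenever a custom attribute is looked up through the wrapper chain.
import gymnasium as gym

class ToyEnv(gym.Env):
    # Tiny stand-in env exposing a custom attribute, similar to n_actions in diambra.arena
    observation_space = gym.spaces.Discrete(2)
    action_space = gym.spaces.Discrete(2)

    def __init__(self):
        self.n_actions = (9, 4)  # hypothetical value, for illustration only

    def reset(self, *, seed=None, options=None):
        super().reset(seed=seed)
        return 0, {}

    def step(self, action):
        return 0, 0.0, True, False, {}

env = gym.wrappers.TimeLimit(ToyEnv(), max_episode_steps=10)

# env.n_actions           -> resolved via the wrapper's __getattr__, triggers the warning
# env.unwrapped.n_actions -> reads the attribute directly from the base env, no warning
print(env.unwrapped.n_actions[0])

The test file now uses that direct access path for env_settings, max_delta_health and reward_normalization_value, and the wrapper module does the same for logger.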

