Commit 074d0ca

Merge branch 'develop' into release/0.7.1
2 parents: 01c446b + 6707571

File tree: 6 files changed, +20 −21 lines

all/bodies/vision.py

Lines changed: 2 additions & 3 deletions

@@ -69,10 +69,9 @@ def update(self, key, value):
         x = {}
         for k in self.keys():
             if not k == key:
-                x[k] = super().__getitem__(k)
+                x[k] = dict.__getitem__(self, k)
         x[key] = value
-        state = LazyState(x, device=self.device)
-        state.to_cache = self.to_cache
+        state = LazyState.from_state(x, x['observation'], self.to_cache)
         return state

     def to(self, device):
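For context, a minimal, self-contained sketch of the pattern this change adopts: copy entries with dict.__getitem__ so that any lazy decoding done by __getitem__ is skipped, then rebuild the new state through a from_state() constructor that carries the to_cache set forward. The class below is illustrative only, not the library's LazyState; everything outside the update() body is an assumption.

    class LazyDict(dict):
        """Illustrative stand-in for a lazily-decoded state dictionary."""

        def __init__(self, data, to_cache=()):
            super().__init__(data)
            self.to_cache = set(to_cache)

        @classmethod
        def from_state(cls, data, observation, to_cache):
            # mirrors the constructor used in the diff: raw data, the
            # 'observation' entry, and the set of keys to cache
            state = cls(data, to_cache)
            state['observation'] = observation
            return state

        def __getitem__(self, key):
            # a real implementation might lazily decode cached entries here
            return dict.__getitem__(self, key)

        def update(self, key, value):
            x = {}
            for k in self.keys():
                if not k == key:
                    # raw access: avoid triggering lazy decoding in __getitem__
                    x[k] = dict.__getitem__(self, k)
            x[key] = value
            # rebuild through from_state so to_cache travels with the new state
            return type(self).from_state(x, x['observation'], self.to_cache)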

all/environments/multiagent_atari_test.py

Lines changed: 12 additions & 12 deletions

@@ -5,12 +5,12 @@

 class MultiagentAtariEnvTest(unittest.TestCase):
     def test_init(self):
-        MultiagentAtariEnv('pong_v1', device='cpu')
+        MultiagentAtariEnv('pong_v2', device='cpu')
         MultiagentAtariEnv('mario_bros_v2', device='cpu')
         MultiagentAtariEnv('entombed_cooperative_v2', device='cpu')

     def test_reset(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
         state = env.reset()
         self.assertEqual(state.observation.shape, (1, 84, 84))
         self.assertEqual(state.reward, 0)
@@ -19,7 +19,7 @@ def test_reset(self):
         self.assertEqual(state['agent'], 'first_0')

     def test_step(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
         env.reset()
         state = env.step(0)
         self.assertEqual(state.observation.shape, (1, 84, 84))
@@ -29,7 +29,7 @@ def test_step(self):
         self.assertEqual(state['agent'], 'second_0')

     def test_step_tensor(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
         env.reset()
         state = env.step(torch.tensor([0]))
         self.assertEqual(state.observation.shape, (1, 84, 84))
@@ -39,37 +39,37 @@ def test_step_tensor(self):
         self.assertEqual(state['agent'], 'second_0')

     def test_name(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
-        self.assertEqual(env.name, 'pong_v1')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
+        self.assertEqual(env.name, 'pong_v2')

     def test_agent_iter(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
         env.reset()
         it = iter(env.agent_iter())
         self.assertEqual(next(it), 'first_0')

     def test_state_spaces(self):
-        state_spaces = MultiagentAtariEnv('pong_v1', device='cpu').state_spaces
+        state_spaces = MultiagentAtariEnv('pong_v2', device='cpu').state_spaces
         self.assertEqual(state_spaces['first_0'].shape, (1, 84, 84))
         self.assertEqual(state_spaces['second_0'].shape, (1, 84, 84))

     def test_action_spaces(self):
-        action_spaces = MultiagentAtariEnv('pong_v1', device='cpu').action_spaces
+        action_spaces = MultiagentAtariEnv('pong_v2', device='cpu').action_spaces
         self.assertEqual(action_spaces['first_0'].n, 18)
         self.assertEqual(action_spaces['second_0'].n, 18)

     def test_list_agents(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
         self.assertEqual(env.agents, ['first_0', 'second_0'])

     def test_is_done(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
         env.reset()
         self.assertFalse(env.is_done('first_0'))
         self.assertFalse(env.is_done('second_0'))

     def test_last(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
         env.reset()
         state = env.last()
         self.assertEqual(state.observation.shape, (1, 84, 84))
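The pong_v1 → pong_v2 rename matches the PettingZoo>=1.9.0 requirement bumped in setup.py below. A small usage sketch, restricted to the calls these tests exercise; the import path is assumed from the test file's location and is not part of this commit:

    from all.environments import MultiagentAtariEnv  # assumed import path

    env = MultiagentAtariEnv('pong_v2', device='cpu')
    env.reset()
    it = iter(env.agent_iter())
    agent = next(it)        # 'first_0'
    state = env.last()      # State with a (1, 84, 84) observation for 'first_0'
    env.step(0)             # act for 'first_0'; 'second_0' moves next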

all/memory/replay_buffer.py

Lines changed: 1 addition & 1 deletion

@@ -99,7 +99,7 @@ def store(self, state, action, next_state):
         if state is None or state.done:
             return
         idx = self.pos
-        super()._add((state, action, next_state))
+        super().store(state, action, next_state)
         self._it_sum[idx] = self._max_priority ** self._alpha
         self._it_min[idx] = self._max_priority ** self._alpha
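The fix swaps a call to the parent's private _add() helper for its public store() method, so any insert-time bookkeeping the parent performs is no longer bypassed. Below is a simplified sketch of that delegation pattern, assuming a plain priority list in place of the library's segment trees; only store(), self.pos, self._alpha, and self._max_priority follow the diff, the rest is illustrative.

    class ReplayBuffer:
        def __init__(self, size):
            self.buffer = []
            self.pos = 0
            self.size = size

        def store(self, state, action, next_state):
            # circular insert; self.pos always points at the next slot to fill
            entry = (state, action, next_state)
            if len(self.buffer) < self.size:
                self.buffer.append(entry)
            else:
                self.buffer[self.pos] = entry
            self.pos = (self.pos + 1) % self.size

    class PrioritizedReplayBuffer(ReplayBuffer):
        def __init__(self, size, alpha=0.6):
            super().__init__(size)
            self._alpha = alpha
            self._max_priority = 1.0
            self._priorities = [0.0] * size  # stand-in for the segment trees

        def store(self, state, action, next_state):
            if state is None or getattr(state, 'done', False):
                return
            idx = self.pos                             # slot the parent fills next
            super().store(state, action, next_state)   # public API, as in the diff
            self._priorities[idx] = self._max_priority ** self._alpha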

all/presets/multiagent_atari_test.py

Lines changed: 2 additions & 2 deletions

@@ -9,15 +9,15 @@

 class TestMultiagentAtariPresets(unittest.TestCase):
     def setUp(self):
-        self.env = MultiagentAtariEnv('pong_v1', device='cpu')
+        self.env = MultiagentAtariEnv('pong_v2', device='cpu')
         self.env.reset()

     def tearDown(self):
         if os.path.exists('test_preset.pt'):
             os.remove('test_preset.pt')

     def test_independent(self):
-        env = MultiagentAtariEnv('pong_v1', device='cpu')
+        env = MultiagentAtariEnv('pong_v2', device='cpu')
         presets = {
             agent_id: dqn.device('cpu').env(env.subenvs[agent_id]).build()
             for agent_id in env.agents

integration/multiagent_atari_test.py

Lines changed: 2 additions & 2 deletions

@@ -20,15 +20,15 @@

 class TestMultiagentAtariPresets(unittest.TestCase):
     def test_independent(self):
-        env = MultiagentAtariEnv('pong_v1', max_cycles=1000, device=CPU)
+        env = MultiagentAtariEnv('pong_v2', max_cycles=1000, device=CPU)
         presets = {
             agent_id: dqn.device(CPU).env(env.subenvs[agent_id]).build()
             for agent_id in env.agents
         }
         validate_multiagent(IndependentMultiagentPreset('independent', CPU, presets), env)

     def test_independent_cuda(self):
-        env = MultiagentAtariEnv('pong_v1', max_cycles=1000, device=CUDA)
+        env = MultiagentAtariEnv('pong_v2', max_cycles=1000, device=CUDA)
         presets = {
             agent_id: dqn.device(CUDA).env(env.subenvs[agent_id]).build()
             for agent_id in env.agents

setup.py

Lines changed: 1 addition & 1 deletion

@@ -13,7 +13,7 @@
         "pybullet>=3.0.6",  # open-source robotics environments
     ],
     "ma-atari": [
-        "PettingZoo[atari]>=1.5.0",  # Multiagent atari environments
+        "PettingZoo[atari]>=1.9.0",  # Multiagent atari environments
         "supersuit>=2.4.0",  # Multiagent env wrappers
         "AutoROM>=0.1.19",  # Tool for downloading ROMs
     ],
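For reference, a hedged sketch of how an optional extra like this is declared with setuptools; the package name and surrounding fields are assumptions, while the pinned versions are the ones from the diff.

    from setuptools import setup, find_packages

    setup(
        name="autonomous-learning-library",  # assumed package name
        packages=find_packages(),
        extras_require={
            "ma-atari": [
                "PettingZoo[atari]>=1.9.0",  # multiagent Atari environments
                "supersuit>=2.4.0",          # multiagent env wrappers
                "AutoROM>=0.1.19",           # tool for downloading ROMs
            ],
        },
    )

The extra is then pulled in with pip's extras syntax, e.g. pip install .[ma-atari] from the repository root.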
