27 | 27 | torch.backends.cudnn.deterministic = True |
28 | 28 | torch.use_deterministic_algorithms(True) |
29 | 29 |
| 30 | + |
30 | 31 | class FastMRIModeEquivalenceTest(parameterized.TestCase): |
31 | 32 |
32 | | - def fwd_pass(self, orig, cust, dropout_rate): |
33 | | - x = torch.randn(BATCH, IN_CHANS, H, W, device=DEVICE) |
34 | | - for mode in ('train', 'eval'): |
35 | | - getattr(orig, mode)(); getattr(cust, mode)() |
36 | | - torch.manual_seed(0); y1 = orig(x) |
37 | | - torch.manual_seed(0); y2 = cust(x, dropout_rate) |
38 | | - assert_close(y1, y2, atol=0, rtol=0) |
39 | | - if mode == 'eval': # one extra test: omit dropout at eval |
40 | | - torch.manual_seed(0); y2 = cust(x) |
41 | | - assert_close(y1, y2, atol=0, rtol=0) |
42 | | - |
43 | | - @parameterized.named_parameters( |
44 | | - dict(testcase_name='p=0.0', dropout_rate=0.0), |
45 | | - dict(testcase_name='p=0.1', dropout_rate=0.1), |
46 | | - dict(testcase_name='p=0.7', dropout_rate=0.7), |
47 | | - dict(testcase_name='p=1.0', dropout_rate=1.0), |
48 | | - ) |
49 | | - def test_dropout_values(self, dropout_rate): |
50 | | - """Test different values of dropout_rate.""" |
51 | | - |
52 | | - torch.manual_seed(SEED) |
53 | | - orig = OriginalUNet(IN_CHANS, OUT_CHANS, C, LAYERS, dropout_rate=dropout_rate).to(DEVICE) |
54 | | - |
55 | | - torch.manual_seed(SEED) |
56 | | - cust = CustomUNet(IN_CHANS, OUT_CHANS, C, LAYERS).to(DEVICE) |
57 | | - |
58 | | - cust.load_state_dict(orig.state_dict()) # sync weights |
59 | | - if TORCH_COMPILE: |
60 | | - orig = torch.compile(orig); cust = torch.compile(cust) |
61 | | - |
62 | | - self.fwd_pass(orig, cust, dropout_rate) |
63 | | - |
64 | | - |
65 | | - @parameterized.named_parameters( |
66 | | - dict(testcase_name='default', use_tanh=False, use_layer_norm=False), |
67 | | - dict(testcase_name='tanh', use_tanh=True, use_layer_norm=False), |
68 | | - dict(testcase_name='layer_norm', use_tanh=False, use_layer_norm=True), |
69 | | - dict(testcase_name='both', use_tanh=True, use_layer_norm=True), |
70 | | - ) |
71 | | - def test_arch_configs(self, use_tanh, use_layer_norm): |
72 | | - """Test different architecture configurations, fixed dropout_rate.""" |
73 | | - dropout_rate = 0.1 |
74 | | - |
75 | | - torch.manual_seed(SEED) |
76 | | - orig = OriginalUNet( |
77 | | - IN_CHANS, OUT_CHANS, C, LAYERS, dropout_rate=dropout_rate, |
78 | | - use_tanh=use_tanh, use_layer_norm=use_layer_norm |
79 | | - ).to(DEVICE) |
80 | | - |
81 | | - torch.manual_seed(SEED) |
82 | | - cust = CustomUNet( |
83 | | - IN_CHANS, OUT_CHANS, C, LAYERS, |
84 | | - use_tanh=use_tanh, use_layer_norm=use_layer_norm |
85 | | - ).to(DEVICE) |
86 | | - |
87 | | - cust.load_state_dict(orig.state_dict()) # sync weights |
88 | | - if TORCH_COMPILE: |
89 | | - orig = torch.compile(orig); cust = torch.compile(cust) |
90 | | - |
91 | | - self.fwd_pass(orig, cust, dropout_rate) |
92 | | - |
93 | | - @parameterized.named_parameters( |
94 | | - dict(testcase_name=''), |
95 | | - ) |
96 | | - def test_default_dropout(self): |
97 | | - """Test default dropout_rate.""" |
98 | | - |
99 | | - torch.manual_seed(SEED) |
100 | | - orig = OriginalUNet(IN_CHANS, OUT_CHANS, C, LAYERS).to(DEVICE) |
101 | | - torch.manual_seed(SEED) |
102 | | - cust = CustomUNet(IN_CHANS, OUT_CHANS, C, LAYERS).to(DEVICE) |
103 | | - cust.load_state_dict(orig.state_dict()) # sync weights |
104 | | - |
105 | | - x = torch.randn(BATCH, IN_CHANS, H, W, device=DEVICE) |
106 | | - for mode in ('train', 'eval'): |
107 | | - getattr(orig, mode)(); getattr(cust, mode)() |
108 | | - torch.manual_seed(0); y1 = orig(x) |
109 | | - torch.manual_seed(0); y2 = cust(x) |
110 | | - assert_close(y1, y2, atol=0, rtol=0) |
| 33 | + def fwd_pass(self, orig, cust, dropout_rate): |
| 34 | + x = torch.randn(BATCH, IN_CHANS, H, W, device=DEVICE) |
| 35 | + for mode in ('train', 'eval'): |
| 36 | + getattr(orig, mode)() |
| 37 | + getattr(cust, mode)() |
| 38 | + torch.manual_seed(0) |
| 39 | + y1 = orig(x) |
| 40 | + torch.manual_seed(0) |
| 41 | + y2 = cust(x, dropout_rate) |
| 42 | + assert_close(y1, y2, atol=0, rtol=0) |
| 43 | + if mode == 'eval': # one extra test: omit dropout at eval |
| 44 | + torch.manual_seed(0) |
| 45 | + y2 = cust(x) |
| 46 | + assert_close(y1, y2, atol=0, rtol=0) |
| 47 | + |
| 48 | + @parameterized.named_parameters( |
| 49 | + dict(testcase_name='p=0.0', dropout_rate=0.0), |
| 50 | + dict(testcase_name='p=0.1', dropout_rate=0.1), |
| 51 | + dict(testcase_name='p=0.7', dropout_rate=0.7), |
| 52 | + dict(testcase_name='p=1.0', dropout_rate=1.0), |
| 53 | + ) |
| 54 | + def test_dropout_values(self, dropout_rate): |
| 55 | + """Test different values of dropout_rate.""" |
| 56 | + |
| 57 | + torch.manual_seed(SEED) |
| 58 | + orig = OriginalUNet( |
| 59 | + IN_CHANS, OUT_CHANS, C, LAYERS, dropout_rate=dropout_rate).to(DEVICE) |
| 60 | + |
| 61 | + torch.manual_seed(SEED) |
| 62 | + cust = CustomUNet(IN_CHANS, OUT_CHANS, C, LAYERS).to(DEVICE) |
| 63 | + |
| 64 | + cust.load_state_dict(orig.state_dict()) # sync weights |
| 65 | + if TORCH_COMPILE: |
| 66 | + orig = torch.compile(orig) |
| 67 | + cust = torch.compile(cust) |
| 68 | + |
| 69 | + self.fwd_pass(orig, cust, dropout_rate) |
| 70 | + |
| 71 | + @parameterized.named_parameters( |
| 72 | + dict(testcase_name='default', use_tanh=False, use_layer_norm=False), |
| 73 | + dict(testcase_name='tanh', use_tanh=True, use_layer_norm=False), |
| 74 | + dict(testcase_name='layer_norm', use_tanh=False, use_layer_norm=True), |
| 75 | + dict(testcase_name='both', use_tanh=True, use_layer_norm=True), |
| 76 | + ) |
| 77 | + def test_arch_configs(self, use_tanh, use_layer_norm): |
| 78 | + """Test different architecture configurations, fixed dropout_rate.""" |
| 79 | + dropout_rate = 0.1 |
| 80 | + |
| 81 | + torch.manual_seed(SEED) |
| 82 | + orig = OriginalUNet( |
| 83 | + IN_CHANS, |
| 84 | + OUT_CHANS, |
| 85 | + C, |
| 86 | + LAYERS, |
| 87 | + dropout_rate=dropout_rate, |
| 88 | + use_tanh=use_tanh, |
| 89 | + use_layer_norm=use_layer_norm).to(DEVICE) |
| 90 | + |
| 91 | + torch.manual_seed(SEED) |
| 92 | + cust = CustomUNet( |
| 93 | + IN_CHANS, |
| 94 | + OUT_CHANS, |
| 95 | + C, |
| 96 | + LAYERS, |
| 97 | + use_tanh=use_tanh, |
| 98 | + use_layer_norm=use_layer_norm).to(DEVICE) |
| 99 | + |
| 100 | + cust.load_state_dict(orig.state_dict()) # sync weights |
| 101 | + if TORCH_COMPILE: |
| 102 | + orig = torch.compile(orig) |
| 103 | + cust = torch.compile(cust) |
| 104 | + |
| 105 | + self.fwd_pass(orig, cust, dropout_rate) |
| 106 | + |
| 107 | + @parameterized.named_parameters( |
| 108 | + dict(testcase_name=''),) |
| 109 | + def test_default_dropout(self): |
| 110 | + """Test default dropout_rate.""" |
| 111 | + |
| 112 | + torch.manual_seed(SEED) |
| 113 | + orig = OriginalUNet(IN_CHANS, OUT_CHANS, C, LAYERS).to(DEVICE) |
| 114 | + torch.manual_seed(SEED) |
| 115 | + cust = CustomUNet(IN_CHANS, OUT_CHANS, C, LAYERS).to(DEVICE) |
| 116 | + cust.load_state_dict(orig.state_dict()) # sync weights |
| 117 | + |
| 118 | + x = torch.randn(BATCH, IN_CHANS, H, W, device=DEVICE) |
| 119 | + for mode in ('train', 'eval'): |
| 120 | + getattr(orig, mode)() |
| 121 | + getattr(cust, mode)() |
| 122 | + torch.manual_seed(0) |
| 123 | + y1 = orig(x) |
| 124 | + torch.manual_seed(0) |
| 125 | + y2 = cust(x) |
| 126 | + assert_close(y1, y2, atol=0, rtol=0) |
| 127 | + |
111 | 128 |
112 | 129 | if __name__ == '__main__': |
113 | | - absltest.main() |
| 130 | + absltest.main() |
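
For reference, the equivalence check in `fwd_pass` works because re-seeding the global RNG immediately before each forward pass forces both models to draw identical dropout masks, which is what allows the exact comparison (`atol=0, rtol=0`) instead of a tolerance-based one. A minimal self-contained sketch of the same pattern (the module/functional pairing here is illustrative, not the UNet from the diff):

import torch
import torch.nn.functional as F
from torch.testing import assert_close

drop = torch.nn.Dropout(p=0.3)  # rate fixed at construction time
drop.train()                    # dropout is only active in train mode

x = torch.randn(4, 8)

torch.manual_seed(0)            # same seed => same dropout mask ...
y1 = drop(x)
torch.manual_seed(0)
y2 = F.dropout(x, p=0.3, training=True)  # ... even via the functional API

assert_close(y1, y2, atol=0, rtol=0)     # bit-for-bit equality, no tolerance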
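The custom model's call signature, `cust(x, dropout_rate)` with a plain `cust(x)` also accepted, suggests a forward pass that takes the rate per call and falls back to a stored default. A hypothetical sketch of such a module (`DropoutAtCallTime` and its internals are assumptions for illustration, not the actual `CustomUNet`):

import torch
import torch.nn.functional as F

class DropoutAtCallTime(torch.nn.Module):
    # Hypothetical module mirroring the call-time dropout_rate signature.
    def __init__(self, features, default_dropout_rate=0.0):
        super().__init__()
        self.linear = torch.nn.Linear(features, features)
        self.default_dropout_rate = default_dropout_rate

    def forward(self, x, dropout_rate=None):
        if dropout_rate is None:  # cust(x): fall back to the default rate
            dropout_rate = self.default_dropout_rate
        x = self.linear(x)
        # self.training tracks .train()/.eval(); dropout is a no-op at eval,
        # which is why the eval-mode check can omit dropout_rate entirely.
        return F.dropout(x, p=dropout_rate, training=self.training)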