Commit 1318c91
Lint reference_algorithms/
1 parent: 4424f7d

83 files changed: 988 additions & 1283 deletions

reference_algorithms/development_algorithms/cifar/cifar_jax/submission.py
Lines changed: 2 additions & 2 deletions

@@ -3,11 +3,11 @@
 import functools
 from typing import Any, Dict, Iterator, List, Optional, Tuple
 
-from flax import jax_utils
 import jax
-from jax import lax
 import jax.numpy as jnp
 import optax
+from flax import jax_utils
+from jax import lax
 
 from algoperf import spec

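This and the other Python hunks below apply the same import reordering: plain import statements first, then from-imports, each group alphabetized. That is isort's default layout, so the reordering was presumably produced by isort or by ruff's import-sorting rules (an assumption; the linter configuration is not part of this diff). The post-lint third-party block, with the assumed rule spelled out in comments:

# Assumed isort-style ordering, illustrated on the post-lint imports:
# 1) plain imports, alphabetized...
import jax
import jax.numpy as jnp
import optax

# 2) ...then from-imports, alphabetized by source module.
from flax import jax_utils
from jax import lax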
reference_algorithms/development_algorithms/cifar/cifar_pytorch/submission.py
Lines changed: 1 addition & 3 deletions

@@ -3,9 +3,7 @@
 from typing import Any, Dict, Iterator, List, Optional, Tuple
 
 import torch
-from torch.optim.lr_scheduler import CosineAnnealingLR
-from torch.optim.lr_scheduler import LinearLR
-from torch.optim.lr_scheduler import SequentialLR
+from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR, SequentialLR
 
 from algoperf import spec

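The three schedulers consolidated onto one import line are typically chained into a warmup-then-decay schedule. A minimal sketch of that composition (the model, optimizer, step counts, and factors below are illustrative stand-ins, not values from this submission):

import torch
from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR, SequentialLR

model = torch.nn.Linear(8, 8)  # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

warmup_steps, total_steps = 500, 20_000  # illustrative
# Linear warmup from lr * 1e-3 up to the base lr over warmup_steps steps.
warmup = LinearLR(optimizer, start_factor=1e-3, end_factor=1.0, total_iters=warmup_steps)
# Cosine decay over the remaining steps.
cosine = CosineAnnealingLR(optimizer, T_max=total_steps - warmup_steps)
# SequentialLR runs the warmup schedule first, then switches to cosine at the milestone.
scheduler = SequentialLR(optimizer, schedulers=[warmup, cosine], milestones=[warmup_steps])

for _ in range(total_steps):
    optimizer.step()
    scheduler.step()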
Lines changed: 5 additions & 5 deletions

@@ -1,7 +1,7 @@
 {
-  "learning_rate": {"feasible_points": [0.1]},
-  "warmup_epochs": {"feasible_points": [5]},
-  "num_epochs": {"feasible_points": [200]},
-  "l2": {"feasible_points": [5e-4]},
-  "momentum": {"feasible_points": [0.9]}
+  "learning_rate": { "feasible_points": [0.1] },
+  "warmup_epochs": { "feasible_points": [5] },
+  "num_epochs": { "feasible_points": [200] },
+  "l2": { "feasible_points": [5e-4] },
+  "momentum": { "feasible_points": [0.9] }
 }
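A search space built entirely from "feasible_points" entries is a discrete grid. A hypothetical sketch of drawing one trial from such a file (this is not the AlgoPerf tuning harness, only an illustration of the format):

import json
import random

space = json.loads("""{
  "learning_rate": { "feasible_points": [0.1] },
  "warmup_epochs": { "feasible_points": [5] },
  "momentum": { "feasible_points": [0.9] }
}""")

# One trial: pick a value per hyperparameter from its feasible points.
trial = {name: random.choice(cfg["feasible_points"]) for name, cfg in space.items()}
print(trial)  # {'learning_rate': 0.1, 'warmup_epochs': 5, 'momentum': 0.9}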
Lines changed: 16 additions & 16 deletions (whitespace-only: all 16 lines inside the array were removed and re-added with identical content, so the resulting file is shown once)

@@ -1,17 +1,17 @@
[
  {
    "learning_rate": 1e-3,
    "one_minus_beta_1": 0.999,
    "epsilon": 0.9
  },
  {
    "learning_rate": 1e-2,
    "one_minus_beta_1": 0.99,
    "epsilon": 0.99
  },
  {
    "learning_rate": 1e-1,
    "one_minus_beta_1": 0.9,
    "epsilon": 0.999
  }
]

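A trial list like the one above pins every hyperparameter per entry, so a tuner iterates over the trials rather than sampling from ranges. A hypothetical loading sketch (the file path and the run_trial helper are stand-ins, not part of this repository):

import json

with open("trials.json") as f:  # hypothetical path
    trials = json.load(f)       # JSON accepts 1e-3 style exponent literals

for trial in trials:
    beta_1 = 1.0 - trial["one_minus_beta_1"]  # first trial above: beta_1 = 0.001
    run_trial(trial["learning_rate"], beta_1, trial["epsilon"])  # hypothetical runner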
reference_algorithms/development_algorithms/mnist/mnist_jax/submission.py
Lines changed: 2 additions & 2 deletions

@@ -3,11 +3,11 @@
 import functools
 from typing import Any, Dict, Iterator, List, Optional, Tuple
 
-from flax import jax_utils
 import jax
-from jax import lax
 import jax.numpy as jnp
 import optax
+from flax import jax_utils
+from jax import lax
 
 from algoperf import spec

Lines changed: 3 additions & 3 deletions

@@ -1,5 +1,5 @@
 {
-  "learning_rate": {"min": 1e-4, "max": 1e-2, "scaling": "log"},
-  "one_minus_beta_1": {"min": 0.9, "max": 0.999, "scaling": "log"},
-  "epsilon": {"feasible_points": [1e-8, 1e-5, 1e-3]}
+  "learning_rate": { "min": 1e-4, "max": 1e-2, "scaling": "log" },
+  "one_minus_beta_1": { "min": 0.9, "max": 0.999, "scaling": "log" },
+  "epsilon": { "feasible_points": [1e-8, 1e-5, 1e-3] }
 }

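Entries with "min"/"max" and "scaling": "log" describe continuous ranges, conventionally sampled log-uniformly so each decade gets equal probability mass, while "feasible_points" stays a discrete choice. A sketch under that reading (an assumption; the actual sampling code is not part of this diff):

import math
import random

def sample_log_uniform(lo: float, hi: float) -> float:
    # Uniform in log space: equal mass per decade.
    return math.exp(random.uniform(math.log(lo), math.log(hi)))

learning_rate = sample_log_uniform(1e-4, 1e-2)
one_minus_beta_1 = sample_log_uniform(0.9, 0.999)
epsilon = random.choice([1e-8, 1e-5, 1e-3])  # the discrete feasible_points entry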
reference_algorithms/paper_baselines/adafactor/jax/sharded_adafactor.py
Lines changed: 1 addition & 1 deletion

@@ -30,8 +30,8 @@
 from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
 
 import jax
-from jax import numpy as jnp
 import optax
+from jax import numpy as jnp
 
 JTensor = Any
 NestedJTensor = Any

reference_algorithms/paper_baselines/adafactor/jax/submission.py
Lines changed: 2 additions & 2 deletions

@@ -3,11 +3,11 @@
 import functools
 from typing import Any, Dict, Iterator, List, Optional, Tuple
 
-from flax import jax_utils
 import jax
-from jax import lax
 import jax.numpy as jnp
 import optax
+from flax import jax_utils
+from jax import lax
 
 from algoperf import spec
 from reference_algorithms.paper_baselines.adafactor.jax.sharded_adafactor import (

reference_algorithms/paper_baselines/adafactor/pytorch/submission.py
Lines changed: 2 additions & 4 deletions

@@ -3,12 +3,10 @@
 from functools import partial
 from typing import Any, Dict, Iterator, List, Optional, Tuple
 
-from absl import logging
 import torch
 import torch.distributed.nn as dist_nn
-from torch.optim.lr_scheduler import CosineAnnealingLR
-from torch.optim.lr_scheduler import LinearLR
-from torch.optim.lr_scheduler import SequentialLR
+from absl import logging
+from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR, SequentialLR
 
 from algoperf import spec
 from algoperf.pytorch_utils import pytorch_setup
Lines changed: 24 additions & 18 deletions

@@ -1,20 +1,26 @@
 {
-  "learning_rate": {
-    "min": 1e-4, "max": 1e-2, "scaling": "log"
-  },
-  "one_minus_beta1": {
-    "min": 1e-2, "max": 0.45, "scaling": "log"
-  },
-  "warmup_factor": {
-    "feasible_points": [0.05]
-  },
-  "weight_decay": {
-    "min": 1e-3, "max": 1.0, "scaling": "log"
-  },
-  "label_smoothing": {
-    "feasible_points": [0.1, 0.2]
-  },
-  "dropout_rate": {
-    "feasible_points": [0.0, 0.1]
-  }
+  "learning_rate": {
+    "min": 1e-4,
+    "max": 1e-2,
+    "scaling": "log"
+  },
+  "one_minus_beta1": {
+    "min": 1e-2,
+    "max": 0.45,
+    "scaling": "log"
+  },
+  "warmup_factor": {
+    "feasible_points": [0.05]
+  },
+  "weight_decay": {
+    "min": 1e-3,
+    "max": 1.0,
+    "scaling": "log"
+  },
+  "label_smoothing": {
+    "feasible_points": [0.1, 0.2]
+  },
+  "dropout_rate": {
+    "feasible_points": [0.0, 0.1]
+  }
 }

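Parameterizing the search over one_minus_beta1 instead of beta1 itself lets the log scale place most of its resolution where momentum is close to 1. A worked sketch of that mapping (hypothetical; the harness code is not shown here):

import math
import random

# Log-uniform over 1 - beta1 in [1e-2, 0.45]...
one_minus_beta1 = math.exp(random.uniform(math.log(1e-2), math.log(0.45)))
# ...which maps beta1 into [0.55, 0.99], densest near beta1 = 0.99.
beta1 = 1.0 - one_minus_beta1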