Commit b45a69b

fix: adding wandb under 'full' section
1 parent 39bb876

File tree

2 files changed, +5 -4 lines changed:
  pyproject.toml
  submission_runner.py

pyproject.toml

Lines changed: 4 additions & 2 deletions

@@ -47,6 +47,7 @@ dependencies = [
   "clu==0.0.12",
   "matplotlib>=3.9.2",
   "tabulate==0.9.0",
+
 ]
 
 [build-system]
@@ -70,7 +71,7 @@ version_file = "algorithmic_efficiency/_version.py"
 [project.optional-dependencies]
 # All workloads
 full = [
-  "algorithmic_efficiency[criteo1tb,fastmri,ogbg,librispeech_conformer,wmt]",
+  "algorithmic_efficiency[criteo1tb,fastmri,ogbg,librispeech_conformer,wmt,wandb]",
 ]
 # All workloads plus development dependencies
 full_dev = ["algorithmic_efficiency[full,dev]"]
@@ -83,6 +84,8 @@ dev = [
   "pre-commit==4.0.1",
 ]
 
+wandb = ["wandb==0.16.5"]
+
 # Workloads
 criteo1tb = ["scikit-learn==1.5.2"]
 fastmri = ["h5py==3.12.0", "scikit-image==0.24.0"]
@@ -119,7 +122,6 @@ pytorch_gpu = [
   "torch==2.5.1",
   "torchvision==0.20.1",
 ] # Note: omit the cuda suffix and installing from the appropriate wheel will result in using locally installed CUDA.
-wandb = ["wandb==0.16.5"]
 
 ###############################################################################
 # Linting Configurations #
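
After this change, the `full` extra pulls wandb in transitively: the self-referencing requirement `algorithmic_efficiency[...,wandb]` makes pip resolve the new `wandb` extra group, so `pip install -e '.[full]'` now installs wandb==0.16.5, and `pip install -e '.[wandb]'` installs it alone. Because wandb remains optional on every other install path, logging code typically guards the import. A minimal sketch of that pattern follows; `maybe_init_wandb` is a hypothetical helper for illustration, not code from this commit:

# A minimal sketch of guarding an optional wandb dependency.
# `maybe_init_wandb` is a hypothetical helper, not part of this commit.
try:
  import wandb
  _WANDB_AVAILABLE = True
except ImportError:
  _WANDB_AVAILABLE = False


def maybe_init_wandb(project, config):
  """Starts a wandb run if the optional extra is installed, else no-ops."""
  if not _WANDB_AVAILABLE:
    return None
  return wandb.init(project=project, config=config)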

submission_runner.py

Lines changed: 1 addition & 2 deletions

@@ -21,7 +21,6 @@
 import itertools
 import json
 import os
-
 import struct
 import time
 from types import MappingProxyType
@@ -685,7 +684,7 @@ def main(_):
   base_workload = workloads.get_base_workload_name(FLAGS.workload)
   if base_workload == 'librispeech_conformer':
     os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
-
+
   if FLAGS.set_pytorch_max_split_size:
     os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:256'
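
Note that both branches in the second hunk assign the same `PYTORCH_CUDA_ALLOC_CONF` variable, so when a librispeech_conformer run also passes `--set_pytorch_max_split_size`, the second assignment overwrites the first. PyTorch's CUDA caching allocator accepts comma-separated options, so a sketch that keeps both settings could look like the following (an illustration with a hypothetical `_append_alloc_conf` helper, not what this commit does):

import os


def _append_alloc_conf(option):
  """Appends an option to PYTORCH_CUDA_ALLOC_CONF instead of clobbering it.

  Hypothetical helper for illustration, not part of this commit.
  """
  existing = os.environ.get('PYTORCH_CUDA_ALLOC_CONF')
  os.environ['PYTORCH_CUDA_ALLOC_CONF'] = (
      f'{existing},{option}' if existing else option)


_append_alloc_conf('expandable_segments:True')
_append_alloc_conf('max_split_size_mb:256')
# PYTORCH_CUDA_ALLOC_CONF is now 'expandable_segments:True,max_split_size_mb:256'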
