Formatting some stuff I touched
dirkgr committed Jan 17, 2025
1 parent 86b218e commit a622aa4
Showing 4 changed files with 15 additions and 5 deletions.
10 changes: 8 additions & 2 deletions src/olmo_core/internal/experiment.py
@@ -108,10 +108,16 @@ def run(self, config: ExperimentConfig):
                 teardown_training_environment()
         elif self == SubCmd.train_single:
             if config.model.dp_config is not None:
-                log.warning("dp_config is set to %s, but you can't use data parallelism when running on a single node. Disabling.", config.model.dp_config)
+                log.warning(
+                    "dp_config is set to %s, but you can't use data parallelism when running on a single node. Disabling.",
+                    config.model.dp_config,
+                )
                 config.model.dp_config = None
             if config.model.tp_config is not None:
-                log.warning("tp_config is set to %s, but you can't use tensor parallelism when running on a single node. Disabling.", config.model.dp_config)
+                log.warning(
+                    "tp_config is set to %s, but you can't use tensor parallelism when running on a single node. Disabling.",
+                    config.model.tp_config,
+                )
                 config.model.tp_config = None
             try:
                 train(config)
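The hunk above only reflows the two warnings, but the surrounding guard is the interesting part: a single-node run can't use data or tensor parallelism, so the config is downgraded with a lazily formatted log.warning before training starts. A minimal, self-contained sketch of that pattern, using a hypothetical ToyModelConfig and downgrade_to_single_node helper rather than olmo_core's real classes:

import logging
from dataclasses import dataclass
from typing import Optional

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)

@dataclass
class ToyModelConfig:
    # Stand-ins for olmo_core's real dp/tp config objects (hypothetical).
    dp_config: Optional[str] = "ddp"
    tp_config: Optional[str] = "tp(degree=2)"

def downgrade_to_single_node(model: ToyModelConfig) -> ToyModelConfig:
    # A single node can't shard across ranks, so clear both parallelism settings.
    if model.dp_config is not None:
        # %s-style arguments are only formatted if the record is actually emitted.
        log.warning(
            "dp_config is set to %s, but you can't use data parallelism when "
            "running on a single node. Disabling.",
            model.dp_config,
        )
        model.dp_config = None
    if model.tp_config is not None:
        log.warning(
            "tp_config is set to %s, but you can't use tensor parallelism when "
            "running on a single node. Disabling.",
            model.tp_config,
        )
        model.tp_config = None
    return model

downgrade_to_single_node(ToyModelConfig())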
4 changes: 3 additions & 1 deletion src/olmo_core/nn/transformer/config.py
@@ -237,7 +237,9 @@ def build(
             if torch.cuda.is_available():
                 model.apply_compile()
             else:
-                log.warning("model.compile was set to True, but CUDA is not available. Compiling only works with CUDA. Ignoring.")
+                log.warning(
+                    "model.compile was set to True, but CUDA is not available. Compiling only works with CUDA. Ignoring."
+                )

         # Maybe wrap for data parallel.
         if dp_mesh is None and mesh is not None:
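The transformer config hunk wraps the same kind of guard around model compilation: compilation is only attempted when CUDA is available, otherwise the setting is ignored with a warning. A small sketch of that guard, assuming a hypothetical maybe_compile helper and using torch.compile directly in place of olmo_core's apply_compile:

import logging

import torch
import torch.nn as nn

log = logging.getLogger(__name__)

def maybe_compile(model: nn.Module, compile_model: bool) -> nn.Module:
    # Hypothetical helper: compile only when CUDA is available, otherwise warn
    # and return the model unchanged (matching the guard in the hunk above).
    if compile_model:
        if torch.cuda.is_available():
            model = torch.compile(model)
        else:
            log.warning(
                "model.compile was set to True, but CUDA is not available. "
                "Compiling only works with CUDA. Ignoring."
            )
    return model

model = maybe_compile(nn.Linear(8, 8), compile_model=True)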
2 changes: 1 addition & 1 deletion src/olmo_core/train/__init__.py
@@ -49,7 +49,7 @@

 from ..distributed.utils import init_distributed, is_distributed
 from ..io import add_cached_path_clients
-from ..utils import LogFilterType, prepare_cli_environment, seed_all, get_default_device
+from ..utils import LogFilterType, get_default_device, prepare_cli_environment, seed_all
 from .checkpoint import Checkpointer, CheckpointerConfig
 from .common import Duration, DurationUnit, LoadStrategy, ReduceType
 from .config import TrainerConfig
4 changes: 3 additions & 1 deletion src/olmo_core/train/trainer.py
@@ -385,7 +385,9 @@ def __post_init__(self):
             if torch.cuda.is_available():
                 self._loss_fn = torch.compile(self._loss_fn)
             else:
-                log.warning("compile_loss was set to True, but CUDA is not available. Compiling only works with CUDA. Ignoring.")
+                log.warning(
+                    "compile_loss was set to True, but CUDA is not available. Compiling only works with CUDA. Ignoring."
+                )

     @property
     def global_batch_size(self) -> int:
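The trainer hunk applies the identical guard to the loss function in __post_init__. A standalone sketch of compiling a plain loss function under that condition, with cross_entropy_loss as a hypothetical stand-in for the trainer's actual _loss_fn:

import logging

import torch
import torch.nn.functional as F

log = logging.getLogger(__name__)

def cross_entropy_loss(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    return F.cross_entropy(logits, labels)

# Mirror of the __post_init__ guard: compile the loss only when CUDA is
# available, matching the warning above; otherwise keep the eager function.
if torch.cuda.is_available():
    loss_fn = torch.compile(cross_entropy_loss)
else:
    log.warning(
        "compile_loss was set to True, but CUDA is not available. "
        "Compiling only works with CUDA. Ignoring."
    )
    loss_fn = cross_entropy_loss

loss = loss_fn(torch.randn(4, 10), torch.randint(0, 10, (4,)))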
