Commit
Support Early Exit Loss and/or Layer Dropout (pytorch#1076)
Co-authored-by: ebsmothers <[email protected]>
2 people authored and rahul-sarvam committed Dec 23, 2024
1 parent 0caae59 commit 1e74e52
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions recipes/dev/early_exit_finetune_distributed.py
@@ -183,7 +183,7 @@ def __init__(self, cfg: DictConfig) -> None:
 
         # _is_rank_zero is used primarily for logging. In the future, the logger
         # should directly take care of this
-        _, rank = utils.get_world_size_and_rank()
+        _, rank = training.get_world_size_and_rank()
         self._is_rank_zero = rank == 0
 
         # Training cfg
@@ -646,7 +646,7 @@ def _setup_data(
         DistributedSamplers with Map-style Datasets which fit into memory. Other samplers,
         iterable datasets and streaming datasets are not supported.
         """
-        world_size, rank = utils.get_world_size_and_rank()
+        world_size, rank = training.get_world_size_and_rank()
 
         if isinstance(cfg_dataset, ListConfig):
             datasets = [
@@ -826,7 +826,7 @@ def train(self) -> None:
         # clean up before training begins
         training.cleanup_before_training()
 
-        world_size, rank = utils.get_world_size_and_rank()
+        world_size, rank = training.get_world_size_and_rank()
 
         # zero out the gradients before starting training
         if not self._optimizer_in_bwd:
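
All three hunks make the same one-line change: the recipe now reads the world size and rank from the training namespace rather than utils. As a rough illustration only (not torchtune's actual implementation), a helper of this shape typically wraps torch.distributed and falls back to a single-process default, and the recipe then uses the result to gate logging to rank 0 and to shard data across ranks. The sketch below is written under those assumptions, with get_world_size_and_rank defined locally for illustration.

# Minimal sketch only -- not torchtune's implementation of
# training.get_world_size_and_rank(); just the usual pattern such a helper follows.
from typing import Tuple

import torch.distributed as dist
from torch.utils.data import DistributedSampler


def get_world_size_and_rank() -> Tuple[int, int]:
    """Return (world_size, rank); default to (1, 0) when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size(), dist.get_rank()
    return 1, 0


world_size, rank = get_world_size_and_rank()

# Mirrors the first hunk: only rank 0 emits logs.
is_rank_zero = rank == 0

# Mirrors the second hunk: world_size/rank drive a DistributedSampler so each
# rank sees a distinct shard of a map-style dataset (a toy dataset here).
sampler = DistributedSampler(
    list(range(16)), num_replicas=world_size, rank=rank, shuffle=True
)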
