diff --git a/ludwig/trainers/trainer_llm.py b/ludwig/trainers/trainer_llm.py
index 9540663288b..8d1c28a6b1c 100644
--- a/ludwig/trainers/trainer_llm.py
+++ b/ludwig/trainers/trainer_llm.py
@@ -205,6 +205,9 @@ def tune_batch_size(
         snapshot_weights: bool = True,
         on_best_batch_size_updated: Optional[Callable[[int, float, int], None]] = None,
     ) -> int:
+        # TODO: Implement batch size tuning for LLMs; currently this just returns the default batch size.
+        # Compared to ECD, this only requires forward passes until we OOM.
+        # https://github.com/ludwig-ai/ludwig/issues/3525
         return MINIMUM_BATCH_SIZE
 
     @property
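
For reference, a minimal sketch of the approach the TODO describes: unlike ECD (which tunes with full train steps), LLM batch size tuning only needs forward passes, doubling the candidate batch size until CUDA runs out of memory. `build_dummy_batch` and `max_batch_size` are hypothetical placeholders here, not Ludwig APIs, and the real implementation is tracked in the linked issue.

```python
import torch


def tune_llm_batch_size(model, build_dummy_batch, max_batch_size: int = 1024) -> int:
    """Return the largest power-of-two batch size whose forward pass fits in memory.

    `build_dummy_batch(n)` is a hypothetical helper that produces a synthetic
    input batch of size `n` shaped like the model's real inputs.
    """
    best = 1
    batch_size = 1
    model.eval()
    while batch_size <= max_batch_size:
        try:
            with torch.no_grad():
                # Forward pass only -- no backward pass or optimizer step needed.
                model(build_dummy_batch(batch_size))
            best = batch_size
            batch_size *= 2
        except RuntimeError as e:
            if "out of memory" in str(e).lower():
                torch.cuda.empty_cache()  # release the failed allocation
                break
            raise  # re-raise anything that isn't an OOM
    return best
```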