From 70b8cb626304479dd257d8e31fe2649d453f7770 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Mon, 30 Jan 2023 19:06:20 +0100
Subject: [PATCH] Update docs

---
 .../accelerators/accelerator_prepare.rst      |  16 ++-
 .../common/lightning_module.rst               | 135 ++++++------------
 docs/source-pytorch/extensions/logging.rst    |   4 +-
 .../model/manual_optimization.rst             |   2 +-
 docs/source-pytorch/starter/style_guide.rst   |   6 +-
 .../visualize/logging_advanced.rst            |   2 +-
 examples/pl_ipu/mnist_sample.py               |  20 +--
 7 files changed, 73 insertions(+), 112 deletions(-)

diff --git a/docs/source-pytorch/accelerators/accelerator_prepare.rst b/docs/source-pytorch/accelerators/accelerator_prepare.rst
index f736c57472d2c..f1da6867a0eee 100644
--- a/docs/source-pytorch/accelerators/accelerator_prepare.rst
+++ b/docs/source-pytorch/accelerators/accelerator_prepare.rst
@@ -105,19 +105,27 @@ Note if you use any built in metrics or custom metrics that use `TorchMetrics

[...]

diff --git a/examples/pl_ipu/mnist_sample.py b/examples/pl_ipu/mnist_sample.py
@@ ... @@
-    def validation_epoch_end(self, outputs) -> None:
+    def on_validation_epoch_end(self) -> None:
         # since the training step/validation step and test step are run on the IPU device
         # we must log the average loss outside the step functions.
-        self.log("val_acc", torch.stack(outputs).mean(), prog_bar=True)
+        self.log("val_acc", torch.stack(self.val_outputs).mean(), prog_bar=True)
+        self.val_outputs.clear()
 
-    def test_epoch_end(self, outputs) -> None:
-        self.log("test_acc", torch.stack(outputs).mean())
+    def on_test_epoch_end(self) -> None:
+        self.log("test_acc", torch.stack(self.test_outputs).mean())
+        self.test_outputs.clear()
 
     def configure_optimizers(self):
         return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
@@ -75,9 +79,7 @@ def configure_optimizers(self):
 
 
 if __name__ == "__main__":
     dm = MNISTDataModule(batch_size=32)
-
     model = LitClassifier()
-
     trainer = Trainer(max_epochs=2, accelerator="ipu", devices=8)
     trainer.fit(model, datamodule=dm)
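
For context: the hunks that introduce `self.val_outputs`/`self.test_outputs` and append to them in `validation_step`/`test_step` are not visible in this excerpt of the patch. The sketch below shows the migration pattern the visible hunks imply: the removed `validation_epoch_end(outputs)`/`test_epoch_end(outputs)` hooks are replaced by `on_validation_epoch_end`/`on_test_epoch_end` plus manual accumulation on the module. It is a minimal illustration under assumptions, not the real code from examples/pl_ipu/mnist_sample.py; the `LitClassifierSketch` name, the single-linear-layer model, and the accuracy computation are stand-ins.

import torch
from torch import nn
from torch.nn import functional as F
from pytorch_lightning import LightningModule


class LitClassifierSketch(LightningModule):
    """Illustrative stand-in for the example's LitClassifier."""

    def __init__(self, learning_rate: float = 1e-3):
        super().__init__()
        self.save_hyperparameters()
        self.model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
        # these lists replace the `outputs` argument that the removed
        # validation_epoch_end/test_epoch_end hooks used to receive
        self.val_outputs = []
        self.test_outputs = []

    def training_step(self, batch, batch_idx):
        x, y = batch
        return F.cross_entropy(self.model(x), y)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        acc = (self.model(x).argmax(dim=-1) == y).float().mean()
        self.val_outputs.append(acc)  # accumulate per-batch results manually

    def on_validation_epoch_end(self) -> None:
        # log the epoch average outside the step, as the IPU sample does
        self.log("val_acc", torch.stack(self.val_outputs).mean(), prog_bar=True)
        self.val_outputs.clear()  # reset for the next epoch

    def test_step(self, batch, batch_idx):
        x, y = batch
        acc = (self.model(x).argmax(dim=-1) == y).float().mean()
        self.test_outputs.append(acc)

    def on_test_epoch_end(self) -> None:
        self.log("test_acc", torch.stack(self.test_outputs).mean())
        self.test_outputs.clear()

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

Clearing the lists in the epoch-end hooks matters: unlike the old `outputs` argument, these attributes persist across epochs, so omitting `clear()` would average over every epoch seen so far and steadily grow memory use.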