From 49e7f65452a9fd65631ee02189d12c888d928b9d Mon Sep 17 00:00:00 2001
From: Pietro Monticone
Date: Fri, 31 Mar 2023 22:14:15 +0200
Subject: [PATCH] Fix typos in docstrings

---
 core/custom_training_loop.py | 2 +-
 core/debug_training_loop.py  | 2 +-
 core/train_pipeline.py       | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/core/custom_training_loop.py b/core/custom_training_loop.py
index 0241145..8a82b1c 100644
--- a/core/custom_training_loop.py
+++ b/core/custom_training_loop.py
@@ -115,7 +115,7 @@ def train(
     dataset: data iterator for the training set
     evaluation_iterators: data iterators for the different evaluation sets
     scheduler: optional learning rate scheduler
-    output_transform_for_metrics: optional transformation functions to transorm the model
+    output_transform_for_metrics: optional transformation functions to transform the model
       output and labels into a format the metrics can understand
   """
 
diff --git a/core/debug_training_loop.py b/core/debug_training_loop.py
index 610eea9..c7a1129 100644
--- a/core/debug_training_loop.py
+++ b/core/debug_training_loop.py
@@ -1,6 +1,6 @@
 """This is a very limited feature training loop useful for interactive debugging.
 
-It is not intended for actual model tranining (it is not fast, doesn't compile the model).
+It is not intended for actual model training (it is not fast, doesn't compile the model).
 It does not support checkpointing.
 
 suggested use:
diff --git a/core/train_pipeline.py b/core/train_pipeline.py
index cde587e..36f70ec 100644
--- a/core/train_pipeline.py
+++ b/core/train_pipeline.py
@@ -57,7 +57,7 @@ def _wait_for_batch(batch: In, stream: Optional[torch.cuda.streams.Stream]) -> N
     torch.cuda.current_stream().wait_stream(stream)
     # As mentioned in https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html,
     # PyTorch uses the "caching allocator" for memory allocation for tensors. When a tensor is
-    # freed, its memory is likely to be reused by newly constructed tenosrs. By default,
+    # freed, its memory is likely to be reused by newly constructed tensors. By default,
     # this allocator traces whether a tensor is still in use by only the CUDA stream where it
     # was created. When a tensor is used by additional CUDA streams, we need to call record_stream
     # to tell the allocator about all these streams. Otherwise, the allocator might free the
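
Note (an aside, not part of the patch): the comment corrected in core/train_pipeline.py
explains why a tensor produced on one CUDA stream and consumed on another should be marked
with record_stream(). Below is a minimal sketch of that pattern using standard PyTorch CUDA
APIs; the names copy_stream and batch_gpu are illustrative only, not taken from the repository.

    import torch

    if torch.cuda.is_available():
        copy_stream = torch.cuda.Stream()
        batch = torch.randn(1024, 1024, pin_memory=True)

        with torch.cuda.stream(copy_stream):
            # Host-to-device copy issued on the side stream.
            batch_gpu = batch.to("cuda", non_blocking=True)

        # Make the default stream wait for the copy to finish, as _wait_for_batch does.
        torch.cuda.current_stream().wait_stream(copy_stream)
        # Tell the caching allocator that the default stream also uses this tensor,
        # so its memory is not handed to newly constructed tensors while still in use.
        batch_gpu.record_stream(torch.cuda.current_stream())

Without the record_stream() call, the allocator only knows about copy_stream (the stream the
tensor was created on), so once batch_gpu is freed its memory could be reused while the
default stream is still reading it.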