From b3d07ef63d0633b584bc3d3cfaa0c85d8594643c Mon Sep 17 00:00:00 2001 From: Marx-wrld Date: Sun, 2 Apr 2023 17:10:45 +0300 Subject: [PATCH] Grammar changes and spacings --- README.md | 1 - checks.json | 1 + common/run_training.py | 2 +- machines/list_ops.py | 2 +- projects/home/recap/data/preprocessors.py | 2 +- projects/home/recap/data/util.py | 4 ++-- projects/home/recap/model/config.py | 2 +- projects/home/recap/model/entrypoint.py | 2 +- projects/home/recap/model/model_and_loss.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) create mode 100644 checks.json diff --git a/README.md b/README.md index 6a03955..4f24fa5 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,6 @@ Currently these are: 2. TwHIN embeddings (projects/twhin) https://arxiv.org/abs/2202.05387 - This project can be run inside a python virtualenv. We have only tried this on Linux machines and because we use torchrec it works best with an Nvidia GPU. To setup run `./images/init_venv.sh` (Linux only). diff --git a/checks.json b/checks.json new file mode 100644 index 0000000..8b70b53 --- /dev/null +++ b/checks.json @@ -0,0 +1 @@ +{"enabled":true,"categories":{}} \ No newline at end of file diff --git a/common/run_training.py b/common/run_training.py index fafb140..c6efa39 100644 --- a/common/run_training.py +++ b/common/run_training.py @@ -32,7 +32,7 @@ def maybe_run_training( `train_fn(**training_kwargs)`. Otherwise, this function calls torchrun and points at the calling module - `module_name`. After this call, the necessary environment variables are set + `module_name`. After this call, the necessary environment variables are set and training will commence. 
Args: diff --git a/machines/list_ops.py b/machines/list_ops.py index 71d5201..aeb787e 100644 --- a/machines/list_ops.py +++ b/machines/list_ops.py @@ -1,7 +1,7 @@ """ Simple str.split() parsing of input string -usage example: +Usage example: python list_ops.py --input_list=$INPUT [--sep=","] [--op=] [--elem=$INDEX] Args: diff --git a/projects/home/recap/data/preprocessors.py b/projects/home/recap/data/preprocessors.py index d5720e2..8e8b297 100644 --- a/projects/home/recap/data/preprocessors.py +++ b/projects/home/recap/data/preprocessors.py @@ -51,7 +51,7 @@ class TruncateAndSlice(tf.keras.Model): class DownCast(tf.keras.Model): - """Class for Down casting dataset before serialization and transferring to training host. + """Class for Down-casting dataset before serialization and transferring to training host. Depends on the data type and the actual data range, the down casting can be lossless or not. It is strongly recommended to compare the metrics before and after down casting. """ diff --git a/projects/home/recap/data/util.py b/projects/home/recap/data/util.py index a9fd51e..22f9ab9 100644 --- a/projects/home/recap/data/util.py +++ b/projects/home/recap/data/util.py @@ -9,7 +9,7 @@ def keyed_tensor_from_tensors_dict( tensor_map: Mapping[str, torch.Tensor] ) -> "torchrec.KeyedTensor": """ - Convert a dictionary of torch tensor to torchrec keyed tensor + Converts a dictionary of torch tensor to torchrec keyed tensor Args: tensor_map: @@ -40,7 +40,7 @@ def _compute_jagged_tensor_from_tensor(tensor: torch.Tensor) -> Tuple[torch.Tens def jagged_tensor_from_tensor(tensor: torch.Tensor) -> "torchrec.JaggedTensor": """ - Convert a torch tensor to torchrec jagged tensor. + Converts a torch tensor to torchrec jagged tensor. Note: Currently only support shape of [Batch_size] or [Batch_size x N] for dense tensors. For sparse tensor the shape of .values() should be [Batch_size] or [Batch_size x N]; the dense_shape of the sparse tensor can be arbitrary. 
diff --git a/projects/home/recap/model/config.py b/projects/home/recap/model/config.py index 47d0640..9ced461 100644 --- a/projects/home/recap/model/config.py +++ b/projects/home/recap/model/config.py @@ -82,7 +82,7 @@ class ZScoreLogConfig(base_config.BaseConfig): analysis_path: str schema_path: str = pydantic.Field( None, - description="Schema path which feaure statistics are generated with. Can be different from scehma in data config.", + description="Schema path which feature statistics are generated with. Can be different from schema in data config.", ) clip_magnitude: float = pydantic.Field( 5.0, description="Threshold to clip the normalized input values." ) diff --git a/projects/home/recap/model/entrypoint.py b/projects/home/recap/model/entrypoint.py index 8f4d534..b54f528 100644 --- a/projects/home/recap/model/entrypoint.py +++ b/projects/home/recap/model/entrypoint.py @@ -26,7 +26,7 @@ def unsanitize(sanitized_task_name): def _build_single_task_model(task: model_config_mod.TaskModel, input_shape: int): - """ "Builds a model for a single task""" + """Builds a model for a single task""" if task.mlp_config: return mlp.Mlp(in_features=input_shape, mlp_config=task.mlp_config) elif task.dcn_config: diff --git a/projects/home/recap/model/model_and_loss.py b/projects/home/recap/model/model_and_loss.py index b42da9b..3f155eb 100644 --- a/projects/home/recap/model/model_and_loss.py +++ b/projects/home/recap/model/model_and_loss.py @@ -15,7 +15,7 @@ class ModelAndLoss(torch.nn.Module): Args: model: torch module to wrap. loss_fn: Function for calculating loss, should accept logits and labels. - straitifiers: mapping of stratifier name and index of discrete features to emit for metrics stratification. + straitifiers: mapping of a stratifier name and index of discrete features to emit for metrics stratification. """ super().__init__() self.model = model