Grammar changes and spacings

Marx-wrld 2023-04-02 17:10:45 +03:00
parent 78c3235eee
commit b3d07ef63d
9 changed files with 9 additions and 9 deletions

View File

@@ -6,7 +6,6 @@ Currently these are:
2. TwHIN embeddings (projects/twhin) https://arxiv.org/abs/2202.05387
This project can be run inside a python virtualenv. We have only tried this on Linux machines and because we use torchrec it works best with an Nvidia GPU. To setup run
`./images/init_venv.sh` (Linux only).
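As a quick sanity check after running the init script, a short hedged snippet like the following (only `torch` and `torchrec` are assumed to be installed by the venv) confirms that the Nvidia GPU the README recommends is actually visible:

```python
# Hypothetical post-setup check; not part of the repository.
import torch

try:
    import torchrec  # noqa: F401
    print("torchrec import: OK")
except ImportError as exc:
    print(f"torchrec import failed: {exc}")

# The README recommends an Nvidia GPU; CPU-only will still run, just slowly.
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("GPU:", torch.cuda.get_device_name(0))
```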

checks.json (new file, +1 line)
View File

@@ -0,0 +1 @@
+ {"enabled":true,"categories":{}}

View File

@@ -32,7 +32,7 @@ def maybe_run_training(
`train_fn(**training_kwargs)`.
Otherwise, this function calls torchrun and points at the calling module
`module_name`. After this call, the necessary environment variables are set
and training will commence.
Args:
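The docstring above describes a dispatch: call `train_fn` directly when the process is already a distributed worker, otherwise re-launch the calling module under torchrun. A hedged sketch of that pattern follows; the environment-variable check and torchrun flags are assumptions, not the repository's actual implementation:

```python
# Illustrative sketch only; names and launch details are assumptions.
import os
import subprocess
import sys
from typing import Callable


def maybe_run_training_sketch(
    train_fn: Callable[..., None],
    module_name: str,
    nproc_per_node: int = 1,
    **training_kwargs,
) -> None:
    if os.environ.get("LOCAL_RANK") is not None:
        # Already launched under torchrun: this worker just trains.
        train_fn(**training_kwargs)
    else:
        # Re-launch the calling module under torchrun so each worker gets
        # RANK / WORLD_SIZE / LOCAL_RANK set before training commences.
        cmd = [
            "torchrun",
            f"--nproc_per_node={nproc_per_node}",
            "-m",
            module_name,
        ] + sys.argv[1:]
        subprocess.run(cmd, check=True)
```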

View File

@@ -1,7 +1,7 @@
"""
Simple str.split() parsing of input string
- usage example:
+ Usage example:
python list_ops.py --input_list=$INPUT [--sep=","] [--op=<len|select>] [--elem=$INDEX]
Args:
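For context, here is a hedged sketch of the kind of CLI this docstring describes; the argument handling is illustrative only and is not the repository's `list_ops.py`:

```python
# Illustrative sketch of a str.split()-based list utility; not the actual repo code.
import argparse


def main() -> None:
    parser = argparse.ArgumentParser(description="Simple str.split() list operations.")
    parser.add_argument("--input_list", required=True, help="Delimited input string.")
    parser.add_argument("--sep", default=",", help="Separator used to split the input.")
    parser.add_argument("--op", choices=["len", "select"], default="len")
    parser.add_argument("--elem", type=int, default=0, help="Index used by --op=select.")
    args = parser.parse_args()

    items = args.input_list.split(args.sep)
    if args.op == "len":
        print(len(items))
    else:  # --op=select
        print(items[args.elem])


if __name__ == "__main__":
    main()
```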

View File

@@ -51,7 +51,7 @@ class TruncateAndSlice(tf.keras.Model):
class DownCast(tf.keras.Model):
- """Class for Down casting dataset before serialization and transferring to training host.
+ """Class for Down-casting dataset before serialization and transferring to training host.
Depends on the data type and the actual data range, the down casting can be lossless or not.
It is strongly recommended to compare the metrics before and after down casting.
"""

View File

@@ -9,7 +9,7 @@ def keyed_tensor_from_tensors_dict(
tensor_map: Mapping[str, torch.Tensor]
) -> "torchrec.KeyedTensor":
"""
- Convert a dictionary of torch tensor to torchrec keyed tensor
+ Converts a dictionary of torch tensor to torchrec keyed tensor
Args:
tensor_map:
@@ -40,7 +40,7 @@ def _compute_jagged_tensor_from_tensor(tensor: torch.Tensor) -> Tuple[torch.Tens
def jagged_tensor_from_tensor(tensor: torch.Tensor) -> "torchrec.JaggedTensor":
"""
- Convert a torch tensor to torchrec jagged tensor.
+ Converts a torch tensor to torchrec jagged tensor.
Note: Currently only support shape of [Batch_size] or [Batch_size x N] for dense tensors.
For sparse tensor the shape of .values() should be [Batch_size] or [Batch_size x N]; the
dense_shape of the sparse tensor can be arbitrary.
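A hedged sketch of the two conversions these docstrings describe, using the public torchrec constructors; the repository's helpers may handle more cases (for example sparse inputs), so this is only an illustration of the dense path:

```python
# Illustrative sketch; the repo's actual helpers may differ in edge-case handling.
import torch
import torchrec


def dense_to_jagged_sketch(tensor: torch.Tensor) -> torchrec.JaggedTensor:
    # [B] -> one value per example; [B, N] -> N values per example.
    if tensor.dim() == 1:
        lengths = torch.ones(tensor.shape[0], dtype=torch.int32)
        values = tensor
    else:
        lengths = torch.full((tensor.shape[0],), tensor.shape[1], dtype=torch.int32)
        values = tensor.reshape(-1)
    return torchrec.JaggedTensor(values=values, lengths=lengths)


def dict_to_keyed_tensor_sketch(tensor_map) -> torchrec.KeyedTensor:
    # Concatenate per-feature [B, N_i] tensors along dim=1 and record each width.
    keys = list(tensor_map.keys())
    tensors = [t.unsqueeze(1) if t.dim() == 1 else t for t in tensor_map.values()]
    return torchrec.KeyedTensor(
        keys=keys,
        length_per_key=[t.shape[1] for t in tensors],
        values=torch.cat(tensors, dim=1),
    )
```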

View File

@@ -82,7 +82,7 @@ class ZScoreLogConfig(base_config.BaseConfig):
analysis_path: str
schema_path: str = pydantic.Field(
None,
- description="Schema path which feaure statistics are generated with. Can be different from scehma in data config.",
+ description="Schema path which feature statistics are generated with. Can be different from scehma in data config.",
)
clip_magnitude: float = pydantic.Field(
5.0, description="Threshold to clip the normalized input values."
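For illustration, a hedged sketch of how a `clip_magnitude` of 5.0 is commonly applied to z-score-normalized inputs; the repository's actual transform may differ:

```python
# Illustrative only; mean/std handling is an assumption for the example.
import torch


def zscore_clip_sketch(
    x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor, clip_magnitude: float = 5.0
) -> torch.Tensor:
    normalized = (x - mean) / torch.clamp(std, min=1e-6)  # guard against zero std
    # Clip the normalized values to [-clip_magnitude, clip_magnitude].
    return torch.clamp(normalized, -clip_magnitude, clip_magnitude)
```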

View File

@@ -26,7 +26,7 @@ def unsanitize(sanitized_task_name):
def _build_single_task_model(task: model_config_mod.TaskModel, input_shape: int):
- """ "Builds a model for a single task"""
+ """Builds a model for a single task"""
if task.mlp_config:
return mlp.Mlp(in_features=input_shape, mlp_config=task.mlp_config)
elif task.dcn_config:

View File

@@ -15,7 +15,7 @@ class ModelAndLoss(torch.nn.Module):
Args:
model: torch module to wrap.
loss_fn: Function for calculating loss, should accept logits and labels.
- straitifiers: mapping of stratifier name and index of discrete features to emit for metrics stratification.
+ straitifiers: mapping of a stratifier name and index of discrete features to emit for metrics stratification.
"""
super().__init__()
self.model = model
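A hedged sketch of the wrapper this docstring describes, omitting the stratifier handling; the forward signature and batch layout are assumptions rather than the repository's exact interface:

```python
# Illustrative sketch of a model-plus-loss wrapper; not the repo's exact class.
from typing import Callable

import torch


class ModelAndLossSketch(torch.nn.Module):
    def __init__(
        self,
        model: torch.nn.Module,
        loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    ) -> None:
        super().__init__()
        self.model = model
        self.loss_fn = loss_fn

    def forward(self, features: torch.Tensor, labels: torch.Tensor):
        # Run the wrapped model, then compute the loss on its logits.
        logits = self.model(features)
        loss = self.loss_fn(logits, labels)
        return loss, logits
```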