Replace list with Sequence (#3654)
Co-authored-by: v-chen_data <[email protected]>
KuuCi and v-chen_data authored Oct 14, 2024
1 parent f4bac48 commit 6ca3936
Showing 1 changed file with 6 additions and 6 deletions.
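For context on the change itself: `typing.Sequence` is a read-only, covariant type, so a parameter annotated `Sequence[Metric]` type-checks against lists, tuples, and other sequences, while `list[Metric]` only accepts an actual mutable list. A minimal sketch of the difference, using `BinaryAccuracy` purely as an illustrative torchmetrics class:

from typing import Sequence

from torchmetrics import Metric
from torchmetrics.classification import BinaryAccuracy

def takes_list(metrics: list[Metric]) -> None:
    ...

def takes_sequence(metrics: Sequence[Metric]) -> None:
    ...

metric_tuple = (BinaryAccuracy(),)
takes_list(metric_tuple)      # rejected by type checkers: a tuple is not a list
takes_sequence(metric_tuple)  # accepted: a tuple satisfies Sequence

Runtime behavior is unchanged; the widened annotation only affects static type checking of callers.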
12 changes: 6 additions & 6 deletions composer/models/huggingface.py
@@ -16,7 +16,7 @@
 import textwrap
 import warnings
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
+from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Union
 
 import torch
 from torchmetrics import Metric
@@ -53,8 +53,8 @@ class HuggingFaceModel(ComposerModel):
             .. note:: If the tokenizer is provided, its config will be saved in the composer checkpoint, and it can be reloaded
                 using :meth:`HuggingFaceModel.hf_from_composer_checkpoint`. If the tokenizer is not provided here, it will not be saved in the composer checkpoint.
         use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False``
-        metrics (list[Metric], optional): list of torchmetrics to apply to the output of `eval_forward` during training. If ``eval_metrics`` is ``None``, these will also be used as ``eval_metrics``. Default: ``None``.
-        eval_metrics (list[Metric], optional): list of torchmetrics to compute on the eval_dataloader, or be accessible to :class:`Evaluator`s. Default: ``None``.
+        metrics (Sequence[Metric], optional): list of torchmetrics to apply to the output of `eval_forward` during training. If ``eval_metrics`` is ``None``, these will also be used as ``eval_metrics``. Default: ``None``.
+        eval_metrics (Sequence[Metric], optional): list of torchmetrics to compute on the eval_dataloader, or be accessible to :class:`Evaluator`s. Default: ``None``.
         shift_labels (bool, optional): If True, the batch's labels will be shifted before being used to calculate metrics. This should be set to true for CausalLM models and false otherwise. If not specified, `shift_labels` will be set automatically based on the model class name. Default: ``None``.
         allow_embedding_resizing (bool, optional): If True, the model's embeddings will be automatically resized when they are smaller than the tokenizer vocab size. Default: ``False``.
         peft_config (PeftConfig, optional): Optional PEFT config to apply to the model. If provided, the model will be converted to a PEFT model. Only LoRA is currently supported.
@@ -81,8 +81,8 @@ def __init__(
         model: Union[transformers.PreTrainedModel, 'PeftModel'],
         tokenizer: Optional[Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]] = None,
         use_logits: Optional[bool] = False,
-        metrics: Optional[list[Metric]] = None,
-        eval_metrics: Optional[list[Metric]] = None,
+        metrics: Optional[Sequence[Metric]] = None,
+        eval_metrics: Optional[Sequence[Metric]] = None,
         shift_labels: Optional[bool] = None,
         allow_embedding_resizing: bool = False,
         peft_config: Optional['PeftConfig'] = None,
@@ -188,7 +188,7 @@ def _check_tokenizer_and_maybe_resize_embeddings(self, allow_embedding_resizing:
                 f' performance.',
             )
 
-    def _get_metric_dict(self, metrics: list[Metric]) -> dict[str, Metric]:
+    def _get_metric_dict(self, metrics: Sequence[Metric]) -> dict[str, Metric]:
         """Returns a dictionary of metrics keyed by their class name."""
         return {metric.__class__.__name__: metric for metric in metrics}

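A hedged usage sketch of the widened constructor signature (the gpt2 checkpoint and `Perplexity` metric are illustrative choices, not part of this commit):

import transformers
from torchmetrics.text import Perplexity

from composer.models import HuggingFaceModel

# Illustrative base model and tokenizer; any transformers checkpoint would do.
hf_model = transformers.AutoModelForCausalLM.from_pretrained('gpt2')
hf_tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')

# A tuple now satisfies the Sequence[Metric] annotation; a list still works too.
composer_model = HuggingFaceModel(
    model=hf_model,
    tokenizer=hf_tokenizer,
    use_logits=True,
    metrics=(Perplexity(),),
)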
