Skip to content

Commit

Permalink
update readme
Browse files Browse the repository at this point in the history
  • Loading branch information
dnth committed Oct 11, 2024
1 parent 74d1d78 commit 5076b8a
Show file tree
Hide file tree
Showing 5 changed files with 28 additions and 43 deletions.
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@ xinfer is a modular Python framework that provides a unified interface for performing inference
- Ultralytics YOLO: State-of-the-art real-time object detection models.
- Custom Models: Support for your own machine learning models and architectures.

## Prerequisites
Install [PyTorch](https://pytorch.org/get-started/locally/).

## Installation
Install xinfer using pip:
Expand Down
42 changes: 12 additions & 30 deletions nbs/example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,14 @@
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/dnth/mambaforge-pypy3/envs/xinfer-test/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
},
{
"data": {
"text/html": [
Expand Down Expand Up @@ -49,24 +57,11 @@
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "a59fae37d6c24825ba4afafa57451156",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Loading checkpoint shards: 100%|██████████| 2/2 [00:00<00:00, 6.83it/s]\n",
"Expanding inputs for image tokens in BLIP-2 should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. Using processors without these attributes in the config is deprecated and will throw an error in v4.47.\n",
"Expanding inputs for image tokens in BLIP-2 should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. Using processors without these attributes in the config is deprecated and will throw an error in v4.47.\n"
]
Expand Down Expand Up @@ -140,27 +135,14 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "89a91a5de03f4a23952cb941a30c3025",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/8 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Loading checkpoint shards: 100%|██████████| 8/8 [00:02<00:00, 3.21it/s]\n",
"Both `max_new_tokens` (=200) and `max_length`(=51) seem to have been set. `max_new_tokens` will take precedence. Please refer to the documentation for more information. (https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\n"
]
},
Expand Down Expand Up @@ -213,7 +195,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.15"
"version": "3.11.10"
}
},
"nbformat": 4,
Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ ultralytics = [
]



[tool]
[tool.setuptools.packages.find]
include = ["xinfer*"]
Expand Down
13 changes: 2 additions & 11 deletions xinfer/model_factory.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,10 @@
from enum import Enum

from rich.console import Console
from rich.table import Table

from .model_registry import ModelRegistry
from .model_registry import InputOutput, ModelRegistry
from .transformers.blip2 import BLIP2, VLRMBlip2


class InputOutput(Enum):
    """Closed set of input --> output modality pairings for models.

    Each member's value is a human-readable "input --> output" label;
    nothing here parses these strings, they are display text.
    NOTE(review): this copy is the one being *removed* from
    model_factory.py in this commit — it moves to model_registry.py.
    """

    IMAGE_TO_TEXT = "image --> text"
    IMAGE_TEXT_TO_TEXT = "image-text --> text"
    TEXT_TO_TEXT = "text --> text"
    IMAGE_TO_BBOX = "image --> bbox"
    IMAGE_TO_CLASS = "image --> class"


def register_models():
ModelRegistry.register(
"transformers",
Expand All @@ -34,6 +24,7 @@ def create_model(model_id: str, backend: str, **kwargs):
return ModelRegistry.get_model(model_id, backend, **kwargs)


# TODO: list by backend or wildcard
def list_models():
console = Console()
table = Table(title="Available Models")
Expand Down
13 changes: 11 additions & 2 deletions xinfer/model_registry.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,22 @@
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Type

from .base_model import BaseModel


class InputOutput(Enum):
    """Input/output modality pairing attached to a registered model.

    Stored as ``ModelInfo.input_output`` and passed to
    ``ModelRegistry.register``; each member's value is a human-readable
    "input --> output" label (display text, not parsed).
    """

    IMAGE_TO_TEXT = "image --> text"
    IMAGE_TEXT_TO_TEXT = "image-text --> text"
    TEXT_TO_TEXT = "text --> text"
    IMAGE_TO_BBOX = "image --> bbox"
    IMAGE_TO_CLASS = "image --> class"


@dataclass
class ModelInfo:
model_class: Type[BaseModel]
input_output: str = ""
input_output: InputOutput


@dataclass
Expand All @@ -25,7 +34,7 @@ def register(
backend: str,
model_id: str,
model_class: Type[BaseModel],
input_output: str = "",
input_output: InputOutput,
):
if backend not in cls._registry:
cls._registry[backend] = BackendRegistry(backend_name=backend)
Expand Down

0 comments on commit 5076b8a

Please sign in to comment.