Commit e3c4025 (1 parent: 331db17)
Showing 5 changed files with 130 additions and 0 deletions.
Dockerfile (new file)
@@ -0,0 +1,29 @@
# Use the CUDA 11.8 + cuDNN 8 runtime base image (Ubuntu 22.04)
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04

# Set the working directory
WORKDIR /app

# Install Python, Git, and necessary dependencies
RUN apt-get update && apt-get install -y \
    python3-pip \
    python3-dev \
    git \
    && ln -s /usr/bin/python3 /usr/bin/python \
    && rm -rf /var/lib/apt/lists/*

# Install PyTorch with CUDA support
RUN pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118

# Copy the requirements file
COPY requirements.txt .

# Install other Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the Python script
COPY computations.py .
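As a quick sanity check that the CUDA-enabled PyTorch wheel installed above can actually see a GPU inside a container built from this image, a small Python snippet along these lines can be run; this check is illustrative and not part of the commit:

import torch

print(torch.__version__)          # a wheel from the cu118 index typically reports a "+cu118" suffix
print(torch.cuda.is_available())  # True only when the container is started with GPU access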
computations.py (new file)
@@ -0,0 +1,23 @@
import transformers
import torch
from huggingface_hub import login


def compute(hf_token, model_id, prompt):
    """Run a meta-llama text-generation model on the given prompt."""
    # Login with your Hugging Face token and save it to the Git credentials helper
    login(token=hf_token, add_to_git_credential=True)
    # Build a GPU text-generation pipeline for the requested model
    pipeline = transformers.pipeline(
        "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device="cuda"
    )
    output = pipeline(prompt)

    return {"llama": output}


def test():
    """Test the compute function."""
    print("Running test")
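For reference, a minimal sketch of how this compute function could be invoked; the token, model ID, and prompt shown here are placeholders rather than values from the commit:

from computations import compute

result = compute(
    hf_token="hf_xxx",                               # placeholder Hugging Face access token
    model_id="meta-llama/Meta-Llama-3-8B-Instruct",  # assumed model ID; the commit leaves it caller-supplied
    prompt="Write a haiku about GPUs.",
)
print(result["llama"])  # the raw pipeline output, as returned by compute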
(One changed file could not be displayed in the diff view.)
requirements.txt (new file)
@@ -0,0 +1,3 @@
transformers==4.43.1
numpy==1.26.4
accelerate==0.33.0
Block configuration (JSON, new file)
@@ -0,0 +1,75 @@
{
  "information": {
    "id": "meta-llama",
    "name": "Llama",
    "description": "meta-llama",
    "system_versions": [
      "0.1"
    ],
    "block_version": "block version number",
    "block_source": "core/blocks/meta-llama",
    "block_type": "compute"
  },
  "inputs": {
    "hf_token": {
      "type": "Any",
      "connections": []
    },
    "model_id": {
      "type": "Any",
      "connections": []
    },
    "prompt": {
      "type": "Any",
      "connections": []
    }
  },
  "outputs": {
    "llama": {
      "type": "Any",
      "connections": []
    }
  },
  "action": {
    "container": {
      "image": "meta-llama-gpu1",
      "version": "latest",
      "command_line": [
        "python",
        "-u",
        "entrypoint.py"
      ]
    }
  },
  "views": {
    "node": {
      "active": "True or False",
      "title_bar": {
        "background_color": "#6b2be0"
      },
      "preview": {},
      "html": "",
      "pos_x": "1045",
      "pos_y": "197",
      "pos_z": "999",
      "behavior": "modal",
      "order": {
        "input": [
          "hf_token",
          "model_id",
          "prompt"
        ],
        "output": [
          "llama"
        ]
      }
    }
  },
  "events": {}
}
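The action section runs python -u entrypoint.py inside the meta-llama-gpu1 image, but entrypoint.py itself is not visible in this diff. As a rough, assumption-laden sketch (not the actual file), an entrypoint for a compute block shaped like this one might read the declared inputs and forward them to compute:

# Hypothetical sketch only -- the real entrypoint.py is not shown in this commit.
import json
import sys

from computations import compute


def main():
    # Assume the block runtime passes the declared inputs as a JSON object on stdin.
    inputs = json.load(sys.stdin)
    result = compute(inputs["hf_token"], inputs["model_id"], inputs["prompt"])
    # Emit the declared "llama" output so the runtime can route it to connected blocks.
    print(json.dumps(result, default=str))


if __name__ == "__main__":
    main()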