meta llama3 (#210)
meet-rocking authored Sep 19, 2024
1 parent 331db17 commit e3c4025
Showing 5 changed files with 130 additions and 0 deletions.
29 changes: 29 additions & 0 deletions frontend/core/blocks/meta-llama/Dockerfile
@@ -0,0 +1,29 @@
# Use the CUDA 11.8 + cuDNN 8 runtime base image (Ubuntu 22.04, which ships Python 3.10)
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04

# Set the working directory
WORKDIR /app

# Install Python, Git, and necessary dependencies
RUN apt-get update && apt-get install -y \
python3-pip \
python3-dev \
git \
&& ln -s /usr/bin/python3 /usr/bin/python \
&& rm -rf /var/lib/apt/lists/*


# Install PyTorch with CUDA support
RUN pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118

# Copy the requirements file
COPY requirements.txt .

# Install other Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the Python script
COPY computations.py .
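
Not part of the diff, but a quick sanity check for the resulting image: a minimal torch smoke test, assuming the container is started with GPU access (e.g. docker run --gpus all).

import torch

# Expect True when the container has access to the NVIDIA runtime
print(torch.cuda.is_available())
# Expect "11.8", matching the cu118 wheel index used above
print(torch.version.cuda)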



23 changes: 23 additions & 0 deletions frontend/core/blocks/meta-llama/computations.py
@@ -0,0 +1,23 @@
import transformers
import torch
from huggingface_hub import login


def compute(hf_token, model_id, prompt):
    """Run text generation with a Meta Llama model via a transformers pipeline."""
    # Log in with the Hugging Face token and save it to the Git credential helper;
    # gated Llama checkpoints require an authorized token.
    login(token=hf_token, add_to_git_credential=True)
    pipeline = transformers.pipeline(
        "text-generation",
        model=model_id,
        model_kwargs={"torch_dtype": torch.bfloat16},
        device="cuda",
    )
    output = pipeline(prompt)

    return {"llama": output}


def test():
    """Test the compute function."""

    print("Running test")
Binary file added frontend/core/blocks/meta-llama/cover-image.png
3 changes: 3 additions & 0 deletions frontend/core/blocks/meta-llama/requirements.txt
@@ -0,0 +1,3 @@
transformers==4.43.1
numpy==1.26.4
accelerate==0.33.0
75 changes: 75 additions & 0 deletions frontend/core/blocks/meta-llama/specs.json
@@ -0,0 +1,75 @@
{
  "information": {
    "id": "meta-llama",
    "name": "Llama",
    "description": "meta-llama",
    "system_versions": [
      "0.1"
    ],
    "block_version": "block version number",
    "block_source": "core/blocks/meta-llama",
    "block_type": "compute"
  },
  "inputs": {
    "hf_token": {
      "type": "Any",
      "connections": []
    },
    "model_id": {
      "type": "Any",
      "connections": []
    },
    "prompt": {
      "type": "Any",
      "connections": []
    }
  },
  "outputs": {
    "llama": {
      "type": "Any",
      "connections": []
    }
  },
  "action": {
    "container": {
      "image": "meta-llama-gpu1",
      "version": "latest",
      "command_line": [
        "python",
        "-u",
        "entrypoint.py"
      ]
    }
  },
  "views": {
    "node": {
      "active": "True or False",
      "title_bar": {
        "background_color": "#6b2be0"
      },
      "preview": {},
      "html": "",
      "pos_x": "1045",
      "pos_y": "197",
      "pos_z": "999",
      "behavior": "modal",
      "order": {
        "input": [
          "hf_token",
          "model_id",
          "prompt"
        ],
        "output": [
          "llama"
        ]
      }
    }
  },
  "events": {}
}
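
The command_line above points at an entrypoint.py that is not included in this commit, presumably supplied by the platform at run time. Purely to illustrate the contract between specs.json and computations.py, a minimal sketch of such an entrypoint might look like the following; the environment-variable input convention is an assumption, not the platform's actual generated code.

# Hypothetical sketch only: the real entrypoint.py is generated elsewhere,
# and the env-var input convention here is assumed for illustration.
import json
import os

from computations import compute

if __name__ == "__main__":
    result = compute(
        hf_token=os.environ["hf_token"],
        model_id=os.environ["model_id"],
        prompt=os.environ["prompt"],
    )
    # Emit the declared "llama" output as JSON for the runtime to collect.
    print(json.dumps(result))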
