reorganizing inductor test for triton #51

Workflow file for this run

name: Test build/test linux gpu
on:
  pull_request:
    branches: [ main, "*" ] # will remove this once we see everything is working fine
  workflow_dispatch:
    inputs:
      triton_pin:
        description: 'Triton branch or commit to pin'
        default: 'main'
        required: false
      pytorch_pin:
        description: 'PyTorch branch or commit to pin'
        default: 'main'
        required: false
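# Manual runs (workflow_dispatch) can pin Triton and PyTorch to a branch or commit;
# pull_request runs fall back to 'main' via the `|| 'main'` defaults in the checkout steps below.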
jobs:
  build-test:
    continue-on-error: true
    runs-on: linux.g5.48xlarge.nvidia.gpu
    timeout-minutes: 30
    steps:
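      # Build order: clone Triton and the LLVM commit pinned in triton/cmake/llvm-hash.txt,
      # build LLVM/MLIR, install Triton against it, then build PyTorch from source and run
      # the TorchInductor tests.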
- name: "Checkout"
run: |
set -x
pushd ..
echo "Installing triton"
git clone https://github.com/triton-lang/triton.git
pushd triton
echo "Checking out triton branch or commit"
git checkout ${{ github.event.inputs.triton_pin || 'main' }}
export llvm_hash=$(cat cmake/llvm-hash.txt)
echo "llvm_hash: $llvm_hash"
pushd ..
echo "Cloning llvm-project"
git clone https://github.com/llvm/llvm-project.git
pushd llvm-project
echo "Checking out llvm hash"
git checkout "$llvm_hash"
mkdir build
pushd build
echo "Building llvm"
- name: "installs"
run: |
sudo yum install -y zlib-devel
echo "Installing build-time dependencies"
conda install -y ninja==1.11.1.1 cmake==3.30.2 wheel==0.44.0
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON ../llvm -DLLVM_ENABLE_PROJECTS="mlir;llvm" -DLLVM_TARGETS_TO_BUILD="host;NVPTX;AMDGPU"
ninja
export LLVM_BUILD_DIR=$(pwd)
popd
popd
popd
LLVM_INCLUDE_DIRS=$LLVM_BUILD_DIR/include LLVM_LIBRARY_DIR=$LLVM_BUILD_DIR/lib LLVM_SYSPATH=$LLVM_BUILD_DIR pip install -e python
echo "Installing triton python package"
popd
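      # Clone PyTorch next to triton/ and llvm-project/ so the later build and test
      # steps can find it at ../pytorch.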
- name: "pytorch download"
run: |
echo "Cloning pytorch"
git clone https://github.com/pytorch/pytorch.git
pushd pytorch
echo "Checking out pytorch branch or commit"
git checkout ${{ github.event.inputs.pytorch_pin || 'main' }}
git submodule sync
git submodule update --init --recursive
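      # Build PyTorch from source so the TorchInductor tests exercise the pinned commit
      # together with the Triton package installed above.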
- name: "post pytorch installs"
run: |
pip install -r requirements.txt
pip install mkl-static mkl-include pytest pytest-xdist
echo "Installing magma-cuda121"
conda install -y -c pytorch magma-cuda121
python setup.py install
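      # Run the TorchInductor test suite; `-n 1` keeps pytest-xdist to a single worker.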
      - name: test
        env:
          gpu-arch-type: cuda
          gpu-arch-version: "12.1"
          # docker-image: nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04
        run: |
          pushd ../pytorch
          pytest -n 1 test/inductor/test_torchinductor.py
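  # Best-effort cleanup of the source trees left in the runner's work area; this assumes
  # the job lands on the same self-hosted runner workspace as build-test.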
  clean-test:
    needs: build-test
    runs-on: linux.g5.48xlarge.nvidia.gpu
    timeout-minutes: 30
    steps:
      - name: "cleanup"
        run: |
          pushd ..
          if [ -d triton ]; then
            rm -rf triton
            echo "triton removed"
          fi
          if [ -d pytorch ]; then
            rm -rf pytorch
            echo "pytorch removed"
          fi
          if [ -d llvm-project ]; then
            rm -rf llvm-project
            echo "llvm-project removed"
          fi
          ls -l