diff --git a/libtorch/build.sh b/libtorch/build.sh deleted file mode 100644 index e822feb26..000000000 --- a/libtorch/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -# This is mostly just a shim to manywheel/build.sh -# TODO: Make this a dedicated script to build just libtorch - -set -ex - -SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.9" ${SCRIPTPATH}/../manywheel/build.sh diff --git a/manywheel/LICENSE b/manywheel/LICENSE deleted file mode 100644 index 7d8f7841a..000000000 --- a/manywheel/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 manylinux - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/manywheel/build.sh b/manywheel/build.sh deleted file mode 100755 index e79083ee0..000000000 --- a/manywheel/build.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -case "${GPU_ARCH_TYPE:-BLANK}" in - BLANK) - # Legacy behavior for CircleCI - bash "${SCRIPTPATH}/build_cuda.sh" - ;; - cuda) - bash "${SCRIPTPATH}/build_cuda.sh" - ;; - rocm) - bash "${SCRIPTPATH}/build_rocm.sh" - ;; - cpu | cpu-cxx11-abi | cpu-s390x | xpu) - bash "${SCRIPTPATH}/build_cpu.sh" - ;; - *) - echo "Un-recognized GPU_ARCH_TYPE '${GPU_ARCH_TYPE}', exiting..." 
- exit 1 - ;; -esac diff --git a/manywheel/build_common.sh b/manywheel/build_common.sh deleted file mode 100644 index d540d21f2..000000000 --- a/manywheel/build_common.sh +++ /dev/null @@ -1,506 +0,0 @@ -#!/usr/bin/env bash -# meant to be called only from the neighboring build.sh and build_cpu.sh scripts - -set -ex -SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" - - -# Require only one python installation -if [[ -z "$DESIRED_PYTHON" ]]; then - echo "Need to set DESIRED_PYTHON env variable" - exit 1 -fi -if [[ -n "$BUILD_PYTHONLESS" && -z "$LIBTORCH_VARIANT" ]]; then - echo "BUILD_PYTHONLESS is set, so need LIBTORCH_VARIANT to also be set" - echo "LIBTORCH_VARIANT should be one of shared-with-deps shared-without-deps static-with-deps static-without-deps" - exit 1 -fi - -# Function to retry functions that sometimes timeout or have flaky failures -retry () { - $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) -} - -# TODO move this into the Docker images -OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then - retry yum install -q -y zip openssl -elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then - retry yum install -q -y zip openssl -elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then - retry dnf install -q -y zip openssl -elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then - # TODO: Remove this once nvidia package repos are back online - # Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968 - # shellcheck disable=SC2046 - sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list") - - retry apt-get update - retry apt-get -y install zip openssl -fi - -# We use the package name to test the package by passing this to 'pip install' -# This is the env variable that setup.py uses to name the package. Note that -# pip 'normalizes' the name first by changing all - to _ -if [[ -z "$TORCH_PACKAGE_NAME" ]]; then - TORCH_PACKAGE_NAME='torch' -fi - -if [[ -z "$TORCH_NO_PYTHON_PACKAGE_NAME" ]]; then - TORCH_NO_PYTHON_PACKAGE_NAME='torch_no_python' -fi - -TORCH_PACKAGE_NAME="$(echo $TORCH_PACKAGE_NAME | tr '-' '_')" -TORCH_NO_PYTHON_PACKAGE_NAME="$(echo $TORCH_NO_PYTHON_PACKAGE_NAME | tr '-' '_')" -echo "Expecting the built wheels to all be called '$TORCH_PACKAGE_NAME' or '$TORCH_NO_PYTHON_PACKAGE_NAME'" - -# Version: setup.py uses $PYTORCH_BUILD_VERSION.post$PYTORCH_BUILD_NUMBER if -# PYTORCH_BUILD_NUMBER > 1 -build_version="$PYTORCH_BUILD_VERSION" -build_number="$PYTORCH_BUILD_NUMBER" -if [[ -n "$OVERRIDE_PACKAGE_VERSION" ]]; then - # This will be the *exact* version, since build_number<1 - build_version="$OVERRIDE_PACKAGE_VERSION" - build_number=0 -fi -if [[ -z "$build_version" ]]; then - build_version=1.0.0 -fi -if [[ -z "$build_number" ]]; then - build_number=1 -fi -export PYTORCH_BUILD_VERSION=$build_version -export PYTORCH_BUILD_NUMBER=$build_number - -export CMAKE_LIBRARY_PATH="/opt/intel/lib:/lib:$CMAKE_LIBRARY_PATH" -export CMAKE_INCLUDE_PATH="/opt/intel/include:$CMAKE_INCLUDE_PATH" - -if [[ -e /opt/openssl ]]; then - export OPENSSL_ROOT_DIR=/opt/openssl - export CMAKE_INCLUDE_PATH="/opt/openssl/include":$CMAKE_INCLUDE_PATH -fi - -# If given a python version like 3.6m or 2.7mu, convert this to the format we -# expect. 
The binary CI jobs pass in python versions like this; they also only -# ever pass one python version, so we assume that DESIRED_PYTHON is not a list -# in this case -if [[ -n "$DESIRED_PYTHON" && $DESIRED_PYTHON =~ ([0-9].[0-9]+)t ]]; then - python_digits="$(echo $DESIRED_PYTHON | tr -cd [:digit:])" - py_majmin="${DESIRED_PYTHON}" - DESIRED_PYTHON="cp${python_digits}-cp${python_digits}t" -elif [[ -n "$DESIRED_PYTHON" && "$DESIRED_PYTHON" != cp* ]]; then - python_nodot="$(echo $DESIRED_PYTHON | tr -d m.u)" - DESIRED_PYTHON="cp${python_nodot}-cp${python_nodot}" - if [[ ${python_nodot} -ge 310 ]]; then - py_majmin="${DESIRED_PYTHON:2:1}.${DESIRED_PYTHON:3:2}" - else - py_majmin="${DESIRED_PYTHON:2:1}.${DESIRED_PYTHON:3:1}" - fi -fi - -pydir="/opt/python/$DESIRED_PYTHON" -export PATH="$pydir/bin:$PATH" -echo "Will build for Python version: ${DESIRED_PYTHON} with ${python_installation}" - -mkdir -p /tmp/$WHEELHOUSE_DIR - -export PATCHELF_BIN=/usr/local/bin/patchelf -patchelf_version=$($PATCHELF_BIN --version) -echo "patchelf version: " $patchelf_version -if [[ "$patchelf_version" == "patchelf 0.9" ]]; then - echo "Your patchelf version is too old. Please use version >= 0.10." - exit 1 -fi - -######################################################## -# Compile wheels as well as libtorch -####################################################### -if [[ -z "$PYTORCH_ROOT" ]]; then - echo "Need to set PYTORCH_ROOT env variable" - exit 1 -fi -pushd "$PYTORCH_ROOT" -python setup.py clean -retry pip install -qr requirements.txt -case ${DESIRED_PYTHON} in - cp38*) - retry pip install -q numpy==1.15 - ;; - cp31*) - retry pip install -q --pre numpy==2.1.0 - ;; - # Should catch 3.9+ - *) - retry pip install -q --pre numpy==2.0.2 - ;; -esac - -if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then - export _GLIBCXX_USE_CXX11_ABI=1 -else - export _GLIBCXX_USE_CXX11_ABI=0 -fi - -if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then - echo "Calling build_amd.py at $(date)" - python tools/amd_build/build_amd.py -fi - -# This value comes from binary_linux_build.sh (and should only be set to true -# for master / release branches) -BUILD_DEBUG_INFO=${BUILD_DEBUG_INFO:=0} - -if [[ $BUILD_DEBUG_INFO == "1" ]]; then - echo "Building wheel and debug info" -else - echo "BUILD_DEBUG_INFO was not set, skipping debug info" -fi - -if [[ "$DISABLE_RCCL" = 1 ]]; then - echo "Disabling NCCL/RCCL in pyTorch" - USE_RCCL=0 - USE_NCCL=0 - USE_KINETO=0 -else - USE_RCCL=1 - USE_NCCL=1 - USE_KINETO=1 -fi - -echo "Calling setup.py bdist at $(date)" - -if [[ "$USE_SPLIT_BUILD" == "true" ]]; then - echo "Calling setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)" - time EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \ - BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 \ - BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \ - USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \ - python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR - echo "Finished setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)" - echo "Calling setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)" - time EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \ - BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 \ - BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \ - USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \ - python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR --cmake - echo "Finished setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)" -else - time CMAKE_ARGS=${CMAKE_ARGS[@]} \ - 
EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \ - BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \ - USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \ - python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR -fi -echo "Finished setup.py bdist at $(date)" - -# Build libtorch packages -if [[ -n "$BUILD_PYTHONLESS" ]]; then - # Now build pythonless libtorch - # Note - just use whichever python we happen to be on - python setup.py clean - - if [[ $LIBTORCH_VARIANT = *"static"* ]]; then - STATIC_CMAKE_FLAG="-DTORCH_STATIC=1" - fi - - mkdir -p build - pushd build - echo "Calling tools/build_libtorch.py at $(date)" - time CMAKE_ARGS=${CMAKE_ARGS[@]} \ - EXTRA_CAFFE2_CMAKE_FLAGS="${EXTRA_CAFFE2_CMAKE_FLAGS[@]} $STATIC_CMAKE_FLAG" \ - python ../tools/build_libtorch.py - echo "Finished tools/build_libtorch.py at $(date)" - popd - - mkdir -p libtorch/{lib,bin,include,share} - cp -r build/build/lib libtorch/ - - # for now, the headers for the libtorch package will just be copied in - # from one of the wheels (this is from when this script built multiple - # wheels at once) - ANY_WHEEL=$(ls /tmp/$WHEELHOUSE_DIR/torch*.whl | head -n1) - unzip -d any_wheel $ANY_WHEEL - if [[ -d any_wheel/torch/include ]]; then - cp -r any_wheel/torch/include libtorch/ - else - cp -r any_wheel/torch/lib/include libtorch/ - fi - cp -r any_wheel/torch/share/cmake libtorch/share/ - rm -rf any_wheel - - echo $PYTORCH_BUILD_VERSION > libtorch/build-version - echo "$(pushd $PYTORCH_ROOT && git rev-parse HEAD)" > libtorch/build-hash - - mkdir -p /tmp/$LIBTORCH_HOUSE_DIR - - if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then - LIBTORCH_ABI="cxx11-abi-" - else - LIBTORCH_ABI= - fi - - zip -rq /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION.zip libtorch - cp /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION.zip \ - /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-latest.zip -fi - -popd - -####################################################################### -# ADD DEPENDENCIES INTO THE WHEEL -# -# auditwheel repair doesn't work correctly and is buggy -# so manually do the work of copying dependency libs and patchelfing -# and fixing RECORDS entries correctly -###################################################################### - -fname_with_sha256() { - HASH=$(sha256sum $1 | cut -c1-8) - DIRNAME=$(dirname $1) - BASENAME=$(basename $1) - # Do not rename nvrtc-builtins.so as they are dynamically loaded - # by libnvrtc.so - # Similarly don't mangle libcudnn and libcublas library names - if [[ $BASENAME == "libnvrtc-builtins.s"* || $BASENAME == "libcudnn"* || $BASENAME == "libcublas"* ]]; then - echo $1 - else - INITNAME=$(echo $BASENAME | cut -f1 -d".") - ENDNAME=$(echo $BASENAME | cut -f 2- -d".") - echo "$DIRNAME/$INITNAME-$HASH.$ENDNAME" - fi -} - -fname_without_so_number() { - LINKNAME=$(echo $1 | sed -e 's/\.so.*/.so/g') - echo "$LINKNAME" -} - -make_wheel_record() { - FPATH=$1 - if echo $FPATH | grep RECORD >/dev/null 2>&1; then - # if the RECORD file, then - echo "$FPATH,," - else - HASH=$(openssl dgst -sha256 -binary $FPATH | openssl base64 | sed -e 's/+/-/g' | sed -e 's/\//_/g' | sed -e 's/=//g') - FSIZE=$(ls -nl $FPATH | awk '{print $5}') - echo "$FPATH,sha256=$HASH,$FSIZE" - fi -} - -replace_needed_sofiles() { - find $1 -name '*.so*' | while read sofile; do - origname=$2 - patchedname=$3 - if [[ "$origname" != "$patchedname" ]] || [[ "$DESIRED_CUDA" == *"rocm"* ]]; then - set +e - 
origname=$($PATCHELF_BIN --print-needed $sofile | grep "$origname.*") - ERRCODE=$? - set -e - if [ "$ERRCODE" -eq "0" ]; then - echo "patching $sofile entry $origname to $patchedname" - $PATCHELF_BIN --replace-needed $origname $patchedname $sofile - fi - fi - done -} - -echo 'Built this wheel:' -ls /tmp/$WHEELHOUSE_DIR -mkdir -p "/$WHEELHOUSE_DIR" -mv /tmp/$WHEELHOUSE_DIR/torch*linux*.whl /$WHEELHOUSE_DIR/ - -if [[ "$USE_SPLIT_BUILD" == "true" ]]; then - mv /tmp/$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/ || true -fi - -if [[ -n "$BUILD_PYTHONLESS" ]]; then - mkdir -p /$LIBTORCH_HOUSE_DIR - mv /tmp/$LIBTORCH_HOUSE_DIR/*.zip /$LIBTORCH_HOUSE_DIR - rm -rf /tmp/$LIBTORCH_HOUSE_DIR -fi -rm -rf /tmp/$WHEELHOUSE_DIR -rm -rf /tmp_dir -mkdir /tmp_dir -pushd /tmp_dir - -for pkg in /$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/torch*linux*.whl /$LIBTORCH_HOUSE_DIR/libtorch*.zip; do - - # if the glob didn't match anything - if [[ ! -e $pkg ]]; then - continue - fi - - rm -rf tmp - mkdir -p tmp - cd tmp - cp $pkg . - - unzip -q $(basename $pkg) - rm -f $(basename $pkg) - - if [[ -d torch ]]; then - PREFIX=torch - else - PREFIX=libtorch - fi - - if [[ $pkg != *"without-deps"* ]]; then - # copy over needed dependent .so files over and tag them with their hash - patched=() - for filepath in "${DEPS_LIST[@]}"; do - filename=$(basename $filepath) - destpath=$PREFIX/lib/$filename - if [[ "$filepath" != "$destpath" ]]; then - cp $filepath $destpath - fi - - # ROCm workaround for roctracer dlopens - if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then - patchedpath=$(fname_without_so_number $destpath) - # Keep the so number for XPU dependencies - elif [[ "$DESIRED_CUDA" == *"xpu"* ]]; then - patchedpath=$destpath - else - patchedpath=$(fname_with_sha256 $destpath) - fi - patchedname=$(basename $patchedpath) - if [[ "$destpath" != "$patchedpath" ]]; then - mv $destpath $patchedpath - fi - patched+=("$patchedname") - echo "Copied $filepath to $patchedpath" - done - - echo "patching to fix the so names to the hashed names" - for ((i=0;i<${#DEPS_LIST[@]};++i)); do - replace_needed_sofiles $PREFIX ${DEPS_SONAME[i]} ${patched[i]} - # do the same for caffe2, if it exists - if [[ -d caffe2 ]]; then - replace_needed_sofiles caffe2 ${DEPS_SONAME[i]} ${patched[i]} - fi - done - - # copy over needed auxiliary files - for ((i=0;i<${#DEPS_AUX_SRCLIST[@]};++i)); do - srcpath=${DEPS_AUX_SRCLIST[i]} - dstpath=$PREFIX/${DEPS_AUX_DSTLIST[i]} - mkdir -p $(dirname $dstpath) - cp $srcpath $dstpath - done - fi - - # set RPATH of _C.so and similar to $ORIGIN, $ORIGIN/lib - find $PREFIX -maxdepth 1 -type f -name "*.so*" | while read sofile; do - echo "Setting rpath of $sofile to ${C_SO_RPATH:-'$ORIGIN:$ORIGIN/lib'}" - $PATCHELF_BIN --set-rpath ${C_SO_RPATH:-'$ORIGIN:$ORIGIN/lib'} ${FORCE_RPATH:-} $sofile - $PATCHELF_BIN --print-rpath $sofile - done - - # set RPATH of lib/ files to $ORIGIN - find $PREFIX/lib -maxdepth 1 -type f -name "*.so*" | while read sofile; do - echo "Setting rpath of $sofile to ${LIB_SO_RPATH:-'$ORIGIN'}" - $PATCHELF_BIN --set-rpath ${LIB_SO_RPATH:-'$ORIGIN'} ${FORCE_RPATH:-} $sofile - $PATCHELF_BIN --print-rpath $sofile - done - - # regenerate the RECORD file with new hashes - record_file=$(echo $(basename $pkg) | sed -e 's/-cp.*$/.dist-info\/RECORD/g') - if [[ -e $record_file ]]; then - echo "Generating new record file $record_file" - : > "$record_file" - # generate records for folders in wheel - find * -type f | while read fname; do - make_wheel_record "$fname" >>"$record_file" - done - fi - - if [[ 
$BUILD_DEBUG_INFO == "1" ]]; then - pushd "$PREFIX/lib" - - # Duplicate library into debug lib - cp libtorch_cpu.so libtorch_cpu.so.dbg - - # Keep debug symbols on debug lib - strip --only-keep-debug libtorch_cpu.so.dbg - - # Remove debug info from release lib - strip --strip-debug libtorch_cpu.so - - objcopy libtorch_cpu.so --add-gnu-debuglink=libtorch_cpu.so.dbg - - # Zip up debug info - mkdir -p /tmp/debug - mv libtorch_cpu.so.dbg /tmp/debug/libtorch_cpu.so.dbg - CRC32=$(objcopy --dump-section .gnu_debuglink=>(tail -c4 | od -t x4 -An | xargs echo) libtorch_cpu.so) - - pushd /tmp - PKG_NAME=$(basename "$pkg" | sed 's/\.whl$//g') - zip /tmp/debug-whl-libtorch-"$PKG_NAME"-"$CRC32".zip /tmp/debug/libtorch_cpu.so.dbg - cp /tmp/debug-whl-libtorch-"$PKG_NAME"-"$CRC32".zip "$PYTORCH_FINAL_PACKAGE_DIR" - popd - - popd - fi - - # zip up the wheel back - zip -rq $(basename $pkg) $PREIX* - - # replace original wheel - rm -f $pkg - mv $(basename $pkg) $pkg - cd .. - rm -rf tmp -done - -# Copy wheels to host machine for persistence before testing -if [[ -n "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then - mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true - if [[ -n "$BUILD_PYTHONLESS" ]]; then - cp /$LIBTORCH_HOUSE_DIR/libtorch*.zip "$PYTORCH_FINAL_PACKAGE_DIR" - else - cp /$WHEELHOUSE_DIR/torch*.whl "$PYTORCH_FINAL_PACKAGE_DIR" - fi -fi - -# remove stuff before testing -rm -rf /opt/rh -if ls /usr/local/cuda* >/dev/null 2>&1; then - rm -rf /usr/local/cuda* -fi - - -# Test that all the wheels work -if [[ -z "$BUILD_PYTHONLESS" ]]; then - export OMP_NUM_THREADS=4 # on NUMA machines this takes too long - pushd $PYTORCH_ROOT/test - - # Install the wheel for this Python version - if [[ "$USE_SPLIT_BUILD" == "true" ]]; then - pip uninstall -y "$TORCH_NO_PYTHON_PACKAGE_NAME" || true - fi - - pip uninstall -y "$TORCH_PACKAGE_NAME" - - if [[ "$USE_SPLIT_BUILD" == "true" ]]; then - pip install "$TORCH_NO_PYTHON_PACKAGE_NAME" --no-index -f /$WHEELHOUSE_DIR --no-dependencies -v - fi - - pip install "$TORCH_PACKAGE_NAME" --no-index -f /$WHEELHOUSE_DIR --no-dependencies -v - - # Print info on the libraries installed in this wheel - # Rather than adjust find command to skip non-library files with an embedded *.so* in their name, - # since this is only for reporting purposes, we add the || true to the ldd command. 
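The core of the repackaging step above is: copy each dependent .so into the unpacked wheel's lib directory, rename it with a short sha256 suffix, rewrite DT_NEEDED entries with patchelf, point rpaths at $ORIGIN, and regenerate the wheel's RECORD file. A minimal standalone sketch of that flow for a single library follows; the library, the consumer _C.so, and the paths are illustrative, and it assumes patchelf is on PATH rather than at $PATCHELF_BIN:

# Vendor one dependency into an unpacked wheel tree (illustrative sketch, not the deleted script itself)
dep=/usr/lib64/libgomp.so.1
cp "$dep" torch/lib/libgomp.so.1
hash=$(sha256sum torch/lib/libgomp.so.1 | cut -c1-8)
mv torch/lib/libgomp.so.1 "torch/lib/libgomp-$hash.so.1"                  # hash-suffixed copy, as in fname_with_sha256
patchelf --replace-needed libgomp.so.1 "libgomp-$hash.so.1" torch/_C.so   # rewrite DT_NEEDED in an example consumer
patchelf --set-rpath '$ORIGIN:$ORIGIN/lib' torch/_C.so                    # top-level .so files search ./lib
patchelf --set-rpath '$ORIGIN' "torch/lib/libgomp-$hash.so.1"             # vendored libs search next to themselves
# RECORD entries are then regenerated as "<path>,sha256=<urlsafe base64 digest, no '='>,<size>" via make_wheel_record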
- installed_libraries=($(find "$pydir/lib/python${py_majmin}/site-packages/torch/" -name '*.so*')) - echo "The wheel installed all of the libraries: ${installed_libraries[@]}" - for installed_lib in "${installed_libraries[@]}"; do - ldd "$installed_lib" || true - done - - # Run the tests - echo "$(date) :: Running tests" - pushd "$PYTORCH_ROOT" - LD_LIBRARY_PATH=/usr/local/nvidia/lib64 \ - "${SOURCE_DIR}/../run_tests.sh" manywheel "${py_majmin}" "$DESIRED_CUDA" - popd - echo "$(date) :: Finished tests" -fi diff --git a/manywheel/build_cpu.sh b/manywheel/build_cpu.sh deleted file mode 100755 index 5b8277e44..000000000 --- a/manywheel/build_cpu.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu} - -export TH_BINARY_BUILD=1 -export USE_CUDA=0 - -# Keep an array of cmake variables to add to -if [[ -z "$CMAKE_ARGS" ]]; then - # These are passed to tools/build_pytorch_libs.sh::build() - CMAKE_ARGS=() -fi -if [[ -z "$EXTRA_CAFFE2_CMAKE_FLAGS" ]]; then - # These are passed to tools/build_pytorch_libs.sh::build_caffe2() - EXTRA_CAFFE2_CMAKE_FLAGS=() -fi - -DIR_SUFFIX=cpu -if [[ "$GPU_ARCH_TYPE" == "xpu" ]]; then - DIR_SUFFIX=xpu - # Refer https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpu/2-5.html - source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh - source /opt/intel/oneapi/pti/latest/env/vars.sh - export USE_STATIC_MKL=1 -fi - -WHEELHOUSE_DIR="wheelhouse$DIR_SUFFIX" -LIBTORCH_HOUSE_DIR="libtorch_house$DIR_SUFFIX" -if [[ -z "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then - if [[ -z "$BUILD_PYTHONLESS" ]]; then - PYTORCH_FINAL_PACKAGE_DIR="/remote/wheelhouse$DIR_SUFFIX" - else - PYTORCH_FINAL_PACKAGE_DIR="/remote/libtorch_house$DIR_SUFFIX" - fi -fi -mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true - -OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then - LIBGOMP_PATH="/usr/lib64/libgomp.so.1" -elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then - LIBGOMP_PATH="/usr/lib64/libgomp.so.1" -elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then - LIBGOMP_PATH="/usr/lib64/libgomp.so.1" -elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then - if [[ "$(uname -m)" == "s390x" ]]; then - LIBGOMP_PATH="/usr/lib/s390x-linux-gnu/libgomp.so.1" - else - LIBGOMP_PATH="/usr/lib/x86_64-linux-gnu/libgomp.so.1" - fi -fi - -DEPS_LIST=( - "$LIBGOMP_PATH" -) - -DEPS_SONAME=( - "libgomp.so.1" -) - -if [[ "$GPU_ARCH_TYPE" == "xpu" ]]; then - echo "Bundling with xpu support package libs." 
- DEPS_LIST+=( - "/opt/intel/oneapi/compiler/latest/lib/libsycl-preview.so.7" - "/opt/intel/oneapi/compiler/latest/lib/libOpenCL.so.1" - "/opt/intel/oneapi/compiler/latest/lib/libxptifw.so" - "/opt/intel/oneapi/compiler/latest/lib/libsvml.so" - "/opt/intel/oneapi/compiler/latest/lib/libirng.so" - "/opt/intel/oneapi/compiler/latest/lib/libimf.so" - "/opt/intel/oneapi/compiler/latest/lib/libintlc.so.5" - "/opt/intel/oneapi/compiler/latest/lib/libpi_level_zero.so" - "/opt/intel/oneapi/pti/latest/lib/libpti_view.so.0.9" - "/opt/intel/oneapi/pti/latest/lib/libpti.so.0.9" - ) - DEPS_SONAME+=( - "libsycl-preview.so.7" - "libOpenCL.so.1" - "libxptifw.so" - "libsvml.so" - "libirng.so" - "libimf.so" - "libintlc.so.5" - "libpi_level_zero.so" - "libpti_view.so.0.9" - "libpti.so.0.9" - ) -fi - -rm -rf /usr/local/cuda* - -SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" -if [[ -z "$BUILD_PYTHONLESS" ]]; then - BUILD_SCRIPT=build_common.sh -else - BUILD_SCRIPT=build_libtorch.sh -fi -source ${SOURCE_DIR}/${BUILD_SCRIPT} diff --git a/manywheel/build_cuda.sh b/manywheel/build_cuda.sh deleted file mode 100644 index 4eda14a39..000000000 --- a/manywheel/build_cuda.sh +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P ))" - -export TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -export NCCL_ROOT_DIR=/usr/local/cuda -export TH_BINARY_BUILD=1 -export USE_STATIC_CUDNN=1 -export USE_STATIC_NCCL=1 -export ATEN_STATIC_CUDA=1 -export USE_CUDA_STATIC_LINK=1 -export INSTALL_TEST=0 # dont install test binaries into site-packages -export USE_CUPTI_SO=0 -export USE_CUSPARSELT=${USE_CUSPARSELT:-1} # Enable if not disabled by libtorch build - -# Keep an array of cmake variables to add to -if [[ -z "$CMAKE_ARGS" ]]; then - # These are passed to tools/build_pytorch_libs.sh::build() - CMAKE_ARGS=() -fi -if [[ -z "$EXTRA_CAFFE2_CMAKE_FLAGS" ]]; then - # These are passed to tools/build_pytorch_libs.sh::build_caffe2() - EXTRA_CAFFE2_CMAKE_FLAGS=() -fi - -# Determine CUDA version and architectures to build for -# -# NOTE: We should first check `DESIRED_CUDA` when determining `CUDA_VERSION`, -# because in some cases a single Docker image can have multiple CUDA versions -# on it, and `nvcc --version` might not show the CUDA version we want. 
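To make the version mapping below concrete, here is an illustrative standalone check of how cuXY / cuXYZ strings become an x.y CUDA_VERSION (a bare "x.y" value is simply taken as-is by the regex branch):

# Illustrative only: mirrors the substring arithmetic in the branches below
for d in cu92 cu118 cu124; do
  if [[ ${#d} -eq 4 ]]; then v="${d:2:1}.${d:3:1}"; else v="${d:2:2}.${d:4:1}"; fi
  echo "$d -> $v"   # cu92 -> 9.2, cu118 -> 11.8, cu124 -> 12.4
done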
-if [[ -n "$DESIRED_CUDA" ]]; then - # If the DESIRED_CUDA already matches the format that we expect - if [[ ${DESIRED_CUDA} =~ ^[0-9]+\.[0-9]+$ ]]; then - CUDA_VERSION=${DESIRED_CUDA} - else - # cu90, cu92, cu100, cu101 - if [[ ${#DESIRED_CUDA} -eq 4 ]]; then - CUDA_VERSION="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3:1}" - elif [[ ${#DESIRED_CUDA} -eq 5 ]]; then - CUDA_VERSION="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4:1}" - fi - fi - echo "Using CUDA $CUDA_VERSION as determined by DESIRED_CUDA" - - # There really has to be a better way to do this - eli - # Possibly limiting builds to specific cuda versions be delimiting images would be a choice - if [[ "$OS_NAME" == *"Ubuntu"* ]]; then - echo "Switching to CUDA version ${DESIRED_CUDA}" - /builder/conda/switch_cuda_version.sh "${DESIRED_CUDA}" - fi -else - CUDA_VERSION=$(nvcc --version|grep release|cut -f5 -d" "|cut -f1 -d",") - echo "CUDA $CUDA_VERSION Detected" -fi - -cuda_version_nodot=$(echo $CUDA_VERSION | tr -d '.') - -TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;7.5;8.0;8.6" -case ${CUDA_VERSION} in - 12.4) - if [[ "$GPU_ARCH_TYPE" = "cuda-aarch64" ]]; then - TORCH_CUDA_ARCH_LIST="9.0" - else - TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0+PTX" - fi - EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON") - ;; - 12.1) - TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0" - EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON") - ;; - 11.8) - TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};3.7;9.0" - EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON") - ;; - 11.[67]) - TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};3.7" - EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON") - ;; - *) - echo "unknown cuda version $CUDA_VERSION" - exit 1 - ;; -esac - -export TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST} -echo "${TORCH_CUDA_ARCH_LIST}" - -# Package directories -WHEELHOUSE_DIR="wheelhouse$cuda_version_nodot" -LIBTORCH_HOUSE_DIR="libtorch_house$cuda_version_nodot" -if [[ -z "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then - if [[ -z "$BUILD_PYTHONLESS" ]]; then - PYTORCH_FINAL_PACKAGE_DIR="/remote/wheelhouse$cuda_version_nodot" - else - PYTORCH_FINAL_PACKAGE_DIR="/remote/libtorch_house$cuda_version_nodot" - fi -fi -mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true - -OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then - LIBGOMP_PATH="/usr/lib64/libgomp.so.1" -elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then - LIBGOMP_PATH="/usr/lib64/libgomp.so.1" -elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then - LIBGOMP_PATH="/usr/lib64/libgomp.so.1" -elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then - LIBGOMP_PATH="/usr/lib/x86_64-linux-gnu/libgomp.so.1" -fi - -DEPS_LIST=( - "$LIBGOMP_PATH" -) -DEPS_SONAME=( - "libgomp.so.1" -) - -if [[ $USE_CUSPARSELT == "1" ]]; then - DEPS_SONAME+=( - "libcusparseLt.so.0" - ) - DEPS_LIST+=( - "/usr/local/cuda/lib64/libcusparseLt.so.0" - ) -fi - -if [[ $CUDA_VERSION == "12.1" || $CUDA_VERSION == "12.4" ]]; then - export USE_STATIC_CUDNN=0 - # Try parallelizing nvcc as well - export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2" - - if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then - echo "Bundling with cudnn and cublas." 
- DEPS_LIST+=( - "/usr/local/cuda/lib64/libcudnn_adv.so.9" - "/usr/local/cuda/lib64/libcudnn_cnn.so.9" - "/usr/local/cuda/lib64/libcudnn_graph.so.9" - "/usr/local/cuda/lib64/libcudnn_ops.so.9" - "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9" - "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9" - "/usr/local/cuda/lib64/libcudnn_heuristic.so.9" - "/usr/local/cuda/lib64/libcudnn.so.9" - "/usr/local/cuda/lib64/libcublas.so.12" - "/usr/local/cuda/lib64/libcublasLt.so.12" - "/usr/local/cuda/lib64/libcudart.so.12" - "/usr/local/cuda/lib64/libnvToolsExt.so.1" - "/usr/local/cuda/lib64/libnvrtc.so.12" - "/usr/local/cuda/lib64/libnvrtc-builtins.so" - ) - DEPS_SONAME+=( - "libcudnn_adv.so.9" - "libcudnn_cnn.so.9" - "libcudnn_graph.so.9" - "libcudnn_ops.so.9" - "libcudnn_engines_runtime_compiled.so.9" - "libcudnn_engines_precompiled.so.9" - "libcudnn_heuristic.so.9" - "libcudnn.so.9" - "libcublas.so.12" - "libcublasLt.so.12" - "libcudart.so.12" - "libnvToolsExt.so.1" - "libnvrtc.so.12" - "libnvrtc-builtins.so" - ) - else - echo "Using nvidia libs from pypi." - CUDA_RPATHS=( - '$ORIGIN/../../nvidia/cublas/lib' - '$ORIGIN/../../nvidia/cuda_cupti/lib' - '$ORIGIN/../../nvidia/cuda_nvrtc/lib' - '$ORIGIN/../../nvidia/cuda_runtime/lib' - '$ORIGIN/../../nvidia/cudnn/lib' - '$ORIGIN/../../nvidia/cufft/lib' - '$ORIGIN/../../nvidia/curand/lib' - '$ORIGIN/../../nvidia/cusolver/lib' - '$ORIGIN/../../nvidia/cusparse/lib' - '$ORIGIN/../../nvidia/nccl/lib' - '$ORIGIN/../../nvidia/nvtx/lib' - ) - CUDA_RPATHS=$(IFS=: ; echo "${CUDA_RPATHS[*]}") - export C_SO_RPATH=$CUDA_RPATHS':$ORIGIN:$ORIGIN/lib' - export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN' - export FORCE_RPATH="--force-rpath" - export USE_STATIC_NCCL=0 - export USE_SYSTEM_NCCL=1 - export ATEN_STATIC_CUDA=0 - export USE_CUDA_STATIC_LINK=0 - export USE_CUPTI_SO=1 - export NCCL_INCLUDE_DIR="/usr/local/cuda/include/" - export NCCL_LIB_DIR="/usr/local/cuda/lib64/" - fi -elif [[ $CUDA_VERSION == "11.8" ]]; then - export USE_STATIC_CUDNN=0 - # Try parallelizing nvcc as well - export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2" - # Bundle ptxas into the wheel, see https://github.com/pytorch/pytorch/pull/119750 - export BUILD_BUNDLE_PTXAS=1 - - if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then - echo "Bundling with cudnn and cublas." - DEPS_LIST+=( - "/usr/local/cuda/lib64/libcudnn_adv.so.9" - "/usr/local/cuda/lib64/libcudnn_cnn.so.9" - "/usr/local/cuda/lib64/libcudnn_graph.so.9" - "/usr/local/cuda/lib64/libcudnn_ops.so.9" - "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9" - "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9" - "/usr/local/cuda/lib64/libcudnn_heuristic.so.9" - "/usr/local/cuda/lib64/libcudnn.so.9" - "/usr/local/cuda/lib64/libcublas.so.11" - "/usr/local/cuda/lib64/libcublasLt.so.11" - "/usr/local/cuda/lib64/libcudart.so.11.0" - "/usr/local/cuda/lib64/libnvToolsExt.so.1" - "/usr/local/cuda/lib64/libnvrtc.so.11.2" # this is not a mistake, it links to more specific cuda version - "/usr/local/cuda/lib64/libnvrtc-builtins.so.11.8" - ) - DEPS_SONAME+=( - "libcudnn_adv.so.9" - "libcudnn_cnn.so.9" - "libcudnn_graph.so.9" - "libcudnn_ops.so.9" - "libcudnn_engines_runtime_compiled.so.9" - "libcudnn_engines_precompiled.so.9" - "libcudnn_heuristic.so.9" - "libcudnn.so.9" - "libcublas.so.11" - "libcublasLt.so.11" - "libcudart.so.11.0" - "libnvToolsExt.so.1" - "libnvrtc.so.11.2" - "libnvrtc-builtins.so.11.8" - ) - else - echo "Using nvidia libs from pypi." 
- CUDA_RPATHS=( - '$ORIGIN/../../nvidia/cublas/lib' - '$ORIGIN/../../nvidia/cuda_cupti/lib' - '$ORIGIN/../../nvidia/cuda_nvrtc/lib' - '$ORIGIN/../../nvidia/cuda_runtime/lib' - '$ORIGIN/../../nvidia/cudnn/lib' - '$ORIGIN/../../nvidia/cufft/lib' - '$ORIGIN/../../nvidia/curand/lib' - '$ORIGIN/../../nvidia/cusolver/lib' - '$ORIGIN/../../nvidia/cusparse/lib' - '$ORIGIN/../../nvidia/nccl/lib' - '$ORIGIN/../../nvidia/nvtx/lib' - ) - CUDA_RPATHS=$(IFS=: ; echo "${CUDA_RPATHS[*]}") - export C_SO_RPATH=$CUDA_RPATHS':$ORIGIN:$ORIGIN/lib' - export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN' - export FORCE_RPATH="--force-rpath" - export USE_STATIC_NCCL=0 - export USE_SYSTEM_NCCL=1 - export ATEN_STATIC_CUDA=0 - export USE_CUDA_STATIC_LINK=0 - export USE_CUPTI_SO=1 - export NCCL_INCLUDE_DIR="/usr/local/cuda/include/" - export NCCL_LIB_DIR="/usr/local/cuda/lib64/" - fi -else - echo "Unknown cuda version $CUDA_VERSION" - exit 1 -fi - -# builder/test.sh requires DESIRED_CUDA to know what tests to exclude -export DESIRED_CUDA="$cuda_version_nodot" - -# Switch `/usr/local/cuda` to the desired CUDA version -rm -rf /usr/local/cuda || true -ln -s "/usr/local/cuda-${CUDA_VERSION}" /usr/local/cuda - -# Switch `/usr/local/magma` to the desired CUDA version -rm -rf /usr/local/magma || true -ln -s /usr/local/cuda-${CUDA_VERSION}/magma /usr/local/magma - -export CUDA_VERSION=$(ls /usr/local/cuda/lib64/libcudart.so.*|sort|tac | head -1 | rev | cut -d"." -f -3 | rev) # 10.0.130 -export CUDA_VERSION_SHORT=$(ls /usr/local/cuda/lib64/libcudart.so.*|sort|tac | head -1 | rev | cut -d"." -f -3 | rev | cut -f1,2 -d".") # 10.0 -export CUDNN_VERSION=$(ls /usr/local/cuda/lib64/libcudnn.so.*|sort|tac | head -1 | rev | cut -d"." -f -3 | rev) - -SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" -if [[ -z "$BUILD_PYTHONLESS" ]]; then - BUILD_SCRIPT=build_common.sh -else - BUILD_SCRIPT=build_libtorch.sh -fi -source $SCRIPTPATH/${BUILD_SCRIPT} diff --git a/manywheel/build_libtorch.sh b/manywheel/build_libtorch.sh deleted file mode 100644 index fd330f643..000000000 --- a/manywheel/build_libtorch.sh +++ /dev/null @@ -1,353 +0,0 @@ -#!/usr/bin/env bash -# meant to be called only from the neighboring build.sh and build_cpu.sh scripts - -set -e pipefail -SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" - -# Require only one python installation -if [[ -z "$DESIRED_PYTHON" ]]; then - echo "Need to set DESIRED_PYTHON env variable" - exit 1 -fi -if [[ -n "$BUILD_PYTHONLESS" && -z "$LIBTORCH_VARIANT" ]]; then - echo "BUILD_PYTHONLESS is set, so need LIBTORCH_VARIANT to also be set" - echo "LIBTORCH_VARIANT should be one of shared-with-deps shared-without-deps static-with-deps static-without-deps" - exit 1 -fi - -# Function to retry functions that sometimes timeout or have flaky failures -retry () { - $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) -} - -# TODO move this into the Docker images -OS_NAME=`awk -F= '/^NAME/{print $2}' /etc/os-release` -if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then - retry yum install -q -y zip openssl -elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then - retry yum install -q -y zip openssl -elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then - retry dnf install -q -y zip openssl -elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then - # TODO: Remove this once nvidia package repos are back online - # Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968 - # shellcheck disable=SC2046 - sed -i 's/.*nvidia.*/# 
&/' $(find /etc/apt/ -type f -name "*.list") - retry apt-get update - retry apt-get -y install zip openssl -fi - -# Version: setup.py uses $PYTORCH_BUILD_VERSION.post$PYTORCH_BUILD_NUMBER if -# PYTORCH_BUILD_NUMBER > 1 -build_version="$PYTORCH_BUILD_VERSION" -build_number="$PYTORCH_BUILD_NUMBER" -if [[ -n "$OVERRIDE_PACKAGE_VERSION" ]]; then - # This will be the *exact* version, since build_number<1 - build_version="$OVERRIDE_PACKAGE_VERSION" - build_number=0 -fi -if [[ -z "$build_version" ]]; then - build_version=1.0.0 -fi -if [[ -z "$build_number" ]]; then - build_number=1 -fi -export PYTORCH_BUILD_VERSION=$build_version -export PYTORCH_BUILD_NUMBER=$build_number - -export CMAKE_LIBRARY_PATH="/opt/intel/lib:/lib:$CMAKE_LIBRARY_PATH" -export CMAKE_INCLUDE_PATH="/opt/intel/include:$CMAKE_INCLUDE_PATH" - -# set OPENSSL_ROOT_DIR=/opt/openssl if it exists -if [[ -e /opt/openssl ]]; then - export OPENSSL_ROOT_DIR=/opt/openssl - export CMAKE_INCLUDE_PATH="/opt/openssl/include":$CMAKE_INCLUDE_PATH -fi - -# If given a python version like 3.6m or 2.7mu, convert this to the format we -# expect. The binary CI jobs pass in python versions like this; they also only -# ever pass one python version, so we assume that DESIRED_PYTHON is not a list -# in this case -if [[ -n "$DESIRED_PYTHON" && "$DESIRED_PYTHON" != cp* ]]; then - python_nodot="$(echo $DESIRED_PYTHON | tr -d m.u)" - DESIRED_PYTHON="cp${python_nodot}-cp${python_nodot}" -fi -pydir="/opt/python/$DESIRED_PYTHON" -export PATH="$pydir/bin:$PATH" - -export PATCHELF_BIN=/usr/local/bin/patchelf -patchelf_version=`$PATCHELF_BIN --version` -echo "patchelf version: " $patchelf_version -if [[ "$patchelf_version" == "patchelf 0.9" ]]; then - echo "Your patchelf version is too old. Please use version >= 0.10." - exit 1 -fi - -######################################################## -# Compile wheels as well as libtorch -####################################################### -if [[ -z "$PYTORCH_ROOT" ]]; then - echo "Need to set PYTORCH_ROOT env variable" - exit 1 -fi -pushd "$PYTORCH_ROOT" -python setup.py clean -retry pip install -qr requirements.txt -retry pip install -q numpy==2.0.1 - -if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then - export _GLIBCXX_USE_CXX11_ABI=1 -else - export _GLIBCXX_USE_CXX11_ABI=0 -fi - -if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then - echo "Calling build_amd.py at $(date)" - python tools/amd_build/build_amd.py - # TODO remove this work-around once pytorch sources are updated - export ROCclr_DIR=/opt/rocm/rocclr/lib/cmake/rocclr -fi - -echo "Calling setup.py install at $(date)" - -if [[ $LIBTORCH_VARIANT = *"static"* ]]; then - STATIC_CMAKE_FLAG="-DTORCH_STATIC=1" -fi - -( - set -x - - mkdir -p build - - time CMAKE_ARGS=${CMAKE_ARGS[@]} \ - EXTRA_CAFFE2_CMAKE_FLAGS="${EXTRA_CAFFE2_CMAKE_FLAGS[@]} $STATIC_CMAKE_FLAG" \ - # TODO: Remove this flag once https://github.com/pytorch/pytorch/issues/55952 is closed - CFLAGS='-Wno-deprecated-declarations' \ - BUILD_LIBTORCH_CPU_WITH_DEBUG=1 \ - python setup.py install - - mkdir -p libtorch/{lib,bin,include,share} - - # Make debug folder separate so it doesn't get zipped up with the rest of - # libtorch - mkdir debug - - # Copy over all lib files - cp -rv build/lib/* libtorch/lib/ - cp -rv build/lib*/torch/lib/* libtorch/lib/ - - # Copy over all include files - cp -rv build/include/* libtorch/include/ - cp -rv build/lib*/torch/include/* libtorch/include/ - - # Copy over all of the cmake files - cp -rv build/lib*/torch/share/* libtorch/share/ - - # Split libtorch into debug / release 
version - cp libtorch/lib/libtorch_cpu.so libtorch/lib/libtorch_cpu.so.dbg - - # Keep debug symbols on debug lib - strip --only-keep-debug libtorch/lib/libtorch_cpu.so.dbg - - # Remove debug info from release lib - strip --strip-debug libtorch/lib/libtorch_cpu.so - - # Add a debug link to the release lib to the debug lib (debuggers will then - # search for symbols in a file called libtorch_cpu.so.dbg in some - # predetermined locations) and embed a CRC32 of the debug library into the .so - cd libtorch/lib - - objcopy libtorch_cpu.so --add-gnu-debuglink=libtorch_cpu.so.dbg - cd ../.. - - # Move the debug symbols to its own directory so it doesn't get processed / - # zipped with all the other libraries - mv libtorch/lib/libtorch_cpu.so.dbg debug/libtorch_cpu.so.dbg - - echo "${PYTORCH_BUILD_VERSION}" > libtorch/build-version - echo "$(pushd $PYTORCH_ROOT && git rev-parse HEAD)" > libtorch/build-hash - -) - -if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then - LIBTORCH_ABI="cxx11-abi-" -else - LIBTORCH_ABI= -fi - -( - set -x - - mkdir -p /tmp/$LIBTORCH_HOUSE_DIR - - # objcopy installs a CRC32 into libtorch_cpu above so, so add that to the name here - CRC32=$(objcopy --dump-section .gnu_debuglink=>(tail -c4 | od -t x4 -An | xargs echo) libtorch/lib/libtorch_cpu.so) - - # Zip debug symbols - zip /tmp/$LIBTORCH_HOUSE_DIR/debug-libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION-$CRC32.zip debug/libtorch_cpu.so.dbg - - # Zip and copy libtorch - zip -rq /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION.zip libtorch - cp /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION.zip \ - /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-latest.zip -) - - -popd - -####################################################################### -# ADD DEPENDENCIES INTO THE WHEEL -# -# auditwheel repair doesn't work correctly and is buggy -# so manually do the work of copying dependency libs and patchelfing -# and fixing RECORDS entries correctly -###################################################################### - -fname_with_sha256() { - HASH=$(sha256sum $1 | cut -c1-8) - DIRNAME=$(dirname $1) - BASENAME=$(basename $1) - if [[ $BASENAME == "libnvrtc-builtins.so" || $BASENAME == "libcudnn"* ]]; then - echo $1 - else - INITNAME=$(echo $BASENAME | cut -f1 -d".") - ENDNAME=$(echo $BASENAME | cut -f 2- -d".") - echo "$DIRNAME/$INITNAME-$HASH.$ENDNAME" - fi -} - -fname_without_so_number() { - LINKNAME=$(echo $1 | sed -e 's/\.so.*/.so/g') - echo "$LINKNAME" -} - -make_wheel_record() { - FPATH=$1 - if echo $FPATH | grep RECORD >/dev/null 2>&1; then - # if the RECORD file, then - echo "$FPATH,," - else - HASH=$(openssl dgst -sha256 -binary $FPATH | openssl base64 | sed -e 's/+/-/g' | sed -e 's/\//_/g' | sed -e 's/=//g') - FSIZE=$(ls -nl $FPATH | awk '{print $5}') - echo "$FPATH,sha256=$HASH,$FSIZE" - fi -} - -echo 'Built this package:' -( - set -x - mkdir -p /$LIBTORCH_HOUSE_DIR - mv /tmp/$LIBTORCH_HOUSE_DIR/*.zip /$LIBTORCH_HOUSE_DIR - rm -rf /tmp/$LIBTORCH_HOUSE_DIR -) -TMP_DIR=$(mktemp -d) -trap "rm -rf ${TMP_DIR}" EXIT -pushd "${TMP_DIR}" - -for pkg in /$LIBTORCH_HOUSE_DIR/libtorch*.zip; do - - # if the glob didn't match anything - if [[ ! -e $pkg ]]; then - continue - fi - - rm -rf tmp - mkdir -p tmp - cd tmp - cp $pkg . 
- - unzip -q $(basename $pkg) - rm -f $(basename $pkg) - - PREFIX=libtorch - - if [[ $pkg != *"without-deps"* ]]; then - # copy over needed dependent .so files over and tag them with their hash - patched=() - for filepath in "${DEPS_LIST[@]}"; do - filename=$(basename $filepath) - destpath=$PREFIX/lib/$filename - if [[ "$filepath" != "$destpath" ]]; then - cp $filepath $destpath - fi - - if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then - patchedpath=$(fname_without_so_number $destpath) - else - patchedpath=$(fname_with_sha256 $destpath) - fi - patchedname=$(basename $patchedpath) - if [[ "$destpath" != "$patchedpath" ]]; then - mv $destpath $patchedpath - fi - patched+=("$patchedname") - echo "Copied $filepath to $patchedpath" - done - - echo "patching to fix the so names to the hashed names" - for ((i=0;i<${#DEPS_LIST[@]};++i)); do - find $PREFIX -name '*.so*' | while read sofile; do - origname=${DEPS_SONAME[i]} - patchedname=${patched[i]} - if [[ "$origname" != "$patchedname" ]] || [[ "$DESIRED_CUDA" == *"rocm"* ]]; then - set +e - origname=$($PATCHELF_BIN --print-needed $sofile | grep "$origname.*") - ERRCODE=$? - set -e - if [ "$ERRCODE" -eq "0" ]; then - echo "patching $sofile entry $origname to $patchedname" - $PATCHELF_BIN --replace-needed $origname $patchedname $sofile - fi - fi - done - done - - # copy over needed auxiliary files - for ((i=0;i<${#DEPS_AUX_SRCLIST[@]};++i)); do - srcpath=${DEPS_AUX_SRCLIST[i]} - dstpath=$PREFIX/${DEPS_AUX_DSTLIST[i]} - mkdir -p $(dirname $dstpath) - cp $srcpath $dstpath - done - fi - - # set RPATH of _C.so and similar to $ORIGIN, $ORIGIN/lib - find $PREFIX -maxdepth 1 -type f -name "*.so*" | while read sofile; do - echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/lib' - $PATCHELF_BIN --set-rpath '$ORIGIN:$ORIGIN/lib' $sofile - $PATCHELF_BIN --print-rpath $sofile - done - - # set RPATH of lib/ files to $ORIGIN - find $PREFIX/lib -maxdepth 1 -type f -name "*.so*" | while read sofile; do - echo "Setting rpath of $sofile to " '$ORIGIN' - $PATCHELF_BIN --set-rpath '$ORIGIN' $sofile - $PATCHELF_BIN --print-rpath $sofile - done - - # regenerate the RECORD file with new hashes - record_file=`echo $(basename $pkg) | sed -e 's/-cp.*$/.dist-info\/RECORD/g'` - if [[ -e $record_file ]]; then - echo "Generating new record file $record_file" - rm -f $record_file - # generate records for folders in wheel - find * -type f | while read fname; do - echo $(make_wheel_record $fname) >>$record_file - done - fi - - # zip up the wheel back - zip -rq $(basename $pkg) $PREFIX* - - # replace original wheel - rm -f $pkg - mv $(basename $pkg) $pkg - cd .. - rm -rf tmp -done - -# Copy wheels to host machine for persistence before testing -if [[ -n "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then - cp /$LIBTORCH_HOUSE_DIR/libtorch*.zip "$PYTORCH_FINAL_PACKAGE_DIR" - cp /$LIBTORCH_HOUSE_DIR/debug-libtorch*.zip "$PYTORCH_FINAL_PACKAGE_DIR" -fi diff --git a/manywheel/build_rocm.sh b/manywheel/build_rocm.sh deleted file mode 100755 index 1e14c9d81..000000000 --- a/manywheel/build_rocm.sh +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -export ROCM_HOME=/opt/rocm -export MAGMA_HOME=$ROCM_HOME/magma -# TODO: libtorch_cpu.so is broken when building with Debug info -export BUILD_DEBUG_INFO=0 - -# TODO Are these all used/needed? 
-export TH_BINARY_BUILD=1 -export USE_STATIC_CUDNN=1 -export USE_STATIC_NCCL=1 -export ATEN_STATIC_CUDA=1 -export USE_CUDA_STATIC_LINK=1 -export INSTALL_TEST=0 # dont install test binaries into site-packages -# Set RPATH instead of RUNPATH when using patchelf to avoid LD_LIBRARY_PATH override -export FORCE_RPATH="--force-rpath" - -# Keep an array of cmake variables to add to -if [[ -z "$CMAKE_ARGS" ]]; then - # These are passed to tools/build_pytorch_libs.sh::build() - CMAKE_ARGS=() -fi -if [[ -z "$EXTRA_CAFFE2_CMAKE_FLAGS" ]]; then - # These are passed to tools/build_pytorch_libs.sh::build_caffe2() - EXTRA_CAFFE2_CMAKE_FLAGS=() -fi - -# Determine ROCm version and architectures to build for -# -# NOTE: We should first check `DESIRED_CUDA` when determining `ROCM_VERSION` -if [[ -n "$DESIRED_CUDA" ]]; then - if ! echo "${DESIRED_CUDA}"| grep "^rocm" >/dev/null 2>/dev/null; then - export DESIRED_CUDA="rocm${DESIRED_CUDA}" - fi - # rocm3.7, rocm3.5.1 - ROCM_VERSION="$DESIRED_CUDA" - echo "Using $ROCM_VERSION as determined by DESIRED_CUDA" -else - echo "Must set DESIRED_CUDA" - exit 1 -fi - -# Package directories -WHEELHOUSE_DIR="wheelhouse$ROCM_VERSION" -LIBTORCH_HOUSE_DIR="libtorch_house$ROCM_VERSION" -if [[ -z "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then - if [[ -z "$BUILD_PYTHONLESS" ]]; then - PYTORCH_FINAL_PACKAGE_DIR="/remote/wheelhouse$ROCM_VERSION" - else - PYTORCH_FINAL_PACKAGE_DIR="/remote/libtorch_house$ROCM_VERSION" - fi -fi -mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true - -# To make version comparison easier, create an integer representation. -ROCM_VERSION_CLEAN=$(echo ${ROCM_VERSION} | sed s/rocm//) -save_IFS="$IFS" -IFS=. ROCM_VERSION_ARRAY=(${ROCM_VERSION_CLEAN}) -IFS="$save_IFS" -if [[ ${#ROCM_VERSION_ARRAY[@]} == 2 ]]; then - ROCM_VERSION_MAJOR=${ROCM_VERSION_ARRAY[0]} - ROCM_VERSION_MINOR=${ROCM_VERSION_ARRAY[1]} - ROCM_VERSION_PATCH=0 -elif [[ ${#ROCM_VERSION_ARRAY[@]} == 3 ]]; then - ROCM_VERSION_MAJOR=${ROCM_VERSION_ARRAY[0]} - ROCM_VERSION_MINOR=${ROCM_VERSION_ARRAY[1]} - ROCM_VERSION_PATCH=${ROCM_VERSION_ARRAY[2]} -else - echo "Unhandled ROCM_VERSION ${ROCM_VERSION}" - exit 1 -fi -ROCM_INT=$(($ROCM_VERSION_MAJOR * 10000 + $ROCM_VERSION_MINOR * 100 + $ROCM_VERSION_PATCH)) - -# Required ROCm libraries -ROCM_SO_FILES=( - "libMIOpen.so" - "libamdhip64.so" - "libhipblas.so" - "libhipfft.so" - "libhiprand.so" - "libhipsolver.so" - "libhipsparse.so" - "libhsa-runtime64.so" - "libamd_comgr.so" - "libmagma.so" - "librccl.so" - "librocblas.so" - "librocfft.so" - "librocm_smi64.so" - "librocrand.so" - "librocsolver.so" - "librocsparse.so" - "libroctracer64.so" - "libroctx64.so" - "libhipblaslt.so" - "libhiprtc.so" -) - -if [[ $ROCM_INT -ge 60100 ]]; then - ROCM_SO_FILES+=("librocprofiler-register.so") -fi - -if [[ $ROCM_INT -ge 60200 ]]; then - ROCM_SO_FILES+=("librocm-core.so") -fi - -OS_NAME=`awk -F= '/^NAME/{print $2}' /etc/os-release` -if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then - LIBGOMP_PATH="/usr/lib64/libgomp.so.1" - LIBNUMA_PATH="/usr/lib64/libnuma.so.1" - LIBELF_PATH="/usr/lib64/libelf.so.1" - LIBTINFO_PATH="/usr/lib64/libtinfo.so.5" - LIBDRM_PATH="/opt/amdgpu/lib64/libdrm.so.2" - LIBDRM_AMDGPU_PATH="/opt/amdgpu/lib64/libdrm_amdgpu.so.1" - if [[ $ROCM_INT -ge 60100 ]]; then - # Below libs are direct dependencies of libhipsolver - LIBSUITESPARSE_CONFIG_PATH="/lib64/libsuitesparseconfig.so.4" - LIBCHOLMOD_PATH="/lib64/libcholmod.so.2" - # Below libs are direct dependencies of libcholmod - LIBAMD_PATH="/lib64/libamd.so.2" - LIBCAMD_PATH="/lib64/libcamd.so.2" - 
LIBCCOLAMD_PATH="/lib64/libccolamd.so.2" - LIBCOLAMD_PATH="/lib64/libcolamd.so.2" - LIBSATLAS_PATH="/lib64/atlas/libsatlas.so.3" - # Below libs are direct dependencies of libsatlas - LIBGFORTRAN_PATH="/lib64/libgfortran.so.3" - LIBQUADMATH_PATH="/lib64/libquadmath.so.0" - fi - MAYBE_LIB64=lib64 -elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then - LIBGOMP_PATH="/usr/lib/x86_64-linux-gnu/libgomp.so.1" - LIBNUMA_PATH="/usr/lib/x86_64-linux-gnu/libnuma.so.1" - LIBELF_PATH="/usr/lib/x86_64-linux-gnu/libelf.so.1" - if [[ $ROCM_INT -ge 50300 ]]; then - LIBTINFO_PATH="/lib/x86_64-linux-gnu/libtinfo.so.6" - else - LIBTINFO_PATH="/lib/x86_64-linux-gnu/libtinfo.so.5" - fi - LIBDRM_PATH="/usr/lib/x86_64-linux-gnu/libdrm.so.2" - LIBDRM_AMDGPU_PATH="/usr/lib/x86_64-linux-gnu/libdrm_amdgpu.so.1" - if [[ $ROCM_INT -ge 60100 ]]; then - # Below libs are direct dependencies of libhipsolver - LIBCHOLMOD_PATH="/lib/x86_64-linux-gnu/libcholmod.so.3" - # Below libs are direct dependencies of libcholmod - LIBSUITESPARSE_CONFIG_PATH="/lib/x86_64-linux-gnu/libsuitesparseconfig.so.5" - LIBAMD_PATH="/lib/x86_64-linux-gnu/libamd.so.2" - LIBCAMD_PATH="/lib/x86_64-linux-gnu/libcamd.so.2" - LIBCCOLAMD_PATH="/lib/x86_64-linux-gnu/libccolamd.so.2" - LIBCOLAMD_PATH="/lib/x86_64-linux-gnu/libcolamd.so.2" - LIBMETIS_PATH="/lib/x86_64-linux-gnu/libmetis.so.5" - LIBLAPACK_PATH="/lib/x86_64-linux-gnu/liblapack.so.3" - LIBBLAS_PATH="/lib/x86_64-linux-gnu/libblas.so.3" - # Below libs are direct dependencies of libblas - LIBGFORTRAN_PATH="/lib/x86_64-linux-gnu/libgfortran.so.5" - LIBQUADMATH_PATH="/lib/x86_64-linux-gnu/libquadmath.so.0" - fi - MAYBE_LIB64=lib -fi -OS_SO_PATHS=($LIBGOMP_PATH $LIBNUMA_PATH\ - $LIBELF_PATH $LIBTINFO_PATH\ - $LIBDRM_PATH $LIBDRM_AMDGPU_PATH\ - $LIBSUITESPARSE_CONFIG_PATH\ - $LIBCHOLMOD_PATH $LIBAMD_PATH\ - $LIBCAMD_PATH $LIBCCOLAMD_PATH\ - $LIBCOLAMD_PATH $LIBSATLAS_PATH\ - $LIBGFORTRAN_PATH $LIBQUADMATH_PATH\ - $LIBMETIS_PATH $LIBLAPACK_PATH\ - $LIBBLAS_PATH) -OS_SO_FILES=() -for lib in "${OS_SO_PATHS[@]}" -do - file_name="${lib##*/}" # Substring removal of path to get filename - OS_SO_FILES[${#OS_SO_FILES[@]}]=$file_name # Append lib to array -done - -# PyTorch-version specific -# AOTriton dependency only for PyTorch >= 2.4 -if (( $(echo "${PYTORCH_VERSION} 2.4" | awk '{print ($1 >= $2)}') )); then - ROCM_SO_FILES+=("libaotriton_v2.so") -fi - -# rocBLAS library files -ROCBLAS_LIB_SRC=$ROCM_HOME/lib/rocblas/library -ROCBLAS_LIB_DST=lib/rocblas/library -ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; seperated arch list to bar for grep -ARCH_SPECIFIC_FILES=$(ls $ROCBLAS_LIB_SRC | grep -E $ARCH) -OTHER_FILES=$(ls $ROCBLAS_LIB_SRC | grep -v gfx) -ROCBLAS_LIB_FILES=($ARCH_SPECIFIC_FILES $OTHER_FILES) - -# hipblaslt library files -HIPBLASLT_LIB_SRC=$ROCM_HOME/lib/hipblaslt/library -HIPBLASLT_LIB_DST=lib/hipblaslt/library -ARCH_SPECIFIC_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -E $ARCH) -OTHER_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -v gfx) -HIPBLASLT_LIB_FILES=($ARCH_SPECIFIC_FILES $OTHER_FILES) - -# ROCm library files -ROCM_SO_PATHS=() -for lib in "${ROCM_SO_FILES[@]}" -do - file_path=($(find $ROCM_HOME/lib/ -name "$lib")) # First search in lib - if [[ -z $file_path ]]; then - if [ -d "$ROCM_HOME/lib64/" ]; then - file_path=($(find $ROCM_HOME/lib64/ -name "$lib")) # Then search in lib64 - fi - fi - if [[ -z $file_path ]]; then - file_path=($(find $ROCM_HOME/ -name "$lib")) # Then search in ROCM_HOME - fi - if [[ -z $file_path ]]; then - echo "Error: Library file $lib is not found." 
>&2 - exit 1 - fi - ROCM_SO_PATHS[${#ROCM_SO_PATHS[@]}]="$file_path" # Append lib to array -done - -DEPS_LIST=( - ${ROCM_SO_PATHS[*]} - ${OS_SO_PATHS[*]} -) - -DEPS_SONAME=( - ${ROCM_SO_FILES[*]} - ${OS_SO_FILES[*]} -) - -DEPS_AUX_SRCLIST=( - "${ROCBLAS_LIB_FILES[@]/#/$ROCBLAS_LIB_SRC/}" - "${HIPBLASLT_LIB_FILES[@]/#/$HIPBLASLT_LIB_SRC/}" - "/opt/amdgpu/share/libdrm/amdgpu.ids" -) - -DEPS_AUX_DSTLIST=( - "${ROCBLAS_LIB_FILES[@]/#/$ROCBLAS_LIB_DST/}" - "${HIPBLASLT_LIB_FILES[@]/#/$HIPBLASLT_LIB_DST/}" - "share/libdrm/amdgpu.ids" -) - -# MIOpen library files -MIOPEN_SHARE_SRC=$ROCM_HOME/share/miopen/db -MIOPEN_SHARE_DST=share/miopen/db -MIOPEN_SHARE_FILES=($(ls $MIOPEN_SHARE_SRC | grep -E $ARCH)) -DEPS_AUX_SRCLIST+=(${MIOPEN_SHARE_FILES[@]/#/$MIOPEN_SHARE_SRC/}) -DEPS_AUX_DSTLIST+=(${MIOPEN_SHARE_FILES[@]/#/$MIOPEN_SHARE_DST/}) - -# RCCL library files -RCCL_SHARE_SRC=$ROCM_HOME/share/rccl/msccl-algorithms -RCCL_SHARE_DST=share/rccl/msccl-algorithms -RCCL_SHARE_FILES=($(ls $RCCL_SHARE_SRC)) -DEPS_AUX_SRCLIST+=(${RCCL_SHARE_FILES[@]/#/$RCCL_SHARE_SRC/}) -DEPS_AUX_DSTLIST+=(${RCCL_SHARE_FILES[@]/#/$RCCL_SHARE_DST/}) - -echo "PYTORCH_ROCM_ARCH: ${PYTORCH_ROCM_ARCH}" - -SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" -if [[ -z "$BUILD_PYTHONLESS" ]]; then - BUILD_SCRIPT=build_common.sh -else - BUILD_SCRIPT=build_libtorch.sh -fi -source $SCRIPTPATH/${BUILD_SCRIPT} diff --git a/manywheel/test_wheel.sh b/manywheel/test_wheel.sh deleted file mode 100755 index ada7d93f0..000000000 --- a/manywheel/test_wheel.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -set -e - -yum install -y wget git - -rm -rf /usr/local/cuda* - -# Install Anaconda -if ! ls /py -then - echo "Miniconda needs to be installed" - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh - bash ~/miniconda.sh -b -p /py -else - echo "Miniconda is already installed" -fi - -export PATH="/py/bin:$PATH" - -# Anaconda token -if ls /remote/token -then - source /remote/token -fi - -conda install -y conda-build anaconda-client -
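For context, the entry points removed above were configured entirely through environment variables; a typical invocation looked roughly like the following (paths, versions, and the libtorch variant are illustrative examples, not values taken from CI):

# CPU wheel build for one Python version (illustrative values)
GPU_ARCH_TYPE=cpu DESIRED_PYTHON=3.11 PYTORCH_ROOT=/pytorch \
  PYTORCH_FINAL_PACKAGE_DIR=/remote/wheelhousecpu \
  bash manywheel/build.sh

# Python-less libtorch build, as the removed libtorch/build.sh shim did
USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 LIBTORCH_VARIANT=shared-with-deps \
  DESIRED_PYTHON=3.9 bash manywheel/build.sh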