pt25 update
YanWenKun committed Dec 18, 2024
1 parent 35e1ad8 commit 5e474d2
Showing 15 changed files with 252 additions and 92 deletions.
20 changes: 5 additions & 15 deletions pt25-cu124/attachments/!FIRST_RUN-compile-install.bat
@@ -1,16 +1,17 @@
@REM Edit this to your GPU arch.
@REM You don't need to add the "+PTX". Here it works as a fail-safe (providing forward compatibility).

set TORCH_CUDA_ARCH_LIST=6.1+PTX
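@REM (Optional sketch) If you are unsure of your GPU arch, the bundled PyTorch can report
@REM its compute capability; e.g. (8, 6) corresponds to TORCH_CUDA_ARCH_LIST=8.6
rem .\python_embeded\python.exe -c "import torch; print(torch.cuda.get_device_capability())"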

@REM ===========================================================================

set CMAKE_ARGS=-DBUILD_opencv_world=ON -DWITH_CUDA=ON -DCUDA_FAST_MATH=ON -DWITH_CUBLAS=ON -DWITH_NVCUVID=ON
set PATH=%PATH%;%~dp0\python_embeded\Scripts

@REM Compile-install PyTorch3D
@REM PyTorch3D on Windows works best through a compile-install; a binary install will fail on some workflows,
@REM e.g. "CUDA error: no kernel image is available for execution on the device"

set CMAKE_ARGS=-DBUILD_opencv_world=ON -DWITH_CUDA=ON -DCUDA_FAST_MATH=ON -DWITH_CUBLAS=ON -DWITH_NVCUVID=ON
set PATH=%PATH%;%~dp0\python_embeded\Scripts

.\python_embeded\python.exe -s -m pip install --force-reinstall ^
.\extras\pytorch3d
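@REM (Optional, hedged check) A quick smoke test of the compiled CUDA kernels; it should print
@REM a tensor shape rather than "no kernel image is available for execution on the device".
rem .\python_embeded\python.exe -c "import torch, pytorch3d.ops; p = torch.rand(1, 8, 3, device='cuda'); print(pytorch3d.ops.knn_points(p, p, K=3).idx.shape)"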

@@ -39,20 +40,9 @@ set PATH=%PATH%;%~dp0\python_embeded\Scripts
.\extras\diff-gaussian-rasterization

@REM Differential Octree Rasterization
@REM Note that pip will automatically clone git submodules; there is no need to clone them explicitly.
.\python_embeded\python.exe -s -m pip install ^
.\extras\diffoctreerast

@REM (Optional) Flash Attention
@REM flash-attn can ONLY be used on Ampere and later GPUs (RTX 30 series and beyond)
@REM Safe to remove this if you are not using RTX 30/40 or A100+.

@REM Limit Ninja jobs to avoid OOM

set MAX_JOBS=4

.\python_embeded\python.exe -s -m pip install ^
flash-attn --no-build-isolation
.\extras\diffoctreerast

@REM ===========================================================================

37 changes: 34 additions & 3 deletions pt25-cu124/attachments/reinstall-deps-for-3d.bat
@@ -20,22 +20,33 @@ pause

@echo on

@REM In order to save your time on compiling, edit this line according to your GPU arch.
@REM ===========================================================================

@REM [IMPORTANT] To save compilation time, edit this line according to your GPU arch.
@REM Ref: https://github.com/ashawkey/stable-dreamfusion/issues/360#issuecomment-2292510049
@REM Ref: https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/

set TORCH_CUDA_ARCH_LIST=5.2+PTX;6.0;6.1+PTX;7.5;8.0;8.6;8.9+PTX

set CMAKE_ARGS=-DBUILD_opencv_world=ON -DWITH_CUDA=ON -DCUDA_FAST_MATH=ON -DWITH_CUBLAS=ON -DWITH_NVCUVID=ON
@REM ===========================================================================

set PATH=%PATH%;%~dp0\python_embeded\Scripts

set CMAKE_ARGS=-DBUILD_opencv_world=ON -DWITH_CUDA=ON -DCUDA_FAST_MATH=ON -DWITH_CUBLAS=ON -DWITH_NVCUVID=ON

.\python_embeded\python.exe -s -m pip install --force-reinstall ^
spconv-cu124

if not exist ".\tmp_build" mkdir tmp_build

.\python_embeded\python.exe -s -m pip install numpy==1.26.4

git clone --depth=1 https://github.com/MrForExample/Comfy3D_Pre_Builds.git ^
.\tmp_build\Comfy3D_Pre_Builds

git clone --depth=1 https://github.com/autonomousvision/mip-splatting.git ^
.\tmp_build\mip-splatting

.\python_embeded\python.exe -s -m pip wheel -w tmp_build ^
.\tmp_build\Comfy3D_Pre_Builds\_Libs\pointnet2_ops

@@ -46,7 +57,14 @@ git clone --depth=1 https://github.com/MrForExample/Comfy3D_Pre_Builds.git ^
.\tmp_build\Comfy3D_Pre_Builds\_Libs\vox2seq

.\python_embeded\python.exe -s -m pip wheel -w tmp_build ^
git+https://github.com/ashawkey/diff-gaussian-rasterization.git
.\tmp_build\mip-splatting\submodules\diff-gaussian-rasterization

@REM Note that pip will automatically clone git submodules; there is no need to clone them explicitly.
.\python_embeded\python.exe -s -m pip wheel -w tmp_build ^
git+https://github.com/JeffreyXiang/diffoctreerast.git
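@REM (For reference only) pip's implicit clone is roughly equivalent to this explicit command;
@REM the target folder name below is illustrative.
rem git clone --recurse-submodules --depth=1 https://github.com/JeffreyXiang/diffoctreerast.git .\tmp_build\diffoctreerast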

.\python_embeded\python.exe -s -m pip wheel -w tmp_build ^
git+https://github.com/EasternJournalist/utils3d.git

.\python_embeded\python.exe -s -m pip wheel -w tmp_build ^
git+https://github.com/ashawkey/kiuikit.git
@@ -63,4 +81,17 @@ del .\tmp_build\numpy-2*.whl

for %%i in (.\tmp_build\*.whl) do .\python_embeded\python.exe -s -m pip install --force-reinstall "%%i"

@REM ===========================================================================

@REM (Optional) Flash Attention for TRELLIS demo
@REM "flash-attn" can ONLY be used on Ampere and later GPUs (RTX 30 series / A100 and beyond).
@REM WARNING: VERY long build time!

@REM Limit Ninja jobs to avoid OOM. If you have more than 96 GB of RAM, just remove this line.
rem set MAX_JOBS=4

rem .\python_embeded\python.exe -s -m pip install flash-attn --no-build-isolation
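@REM (Optional check) If the build above succeeded, pip can confirm the installed package:
rem .\python_embeded\python.exe -s -m pip show flash-attn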

@REM ===========================================================================

.\python_embeded\python.exe -s -m pip install numpy==1.26.4
3 changes: 3 additions & 0 deletions pt25-cu124/attachments/run.bat
@@ -20,6 +20,9 @@ rem set HF_HUB_ENABLE_HF_TRANSFER=1
@REM This command redirects HuggingFace-Hub to download model files in this folder.
set HF_HUB_CACHE=%~dp0\HuggingFaceHub

@REM This command redirects PyTorch Hub to download model files in this folder.
set TORCH_HOME=%~dp0\TorchHome
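@REM (Optional check) With TORCH_HOME set, torch.hub.get_dir() should point inside the folder above.
rem .\python_embeded\python.exe -c "import torch; print(torch.hub.get_dir())"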

@REM This command will set PATH environment variable.
set PATH=%PATH%;%~dp0\python_embeded\Scripts

36 changes: 21 additions & 15 deletions pt25-cu124/attachments/run_trellis_demo.sh
@@ -1,26 +1,29 @@
#!/bin/bash
set -eu

# The TRELLIS demo was written in Linux context in mind. So here we use bash to run it.
# The TRELLIS demo was written with Linux in mind, so here we use Bash to run it.
# You need to install <Git for Windows> with <Git Bash> (installed by default).
# Download: https://git-scm.com/download/win

################################################################################
# Edit this first, according to your GPU model!
export TORCH_CUDA_ARCH_LIST="6.1+PTX"

# "flash-attn" can ONLY be used on Ampere and later GPUs (RTX 30 series and beyond).
# Change it to "xformers" if you can't start the program.
export ATTN_BACKEND="flash-attn"
################################################################################
# Optional Optimizations

# "auto" will be faster but will do benchmarking at the beginning.
# If run only once, set to "native".
export SPCONV_ALGO="auto"
# "auto" will be faster but will do benchmarking at the beginning.
export SPCONV_ALGO="native"

# Defaults to "xformers" for compatibility.
# Switch to "flash-attn" for higher performance.
# Flash Attention can ONLY be used on Ampere and later GPUs (RTX 30 series / A100 and beyond).
export ATTN_BACKEND="xformers"
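# (Optional, hedged sketch) Auto-select the backend from the GPU's compute capability;
# this assumes the bundled PyTorch can see the GPU. Ampere and newer report a major version >= 8.
#cc="$(./python_embeded/python.exe -c 'import torch; print(torch.cuda.get_device_capability()[0])')"
#if [ "$cc" -ge 8 ]; then export ATTN_BACKEND="flash-attn"; fi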

################################################################################

# To set proxy, edit and uncomment the lines below
# To set a proxy, uncomment and edit the lines below
# (remove the '#' at the beginning of each line).
#export HTTP_PROXY=http://localhost:1081
#export HTTPS_PROXY=$HTTP_PROXY
@@ -40,19 +43,16 @@ export SPCONV_ALGO="auto"
#export PIP_INDEX_URL="https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
#export HF_ENDPOINT="https://hf-mirror.com"

# To set HuggingFace Access Token, uncomment and edit the line below.
# https://huggingface.co/settings/tokens
#export HF_TOKEN=

# To enable HuggingFace Hub's experimental high-speed file transfer, uncomment the line below.
# https://huggingface.co/docs/huggingface_hub/hf_transfer
#export HF_HUB_ENABLE_HF_TRANSFER=1
################################################################################

workdir="$(pwd)"

# This command redirects HuggingFace-Hub to download model files in this folder.
export HF_HUB_CACHE="$workdir/HuggingFaceHub"

# This command redirects PyTorch Hub to download model files in this folder.
export TORCH_HOME="$workdir/TorchHome"

# This command will set PATH environment variable.
export PATH="${PATH}:$workdir/python_embeded/Scripts"

@@ -76,4 +76,10 @@ fi ;
$workdir/python_embeded/Scripts/huggingface-cli.exe download JeffreyXiang/TRELLIS-image-large

# Run the TRELLIS official Gradio demo
./python_embeded/python.exe -s TRELLIS/app.py

echo "########################################"
echo "[INFO] Starting TRELLIS demo..."
echo "########################################"

cd TRELLIS
../python_embeded/python.exe -s app.py
@@ -1,34 +1,52 @@
@REM Edit this to match your GPU architecture.
@REM You don't need to keep "+PTX" when editing; it provides forward compatibility as a safeguard in case this step is skipped.

set TORCH_CUDA_ARCH_LIST=6.1+PTX

@REM Compile and install PyTorch3D
@REM PyTorch3D is not very Windows-friendly; any binary install may fail at some point and is very hard to troubleshoot, so it is best to compile and install it ahead of time.
@REM Use the Tsinghua PyPI mirror
set PIP_INDEX_URL=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple

@REM ===========================================================================

set CMAKE_ARGS=-DBUILD_opencv_world=ON -DWITH_CUDA=ON -DCUDA_FAST_MATH=ON -DWITH_CUBLAS=ON -DWITH_NVCUVID=ON
set PATH=%PATH%;%~dp0\python_embeded\Scripts
set CMAKE_ARGS=-DBUILD_opencv_world=ON -DWITH_CUDA=ON -DCUDA_FAST_MATH=ON -DWITH_CUBLAS=ON -DWITH_NVCUVID=ON

set PIP_INDEX_URL=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
@REM Compile and install PyTorch3D
@REM PyTorch3D is not very Windows-friendly; any binary install may fail at some point and is very hard to troubleshoot, so it is best to compile and install it ahead of time.

.\python_embeded\python.exe -s -m pip install --force-reinstall ^
"git+https://ghp.ci/https://github.com/facebookresearch/pytorch3d.git"
.\extras\pytorch3d

@REM Compile and install pointnet2_ops, used by Triplane Gaussian

.\python_embeded\python.exe -s -m pip install --force-reinstall ^
.\extras\pointnet2_ops

@REM Compile and install diff-gaussian-rasterization, used by Triplane Gaussian

.\python_embeded\python.exe -s -m pip install --force-reinstall ^
"git+https://ghp.ci/https://github.com/ashawkey/diff-gaussian-rasterization.git"

@REM Compile and install simple-knn
@REM Compile and install simple-knn, used by Gaussian Splatting

.\python_embeded\python.exe -s -m pip install --force-reinstall ^
.\extras\simple-knn

@REM Compile and install vox2seq, used by TRELLIS
@REM ===========================================================================
@REM Components used by TRELLIS
@REM Note that we skip 'utils3d', which does not need to be recompiled locally

@REM vox2seq

.\python_embeded\python.exe -s -m pip install --force-reinstall ^
.\extras\vox2seq

@REM diff-gaussian-rasterization

.\python_embeded\python.exe -s -m pip install --force-reinstall ^
.\extras\diff-gaussian-rasterization

@REM Differential Octree Rasterization

.\python_embeded\python.exe -s -m pip install ^
.\extras\diffoctreerast

@REM ===========================================================================

.\python_embeded\python.exe -s -m pip install numpy==1.26.4
3 changes: 3 additions & 0 deletions pt25-cu124/attachments/中文脚本/run_cn.bat
@@ -22,6 +22,9 @@ set HF_ENDPOINT=https://hf-mirror.com
@REM This environment variable tells HuggingFace Hub to download models into "this folder\HuggingFaceHub" instead of the user's ".cache" directory.
set HF_HUB_CACHE=%~dp0\HuggingFaceHub

@REM This environment variable tells PyTorch Hub to download models into "this folder\TorchHome" instead of the user's ".cache" directory.
set TORCH_HOME=%~dp0\TorchHome

@REM This command configures the PATH environment variable.
set PATH=%PATH%;%~dp0\python_embeded\Scripts

85 changes: 85 additions & 0 deletions pt25-cu124/attachments/中文脚本/run_trellis_demo_cn.sh
@@ -0,0 +1,85 @@
#!/bin/bash
set -eu

# The official TRELLIS demo was written with Linux in mind and is not cross-platform, so it has to be run with Bash here
# Download Git for Windows: https://git-scm.com/download/win
# and choose Git Bash (the default) during installation

################################################################################
# Be sure to set this according to your GPU model!
export TORCH_CUDA_ARCH_LIST="6.1+PTX"

################################################################################
# Performance optimizations (optional)

# For a one-off run, "native" is fine
# For long-running use, "auto" gives better performance, but it spends time benchmarking at startup.
export SPCONV_ALGO="native"

# Defaults to "xformers" for compatibility
# For higher performance, try switching to "flash-attn"
# Flash Attention can only be used on Ampere (RTX 30 series / A100) and newer GPUs
export ATTN_BACKEND="xformers"

################################################################################

# To set up a proxy, uncomment and edit the section below
# (remove the leading '#' on each line)
#export HTTP_PROXY=http://localhost:1081
#export HTTPS_PROXY=$HTTP_PROXY
#export http_proxy=$HTTP_PROXY
#export https_proxy=$HTTP_PROXY
#export NO_PROXY="localhost,*.local,*.internal,[::1],fd00::/7,
#10.0.0.0/8,127.0.0.0/8,169.254.0.0/16,172.16.0.0/12,192.168.0.0/16,
#10.*,127.*,169.254.*,172.16.*,172.17.*,172.18.*,172.19.*,172.20.*,
#172.21.*,172.22.*,172.23.*,172.24.*,172.25.*,172.26.*,172.27.*,
#172.28.*,172.29.*,172.30.*,172.31.*,172.32.*,192.168.*,
#*.cn,ghproxy.com,*.ghproxy.com,ghproxy.org,*.ghproxy.org,
#gh-proxy.com,*.gh-proxy.com,ghproxy.net,*.ghproxy.net"
#export no_proxy=$NO_PROXY
#echo "[INFO] Proxy set to $HTTP_PROXY"

# Use mirror sites located in mainland China
export PIP_INDEX_URL="https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
export HF_ENDPOINT="https://hf-mirror.com"

################################################################################

workdir="$(pwd)"

# This environment variable tells HuggingFace Hub to download models into "this folder\HuggingFaceHub" instead of the user's ".cache" directory.
export HF_HUB_CACHE="$workdir/HuggingFaceHub"

# This environment variable tells PyTorch Hub to download models into "this folder\TorchHome" instead of the user's ".cache" directory.
export TORCH_HOME="$workdir/TorchHome"

# This command configures the PATH environment variable.
export PATH="${PATH}:$workdir/python_embeded/Scripts"

# This environment variable keeps .pyc cache files in a single folder instead of scattering them alongside the .py files.
export PYTHONPYCACHEPREFIX="$workdir/pycache"

# This copies u2net.onnx into the user's home directory so it does not have to be downloaded at startup.
if [ ! -f "${HOME}/.u2net/u2net.onnx" ]; then
if [ -f "./extras/u2net.onnx" ]; then
mkdir -p "${HOME}/.u2net"
cp "./extras/u2net.onnx" "${HOME}/.u2net/u2net.onnx"
fi
fi

# Download the TRELLIS model (it will not be downloaded again if already present)
if [ ! -f "$workdir/python_embeded/Scripts/.hf-hub-reinstalled" ] ; then
$workdir/python_embeded/python.exe -s -m pip install --force-reinstall huggingface-hub
touch "$workdir/python_embeded/Scripts/.hf-hub-reinstalled"
fi ;

$workdir/python_embeded/Scripts/huggingface-cli.exe download JeffreyXiang/TRELLIS-image-large

# Run the official TRELLIS Gradio demo

echo "########################################"
echo "[INFO] Starting TRELLIS demo..."
echo "########################################"

cd TRELLIS
../python_embeded/python.exe -s app.py