Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Windows support + docs, closes #22, closes #25 #23

Merged
merged 14 commits into from
Sep 25, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 17 additions & 7 deletions .github/workflows/cmake.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,27 +13,37 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
os: [ubuntu-latest, macos-latest, windows-latest]
build_type: [Release]
c_compiler: [gcc, clang]
c_compiler: [clang, cl]
include:
- os: ubuntu-latest
c_compiler: gcc
cpp_compiler: g++
- os: ubuntu-latest
c_compiler: clang
cpp_compiler: clang++
- os: macos-latest
c_compiler: clang
cpp_compiler: clang++
- os: windows-latest
c_compiler: cl
cpp_compiler: cl
exclude:
- os: ubuntu-latest
c_compiler: cl
- os: macos-latest
c_compiler: gcc
c_compiler: cl
- os: windows-latest
c_compiler: clang

steps:
- uses: actions/checkout@v4
with:
submodules: 'true'

- name: Build (Linux/macOS)
- name: Build on Windows
if: runner.os == 'Windows'
run: .\scripts\build\release.ps1
shell: pwsh

- name: Build on Linux/macOS
if: runner.os != 'Windows'
run: ./scripts/build/release.sh
8 changes: 4 additions & 4 deletions .gitmodules
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[submodule "JUCE"]
path = JUCE
url = [email protected]:juce-framework/JUCE.git
[submodule "llama.cpp"]
path = llama.cpp
url = [email protected]:ggerganov/llama.cpp.git
url = https://github.com/ggerganov/llama.cpp.git
[submodule "JUCE"]
path = JUCE
url = https://github.com/juce-framework/JUCE.git
151 changes: 96 additions & 55 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -68,58 +68,99 @@ PRIVATE
$<$<CONFIG:Release>:-O3>
)

# Add llama-server as a binary resource
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/llama-server
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_SOURCE_DIR}/build/llama.cpp/bin/llama-server
${CMAKE_BINARY_DIR}/llama-server
DEPENDS ${CMAKE_SOURCE_DIR}/build/llama.cpp/bin/llama-server
)
add_custom_target(copy_llama_server ALL DEPENDS ${CMAKE_BINARY_DIR}/llama-server)

# Ensure the executable is copied into the bundle
set_source_files_properties(${CMAKE_BINARY_DIR}/llama-server PROPERTIES MACOSX_PACKAGE_LOCATION Resources)
target_sources(musegpt PRIVATE ${CMAKE_BINARY_DIR}/llama-server)

# Copy llama-server to VST plugin format's output directory
add_custom_command(TARGET musegpt POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_BINARY_DIR}/llama-server
$<TARGET_FILE_DIR:musegpt>/VST3/musegpt.vst3/Contents/Resources/llama-server
)

# Copy llama-server to Standalone plugin format's output directory
add_custom_command(TARGET musegpt POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_BINARY_DIR}/llama-server
$<TARGET_FILE_DIR:musegpt>/Standalone/musegpt.app/Contents/Resources/llama-server
)

# Copy model weights to VST plugin format's output directory
add_custom_command(TARGET musegpt POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_SOURCE_DIR}/models/gemma-2b-it.fp16.gguf
$<TARGET_FILE_DIR:musegpt>/VST3/musegpt.vst3/Contents/Resources/gemma-2b-it.fp16.gguf
)

# Copy model weights to Standalone plugin format's output directory
add_custom_command(TARGET musegpt POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_SOURCE_DIR}/models/gemma-2b-it.fp16.gguf
$<TARGET_FILE_DIR:musegpt>/Standalone/musegpt.app/Contents/Resources/gemma-2b-it.fp16.gguf
)

# Copy model weights to AAX plugin format's output directory
add_custom_command(TARGET musegpt POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_SOURCE_DIR}/models/gemma-2b-it.fp16.gguf
$<TARGET_FILE_DIR:musegpt>/AAX/musegpt.aaxplugin/Contents/Resources/gemma-2b-it.fp16.gguf
)

# Copy model weights to AU plugin format's output directory
add_custom_command(TARGET musegpt POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_SOURCE_DIR}/models/gemma-2b-it.fp16.gguf
$<TARGET_FILE_DIR:musegpt>/AU/musegp.component/Contents/Resources/gemma-2b-it.fp16.gguf
)
# ---------------------------------------------------------------------------
# Bundle the pre-built llama-server binary and the model weights with every
# plugin format's output so the plugin can launch the server at runtime.
# ---------------------------------------------------------------------------

# Helper: copy one file into a plugin format's output tree after each build.
#   src  - absolute path of the file to copy
#   dest - absolute destination path (generator expressions allowed)
function(musegpt_bundle_file src dest)
    add_custom_command(TARGET musegpt POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E copy "${src}" "${dest}"
        VERBATIM
    )
endfunction()

set(MUSEGPT_MODEL_FILE "${CMAKE_SOURCE_DIR}/models/gemma-2b-it.fp16.gguf")

if(WIN32)
    # BUG(review): the previous version hardcoded llama.cpp's Debug output
    # directory, which breaks any Release build.  Expose the location as a
    # cache variable so CI / users can point at the configuration they built.
    set(MUSEGPT_LLAMA_SERVER
        "${CMAKE_SOURCE_DIR}/build/llama.cpp/bin/Debug/llama-server.exe"
        CACHE FILEPATH "Pre-built llama-server executable to bundle")

    # Stage llama-server in the build tree so later rules have a stable path.
    add_custom_command(
        OUTPUT "${CMAKE_BINARY_DIR}/llama-server.exe"
        COMMAND ${CMAKE_COMMAND} -E copy
            "${MUSEGPT_LLAMA_SERVER}"
            "${CMAKE_BINARY_DIR}/llama-server.exe"
        DEPENDS "${MUSEGPT_LLAMA_SERVER}"
        VERBATIM
    )
    add_custom_target(copy_llama_server ALL
        DEPENDS "${CMAKE_BINARY_DIR}/llama-server.exe")
    # NOTE(review): the previous Windows branch also set the macOS-only
    # MACOSX_PACKAGE_LOCATION property and added the .exe via target_sources;
    # both are meaningless (or harmful) on Windows and were dropped.

    # llama-server for each plugin format.
    musegpt_bundle_file("${CMAKE_BINARY_DIR}/llama-server.exe"
        "$<TARGET_FILE_DIR:musegpt>/VST3/musegpt.vst3/Contents/Resources/llama-server.exe")
    # NOTE(review): the old destination ".../musegpt.exe/llama-server.exe"
    # treated the standalone executable as a directory; place the server in
    # the Standalone output directory instead — confirm against JUCE's
    # Windows artefact layout.
    musegpt_bundle_file("${CMAKE_BINARY_DIR}/llama-server.exe"
        "$<TARGET_FILE_DIR:musegpt>/Standalone/llama-server.exe")

    # Model weights for each plugin format.
    musegpt_bundle_file("${MUSEGPT_MODEL_FILE}"
        "$<TARGET_FILE_DIR:musegpt>/VST3/musegpt.vst3/Contents/Resources/gemma-2b-it.fp16.gguf")
    musegpt_bundle_file("${MUSEGPT_MODEL_FILE}"
        "$<TARGET_FILE_DIR:musegpt>/Standalone/gemma-2b-it.fp16.gguf")
else()
    # Stage llama-server in the build tree so later rules have a stable path.
    add_custom_command(
        OUTPUT "${CMAKE_BINARY_DIR}/llama-server"
        COMMAND ${CMAKE_COMMAND} -E copy
            "${CMAKE_SOURCE_DIR}/build/llama.cpp/bin/llama-server"
            "${CMAKE_BINARY_DIR}/llama-server"
        DEPENDS "${CMAKE_SOURCE_DIR}/build/llama.cpp/bin/llama-server"
        VERBATIM
    )
    add_custom_target(copy_llama_server ALL
        DEPENDS "${CMAKE_BINARY_DIR}/llama-server")

    # macOS bundles pick the server up as a packaged resource.
    set_source_files_properties("${CMAKE_BINARY_DIR}/llama-server"
        PROPERTIES MACOSX_PACKAGE_LOCATION Resources)
    target_sources(musegpt PRIVATE "${CMAKE_BINARY_DIR}/llama-server")

    # llama-server for each plugin format.
    musegpt_bundle_file("${CMAKE_BINARY_DIR}/llama-server"
        "$<TARGET_FILE_DIR:musegpt>/VST3/musegpt.vst3/Contents/Resources/llama-server")
    musegpt_bundle_file("${CMAKE_BINARY_DIR}/llama-server"
        "$<TARGET_FILE_DIR:musegpt>/Standalone/musegpt.app/Contents/Resources/llama-server")

    # Model weights for each plugin format.
    musegpt_bundle_file("${MUSEGPT_MODEL_FILE}"
        "$<TARGET_FILE_DIR:musegpt>/VST3/musegpt.vst3/Contents/Resources/gemma-2b-it.fp16.gguf")
    musegpt_bundle_file("${MUSEGPT_MODEL_FILE}"
        "$<TARGET_FILE_DIR:musegpt>/Standalone/musegpt.app/Contents/Resources/gemma-2b-it.fp16.gguf")
    musegpt_bundle_file("${MUSEGPT_MODEL_FILE}"
        "$<TARGET_FILE_DIR:musegpt>/AAX/musegpt.aaxplugin/Contents/Resources/gemma-2b-it.fp16.gguf")
    # Typo fix: destination previously read "musegp.component".
    musegpt_bundle_file("${MUSEGPT_MODEL_FILE}"
        "$<TARGET_FILE_DIR:musegpt>/AU/musegpt.component/Contents/Resources/gemma-2b-it.fp16.gguf")
endif()
7 changes: 7 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,12 @@ If you want to build from source, follow these steps:
cd musegpt
```

If you cloned without submodules, fetch them now by running:

```bash
git submodule update --init --recursive
```

2. **Install dependencies:**

Ensure you have the required dependencies installed. See [Requirements](#requirements) for details.
Expand Down Expand Up @@ -99,6 +105,7 @@ Feel free to experiment with the system prompt to customize the behavior of the
- [JUCE](https://juce.com/) (Audio application framework)
- [llama.cpp](https://github.com/ggerganov/llama.cpp) (LLM inference library)
- C++17 compatible compiler (e.g., GCC 7+, Clang 5+, MSVC 2017+)
- [Python](https://www.python.org/) 3.10 or later (for model downloading and processing)
- [CMake](https://cmake.org/) 3.15 or later

## Usage
Expand Down
1 change: 1 addition & 0 deletions docs/requirements.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ Any DAW that supports VST3 plugins (e.g., Ableton Live, FL Studio, Logic Pro, Pr
- **JUCE:** Audio application framework
- **llama.cpp:** LLM inference library
- **Compiler:** C++17 compatible compiler (e.g., GCC 7+, Clang 5+, MSVC 2017+)
- **Python:** 3.10 or later (for model downloading and processing)
- **CMake:** Version 3.15 or later

## Supported Models
Expand Down
2 changes: 1 addition & 1 deletion llama.cpp
Submodule llama.cpp updated 118 files
21 changes: 21 additions & 0 deletions scripts/build/debug.ps1
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Debug build script for Windows.
# Wipes the previously installed plugin, downloads model weights, builds the
# llama.cpp server, then builds the main project in the Debug configuration.

# Wipe out build output from this project
Remove-Item -Path "$env:USERPROFILE\Documents\VST3\musegpt.vst3" -Recurse -Force -ErrorAction SilentlyContinue

# Create build output directories
New-Item -Path "build\debug" -ItemType Directory -Force

# Download models
& "$PSScriptRoot\..\models.ps1"

# Build llama.cpp server.
# NOTE(review): this previously forced CMAKE_C/CXX_COMPILER to an absolute,
# version-pinned cl.exe path (written with Unix-style slashes), which breaks
# on any machine without that exact MSVC install.  The Visual Studio
# generator locates the toolchain itself, so no compiler override is needed.
Push-Location llama.cpp
cmake -S . -G "Visual Studio 17 2022" -B ..\build\llama.cpp
cmake --build ..\build\llama.cpp --config Debug -j $env:NUMBER_OF_PROCESSORS --target llama-server
Pop-Location

# Build main project.
Push-Location build
cmake -S .. -B debug
cmake --build debug --config Debug -j $env:NUMBER_OF_PROCESSORS
Pop-Location
2 changes: 1 addition & 1 deletion scripts/build/debug.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,5 +18,5 @@ cd ..
# build main project
cd build
# NOTE(review): -DCMAKE_<LANG>_COMPILER is a configure-time cache variable;
# "cmake --build" does not accept -D options and errors out on them.  Pass
# compiler overrides (if any) to the configure step below instead, e.g.:
#   cmake -S .. -B debug -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc
cmake -S .. -B debug
cmake --build debug --config Debug -j "$(sysctl -n hw.physicalcpu)"
cd ..
12 changes: 12 additions & 0 deletions scripts/clean.ps1
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Clean script to wipe out all transient state: build output, downloaded
# model weights, and the Python virtual environment.

# Each path is removed recursively; missing paths are ignored silently.
foreach ($transient in @("build", "models", ".env")) {
    Remove-Item -Path $transient -Recurse -Force -ErrorAction SilentlyContinue
}

Write-Host "Cleanup completed."
10 changes: 10 additions & 0 deletions scripts/models.ps1
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Download the GGUF model weights into .\models via huggingface-cli, using a
# project-local Python virtual environment for the tooling.

# Create models directory if it doesn't exist
New-Item -ItemType Directory -Force -Path .\models

# Setup virtual environment and dependencies
python -m venv .env
.\.env\Scripts\Activate.ps1
# Use "python -m pip" so the pip that runs belongs to the venv interpreter
# just activated, regardless of what bare "pip" resolves to on PATH.
python -m pip install -r requirements.txt

# Download GGUF models
huggingface-cli download MaziyarPanahi/gemma-2b-it-GGUF gemma-2b-it.fp16.gguf --local-dir .\models
greynewell marked this conversation as resolved.
Show resolved Hide resolved
Loading