Commit

Upgraded the hardware management

ParisNeo committed Jan 4, 2024
1 parent e868dc0 commit ac7afbb
Showing 16 changed files with 404 additions and 453 deletions.
api/__init__.py (10 changes: 9 additions & 1 deletion)

@@ -1574,7 +1574,7 @@ def prepare_query(self, client_id: str, message_id: int = -1, is_continue: bool
self.warning("Couldn't add long term memory information to the context. Please verify the vector database") # Add information about the user
user_description=""
if self.config.use_user_name_in_discussions:
user_description="!@>User description:\n"+self.config.user_description
user_description="!@>User description:\n"+self.config.user_description+"\n"


# Tokenize the conditionning text and calculate its number of tokens
@@ -1971,6 +1971,13 @@ def process_chunk(
                 dt=1
             spd = self.nb_received_tokens/dt
             ASCIIColors.green(f"Received {self.nb_received_tokens} tokens (speed: {spd:.2f}t/s) ",end="\r",flush=True)
+            antiprompt = self.personality.detect_antiprompt(self.connections[client_id]["generated_text"])
+            if antiprompt:
+                ASCIIColors.warning(f"\nDetected hallucination with antiprompt: {antiprompt}")
+                self.connections[client_id]["generated_text"] = self.remove_text_from_string(self.connections[client_id]["generated_text"],antiprompt)
+                self.update_message(client_id, self.connections[client_id]["generated_text"], parameters, metadata, None, MSG_TYPE.MSG_TYPE_FULL)
+                return False
+
             self.update_message(client_id, chunk, parameters, metadata, ui=None, msg_type=message_type)
             return True
         # Stream the generated text to the frontend
@@ -1983,6 +1990,7 @@ def generate(self, full_prompt, prompt, n_predict, client_id, callback=None):
         if self.personality.processor is not None:
             ASCIIColors.info("Running workflow")
             try:
+                self.personality.callback = callback
                 self.personality.processor.run_workflow( prompt, full_prompt, callback)
             except Exception as ex:
                 trace_exception(ex)
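For context on the new hunk in process_chunk above: detect_antiprompt and remove_text_from_string are lollms helpers whose implementations live outside this diff. A minimal sketch of their assumed semantics (the "!@>" marker comes from the prompt format visible in the first hunk; the function bodies are illustrative, not the library's code):

from typing import List, Optional

def detect_antiprompt(generated_text: str, antiprompts: List[str]) -> Optional[str]:
    # Return the first antiprompt marker found in the generated text, if any.
    lowered = generated_text.lower()
    for marker in antiprompts:
        if marker.lower() in lowered:
            return marker
    return None

def remove_text_from_string(text: str, marker: str) -> str:
    # Truncate the text at the first (case-insensitive) occurrence of marker.
    index = text.lower().find(marker.lower())
    return text if index == -1 else text[:index]

# Example: the model starts echoing the "!@>" prompt format, so generation
# is cut at the marker and the cleaned text is sent as a full message.
generated = "Here is the answer.\n!@>User description:"
found = detect_antiprompt(generated, ["!@>"])
if found:
    print(remove_text_from_string(generated, found))  # -> "Here is the answer.\n"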
app.py (15 changes: 2 additions & 13 deletions)

@@ -1254,7 +1254,7 @@ def upgrade_to_gpu(self):
ASCIIColors.green("PyTorch uninstalled successfully")
reinstall_pytorch_with_cuda()
ASCIIColors.yellow("Installing pytorch with cuda support")
self.config.enable_gpu=True
self.config.hardware_mode="nvidia-tensorcores"
return jsonify({'status':res==0})


@@ -1992,7 +1992,7 @@ def clear_personality_files_list(self):
         return jsonify({"state":True})

     def start_training(self):
-        if self.config.enable_gpu:
+        if self.config.hardware_mode=="nvidia-tensorcores" or self.config.hardware_mode=="nvidia" or self.config.hardware_mode=="apple-intel" or self.config.hardware_mode=="apple-silicon":
             if not self.lollms_paths.gptqlora_path.exists():
                 # Clone the repository to the target path
                 ASCIIColors.info("No gptqlora found in your personal space.\nCloning the gptqlora repo")
@@ -2748,17 +2748,6 @@ def sync_cfg(default_config, config):
     if not user_avatar_path.exists():
         # If the user avatar doesn't exist, copy the default avatar from the assets folder
         shutil.copy(default_user_avatar, user_avatar_path)
-    # executor = ThreadPoolExecutor(max_workers=1)
-    # app.config['executor'] = executor
-    # Check if .no_gpu file exists
-    no_gpu_file = Path('.no_gpu')
-    if no_gpu_file.exists():
-        # If the file exists, change self.config.use_gpu to False
-        config.enable_gpu = False
-        config.save_config()
-
-        # Remove the .no_gpu file
-        no_gpu_file.unlink()

     bot = LoLLMsWebUI(args, app, socketio, config, config.file_path, lollms_paths)
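A note on the start_training change: the four chained equality tests on self.config.hardware_mode are equivalent to a set membership check. A behavior-equivalent sketch (the set name is illustrative, not from the commit):

# Hardware modes that pass the training gate in this commit's start_training.
GPU_TRAINING_MODES = {"nvidia-tensorcores", "nvidia", "apple-intel", "apple-silicon"}

def can_train(hardware_mode: str) -> bool:
    # Same result as the chained "or" comparisons in start_training.
    return hardware_mode in GPU_TRAINING_MODES

assert can_train("nvidia-tensorcores")
assert not can_train("cpu")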
configs/config.yaml (6 changes: 3 additions & 3 deletions)

@@ -1,5 +1,5 @@
 # =================== Lord Of Large Language Models Configuration file ===========================
-version: 39
+version: 40
 binding_name: null
 model_name: null

@@ -44,8 +44,8 @@ debug: False
 auto_update: true
 auto_save: true
 auto_title: false
-# Enables gpu usage
-enable_gpu: true
+# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
+hardware_mode: nvidia-tensorcores
 # Automatically open the browser
 auto_show_browser: true
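The version bump from 39 to 40 accompanies the rename of enable_gpu to hardware_mode. A hypothetical migration helper (not part of the commit; sync_cfg in app.py is what actually reconciles configs) illustrating how the old boolean flag could map onto the new string field:

def migrate_hardware_config(cfg: dict) -> dict:
    # Map the legacy enable_gpu boolean onto the new hardware_mode string.
    # The target values below are assumptions based on the new defaults.
    if cfg.get("version", 0) < 40 and "enable_gpu" in cfg:
        cfg["hardware_mode"] = "nvidia-tensorcores" if cfg.pop("enable_gpu") else "cpu"
        cfg["version"] = 40
    return cfg

print(migrate_hardware_config({"version": 39, "enable_gpu": True}))
# -> {'version': 40, 'hardware_mode': 'nvidia-tensorcores'}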
lollms_webui.py (43 changes: 7 additions & 36 deletions)

@@ -6,65 +6,36 @@
 This class provides a singleton instance of the LoLLMS web UI, allowing access to its functionality and data across multiple endpoints.
 """

-from lollms.app import LollmsApplication
+from lollms.server.elf_server import LOLLMSElfServer
 from lollms.main_config import LOLLMSConfig
 from lollms.paths import LollmsPaths

-class LoLLMSWebUI(LollmsApplication):
-    __instance = None
-
-    @staticmethod
-    def build_instance(
-        config: LOLLMSConfig,
-        lollms_paths: LollmsPaths,
-        load_binding=True,
-        load_model=True,
-        try_select_binding=False,
-        try_select_model=False,
-        callback=None,
-        socketio = None
-    ):
-        if LoLLMSWebUI.__instance is None:
-            LoLLMSWebUI(
-                config,
-                lollms_paths,
-                load_binding=load_binding,
-                load_model=load_model,
-                try_select_binding=try_select_binding,
-                try_select_model=try_select_model,
-                callback=callback,
-                socketio=socketio
-            )
-        return LoLLMSWebUI.__instance
-    @staticmethod
-    def get_instance():
-        return LoLLMSWebUI.__instance
-
+class LOLLMSWebUI(LOLLMSElfServer):
     def __init__(
         self,
         config: LOLLMSConfig,
         lollms_paths: LollmsPaths,
         load_binding=True,
         load_model=True,
+        load_voice_service=True,
+        load_sd_service=True,
         try_select_binding=False,
         try_select_model=False,
         callback=None,
         socketio=None
     ) -> None:
         super().__init__(
             "LoLLMSWebUI",
             config,
             lollms_paths,
             load_binding=load_binding,
             load_model=load_model,
+            load_sd_service=load_sd_service,
+            load_voice_service=load_voice_service,
             try_select_binding=try_select_binding,
             try_select_model=try_select_model,
             callback=callback,
             socketio=socketio
         )
-        if LoLLMSWebUI.__instance is not None:
-            raise Exception("This class is a singleton!")
-        else:
-            LoLLMSWebUI.__instance = self
+        self.app_name = "LOLLMSWebUI"

 # Other methods and properties of the LoLLMSWebUI singleton class
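The explicit singleton plumbing (build_instance, get_instance, the __instance guard) leaves this file; the class now inherits it from LOLLMSElfServer. A minimal sketch of the singleton shape the base class is assumed to provide (illustrative only; the real implementation lives in lollms.server.elf_server):

class SingletonServer:
    __instance = None

    @classmethod
    def build_instance(cls, **kwargs):
        # Create the single instance on first call; reuse it afterwards.
        # Subclasses forward their constructor arguments through kwargs.
        if cls.__instance is None:
            cls.__instance = cls(**kwargs)
        return cls.__instance

    @classmethod
    def get_instance(cls):
        # Return the already-built instance (None if build_instance
        # has not been called yet).
        return cls.__instance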
new_app.py (8 changes: 5 additions & 3 deletions)

@@ -11,7 +11,7 @@
 from lollms.app import LollmsApplication
 from lollms.paths import LollmsPaths
 from lollms.main_config import LOLLMSConfig
-from lollms_webui import LoLLMSWebUI
+from lollms_webui import LOLLMSWebUI
 from pathlib import Path
 from ascii_colors import ASCIIColors
 import socketio
@@ -43,7 +43,9 @@
 if args.port:
     config.port=args.port

-LoLLMSWebUI.build_instance(config=config, lollms_paths=lollms_paths, socketio=sio)
+from lollms.server.endpoints.lollms_infos import *
+LOLLMSWebUI.build_instance(config=config, lollms_paths=lollms_paths, socketio=sio)

 # Import all endpoints
 from lollms.server.endpoints.lollms_infos import router

 uvicorn.run(app, host=config.host, port=config.port)
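new_app.py ends by handing the app to uvicorn; the include_router call that attaches the imported lollms_infos endpoints sits outside the shown hunk. For readers unfamiliar with the pattern, this is how an APIRouter is typically registered on a FastAPI app (a generic sketch under that assumption, not the commit's exact wiring):

from fastapi import APIRouter, FastAPI

app = FastAPI()
router = APIRouter()

@router.get("/lollms_infos")
def lollms_infos():
    # Placeholder payload; the real endpoint module is
    # lollms.server.endpoints.lollms_infos.
    return {"app_name": "LOLLMSWebUI"}

app.include_router(router)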
scripts/linux/linux_install.sh (108 changes: 40 additions & 68 deletions)

@@ -3,6 +3,22 @@
 # This script will install miniconda and git with all dependencies for this project
 # This enables a user to install this project without manually installing conda and git.

+echo " ___ ___ ___ ___ ___ ___ "
+echo " /\__\ /\ \ /\__\ /\__\ /\__\ /\ \ "
+echo " /:/ / /::\ \ /:/ / /:/ / /::| | /::\ \ "
+echo " /:/ / /:/\:\ \ /:/ / /:/ / /:|:| | /:/\ \ \ "
+echo " /:/ / /:/ \:\ \ /:/ / /:/ / /:/|:|__|__ _\:\~\ \ \ "
+echo " /:/__/ /:/__/ \:\__\ /:/__/ /:/__/ /:/ |::::\__\ /\ \:\ \ \__\ "
+echo " \:\ \ \:\ \ /:/ / \:\ \ \:\ \ \/__/~~/:/ / \:\ \:\ \/__/ "
+echo " \:\ \ \:\ /:/ / \:\ \ \:\ \ /:/ / \:\ \:\__\ "
+echo " \:\ \ \:\/:/ / \:\ \ \:\ \ /:/ / \:\/:/ / "
+echo " \:\__\ \::/ / \:\__\ \:\__\ /:/ / \::/ / "
+echo " \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ "
+echo "V8.5 (alpha)"
+echo "-----------------"
+echo "By ParisNeo"
+echo "-----------------"
+

 cd "$(dirname "$0")"

@@ -19,47 +35,12 @@ if [[ "$PWD" =~ [^#\$\%\&\(\)\*\+\] ]]; then
 fi


+export PACKAGES_TO_INSTALL="python=3.11 git"
+read -rp "Press Enter to continue..."

-clear
-
-echo " ___ ___ ___ ___ ___ ___ "
-echo " /\__\ /\ \ /\__\ /\__\ /\__\ /\ \ "
-echo " /:/ / /::\ \ /:/ / /:/ / /::| | /::\ \ "
-echo " /:/ / /:/\:\ \ /:/ / /:/ / /:|:| | /:/\ \ \ "
-echo " /:/ / /:/ \:\ \ /:/ / /:/ / /:/|:|__|__ _\:\~\ \ \ "
-echo " /:/__/ /:/__/ \:\__\ /:/__/ /:/__/ /:/ |::::\__\ /\ \:\ \ \__\ "
-echo " \:\ \ \:\ \ /:/ / \:\ \ \:\ \ \/__/~~/:/ / \:\ \:\ \/__/ "
-echo " \:\ \ \:\ /:/ / \:\ \ \:\ \ /:/ / \:\ \:\__\ "
-echo " \:\ \ \:\/:/ / \:\ \ \:\ \ /:/ / \:\/:/ / "
-echo " \:\__\ \::/ / \:\__\ \:\__\ /:/ / \::/ / "
-echo " \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ "
-echo " By ParisNeo"
-
-echo "Please specify if you want to use a GPU or CPU."
-echo "*Note* that only NVidea GPUs (cuda) or AMD GPUs (rocm) are supported."
-echo "A) Enable Cuda (for nvidia GPUS)"
-echo "B) Enable ROCm (for AMD GPUs)"
-echo "C) Run CPU mode"
-echo
-read -rp "Input> " gpuchoice
-gpuchoice="${gpuchoice:0:1}"
-
-if [[ "${gpuchoice^^}" == "A" ]]; then
-    PACKAGES_TO_INSTALL="python=3.10 cuda-toolkit ninja git gcc"
-    CHANNEL="-c nvidia/label/cuda-12.1.1 -c nvidia -c conda-forge"
-elif [[ "${gpuchoice^^}" == "B" ]]; then
-    PACKAGES_TO_INSTALL="python=3.10 rocm-comgr rocm-smi ninja git gcc"
-    CHANNEL=" -c conda-forge"
-elif [[ "${gpuchoice^^}" == "C" ]]; then
-    PACKAGES_TO_INSTALL="python=3.10 ninja git gcc"
-    CHANNEL="-c conda-forge"
-else
-    echo "Invalid choice. Exiting..."
-    exit 1
-fi


 # Better isolation for virtual environment
 unset CONDA_SHLVL

@@ -115,33 +96,28 @@ export CUDA_PATH="$INSTALL_ENV_DIR"
 if [ -d "lollms-webui" ]; then
     cd lollms-webui || exit 1
     git pull
+    git submodule update --init --recursive
+    cd lollms_core
+    pip install -e .
+    cd ..
+    cd utilities/safe_store
+    pip install -e .
+    cd ../..

 else
-    git clone "$REPO_URL"
+    git clone --depth 1 --recurse-submodules "$REPO_URL"
+    git submodule update --init --recursive
+    cd lollms-webui/lollms_core
+    pip install -e .
+    cd ..
+    cd utilities/safe_store
+    pip install -e .
+    cd ../..

     cd lollms-webui || exit 1
 fi

-# Initialize all submodules and set them to main branch
-echo "Initializing submodules"
-git submodule update --init
-cd zoos/bindings_zoo
-git checkout main
-cd ../personalities_zoo
-git checkout main
-cd ../extensions_zoo
-git checkout main
-cd ../models_zoo
-git checkout main
-
-cd ../..
-
-cd lollms_core
-git checkout main
-
-cd ../utilities/safe_store
-git checkout main
-
-cd ../..

 # Loop through each "git+" requirement and uninstall it (workaround for inconsistent git package updating)
 while IFS= read -r requirement; do
     if echo "$requirement" | grep -q "git+"; then

@@ -152,8 +128,6 @@
 done < requirements.txt

 # Install the pip requirements
 python -m pip install -r requirements.txt --upgrade
-python -m pip install -e lollms_core --upgrade
-python -m pip install -e utilities/safe_store --upgrade


if [[ -e "../linux_run.sh" ]]; then
@@ -180,12 +154,10 @@
 else
     cp scripts/linux/linux_update_models.sh ../
 fi

-if [[ "${gpuchoice^^}" == "C" ]]; then
-    echo "This is a .no_gpu file." > .no_gpu
-    echo "You have chosen to use only CPU on this system."
-else
-    echo "You have chosen to use GPU on this system."
-fi

+cd scripts/python/lollms_installer
+python main.py
+cd ..

 PrintBigMessage() {
     echo
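The GPU/CPU prompt removed from this script is superseded by the new Python installer step (cd scripts/python/lollms_installer; python main.py), which is where a hardware_mode value is now expected to be chosen. A toy sketch of such a selector (hypothetical; the actual logic lives in scripts/python/lollms_installer/main.py):

# The mode list mirrors the comment added to configs/config.yaml.
HARDWARE_MODES = ["cpu", "cpu-noavx", "nvidia-tensorcores", "nvidia",
                  "amd-noavx", "amd", "apple-intel", "apple-silicon"]

def choose_hardware_mode(default: str = "cpu") -> str:
    # Print the numbered options and read the user's pick.
    for index, mode in enumerate(HARDWARE_MODES):
        print(f"{index}) {mode}")
    choice = input("Select your hardware mode> ")
    try:
        return HARDWARE_MODES[int(choice)]
    except (ValueError, IndexError):
        return default  # fall back to a safe default on bad input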