From da348f397421ce3c5781ca44a8fe2dd77c0db457 Mon Sep 17 00:00:00 2001
From: Ludwig Kristoffersson
Date: Fri, 19 Apr 2024 10:31:22 +0200
Subject: [PATCH] add debug message when model is loading

---
 llms/generate.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/llms/generate.py b/llms/generate.py
index bbed2ae..d8602f3 100644
--- a/llms/generate.py
+++ b/llms/generate.py
@@ -6,10 +6,13 @@
 from .sampling_strategies import top_k_sampling, top_p_sampling, top_k_and_p_sampling
 from config.settings import get_settings
+from config.logger import log
 from .config import Params
 
 
 def load_hf_model(model_path: str, device: str) -> (AutoModelForCausalLM, AutoTokenizer):
+    log().debug(f"loading model {model_path} with token {get_settings().HUGGINGFACE_ACCESS_TOKEN}")
+
     tokenizer = AutoTokenizer.from_pretrained(model_path, token=get_settings().HUGGINGFACE_ACCESS_TOKEN)
     model = AutoModelForCausalLM.from_pretrained(
         model_path,