diff --git a/pyproject.toml b/pyproject.toml
index 78d634f26d..721a7ce488 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -58,7 +58,7 @@ core = [
     "open-clip-torch>=2.23.0,<2.26.1",
 ]
 openvino = ["openvino>=2024.0", "nncf>=2.10.0", "onnx>=1.16.0"]
-vlm = ["ollama<0.4.0", "openai", "python-dotenv","transformers"]
+vlm = ["ollama>=0.4.0", "openai", "python-dotenv","transformers"]
 loggers = [
     "comet-ml>=3.31.7",
     "gradio>=4",
diff --git a/src/anomalib/models/image/vlm_ad/backends/ollama.py b/src/anomalib/models/image/vlm_ad/backends/ollama.py
index 4c712cdba8..75bcaf143c 100644
--- a/src/anomalib/models/image/vlm_ad/backends/ollama.py
+++ b/src/anomalib/models/image/vlm_ad/backends/ollama.py
@@ -44,8 +44,7 @@ from .base import Backend
 
 if module_available("ollama"):
-    from ollama import chat
-    from ollama._client import _encode_image
+    from ollama import Image, chat
 else:
     chat = None
 
@@ -101,7 +100,7 @@ def add_reference_images(self, image: str | Path) -> None:
         Args:
             image (str | Path): Path to the reference image file
         """
-        self._ref_images_encoded.append(_encode_image(image))
+        self._ref_images_encoded.append(Image(value=image))
 
     @property
     def num_reference_images(self) -> int:
@@ -144,7 +143,7 @@ def predict(self, image: str | Path, prompt: Prompt) -> str:
         if not chat:
             msg = "Ollama is not installed. Please install it using `pip install ollama`."
             raise ImportError(msg)
-        image_encoded = _encode_image(image)
+        image_encoded = Image(value=image)
         messages = []
 
         # few-shot
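Context for the change above: `ollama>=0.4.0` removed the private `ollama._client._encode_image` helper and exposes a public `ollama.Image` model instead, whose `value` field accepts a file path, raw bytes, or a base64 string. Below is a minimal standalone sketch (not part of the patch) of how the new typed API is used; the model name and prompt are illustrative assumptions, not taken from anomalib.

```python
# Minimal usage sketch for the ollama>=0.4.0 typed API this patch migrates to.
# The model name and prompt are illustrative assumptions.
from pathlib import Path

from ollama import Image, chat

# Image is a pydantic model; `value` accepts a path, bytes, or base64 string.
# Encoding is handled by the client when the request is sent.
reference = Image(value=Path("reference.png"))

response = chat(
    model="llava",  # any multimodal model available on the local Ollama server
    messages=[
        {
            "role": "user",
            "content": "Describe any anomalies in this image.",
            "images": [reference],
        },
    ],
)

# Since 0.4.0, chat() returns a typed ChatResponse rather than a plain dict.
print(response.message.content)
```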