diff --git a/models/Llama-3-8B-Instruct.json b/models/Llama-3-8B-Instruct.json
index 368f995..c9d63bf 100644
--- a/models/Llama-3-8B-Instruct.json
+++ b/models/Llama-3-8B-Instruct.json
@@ -31,7 +31,7 @@
         "format": "gguf",
         "sha256checksum": "ab9e4eec7e80892fd78f74d9a15d0299f1e22121cea44efd68a7a02a3fe9a1da",
         "publisher": {
-          "name": "LM Studio Community",
+          "name": "lmstudio-community",
           "socialUrl": "https://huggingface.co/lmstudio-community"
         },
         "respository": "lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",
diff --git a/models/gemma-2-9b.json b/models/gemma-2-9b.json
new file mode 100644
index 0000000..425f5b6
--- /dev/null
+++ b/models/gemma-2-9b.json
@@ -0,0 +1,41 @@
+{
+  "_descriptorVersion": "0.0.1",
+  "datePublished": "2024-06-28T05:10:58.000Z",
+  "name": "Gemma 2 9B Instruct",
+  "description": "Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models",
+  "author": {
+    "name": "Google DeepMind",
+    "url": "https://deepmind.google",
+    "blurb": "We’re a team of scientists, engineers, ethicists and more, working to build the next generation of AI systems safely and responsibly."
+  },
+  "numParameters": "9B",
+  "resources": {
+    "canonicalUrl": "https://huggingface.co/google/gemma-2-9b-it",
+    "downloadUrl": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF"
+  },
+  "trainedFor": "chat",
+  "arch": "gemma2",
+  "files": {
+    "highlighted": {
+      "economical": {
+        "name": "gemma-2-9b-it-Q4_K_M.gguf"
+      }
+    },
+    "all": [
+      {
+        "name": "gemma-2-9b-it-Q4_K_M.gguf",
+        "url": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF/resolve/main/gemma-2-9b-it-Q4_K_M.gguf",
+        "sizeBytes": 5761057728,
+        "quantization": "Q4_K_M",
+        "format": "gguf",
+        "sha256checksum": "13b2a7b4115bbd0900162edcebe476da1ba1fc24e718e8b40d32f6e300f56dfe",
+        "publisher": {
+          "name": "lmstudio-community",
+          "socialUrl": "https://twitter.com/LMStudioAI"
+        },
+        "respository": "lmstudio-community/gemma-2-9b-it-GGUF",
+        "repositoryUrl": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF"
+      }
+    ]
+  }
+}
diff --git a/models/phi-3.json b/models/phi-3.json
index 19b01cf..ab7adb7 100644
--- a/models/phi-3.json
+++ b/models/phi-3.json
@@ -30,7 +30,7 @@
         "format": "gguf",
         "sha256checksum": "bb076f8f9e6c188a8251c626e4d89442c291215c82b2cb06e1efed0941fc443a",
         "publisher": {
-          "name": "LM Studio Community",
+          "name": "lmstudio-community",
           "socialUrl": "https://twitter.com/LMStudioAI"
         },
         "respository": "lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF",
diff --git a/schema.json b/schema.json
index 786fddc..dc0a668 100644
--- a/schema.json
+++ b/schema.json
@@ -51,7 +51,7 @@
     },
     "numParameters": {
       "type": "string",
-      "enum": ["1.5B", "2B", "3B", "4B", "6.7B", "7B", "8B", "13B", "15B", "30B", "65B", "unknown"]
+      "enum": ["1.5B", "2B", "3B", "4B", "6.7B", "7B", "8B", "9B", "13B", "15B", "30B", "65B", "unknown"]
     },
     "trainedFor": {
       "type": "string",
@@ -59,7 +59,7 @@
     },
     "arch": {
       "type": "string",
-      "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "command-r", "phi3"]
+      "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "gemma2", "command-r", "phi3"]
     },
     "description": {
       "type": "string"