This repository was archived by the owner on Sep 27, 2024. It is now read-only.

Commit 72667d1

Merge pull request #85 from lmstudio-ai/yagil/catalog-update
Gemma 2 and Phi 3
2 parents f5fd120 + e28d17a commit 72667d1

4 files changed, +85 -3 lines changed

models/Llama-3-8B-Instruct.json

+1 -1
@@ -31,7 +31,7 @@
         "format": "gguf",
         "sha256checksum": "ab9e4eec7e80892fd78f74d9a15d0299f1e22121cea44efd68a7a02a3fe9a1da",
         "publisher": {
-          "name": "LM Studio Community",
+          "name": "lmstudio-community",
           "socialUrl": "https://huggingface.co/lmstudio-community"
         },
         "respository": "lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",

models/gemma-2-9b.json

+41
@@ -0,0 +1,41 @@
+{
+  "_descriptorVersion": "0.0.1",
+  "datePublished": "2024-06-28T05:10:58.000Z",
+  "name": "Gemma 2 9B Instruct",
+  "description": "Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models",
+  "author": {
+    "name": "Google DeepMind",
+    "url": "https://deepmind.google",
+    "blurb": "We’re a team of scientists, engineers, ethicists and more, working to build the next generation of AI systems safely and responsibly."
+  },
+  "numParameters": "9B",
+  "resources": {
+    "canonicalUrl": "https://huggingface.co/google/gemma-2-9b-it",
+    "downloadUrl": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF"
+  },
+  "trainedFor": "chat",
+  "arch": "gemma2",
+  "files": {
+    "highlighted": {
+      "economical": {
+        "name": "gemma-2-9b-it-Q4_K_M.gguf"
+      }
+    },
+    "all": [
+      {
+        "name": "gemma-2-9b-it-Q4_K_M.gguf",
+        "url": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF/resolve/main/gemma-2-9b-it-Q4_K_M.gguf",
+        "sizeBytes": 5761057728,
+        "quantization": "Q4_K_M",
+        "format": "gguf",
+        "sha256checksum": "13b2a7b4115bbd0900162edcebe476da1ba1fc24e718e8b40d32f6e300f56dfe",
+        "publisher": {
+          "name": "lmstudio-community",
+          "socialUrl": "https://twitter.com/LMStudioAI"
+        },
+        "respository": "lmstudio-community/gemma-2-9b-it-GGUF",
+        "repositoryUrl": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF"
+      }
+    ]
+  }
+}
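Each entry in files.all pairs the download URL with a sizeBytes and sha256checksum, so a client can verify the GGUF it fetched. A minimal verification sketch (illustrative only, not part of this commit; the local file path is hypothetical):

# verify_download.py -- illustrative sketch, not part of the catalog repo.
import hashlib
import json
from pathlib import Path

def verify(descriptor_path: str, local_file: str) -> bool:
    """Compare a downloaded GGUF against the size and checksum recorded in a descriptor."""
    entry = json.loads(Path(descriptor_path).read_text())["files"]["all"][0]
    digest = hashlib.sha256()
    size = 0
    with open(local_file, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return size == entry["sizeBytes"] and digest.hexdigest() == entry["sha256checksum"]

print(verify("models/gemma-2-9b.json", "gemma-2-9b-it-Q4_K_M.gguf"))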

models/phi-3.json

+41
@@ -0,0 +1,41 @@
+{
+  "_descriptorVersion": "0.0.1",
+  "datePublished": "2024-07-02T14:09:26",
+  "name": "Phi 3",
+  "description": "The Phi-3-Mini-4K-Instruct is a 3.8B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties.",
+  "author": {
+    "name": "Microsoft Research",
+    "url": "https://www.microsoft.com/en-us/research/",
+    "blurb": "Advancing science and technology to benefit humanity"
+  },
+  "numParameters": "3B",
+  "resources": {
+    "canonicalUrl": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct",
+    "downloadUrl": "https://huggingface.co/lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF"
+  },
+  "trainedFor": "chat",
+  "arch": "phi3",
+  "files": {
+    "highlighted": {
+      "economical": {
+        "name": "Phi-3.1-mini-4k-instruct-Q5_K_M.gguf"
+      }
+    },
+    "all": [
+      {
+        "name": "Phi-3.1-mini-4k-instruct-Q5_K_M.gguf",
+        "url": "https://huggingface.co/lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF/resolve/main/Phi-3.1-mini-4k-instruct-Q5_K_M.gguf",
+        "sizeBytes": 2815275232,
+        "quantization": "Q5_K_M",
+        "format": "gguf",
+        "sha256checksum": "bb076f8f9e6c188a8251c626e4d89442c291215c82b2cb06e1efed0941fc443a",
+        "publisher": {
+          "name": "lmstudio-community",
+          "socialUrl": "https://twitter.com/LMStudioAI"
+        },
+        "respository": "lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF",
+        "repositoryUrl": "https://huggingface.co/lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF"
+      }
+    ]
+  }
+}
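The files.highlighted.economical block names the quantization a client can offer by default; its full details live in files.all. A short lookup sketch (illustrative only, not part of this commit):

# pick_default.py -- illustrative sketch, not part of the catalog repo.
import json
from pathlib import Path

descriptor = json.loads(Path("models/phi-3.json").read_text())
default_name = descriptor["files"]["highlighted"]["economical"]["name"]

# Resolve the highlighted name to its full entry (URL, size, checksum) in files.all.
entry = next(f for f in descriptor["files"]["all"] if f["name"] == default_name)
print(entry["url"], entry["sizeBytes"], entry["quantization"])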

schema.json

+2 -2
@@ -51,15 +51,15 @@
   },
   "numParameters": {
     "type": "string",
-    "enum": ["1.5B", "2B", "3B", "4B", "6.7B", "7B", "13B", "15B", "30B", "65B", "unknown"]
+    "enum": ["1.5B", "2B", "3B", "4B", "6.7B", "7B", "8B", "9B", "13B", "15B", "30B", "65B", "unknown"]
   },
   "trainedFor": {
     "type": "string",
     "enum": ["chat", "instruct", "code_completion", "other"]
   },
   "arch": {
     "type": "string",
-    "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "command-r"]
+    "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "gemma2", "command-r", "phi3"]
   },
   "description": {
     "type": "string"
