Alpaca/src/available_models.json

{
"gemma2": {
"url": "https://ollama.com/library/gemma2",
"description": "Google Gemma 2 is now available in 2 sizes, 9B and 27B.",
"tags": [
[
"latest",
"5.4GB"
],
[
"27b",
"16GB"
],
[
"9b",
"5.4GB"
],
[
"27b-instruct-fp16",
"54GB"
],
[
"27b-instruct-q2_K",
"10GB"
],
[
"27b-instruct-q3_K_L",
"15GB"
],
[
"27b-instruct-q3_K_M",
"13GB"
],
[
"27b-instruct-q3_K_S",
"12GB"
],
[
"27b-instruct-q4_0",
"16GB"
],
[
"27b-instruct-q4_1",
"17GB"
],
[
"27b-instruct-q4_K_M",
"17GB"
],
[
"27b-instruct-q4_K_S",
"16GB"
],
[
"27b-instruct-q5_0",
"19GB"
],
[
"27b-instruct-q5_1",
"21GB"
],
[
"27b-instruct-q5_K_M",
"19GB"
],
[
"27b-instruct-q5_K_S",
"19GB"
],
[
"27b-instruct-q6_K",
"22GB"
],
[
"27b-instruct-q8_0",
"29GB"
],
[
"9b-instruct-fp16",
"18GB"
],
[
"9b-instruct-q2_K",
"3.8GB"
],
[
"9b-instruct-q3_K_L",
"5.1GB"
],
[
"9b-instruct-q3_K_M",
"4.8GB"
],
[
"9b-instruct-q3_K_S",
"4.3GB"
],
[
"9b-instruct-q4_0",
"5.4GB"
],
[
"9b-instruct-q4_1",
"6.0GB"
],
[
"9b-instruct-q4_K_M",
"5.8GB"
],
[
"9b-instruct-q4_K_S",
"5.5GB"
],
[
"9b-instruct-q5_0",
"6.5GB"
],
[
"9b-instruct-q5_1",
"7.0GB"
],
[
"9b-instruct-q5_K_M",
"6.6GB"
],
[
"9b-instruct-q5_K_S",
"6.5GB"
],
[
"9b-instruct-q6_K",
"7.6GB"
],
[
"9b-instruct-q8_0",
"9.8GB"
]
],
"image": false,
"author": "Google DeepMind"
},
"llama3": {
"url": "https://ollama.com/library/llama3",
"description": "Meta Llama 3: The most capable openly available LLM to date",
"tags": [
[
"latest",
"4.7GB"
],
[
"70b",
"40GB"
],
[
"8b",
"4.7GB"
],
[
"instruct",
"4.7GB"
],
[
"text",
"4.7GB"
],
[
"70b-instruct",
"40GB"
],
[
"70b-instruct-fp16",
"141GB"
],
[
"70b-instruct-q2_K",
"26GB"
],
[
"70b-instruct-q3_K_L",
"37GB"
],
[
"70b-instruct-q3_K_M",
"34GB"
],
[
"70b-instruct-q3_K_S",
"31GB"
],
[
"70b-instruct-q4_0",
"40GB"
],
[
"70b-instruct-q4_1",
"44GB"
],
[
"70b-instruct-q4_K_M",
"43GB"
],
[
"70b-instruct-q4_K_S",
"40GB"
],
[
"70b-instruct-q5_0",
"49GB"
],
[
"70b-instruct-q5_1",
"53GB"
],
[
"70b-instruct-q5_K_M",
"50GB"
],
[
"70b-instruct-q5_K_S",
"49GB"
],
[
"70b-instruct-q6_K",
"58GB"
],
[
"70b-instruct-q8_0",
"75GB"
],
[
"70b-text",
"40GB"
],
[
"70b-text-fp16",
"141GB"
],
[
"70b-text-q2_K",
"26GB"
],
[
"70b-text-q3_K_L",
"37GB"
],
[
"70b-text-q3_K_M",
"34GB"
],
[
"70b-text-q3_K_S",
"31GB"
],
[
"70b-text-q4_0",
"40GB"
],
[
"70b-text-q4_1",
"44GB"
],
[
"70b-text-q4_K_M",
"43GB"
],
[
"70b-text-q4_K_S",
"40GB"
],
[
"70b-text-q5_0",
"49GB"
],
[
"70b-text-q5_1",
"53GB"
],
[
"70b-text-q5_K_M",
"50GB"
],
[
"70b-text-q5_K_S",
"49GB"
],
[
"70b-text-q6_K",
"58GB"
],
[
"70b-text-q8_0",
"75GB"
],
[
"8b-instruct-fp16",
"16GB"
],
[
"8b-instruct-q2_K",
"3.2GB"
],
[
"8b-instruct-q3_K_L",
"4.3GB"
],
[
"8b-instruct-q3_K_M",
"4.0GB"
],
[
"8b-instruct-q3_K_S",
"3.7GB"
],
[
"8b-instruct-q4_0",
"4.7GB"
],
[
"8b-instruct-q4_1",
"5.1GB"
],
[
"8b-instruct-q4_K_M",
"4.9GB"
],
[
"8b-instruct-q4_K_S",
"4.7GB"
],
[
"8b-instruct-q5_0",
"5.6GB"
],
[
"8b-instruct-q5_1",
"6.1GB"
],
[
"8b-instruct-q5_K_M",
"5.7GB"
],
[
"8b-instruct-q5_K_S",
"5.6GB"
],
[
"8b-instruct-q6_K",
"6.6GB"
],
[
"8b-instruct-q8_0",
"8.5GB"
],
[
"8b-text",
"4.7GB"
],
[
"8b-text-fp16",
"16GB"
],
[
"8b-text-q2_K",
"3.2GB"
],
[
"8b-text-q3_K_L",
"4.3GB"
],
[
"8b-text-q3_K_M",
"4.0GB"
],
[
"8b-text-q3_K_S",
"3.7GB"
],
[
"8b-text-q4_0",
"4.7GB"
],
[
"8b-text-q4_1",
"5.1GB"
],
[
"8b-text-q4_K_M",
"4.9GB"
],
[
"8b-text-q4_K_S",
"4.7GB"
],
[
"8b-text-q5_0",
"5.6GB"
],
[
"8b-text-q5_1",
"6.1GB"
],
[
"8b-text-q5_K_M",
"5.7GB"
],
[
"8b-text-q5_K_S",
"5.6GB"
],
[
"8b-text-q6_K",
"6.6GB"
],
[
"8b-text-q8_0",
"8.5GB"
]
],
"image": false,
"author": "Meta"
},
"qwen2": {
"url": "https://ollama.com/library/qwen2",
"description": "Qwen2 is a new series of large language models from Alibaba group",
"tags": [
[
"latest",
"4.4GB"
],
[
"72b",
"41GB"
],
[
"7b",
"4.4GB"
],
[
"1.5b",
"935MB"
],
[
"0.5b",
"352MB"
],
[
"72b-instruct",
"41GB"
],
[
"72b-instruct-fp16",
"145GB"
],
[
"72b-instruct-q2_K",
"30GB"
],
[
"72b-instruct-q3_K_L",
"40GB"
],
[
"72b-instruct-q3_K_M",
"38GB"
],
[
"72b-instruct-q3_K_S",
"34GB"
],
[
"72b-instruct-q4_0",
"41GB"
],
[
"72b-instruct-q4_1",
"46GB"
],
[
"72b-instruct-q4_K_M",
"47GB"
],
[
"72b-instruct-q4_K_S",
"44GB"
],
[
"72b-instruct-q5_0",
"50GB"
],
[
"72b-instruct-q5_1",
"55GB"
],
[
"72b-instruct-q5_K_M",
"54GB"
],
[
"72b-instruct-q5_K_S",
"51GB"
],
[
"72b-instruct-q6_K",
"64GB"
],
[
"72b-instruct-q8_0",
"77GB"
],
[
"72b-text",
"41GB"
],
[
"72b-text-fp16",
"145GB"
],
[
"72b-text-q2_K",
"30GB"
],
[
"72b-text-q3_K_L",
"40GB"
],
[
"72b-text-q3_K_M",
"38GB"
],
[
"72b-text-q3_K_S",
"34GB"
],
[
"72b-text-q4_0",
"41GB"
],
[
"72b-text-q4_1",
"46GB"
],
[
"72b-text-q4_K_M",
"47GB"
],
[
"72b-text-q4_K_S",
"44GB"
],
[
"72b-text-q5_0",
"50GB"
],
[
"72b-text-q5_1",
"55GB"
],
[
"72b-text-q5_K_M",
"54GB"
],
[
"72b-text-q5_K_S",
"51GB"
],
[
"72b-text-q6_K",
"64GB"
],
[
"72b-text-q8_0",
"77GB"
],
[
"7b-instruct",
"4.4GB"
],
[
"7b-instruct-fp16",
"15GB"
],
[
"7b-instruct-q2_K",
"3.0GB"
],
[
"7b-instruct-q3_K_L",
"4.1GB"
],
[
"7b-instruct-q3_K_M",
"3.8GB"
],
[
"7b-instruct-q3_K_S",
"3.5GB"
],
[
"7b-instruct-q4_0",
"4.4GB"
],
[
"7b-instruct-q4_1",
"4.9GB"
],
[
"7b-instruct-q4_K_M",
"4.7GB"
],
[
"7b-instruct-q4_K_S",
"4.5GB"
],
[
"7b-instruct-q5_0",
"5.3GB"
],
[
"7b-instruct-q5_1",
"5.8GB"
],
[
"7b-instruct-q5_K_M",
"5.4GB"
],
[
"7b-instruct-q5_K_S",
"5.3GB"
],
[
"7b-instruct-q6_K",
"6.3GB"
],
[
"7b-instruct-q8_0",
"8.1GB"
],
[
"7b-text",
"4.4GB"
],
[
"7b-text-q2_K",
"3.0GB"
],
[
"7b-text-q3_K_L",
"4.1GB"
],
[
"7b-text-q3_K_M",
"3.8GB"
],
[
"7b-text-q3_K_S",
"3.5GB"
],
[
"7b-text-q4_0",
"4.4GB"
],
[
"7b-text-q4_1",
"4.9GB"
],
[
"7b-text-q4_K_M",
"4.7GB"
],
[
"7b-text-q4_K_S",
"4.5GB"
],
[
"7b-text-q5_0",
"5.3GB"
],
[
"7b-text-q5_1",
"5.8GB"
],
[
"7b-text-q8_0",
"8.1GB"
],
[
"1.5b-instruct",
"935MB"
],
[
"1.5b-instruct-fp16",
"3.1GB"
],
[
"1.5b-instruct-q2_K",
"676MB"
],
[
"1.5b-instruct-q3_K_L",
"880MB"
],
[
"1.5b-instruct-q3_K_M",
"824MB"
],
[
"1.5b-instruct-q3_K_S",
"761MB"
],
[
"1.5b-instruct-q4_0",
"935MB"
],
[
"1.5b-instruct-q4_1",
"1.0GB"
],
[
"1.5b-instruct-q4_K_M",
"986MB"
],
[
"1.5b-instruct-q4_K_S",
"940MB"
],
[
"1.5b-instruct-q5_0",
"1.1GB"
],
[
"1.5b-instruct-q5_1",
"1.2GB"
],
[
"1.5b-instruct-q5_K_M",
"1.1GB"
],
[
"1.5b-instruct-q5_K_S",
"1.1GB"
],
[
"1.5b-instruct-q6_K",
"1.3GB"
],
[
"1.5b-instruct-q8_0",
"1.6GB"
],
[
"0.5b-instruct",
"352MB"
],
[
"0.5b-instruct-fp16",
"994MB"
],
[
"0.5b-instruct-q2_K",
"339MB"
],
[
"0.5b-instruct-q3_K_L",
"369MB"
],
[
"0.5b-instruct-q3_K_M",
"355MB"
],
[
"0.5b-instruct-q3_K_S",
"338MB"
],
[
"0.5b-instruct-q4_0",
"352MB"
],
[
"0.5b-instruct-q4_1",
"375MB"
],
[
"0.5b-instruct-q4_K_M",
"398MB"
],
[
"0.5b-instruct-q4_K_S",
"385MB"
],
[
"0.5b-instruct-q5_0",
"397MB"
],
[
"0.5b-instruct-q5_1",
"419MB"
],
[
"0.5b-instruct-q5_K_M",
"420MB"
],
[
"0.5b-instruct-q5_K_S",
"413MB"
],
[
"0.5b-instruct-q6_K",
"506MB"
],
[
"0.5b-instruct-q8_0",
"531MB"
]
],
"image": false,
"author": "Alibaba"
},
"deepseek-coder-v2": {
"url": "https://ollama.com/library/deepseek-coder-v2",
"description": "An open-source Mixture-of-Experts code language model that achieves performance comparable to GPT4-Turbo in code-specific tasks.",
"tags": [
[
"latest",
"8.9GB"
],
[
"236b",
"133GB"
],
[
"16b",
"8.9GB"
],
[
"lite",
"8.9GB"
],
[
"236b-instruct-q4_k_m",
"142GB"
],
[
"236b-instruct-fp16",
"472GB"
],
[
"236b-instruct-q2_K",
"86GB"
],
[
"236b-instruct-q3_K_L",
"122GB"
],
[
"236b-instruct-q3_K_M",
"113GB"
],
[
"236b-instruct-q3_K_S",
"102GB"
],
[
"236b-instruct-q4_0",
"133GB"
],
[
"236b-instruct-q4_1",
"148GB"
],
[
"236b-instruct-q4_K_M",
"142GB"
],
[
"236b-instruct-q4_K_S",
"134GB"
],
[
"236b-instruct-q5_0",
"162GB"
],
[
"236b-instruct-q5_1",
"177GB"
],
[
"236b-instruct-q5_K_M",
"167GB"
],
[
"236b-instruct-q5_K_S",
"162GB"
],
[
"236b-instruct-q6_K",
"194GB"
],
[
"236b-instruct-q8_0",
"251GB"
],
[
"16b-lite-base-fp16",
"31GB"
],
[
"16b-lite-base-q2_K",
"6.4GB"
],
[
"16b-lite-base-q3_K_L",
"8.5GB"
],
[
"16b-lite-base-q3_K_M",
"8.1GB"
],
[
"16b-lite-base-q3_K_S",
"7.5GB"
],
[
"16b-lite-base-q4_0",
"8.9GB"
],
[
"16b-lite-base-q4_1",
"9.9GB"
],
[
"16b-lite-base-q4_K_M",
"10GB"
],
[
"16b-lite-base-q4_K_S",
"9.5GB"
],
[
"16b-lite-base-q5_0",
"11GB"
],
[
"16b-lite-base-q5_1",
"12GB"
],
[
"16b-lite-base-q5_K_M",
"12GB"
],
[
"16b-lite-base-q5_K_S",
"11GB"
],
[
"16b-lite-base-q6_K",
"14GB"
],
[
"16b-lite-base-q8_0",
"17GB"
],
[
"16b-lite-instruct-fp16",
"31GB"
],
[
"16b-lite-instruct-q2_K",
"6.4GB"
],
[
"16b-lite-instruct-q3_K_L",
"8.5GB"
],
[
"16b-lite-instruct-q3_K_M",
"8.1GB"
],
[
"16b-lite-instruct-q3_K_S",
"7.5GB"
],
[
"16b-lite-instruct-q4_0",
"8.9GB"
],
[
"16b-lite-instruct-q4_1",
"9.9GB"
],
[
"16b-lite-instruct-q4_K_M",
"10GB"
],
[
"16b-lite-instruct-q4_K_S",
"9.5GB"
],
[
"16b-lite-instruct-q5_0",
"11GB"
],
[
"16b-lite-instruct-q5_1",
"12GB"
],
[
"16b-lite-instruct-q5_K_M",
"12GB"
],
[
"16b-lite-instruct-q5_K_S",
"11GB"
],
[
"16b-lite-instruct-q6_K",
"14GB"
],
[
"16b-lite-instruct-q8_0",
"17GB"
]
],
"image": false,
"author": "DeepSeek Team"
},
"phi3": {
"url": "https://ollama.com/library/phi3",
"description": "Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft.",
"tags": [
[
"latest",
"2.4GB"
],
[
"14b",
"7.9GB"
],
[
"3.8b",
"2.4GB"
],
[
"instruct",
"2.4GB"
],
[
"medium",
"7.9GB"
],
[
"mini",
"2.4GB"
],
[
"14b-instruct",
"7.9GB"
],
[
"14b-medium-128k-instruct-f16",
"28GB"
],
[
"14b-medium-4k-instruct-f16",
"28GB"
],
[
"14b-medium-128k-instruct-q2_K",
"5.1GB"
],
[
"14b-medium-128k-instruct-q3_K_L",
"7.5GB"
],
[
"14b-medium-128k-instruct-q3_K_M",
"6.9GB"
],
[
"14b-medium-128k-instruct-q3_K_S",
"6.1GB"
],
[
"14b-medium-128k-instruct-q4_0",
"7.9GB"
],
[
"14b-medium-128k-instruct-q4_1",
"8.8GB"
],
[
"14b-medium-128k-instruct-q4_K_M",
"8.6GB"
],
[
"14b-medium-128k-instruct-q4_K_S",
"8.0GB"
],
[
"14b-medium-128k-instruct-q5_0",
"9.6GB"
],
[
"14b-medium-128k-instruct-q5_1",
"10GB"
],
[
"14b-medium-128k-instruct-q5_K_M",
"10GB"
],
[
"14b-medium-128k-instruct-q5_K_S",
"9.6GB"
],
[
"14b-medium-128k-instruct-q6_K",
"11GB"
],
[
"14b-medium-4k-instruct-q2_K",
"5.1GB"
],
[
"14b-medium-4k-instruct-q3_K_L",
"7.5GB"
],
[
"14b-medium-4k-instruct-q3_K_M",
"6.9GB"
],
[
"14b-medium-4k-instruct-q3_K_S",
"6.1GB"
],
[
"14b-medium-4k-instruct-q4_0",
"7.9GB"
],
[
"14b-medium-4k-instruct-q4_1",
"8.8GB"
],
[
"14b-medium-4k-instruct-q4_K_M",
"8.6GB"
],
[
"14b-medium-4k-instruct-q4_K_S",
"8.0GB"
],
[
"14b-medium-4k-instruct-q5_0",
"9.6GB"
],
[
"14b-medium-4k-instruct-q5_1",
"10GB"
],
[
"14b-medium-4k-instruct-q5_K_M",
"10GB"
],
[
"14b-medium-4k-instruct-q5_K_S",
"9.6GB"
],
[
"14b-medium-4k-instruct-q6_K",
"11GB"
],
[
"14b-medium-4k-instruct-q8_0",
"15GB"
],
[
"3.8b-instruct",
"2.4GB"
],
[
"3.8b-mini-128k-instruct-f16",
"7.6GB"
],
[
"3.8b-mini-4k-instruct-f16",
"7.6GB"
],
[
"3.8b-mini-128k-instruct-q2_K",
"1.4GB"
],
[
"3.8b-mini-128k-instruct-q3_K_L",
"2.1GB"
],
[
"3.8b-mini-128k-instruct-q3_K_M",
"2.0GB"
],
[
"3.8b-mini-128k-instruct-q3_K_S",
"1.7GB"
],
[
"3.8b-mini-128k-instruct-q4_0",
"2.2GB"
],
[
"3.8b-mini-128k-instruct-q4_1",
"2.4GB"
],
[
"3.8b-mini-128k-instruct-q4_K_M",
"2.4GB"
],
[
"3.8b-mini-128k-instruct-q4_K_S",
"2.2GB"
],
[
"3.8b-mini-128k-instruct-q5_0",
"2.6GB"
],
[
"3.8b-mini-128k-instruct-q5_1",
"2.9GB"
],
[
"3.8b-mini-128k-instruct-q5_K_M",
"2.8GB"
],
[
"3.8b-mini-128k-instruct-q5_K_S",
"2.6GB"
],
[
"3.8b-mini-128k-instruct-q6_K",
"3.1GB"
],
[
"3.8b-mini-128k-instruct-q8_0",
"4.1GB"
],
[
"3.8b-mini-4k-instruct-q2_K",
"1.4GB"
],
[
"3.8b-mini-4k-instruct-q3_K_L",
"2.1GB"
],
[
"3.8b-mini-4k-instruct-q3_K_M",
"2.0GB"
],
[
"3.8b-mini-4k-instruct-q3_K_S",
"1.7GB"
],
[
"3.8b-mini-4k-instruct-q4_0",
"2.2GB"
],
[
"3.8b-mini-4k-instruct-q4_1",
"2.4GB"
],
[
"3.8b-mini-4k-instruct-q4_K_M",
"2.4GB"
],
[
"3.8b-mini-4k-instruct-q4_K_S",
"2.2GB"
],
[
"3.8b-mini-4k-instruct-q5_0",
"2.6GB"
],
[
"3.8b-mini-4k-instruct-q5_1",
"2.9GB"
],
[
"3.8b-mini-4k-instruct-q5_K_M",
"2.8GB"
],
[
"3.8b-mini-4k-instruct-q5_K_S",
"2.6GB"
],
[
"3.8b-mini-4k-instruct-q6_K",
"3.1GB"
],
[
"3.8b-mini-4k-instruct-q8_0",
"4.1GB"
],
[
"3.8b-mini-instruct-4k-fp16",
"7.6GB"
],
[
"mini-128k",
"2.2GB"
],
[
"medium-128k",
"7.9GB"
]
],
"image": false,
"author": "Microsoft"
},
"aya": {
"url": "https://ollama.com/library/aya",
"description": "Aya 23, released by Cohere, is a new family of state-of-the-art, multilingual models that support 23 languages.",
"tags": [
[
"latest",
"4.8GB"
],
[
"35b",
"20GB"
],
[
"8b",
"4.8GB"
],
[
"35b-23",
"20GB"
],
[
"35b-23-f16",
"70GB"
],
[
"35b-23-q2_K",
"14GB"
],
[
"35b-23-q3_K_L",
"19GB"
],
[
"35b-23-q3_K_M",
"18GB"
],
[
"35b-23-q3_K_S",
"16GB"
],
[
"35b-23-q4_0",
"20GB"
],
[
"35b-23-q4_1",
"22GB"
],
[
"35b-23-q4_K_M",
"22GB"
],
[
"35b-23-q4_K_S",
"20GB"
],
[
"35b-23-q5_0",
"24GB"
],
[
"35b-23-q5_1",
"26GB"
],
[
"35b-23-q5_K_M",
"25GB"
],
[
"35b-23-q5_K_S",
"24GB"
],
[
"35b-23-q6_K",
"29GB"
],
[
"35b-23-q8_0",
"37GB"
],
[
"8b-23-f16",
"16GB"
],
[
"8b-23",
"4.8GB"
],
[
"8b-23-q2_K",
"3.4GB"
],
[
"8b-23-q3_K_L",
"4.5GB"
],
[
"8b-23-q3_K_M",
"4.2GB"
],
[
"8b-23-q3_K_S",
"3.9GB"
],
[
"8b-23-q4_0",
"4.8GB"
],
[
"8b-23-q4_1",
"5.2GB"
],
[
"8b-23-q4_K_M",
"5.1GB"
],
[
"8b-23-q4_K_S",
"4.8GB"
],
[
"8b-23-q5_0",
"5.7GB"
],
[
"8b-23-q5_1",
"6.1GB"
],
[
"8b-23-q5_K_M",
"5.8GB"
],
[
"8b-23-q5_K_S",
"5.7GB"
],
[
"8b-23-q6_K",
"6.6GB"
],
[
"8b-23-q8_0",
"8.5GB"
]
],
"image": false,
"author": "Cohere"
},
"mistral": {
"url": "https://ollama.com/library/mistral",
"description": "The 7B model released by Mistral AI, updated to version 0.3.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"instruct",
"4.1GB"
],
[
"text",
"4.1GB"
],
[
"v0.1",
"4.1GB"
],
[
"v0.2",
"4.1GB"
],
[
"v0.3",
"4.1GB"
],
[
"7b-instruct",
"4.1GB"
],
[
"7b-instruct-fp16",
"14GB"
],
[
"7b-instruct-q2_K",
"3.1GB"
],
[
"7b-instruct-q3_K_L",
"3.8GB"
],
[
"7b-instruct-q3_K_M",
"3.5GB"
],
[
"7b-instruct-q3_K_S",
"3.2GB"
],
[
"7b-instruct-q4_0",
"4.1GB"
],
[
"7b-instruct-q4_1",
"4.6GB"
],
[
"7b-instruct-q4_K_M",
"4.4GB"
],
[
"7b-instruct-q4_K_S",
"4.1GB"
],
[
"7b-instruct-q5_0",
"5.0GB"
],
[
"7b-instruct-q5_1",
"5.4GB"
],
[
"7b-instruct-q5_K_M",
"5.1GB"
],
[
"7b-instruct-q5_K_S",
"5.0GB"
],
[
"7b-instruct-q6_K",
"5.9GB"
],
[
"7b-instruct-q8_0",
"7.7GB"
],
[
"7b-instruct-v0.2-q5_K_M",
"5.1GB"
],
[
"7b-instruct-v0.2-q5_0",
"5.0GB"
],
[
"7b-instruct-v0.2-q4_0",
"4.1GB"
],
[
"7b-instruct-v0.2-q5_1",
"5.4GB"
],
[
"7b-instruct-v0.2-fp16",
"14GB"
],
[
"7b-instruct-v0.2-q3_K_L",
"3.8GB"
],
[
"7b-instruct-v0.2-q2_K",
"3.1GB"
],
[
"7b-instruct-v0.2-q4_K_S",
"4.1GB"
],
[
"7b-instruct-v0.2-q4_K_M",
"4.4GB"
],
[
"7b-instruct-v0.2-q3_K_M",
"3.5GB"
],
[
"7b-instruct-v0.2-q3_K_S",
"3.2GB"
],
[
"7b-instruct-v0.2-q4_1",
"4.6GB"
],
[
"7b-instruct-v0.2-q5_K_S",
"5.0GB"
],
[
"7b-instruct-v0.2-q6_K",
"5.9GB"
],
[
"7b-instruct-v0.2-q8_0",
"7.7GB"
],
[
"7b-instruct-v0.3-fp16",
"14GB"
],
[
"7b-instruct-v0.3-q2_K",
"2.7GB"
],
[
"7b-instruct-v0.3-q3_K_L",
"3.8GB"
],
[
"7b-instruct-v0.3-q3_K_M",
"3.5GB"
],
[
"7b-instruct-v0.3-q3_K_S",
"3.2GB"
],
[
"7b-instruct-v0.3-q4_0",
"4.1GB"
],
[
"7b-instruct-v0.3-q4_1",
"4.6GB"
],
[
"7b-instruct-v0.3-q4_K_M",
"4.4GB"
],
[
"7b-instruct-v0.3-q4_K_S",
"4.1GB"
],
[
"7b-instruct-v0.3-q5_0",
"5.0GB"
],
[
"7b-instruct-v0.3-q5_1",
"5.4GB"
],
[
"7b-instruct-v0.3-q5_K_M",
"5.1GB"
],
[
"7b-instruct-v0.3-q5_K_S",
"5.0GB"
],
[
"7b-instruct-v0.3-q6_K",
"5.9GB"
],
[
"7b-instruct-v0.3-q8_0",
"7.7GB"
],
[
"7b-text",
"4.1GB"
],
[
"7b-text-fp16",
"14GB"
],
[
"7b-text-q2_K",
"3.1GB"
],
[
"7b-text-q3_K_L",
"3.8GB"
],
[
"7b-text-q3_K_M",
"3.5GB"
],
[
"7b-text-q3_K_S",
"3.2GB"
],
[
"7b-text-q4_0",
"4.1GB"
],
[
"7b-text-q4_1",
"4.6GB"
],
[
"7b-text-q4_K_M",
"4.4GB"
],
[
"7b-text-v0.2-q3_K_S",
"3.2GB"
],
[
"7b-text-q5_K_S",
"5.0GB"
],
[
"7b-text-q8_0",
"7.7GB"
],
[
"7b-text-v0.2-q3_K_L",
"3.8GB"
],
[
"7b-text-v0.2-q3_K_M",
"3.5GB"
],
[
"7b-text-q5_0",
"5.0GB"
],
[
"7b-text-q4_K_S",
"4.1GB"
],
[
"7b-text-q5_K_M",
"5.1GB"
],
[
"7b-text-v0.2-q2_K",
"2.7GB"
],
[
"7b-text-q6_K",
"5.9GB"
],
[
"7b-text-v0.2-fp16",
"14GB"
],
[
"7b-text-q5_1",
"5.4GB"
],
[
"7b-text-v0.2-q4_0",
"4.1GB"
],
[
"7b-text-v0.2-q4_1",
"4.6GB"
],
[
"7b-text-v0.2-q4_K_M",
"4.4GB"
],
[
"7b-text-v0.2-q4_K_S",
"4.1GB"
],
[
"7b-text-v0.2-q5_0",
"5.0GB"
],
[
"7b-text-v0.2-q5_1",
"5.4GB"
],
[
"7b-text-v0.2-q5_K_M",
"5.1GB"
],
[
"7b-text-v0.2-q5_K_S",
"5.0GB"
],
[
"7b-text-v0.2-q6_K",
"5.9GB"
],
[
"7b-text-v0.2-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Mistral AI"
},
"mixtral": {
"url": "https://ollama.com/library/mixtral",
"description": "A set of Mixture of Experts (MoE) model with open weights by Mistral AI in 8x7b and 8x22b parameter sizes.",
"tags": [
[
"latest",
"26GB"
],
[
"8x7b",
"26GB"
],
[
"8x22b",
"80GB"
],
[
"instruct",
"26GB"
],
[
"text",
"26GB"
],
[
"v0.1",
"80GB"
],
[
"8x22b-instruct",
"80GB"
],
[
"8x22b-instruct-v0.1-fp16",
"281GB"
],
[
"8x22b-instruct-v0.1-q2_K",
"52GB"
],
[
"8x22b-instruct-v0.1-q3_K_L",
"73GB"
],
[
"8x22b-instruct-v0.1-q3_K_M",
"68GB"
],
[
"8x22b-instruct-v0.1-q3_K_S",
"62GB"
],
[
"8x22b-instruct-v0.1-q4_0",
"80GB"
],
[
"8x22b-instruct-v0.1-q4_1",
"88GB"
],
[
"8x22b-instruct-v0.1-q4_K_M",
"86GB"
],
[
"8x22b-instruct-v0.1-q4_K_S",
"80GB"
],
[
"8x22b-instruct-v0.1-q5_0",
"97GB"
],
[
"8x22b-instruct-v0.1-q5_1",
"106GB"
],
[
"8x22b-instruct-v0.1-q5_K_M",
"100GB"
],
[
"8x22b-instruct-v0.1-q5_K_S",
"97GB"
],
[
"8x22b-instruct-v0.1-q6_K",
"116GB"
],
[
"8x22b-instruct-v0.1-q8_0",
"149GB"
],
[
"8x7b-instruct-v0.1-fp16",
"93GB"
],
[
"8x7b-instruct-v0.1-q2_K",
"16GB"
],
[
"8x7b-instruct-v0.1-q3_K_L",
"20GB"
],
[
"8x7b-instruct-v0.1-q3_K_M",
"20GB"
],
[
"8x7b-instruct-v0.1-q3_K_S",
"20GB"
],
[
"8x7b-instruct-v0.1-q4_0",
"26GB"
],
[
"8x7b-instruct-v0.1-q4_1",
"29GB"
],
[
"8x7b-instruct-v0.1-q4_K_M",
"26GB"
],
[
"8x7b-instruct-v0.1-q4_K_S",
"26GB"
],
[
"8x7b-instruct-v0.1-q5_0",
"32GB"
],
[
"8x7b-instruct-v0.1-q5_1",
"35GB"
],
[
"8x7b-instruct-v0.1-q5_K_M",
"32GB"
],
[
"8x7b-instruct-v0.1-q5_K_S",
"32GB"
],
[
"8x7b-instruct-v0.1-q6_K",
"38GB"
],
[
"8x7b-instruct-v0.1-q8_0",
"50GB"
],
[
"8x22b-text",
"80GB"
],
[
"8x22b-text-v0.1-fp16",
"281GB"
],
[
"8x22b-text-v0.1-q2_K",
"52GB"
],
[
"8x22b-text-v0.1-q3_K_L",
"73GB"
],
[
"8x22b-text-v0.1-q3_K_M",
"68GB"
],
[
"8x22b-text-v0.1-q3_K_S",
"61GB"
],
[
"8x22b-text-v0.1-q4_0",
"80GB"
],
[
"8x22b-text-v0.1-q4_1",
"88GB"
],
[
"8x22b-text-v0.1-q4_K_M",
"86GB"
],
[
"8x22b-text-v0.1-q4_K_S",
"80GB"
],
[
"8x22b-text-v0.1-q5_0",
"97GB"
],
[
"8x22b-text-v0.1-q5_1",
"106GB"
],
[
"8x22b-text-v0.1-q5_K_M",
"100GB"
],
[
"8x22b-text-v0.1-q5_K_S",
"97GB"
],
[
"8x22b-text-v0.1-q6_K",
"116GB"
],
[
"8x22b-text-v0.1-q8_0",
"149GB"
],
[
"8x7b-text-v0.1-fp16",
"93GB"
],
[
"8x7b-text-v0.1-q2_K",
"16GB"
],
[
"8x7b-text-v0.1-q3_K_L",
"20GB"
],
[
"8x7b-text-v0.1-q3_K_M",
"20GB"
],
[
"8x7b-text-v0.1-q3_K_S",
"20GB"
],
[
"8x7b-text-v0.1-q4_0",
"26GB"
],
[
"8x7b-text-v0.1-q4_1",
"29GB"
],
[
"8x7b-text-v0.1-q4_K_M",
"26GB"
],
[
"8x7b-text-v0.1-q4_K_S",
"26GB"
],
[
"8x7b-text-v0.1-q5_0",
"32GB"
],
[
"8x7b-text-v0.1-q5_1",
"35GB"
],
[
"8x7b-text-v0.1-q5_K_M",
"32GB"
],
[
"8x7b-text-v0.1-q5_K_S",
"32GB"
],
[
"8x7b-text-v0.1-q6_K",
"38GB"
],
[
"8x7b-text-v0.1-q8_0",
"50GB"
],
[
"v0.1-instruct",
"80GB"
]
],
"image": false,
"author": "Mistral AI"
},
"codegemma": {
"url": "https://ollama.com/library/codegemma",
"description": "CodeGemma is a collection of powerful, lightweight models that can perform a variety of coding tasks like fill-in-the-middle code completion, code generation, natural language understanding, mathematical reasoning, and instruction following.",
"tags": [
[
"latest",
"5.0GB"
],
[
"7b",
"5.0GB"
],
[
"2b",
"1.6GB"
],
[
"code",
"1.6GB"
],
[
"instruct",
"5.0GB"
],
[
"7b-code",
"5.0GB"
],
[
"7b-code-fp16",
"17GB"
],
[
"7b-code-q2_K",
"3.5GB"
],
[
"7b-code-q3_K_L",
"4.7GB"
],
[
"7b-code-q3_K_M",
"4.4GB"
],
[
"7b-code-q3_K_S",
"4.0GB"
],
[
"7b-code-q4_0",
"5.0GB"
],
[
"7b-code-q4_1",
"5.5GB"
],
[
"7b-code-q4_K_M",
"5.3GB"
],
[
"7b-code-q4_K_S",
"5.0GB"
],
[
"7b-code-q5_0",
"6.0GB"
],
[
"7b-code-q5_1",
"6.5GB"
],
[
"7b-code-q5_K_M",
"6.1GB"
],
[
"7b-code-q5_K_S",
"6.0GB"
],
[
"7b-code-q6_K",
"7.0GB"
],
[
"7b-code-q8_0",
"9.1GB"
],
[
"7b-instruct",
"5.0GB"
],
[
"7b-instruct-fp16",
"17GB"
],
[
"7b-instruct-q2_K",
"3.5GB"
],
[
"7b-instruct-q3_K_L",
"4.7GB"
],
[
"7b-instruct-q3_K_M",
"4.4GB"
],
[
"7b-instruct-v1.1-q4_K_M",
"5.3GB"
],
[
"7b-instruct-q5_K_M",
"6.1GB"
],
[
"7b-instruct-v1.1-q4_1",
"5.5GB"
],
[
"7b-instruct-v1.1-q4_0",
"5.0GB"
],
[
"7b-instruct-q4_1",
"5.5GB"
],
[
"7b-instruct-q6_K",
"7.0GB"
],
[
"7b-instruct-q4_K_M",
"5.3GB"
],
[
"7b-instruct-q3_K_S",
"4.0GB"
],
[
"7b-instruct-q8_0",
"9.1GB"
],
[
"7b-instruct-q4_K_S",
"5.0GB"
],
[
"7b-instruct-v1.1-q3_K_L",
"4.7GB"
],
[
"7b-instruct-q5_K_S",
"6.0GB"
],
[
"7b-instruct-q5_0",
"6.0GB"
],
[
"7b-instruct-v1.1-fp16",
"17GB"
],
[
"7b-instruct-v1.1-q3_K_S",
"4.0GB"
],
[
"7b-instruct-v1.1-q2_K",
"3.5GB"
],
[
"7b-instruct-v1.1-q3_K_M",
"4.4GB"
],
[
"7b-instruct-q4_0",
"5.0GB"
],
[
"7b-instruct-q5_1",
"6.5GB"
],
[
"7b-instruct-v1.1-q8_0",
"9.1GB"
],
[
"7b-instruct-v1.1-q5_0",
"6.0GB"
],
[
"7b-instruct-v1.1-q5_1",
"6.5GB"
],
[
"7b-instruct-v1.1-q5_K_S",
"6.0GB"
],
[
"7b-instruct-v1.1-q6_K",
"7.0GB"
],
[
"7b-instruct-v1.1-q5_K_M",
"6.1GB"
],
[
"7b-instruct-v1.1-q4_K_S",
"5.0GB"
],
[
"7b-v1.1",
"5.0GB"
],
[
"2b-code",
"1.6GB"
],
[
"2b-code-v1.1-fp16",
"5.0GB"
],
[
"2b-code-v1.1-q2_K",
"1.2GB"
],
[
"2b-code-v1.1-q3_K_L",
"1.5GB"
],
[
"2b-code-v1.1-q3_K_M",
"1.4GB"
],
[
"2b-code-v1.1-q3_K_S",
"1.3GB"
],
[
"2b-code-v1.1-q4_0",
"1.6GB"
],
[
"2b-code-v1.1-q4_1",
"1.7GB"
],
[
"2b-code-q4_K_M",
"1.6GB"
],
[
"2b-code-q3_K_L",
"1.5GB"
],
[
"2b-code-q3_K_M",
"1.4GB"
],
[
"2b-code-v1.1-q6_K",
"2.1GB"
],
[
"2b-code-v1.1-q5_K_S",
"1.8GB"
],
[
"2b-code-q4_1",
"1.7GB"
],
[
"2b-code-v1.1-q5_1",
"1.9GB"
],
[
"2b-code-v1.1-q4_K_M",
"1.6GB"
],
[
"2b-code-v1.1-q5_K_M",
"1.8GB"
],
[
"2b-code-q4_0",
"1.6GB"
],
[
"2b-code-v1.1-q8_0",
"2.7GB"
],
[
"2b-code-q3_K_S",
"1.3GB"
],
[
"2b-code-fp16",
"5.0GB"
],
[
"2b-code-q2_K",
"1.2GB"
],
[
"2b-code-v1.1-q4_K_S",
"1.6GB"
],
[
"2b-code-v1.1-q5_0",
"1.8GB"
],
[
"2b-code-q4_K_S",
"1.6GB"
],
[
"2b-code-q5_0",
"1.8GB"
],
[
"2b-code-q5_1",
"1.9GB"
],
[
"2b-code-q5_K_M",
"1.8GB"
],
[
"2b-code-q5_K_S",
"1.8GB"
],
[
"2b-code-q6_K",
"2.1GB"
],
[
"2b-code-q8_0",
"2.7GB"
],
[
"2b-v1.1",
"1.6GB"
]
],
"image": false,
"author": "Google DeepMind"
},
"command-r": {
"url": "https://ollama.com/library/command-r",
"description": "Command R is a Large Language Model optimized for conversational interaction and long context tasks.",
"tags": [
[
"latest",
"20GB"
],
[
"35b",
"20GB"
],
[
"v0.1",
"20GB"
],
[
"35b-v0.1-fp16",
"70GB"
],
[
"35b-v0.1-q2_K",
"14GB"
],
[
"35b-v0.1-q3_K_L",
"19GB"
],
[
"35b-v0.1-q3_K_M",
"18GB"
],
[
"35b-v0.1-q3_K_S",
"16GB"
],
[
"35b-v0.1-q4_0",
"20GB"
],
[
"35b-v0.1-q4_1",
"22GB"
],
[
"35b-v0.1-q4_K_M",
"22GB"
],
[
"35b-v0.1-q4_K_S",
"20GB"
],
[
"35b-v0.1-q5_1",
"26GB"
],
[
"35b-v0.1-q5_K_M",
"25GB"
],
[
"35b-v0.1-q5_K_S",
"24GB"
],
[
"35b-v0.1-q6_K",
"29GB"
],
[
"35b-v0.1-q8_0",
"37GB"
]
],
"image": false,
"author": "Cohere"
},
"command-r-plus": {
"url": "https://ollama.com/library/command-r-plus",
"description": "Command R+ is a powerful, scalable large language model purpose-built to excel at real-world enterprise use cases.",
"tags": [
[
"latest",
"59GB"
],
[
"104b",
"59GB"
],
[
"104b-fp16",
"208GB"
],
[
"104b-q2_K",
"39GB"
],
[
"104b-q4_0",
"59GB"
],
[
"104b-q8_0",
"110GB"
]
],
"image": false,
"author": "Cohere"
},
"llava": {
"url": "https://ollama.com/library/llava",
"description": "\ud83c\udf0b LLaVA is a novel end-to-end trained large multimodal model that combines a vision encoder and Vicuna for general-purpose visual and language understanding. Updated to version 1.6.",
"tags": [
[
"latest",
"4.7GB"
],
[
"34b",
"20GB"
],
[
"13b",
"8.0GB"
],
[
"7b",
"4.7GB"
],
[
"v1.6",
"4.7GB"
],
[
"34b-v1.6",
"20GB"
],
[
"34b-v1.6-fp16",
"69GB"
],
[
"34b-v1.6-q2_K",
"14GB"
],
[
"34b-v1.6-q3_K_L",
"19GB"
],
[
"34b-v1.6-q3_K_M",
"17GB"
],
[
"34b-v1.6-q3_K_S",
"16GB"
],
[
"34b-v1.6-q4_0",
"20GB"
],
[
"34b-v1.6-q4_1",
"22GB"
],
[
"34b-v1.6-q4_K_M",
"21GB"
],
[
"34b-v1.6-q4_K_S",
"20GB"
],
[
"34b-v1.6-q5_0",
"24GB"
],
[
"34b-v1.6-q5_1",
"27GB"
],
[
"34b-v1.6-q5_K_M",
"25GB"
],
[
"34b-v1.6-q5_K_S",
"24GB"
],
[
"34b-v1.6-q6_K",
"29GB"
],
[
"34b-v1.6-q8_0",
"37GB"
],
[
"13b-v1.5-fp16",
"27GB"
],
[
"13b-v1.5-q2_K",
"6.1GB"
],
[
"13b-v1.5-q3_K_L",
"7.6GB"
],
[
"13b-v1.5-q3_K_M",
"7.0GB"
],
[
"13b-v1.5-q3_K_S",
"6.3GB"
],
[
"13b-v1.5-q4_0",
"8.0GB"
],
[
"13b-v1.5-q4_1",
"8.8GB"
],
[
"13b-v1.5-q4_K_M",
"8.5GB"
],
[
"13b-v1.5-q4_K_S",
"8.1GB"
],
[
"13b-v1.5-q5_0",
"9.6GB"
],
[
"13b-v1.5-q5_1",
"10GB"
],
[
"13b-v1.5-q5_K_M",
"9.9GB"
],
[
"13b-v1.5-q5_K_S",
"9.6GB"
],
[
"13b-v1.5-q6_K",
"11GB"
],
[
"13b-v1.5-q8_0",
"14GB"
],
[
"13b-v1.6",
"8.0GB"
],
[
"13b-v1.6-vicuna-fp16",
"27GB"
],
[
"13b-v1.6-vicuna-q2_K",
"5.5GB"
],
[
"13b-v1.6-vicuna-q3_K_L",
"7.6GB"
],
[
"13b-v1.6-vicuna-q3_K_M",
"7.0GB"
],
[
"13b-v1.6-vicuna-q3_K_S",
"6.3GB"
],
[
"13b-v1.6-vicuna-q4_0",
"8.0GB"
],
[
"13b-v1.6-vicuna-q4_1",
"8.8GB"
],
[
"13b-v1.6-vicuna-q4_K_M",
"8.5GB"
],
[
"13b-v1.6-vicuna-q4_K_S",
"8.1GB"
],
[
"13b-v1.6-vicuna-q5_0",
"9.6GB"
],
[
"13b-v1.6-vicuna-q5_1",
"10GB"
],
[
"13b-v1.6-vicuna-q5_K_M",
"9.9GB"
],
[
"13b-v1.6-vicuna-q5_K_S",
"9.6GB"
],
[
"13b-v1.6-vicuna-q6_K",
"11GB"
],
[
"13b-v1.6-vicuna-q8_0",
"14GB"
],
[
"7b-v1.5-fp16",
"14GB"
],
[
"7b-v1.5-q2_K",
"3.5GB"
],
[
"7b-v1.5-q3_K_L",
"4.2GB"
],
[
"7b-v1.5-q3_K_M",
"3.9GB"
],
[
"7b-v1.5-q3_K_S",
"3.6GB"
],
[
"7b-v1.5-q4_0",
"4.5GB"
],
[
"7b-v1.5-q4_1",
"4.9GB"
],
[
"7b-v1.5-q4_K_M",
"4.7GB"
],
[
"7b-v1.5-q4_K_S",
"4.5GB"
],
[
"7b-v1.5-q5_0",
"5.3GB"
],
[
"7b-v1.5-q5_1",
"5.7GB"
],
[
"7b-v1.5-q5_K_M",
"5.4GB"
],
[
"7b-v1.5-q5_K_S",
"5.3GB"
],
[
"7b-v1.5-q6_K",
"6.2GB"
],
[
"7b-v1.5-q8_0",
"7.8GB"
],
[
"7b-v1.6",
"4.7GB"
],
[
"7b-v1.6-mistral-fp16",
"15GB"
],
[
"7b-v1.6-mistral-q2_K",
"3.3GB"
],
[
"7b-v1.6-mistral-q3_K_L",
"4.4GB"
],
[
"7b-v1.6-mistral-q3_K_M",
"4.1GB"
],
[
"7b-v1.6-mistral-q3_K_S",
"3.8GB"
],
[
"7b-v1.6-mistral-q4_0",
"4.7GB"
],
[
"7b-v1.6-mistral-q4_1",
"5.2GB"
],
[
"7b-v1.6-mistral-q4_K_M",
"5.0GB"
],
[
"7b-v1.6-mistral-q4_K_S",
"4.8GB"
],
[
"7b-v1.6-mistral-q5_0",
"5.6GB"
],
[
"7b-v1.6-mistral-q5_1",
"6.1GB"
],
[
"7b-v1.6-mistral-q5_K_M",
"5.8GB"
],
[
"7b-v1.6-mistral-q5_K_S",
"5.6GB"
],
[
"7b-v1.6-mistral-q6_K",
"6.6GB"
],
[
"7b-v1.6-mistral-q8_0",
"8.3GB"
],
[
"7b-v1.6-vicuna-fp16",
"14GB"
],
[
"7b-v1.6-vicuna-q2_K",
"3.2GB"
],
[
"7b-v1.6-vicuna-q3_K_L",
"4.2GB"
],
[
"7b-v1.6-vicuna-q3_K_M",
"3.9GB"
],
[
"7b-v1.6-vicuna-q3_K_S",
"3.6GB"
],
[
"7b-v1.6-vicuna-q4_0",
"4.5GB"
],
[
"7b-v1.6-vicuna-q4_1",
"4.9GB"
],
[
"7b-v1.6-vicuna-q4_K_M",
"4.7GB"
],
[
"7b-v1.6-vicuna-q4_K_S",
"4.5GB"
],
[
"7b-v1.6-vicuna-q5_0",
"5.3GB"
],
[
"7b-v1.6-vicuna-q5_1",
"5.7GB"
],
[
"7b-v1.6-vicuna-q5_K_M",
"5.4GB"
],
[
"7b-v1.6-vicuna-q5_K_S",
"5.3GB"
],
[
"7b-v1.6-vicuna-q6_K",
"6.2GB"
],
[
"7b-v1.6-vicuna-q8_0",
"7.8GB"
]
],
"image": true,
"author": "Haotian Liu"
},
"gemma": {
"url": "https://ollama.com/library/gemma",
"description": "Gemma is a family of lightweight, state-of-the-art open models built by Google DeepMind. Updated to version 1.1",
"tags": [
[
"latest",
"5.0GB"
],
[
"7b",
"5.0GB"
],
[
"2b",
"1.7GB"
],
[
"instruct",
"5.0GB"
],
[
"text",
"5.2GB"
],
[
"v1.1",
"5.0GB"
],
[
"7b-instruct",
"5.0GB"
],
[
"7b-instruct-fp16",
"17GB"
],
[
"7b-instruct-q2_K",
"3.7GB"
],
[
"7b-instruct-q3_K_L",
"4.9GB"
],
[
"7b-instruct-q3_K_M",
"4.6GB"
],
[
"7b-instruct-q3_K_S",
"4.2GB"
],
[
"7b-instruct-q4_0",
"5.2GB"
],
[
"7b-instruct-q4_1",
"5.7GB"
],
[
"7b-instruct-q4_K_M",
"5.5GB"
],
[
"7b-instruct-q4_K_S",
"5.2GB"
],
[
"7b-instruct-q5_0",
"6.2GB"
],
[
"7b-instruct-q5_1",
"6.7GB"
],
[
"7b-instruct-q5_K_M",
"6.3GB"
],
[
"7b-instruct-q5_K_S",
"6.2GB"
],
[
"7b-instruct-q6_K",
"7.2GB"
],
[
"7b-instruct-v1.1-q5_K_S",
"6.0GB"
],
[
"7b-instruct-v1.1-q3_K_S",
"4.0GB"
],
[
"7b-instruct-v1.1-q5_1",
"6.5GB"
],
[
"7b-instruct-v1.1-q4_1",
"5.5GB"
],
[
"7b-instruct-v1.1-q4_K_S",
"5.0GB"
],
[
"7b-instruct-v1.1-q3_K_L",
"4.7GB"
],
[
"7b-instruct-v1.1-q3_K_M",
"4.4GB"
],
[
"7b-instruct-v1.1-q5_0",
"6.0GB"
],
[
"7b-instruct-v1.1-q4_0",
"5.0GB"
],
[
"7b-instruct-v1.1-q4_K_M",
"5.3GB"
],
[
"7b-instruct-v1.1-q5_K_M",
"6.1GB"
],
[
"7b-instruct-q8_0",
"9.1GB"
],
[
"7b-instruct-v1.1-q2_K",
"3.5GB"
],
[
"7b-instruct-v1.1-fp16",
"17GB"
],
[
"7b-instruct-v1.1-q6_K",
"7.0GB"
],
[
"7b-instruct-v1.1-q8_0",
"9.1GB"
],
[
"7b-text",
"5.2GB"
],
[
"7b-text-fp16",
"16GB"
],
[
"7b-text-q2_K",
"3.7GB"
],
[
"7b-text-q3_K_L",
"4.9GB"
],
[
"7b-text-q3_K_M",
"4.6GB"
],
[
"7b-text-q3_K_S",
"4.2GB"
],
[
"7b-text-q4_0",
"5.2GB"
],
[
"7b-text-q4_1",
"5.7GB"
],
[
"7b-text-q4_K_M",
"5.5GB"
],
[
"7b-text-q4_K_S",
"5.2GB"
],
[
"7b-text-q5_0",
"6.2GB"
],
[
"7b-text-q5_1",
"6.7GB"
],
[
"7b-text-q5_K_M",
"6.3GB"
],
[
"7b-text-q5_K_S",
"6.2GB"
],
[
"7b-text-q6_K",
"7.2GB"
],
[
"7b-text-q8_0",
"9.1GB"
],
[
"7b-v1.1",
"5.0GB"
],
[
"2b-instruct",
"1.6GB"
],
[
"2b-instruct-fp16",
"4.5GB"
],
[
"2b-instruct-q2_K",
"1.3GB"
],
[
"2b-instruct-q3_K_L",
"1.6GB"
],
[
"2b-instruct-q3_K_M",
"1.5GB"
],
[
"2b-instruct-q3_K_S",
"1.4GB"
],
[
"2b-instruct-q4_0",
"1.7GB"
],
[
"2b-instruct-q4_1",
"1.8GB"
],
[
"2b-instruct-q4_K_M",
"1.8GB"
],
[
"2b-instruct-q4_K_S",
"1.7GB"
],
[
"2b-instruct-q5_0",
"1.9GB"
],
[
"2b-instruct-q5_1",
"2.1GB"
],
[
"2b-instruct-q5_K_M",
"2.0GB"
],
[
"2b-instruct-v1.1-fp16",
"5.0GB"
],
[
"2b-instruct-q8_0",
"2.7GB"
],
[
"2b-instruct-q6_K",
"2.2GB"
],
[
"2b-instruct-q5_K_S",
"1.9GB"
],
[
"2b-instruct-v1.1-q3_K_M",
"1.4GB"
],
[
"2b-instruct-v1.1-q3_K_L",
"1.5GB"
],
[
"2b-instruct-v1.1-q2_K",
"1.2GB"
],
[
"2b-instruct-v1.1-q3_K_S",
"1.3GB"
],
[
"2b-instruct-v1.1-q4_0",
"1.6GB"
],
[
"2b-instruct-v1.1-q4_1",
"1.7GB"
],
[
"2b-instruct-v1.1-q4_K_M",
"1.6GB"
],
[
"2b-instruct-v1.1-q4_K_S",
"1.6GB"
],
[
"2b-instruct-v1.1-q5_0",
"1.8GB"
],
[
"2b-instruct-v1.1-q5_1",
"1.9GB"
],
[
"2b-instruct-v1.1-q5_K_M",
"1.8GB"
],
[
"2b-instruct-v1.1-q5_K_S",
"1.8GB"
],
[
"2b-instruct-v1.1-q6_K",
"2.1GB"
],
[
"2b-instruct-v1.1-q8_0",
"2.7GB"
],
[
"2b-text",
"1.7GB"
],
[
"2b-text-fp16",
"4.5GB"
],
[
"2b-text-q2_K",
"1.3GB"
],
[
"2b-text-q3_K_L",
"1.6GB"
],
[
"2b-text-q3_K_M",
"1.5GB"
],
[
"2b-text-q3_K_S",
"1.4GB"
],
[
"2b-text-q4_0",
"1.7GB"
],
[
"2b-text-q4_1",
"1.8GB"
],
[
"2b-text-q4_K_M",
"1.8GB"
],
[
"2b-text-q4_K_S",
"1.7GB"
],
[
"2b-text-q5_0",
"1.9GB"
],
[
"2b-text-q5_1",
"2.1GB"
],
[
"2b-text-q5_K_M",
"2.0GB"
],
[
"2b-text-q5_K_S",
"1.9GB"
],
[
"2b-text-q6_K",
"2.2GB"
],
[
"2b-text-q8_0",
"2.7GB"
],
[
"2b-v1.1",
"1.6GB"
]
],
"image": false,
"author": "Google DeepMind"
},
"qwen": {
"url": "https://ollama.com/library/qwen",
"description": "Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from 0.5B to 110B parameters",
"tags": [
[
"latest",
"2.3GB"
],
[
"110b",
"63GB"
],
[
"72b",
"41GB"
],
[
"32b",
"18GB"
],
[
"14b",
"8.2GB"
],
[
"7b",
"4.5GB"
],
[
"4b",
"2.3GB"
],
[
"1.8b",
"1.1GB"
],
[
"0.5b",
"395MB"
],
[
"110b-chat",
"63GB"
],
[
"110b-chat-v1.5-fp16",
"222GB"
],
[
"110b-chat-v1.5-q2_K",
"41GB"
],
[
"110b-chat-v1.5-q3_K_L",
"58GB"
],
[
"110b-chat-v1.5-q3_K_M",
"54GB"
],
[
"110b-chat-v1.5-q3_K_S",
"48GB"
],
[
"110b-chat-v1.5-q4_0",
"63GB"
],
[
"110b-chat-v1.5-q4_1",
"70GB"
],
[
"110b-chat-v1.5-q4_K_M",
"67GB"
],
[
"110b-chat-v1.5-q4_K_S",
"63GB"
],
[
"110b-chat-v1.5-q5_0",
"77GB"
],
[
"110b-chat-v1.5-q5_1",
"84GB"
],
[
"110b-chat-v1.5-q5_K_M",
"79GB"
],
[
"110b-chat-v1.5-q5_K_S",
"77GB"
],
[
"110b-chat-v1.5-q6_K",
"91GB"
],
[
"110b-chat-v1.5-q8_0",
"118GB"
],
[
"110b-text-v1.5-fp16",
"222GB"
],
[
"110b-text-v1.5-q2_K",
"41GB"
],
[
"110b-text-v1.5-q3_K_L",
"58GB"
],
[
"110b-text-v1.5-q3_K_M",
"54GB"
],
[
"110b-text-v1.5-q3_K_S",
"48GB"
],
[
"110b-text-v1.5-q4_0",
"63GB"
],
[
"110b-text-v1.5-q4_1",
"70GB"
],
[
"110b-text-v1.5-q4_K_M",
"67GB"
],
[
"110b-text-v1.5-q4_K_S",
"63GB"
],
[
"110b-text-v1.5-q5_0",
"77GB"
],
[
"110b-text-v1.5-q5_1",
"84GB"
],
[
"110b-text-v1.5-q5_K_M",
"79GB"
],
[
"110b-text-v1.5-q5_K_S",
"77GB"
],
[
"110b-text-v1.5-q6_K",
"91GB"
],
[
"110b-text-v1.5-q8_0",
"118GB"
],
[
"72b-chat",
"41GB"
],
[
"72b-chat-fp16",
"145GB"
],
[
"72b-chat-q2_K",
"27GB"
],
[
"72b-chat-q3_K_L",
"39GB"
],
[
"72b-chat-q3_K_M",
"37GB"
],
[
"72b-chat-q3_K_S",
"32GB"
],
[
"72b-chat-q4_0",
"41GB"
],
[
"72b-chat-q4_1",
"45GB"
],
[
"72b-chat-q4_K_M",
"45GB"
],
[
"72b-chat-q4_K_S",
"41GB"
],
[
"72b-chat-q5_0",
"50GB"
],
[
"72b-chat-q5_1",
"54GB"
],
[
"72b-chat-q5_K_M",
"53GB"
],
[
"72b-chat-q5_K_S",
"50GB"
],
[
"72b-chat-q6_K",
"59GB"
],
[
"72b-chat-v1.5-q4_K_M",
"44GB"
],
[
"72b-chat-v1.5-q2_K",
"28GB"
],
[
"72b-chat-v1.5-q4_0",
"41GB"
],
[
"72b-chat-v1.5-q4_1",
"45GB"
],
[
"72b-chat-v1.5-q3_K_L",
"38GB"
],
[
"72b-chat-q8_0",
"77GB"
],
[
"72b-chat-v1.5-q3_K_M",
"36GB"
],
[
"72b-chat-v1.5-q3_K_S",
"33GB"
],
[
"72b-chat-v1.5-fp16",
"145GB"
],
[
"72b-chat-v1.5-q4_K_S",
"42GB"
],
[
"72b-chat-v1.5-q5_0",
"50GB"
],
[
"72b-chat-v1.5-q5_1",
"54GB"
],
[
"72b-chat-v1.5-q5_K_M",
"51GB"
],
[
"72b-chat-v1.5-q5_K_S",
"50GB"
],
[
"72b-chat-v1.5-q6_K",
"59GB"
],
[
"72b-chat-v1.5-q8_0",
"77GB"
],
[
"72b-text",
"63GB"
],
[
"72b-text-v1.5-fp16",
"145GB"
],
[
"72b-text-v1.5-q2_K",
"28GB"
],
[
"72b-text-v1.5-q3_K_L",
"38GB"
],
[
"72b-text-v1.5-q3_K_M",
"36GB"
],
[
"72b-text-q4_1",
"45GB"
],
[
"72b-text-v1.5-q5_K_M",
"51GB"
],
[
"72b-text-fp16",
"145GB"
],
[
"72b-text-q4_0",
"41GB"
],
[
"72b-text-v1.5-q4_K_M",
"44GB"
],
[
"72b-text-v1.5-q5_1",
"54GB"
],
[
"72b-text-v1.5-q8_0",
"77GB"
],
[
"72b-text-q3_K_S",
"32GB"
],
[
"72b-text-q3_K_M",
"37GB"
],
[
"72b-text-v1.5-q4_K_S",
"42GB"
],
[
"72b-text-v1.5-q4_1",
"45GB"
],
[
"72b-text-q3_K_L",
"39GB"
],
[
"72b-text-v1.5-q3_K_S",
"33GB"
],
[
"72b-text-v1.5-q5_0",
"50GB"
],
[
"72b-text-v1.5-q6_K",
"59GB"
],
[
"72b-text-v1.5-q4_0",
"41GB"
],
[
"72b-text-q2_K",
"27GB"
],
[
"72b-text-v1.5-q5_K_S",
"50GB"
],
[
"72b-text-q4_K_M",
"45GB"
],
[
"72b-text-q4_K_S",
"41GB"
],
[
"72b-text-q5_0",
"50GB"
],
[
"72b-text-q5_1",
"54GB"
],
[
"72b-text-q5_K_M",
"53GB"
],
[
"72b-text-q5_K_S",
"50GB"
],
[
"72b-text-q6_K",
"59GB"
],
[
"72b-text-q8_0",
"77GB"
],
[
"32b-chat",
"18GB"
],
[
"32b-chat-v1.5-fp16",
"65GB"
],
[
"32b-chat-v1.5-q2_K",
"12GB"
],
[
"32b-chat-v1.5-q3_K_L",
"17GB"
],
[
"32b-chat-v1.5-q3_K_M",
"16GB"
],
[
"32b-chat-v1.5-q3_K_S",
"14GB"
],
[
"32b-chat-v1.5-q4_0",
"18GB"
],
[
"32b-chat-v1.5-q4_1",
"20GB"
],
[
"32b-chat-v1.5-q4_K_M",
"20GB"
],
[
"32b-chat-v1.5-q4_K_S",
"19GB"
],
[
"32b-chat-v1.5-q5_0",
"22GB"
],
[
"32b-chat-v1.5-q5_1",
"24GB"
],
[
"32b-chat-v1.5-q5_K_M",
"23GB"
],
[
"32b-chat-v1.5-q5_K_S",
"22GB"
],
[
"32b-chat-v1.5-q6_K",
"27GB"
],
[
"32b-chat-v1.5-q8_0",
"35GB"
],
[
"32b-text",
"18GB"
],
[
"32b-text-v1.5-q2_K",
"12GB"
],
[
"32b-text-v1.5-q3_K_L",
"17GB"
],
[
"32b-text-v1.5-q3_K_M",
"16GB"
],
[
"32b-text-v1.5-q3_K_S",
"14GB"
],
[
"32b-text-v1.5-q4_0",
"18GB"
],
[
"32b-text-v1.5-q4_1",
"20GB"
],
[
"32b-text-v1.5-q4_K_S",
"19GB"
],
[
"32b-text-v1.5-q5_0",
"22GB"
],
[
"32b-text-v1.5-q5_1",
"24GB"
],
[
"32b-text-v1.5-q8_0",
"35GB"
],
[
"14b-chat",
"8.2GB"
],
[
"14b-chat-v1.5-fp16",
"28GB"
],
[
"14b-chat-v1.5-q2_K",
"6.1GB"
],
[
"14b-chat-v1.5-q3_K_L",
"7.8GB"
],
[
"14b-chat-v1.5-q3_K_M",
"7.4GB"
],
[
"14b-chat-v1.5-q3_K_S",
"6.9GB"
],
[
"14b-chat-v1.5-q4_0",
"8.2GB"
],
[
"14b-chat-v1.5-q4_1",
"9.0GB"
],
[
"14b-chat-v1.5-q4_K_M",
"9.2GB"
],
[
"14b-chat-v1.5-q4_K_S",
"8.6GB"
],
[
"14b-chat-v1.5-q5_0",
"9.9GB"
],
[
"14b-chat-v1.5-q5_1",
"11GB"
],
[
"14b-chat-v1.5-q5_K_M",
"11GB"
],
[
"14b-chat-v1.5-q5_K_S",
"10GB"
],
[
"14b-chat-v1.5-q6_K",
"12GB"
],
[
"14b-chat-v1.5-q8_0",
"15GB"
],
[
"14b-chat-q8_0",
"15GB"
],
[
"14b-chat-q3_K_S",
"6.9GB"
],
[
"14b-chat-fp16",
"28GB"
],
[
"14b-chat-q4_K_S",
"8.6GB"
],
[
"14b-chat-q5_K_M",
"11GB"
],
[
"14b-chat-q4_1",
"9.0GB"
],
[
"14b-chat-q6_K",
"12GB"
],
[
"14b-chat-q3_K_L",
"8.0GB"
],
[
"14b-chat-q5_0",
"9.9GB"
],
[
"14b-chat-q3_K_M",
"7.7GB"
],
[
"14b-chat-q5_1",
"11GB"
],
[
"14b-chat-q4_0",
"8.2GB"
],
[
"14b-chat-q2_K",
"6.0GB"
],
[
"14b-chat-q4_K_M",
"9.4GB"
],
[
"14b-chat-q5_K_S",
"10GB"
],
[
"14b-text",
"8.2GB"
],
[
"14b-text-v1.5-fp16",
"28GB"
],
[
"14b-text-v1.5-q2_K",
"6.1GB"
],
[
"14b-text-v1.5-q3_K_L",
"7.8GB"
],
[
"14b-text-q3_K_L",
"8.0GB"
],
[
"14b-text-v1.5-q8_0",
"15GB"
],
[
"14b-text-v1.5-q4_0",
"8.2GB"
],
[
"14b-text-v1.5-q6_K",
"12GB"
],
[
"14b-text-v1.5-q4_1",
"9.0GB"
],
[
"14b-text-v1.5-q4_K_M",
"9.2GB"
],
[
"14b-text-v1.5-q5_K_S",
"10GB"
],
[
"14b-text-fp16",
"28GB"
],
[
"14b-text-v1.5-q5_1",
"11GB"
],
[
"14b-text-v1.5-q3_K_S",
"6.9GB"
],
[
"14b-text-v1.5-q5_0",
"9.9GB"
],
[
"14b-text-v1.5-q4_K_S",
"8.6GB"
],
[
"14b-text-v1.5-q3_K_M",
"7.4GB"
],
[
"14b-text-q2_K",
"6.0GB"
],
[
"14b-text-v1.5-q5_K_M",
"11GB"
],
[
"14b-text-q5_K_S",
"10GB"
],
[
"14b-text-q3_K_S",
"6.9GB"
],
[
"14b-text-q4_1",
"9.0GB"
],
[
"14b-text-q4_0",
"8.2GB"
],
[
"14b-text-q3_K_M",
"7.7GB"
],
[
"14b-text-q4_K_S",
"8.6GB"
],
[
"14b-text-q5_0",
"9.9GB"
],
[
"14b-text-q5_K_M",
"11GB"
],
[
"14b-text-q4_K_M",
"9.4GB"
],
[
"14b-text-q5_1",
"11GB"
],
[
"14b-text-q6_K",
"12GB"
],
[
"14b-text-q8_0",
"15GB"
],
[
"7b-chat",
"4.5GB"
],
[
"7b-chat-v1.5-fp16",
"15GB"
],
[
"7b-chat-v1.5-q2_K",
"3.1GB"
],
[
"7b-chat-v1.5-q3_K_L",
"4.2GB"
],
[
"7b-chat-v1.5-q3_K_M",
"3.9GB"
],
[
"7b-chat-v1.5-q3_K_S",
"3.6GB"
],
[
"7b-chat-v1.5-q4_0",
"4.5GB"
],
[
"7b-chat-v1.5-q4_1",
"5.0GB"
],
[
"7b-chat-v1.5-q4_K_M",
"4.8GB"
],
[
"7b-chat-v1.5-q4_K_S",
"4.5GB"
],
[
"7b-chat-v1.5-q5_0",
"5.4GB"
],
[
"7b-chat-v1.5-q5_1",
"5.8GB"
],
[
"7b-chat-v1.5-q5_K_M",
"5.5GB"
],
[
"7b-chat-v1.5-q5_K_S",
"5.4GB"
],
[
"7b-chat-v1.5-q6_K",
"6.3GB"
],
[
"7b-chat-q3_K_L",
"4.3GB"
],
[
"7b-chat-fp16",
"15GB"
],
[
"7b-chat-v1.5-q8_0",
"8.2GB"
],
[
"7b-chat-q2_K",
"3.0GB"
],
[
"7b-chat-q3_K_M",
"4.1GB"
],
[
"7b-chat-q3_K_S",
"3.6GB"
],
[
"7b-chat-q4_0",
"4.5GB"
],
[
"7b-chat-q4_1",
"5.0GB"
],
[
"7b-chat-q4_K_M",
"4.9GB"
],
[
"7b-chat-q4_K_S",
"4.5GB"
],
[
"7b-chat-q5_0",
"5.4GB"
],
[
"7b-chat-q5_1",
"5.8GB"
],
[
"7b-chat-q5_K_M",
"5.7GB"
],
[
"7b-chat-q5_K_S",
"5.4GB"
],
[
"7b-chat-q6_K",
"6.3GB"
],
[
"7b-chat-q8_0",
"8.2GB"
],
[
"7b-text",
"4.5GB"
],
[
"7b-text-v1.5-fp16",
"15GB"
],
[
"7b-text-v1.5-q2_K",
"3.1GB"
],
[
"7b-text-v1.5-q3_K_L",
"4.2GB"
],
[
"7b-text-v1.5-q3_K_M",
"3.9GB"
],
[
"7b-text-v1.5-q3_K_S",
"3.6GB"
],
[
"7b-text-v1.5-q4_0",
"4.5GB"
],
[
"7b-text-v1.5-q4_1",
"5.0GB"
],
[
"7b-text-v1.5-q4_K_M",
"4.8GB"
],
[
"7b-text-v1.5-q4_K_S",
"4.5GB"
],
[
"7b-text-v1.5-q5_0",
"5.4GB"
],
[
"7b-text-v1.5-q5_1",
"5.8GB"
],
[
"7b-text-v1.5-q5_K_M",
"5.5GB"
],
[
"7b-text-v1.5-q5_K_S",
"5.4GB"
],
[
"7b-text-v1.5-q6_K",
"6.3GB"
],
[
"7b-text-v1.5-q8_0",
"8.2GB"
],
[
"4b-chat",
"2.3GB"
],
[
"7b-fp16",
"15GB"
],
[
"7b-q2_K",
"3.0GB"
],
[
"7b-q3_K_L",
"4.3GB"
],
[
"7b-q3_K_M",
"4.1GB"
],
[
"7b-q3_K_S",
"3.6GB"
],
[
"7b-q4_0",
"4.5GB"
],
[
"7b-q4_1",
"5.0GB"
],
[
"7b-q4_K_M",
"4.9GB"
],
[
"7b-q4_K_S",
"4.5GB"
],
[
"7b-q5_0",
"5.4GB"
],
[
"7b-q5_1",
"5.8GB"
],
[
"7b-q5_K_M",
"5.7GB"
],
[
"7b-q5_K_S",
"5.4GB"
],
[
"7b-q6_K",
"6.3GB"
],
[
"7b-q8_0",
"8.2GB"
],
[
"4b-chat-v1.5-fp16",
"7.9GB"
],
[
"4b-chat-v1.5-q2_K",
"1.6GB"
],
[
"4b-chat-v1.5-q3_K_L",
"2.2GB"
],
[
"4b-chat-v1.5-q3_K_M",
"2.0GB"
],
[
"4b-chat-v1.5-q3_K_S",
"1.9GB"
],
[
"4b-chat-v1.5-q4_0",
"2.3GB"
],
[
"4b-chat-v1.5-q4_1",
"2.6GB"
],
[
"4b-chat-v1.5-q4_K_M",
"2.5GB"
],
[
"4b-chat-v1.5-q4_K_S",
"2.3GB"
],
[
"4b-chat-v1.5-q5_0",
"2.8GB"
],
[
"4b-chat-v1.5-q5_1",
"3.0GB"
],
[
"4b-chat-v1.5-q5_K_M",
"2.8GB"
],
[
"4b-chat-v1.5-q5_K_S",
"2.8GB"
],
[
"4b-chat-v1.5-q6_K",
"3.2GB"
],
[
"4b-chat-v1.5-q8_0",
"4.2GB"
],
[
"4b-text",
"2.3GB"
],
[
"4b-text-v1.5-fp16",
"7.9GB"
],
[
"4b-text-v1.5-q2_K",
"1.6GB"
],
[
"4b-text-v1.5-q3_K_L",
"2.2GB"
],
[
"4b-text-v1.5-q3_K_M",
"2.0GB"
],
[
"4b-text-v1.5-q3_K_S",
"1.9GB"
],
[
"4b-text-v1.5-q4_0",
"2.3GB"
],
[
"4b-text-v1.5-q4_1",
"2.6GB"
],
[
"4b-text-v1.5-q4_K_M",
"2.5GB"
],
[
"4b-text-v1.5-q4_K_S",
"2.3GB"
],
[
"4b-text-v1.5-q5_0",
"2.8GB"
],
[
"4b-text-v1.5-q5_1",
"3.0GB"
],
[
"4b-text-v1.5-q5_K_M",
"2.8GB"
],
[
"4b-text-v1.5-q5_K_S",
"2.8GB"
],
[
"4b-text-v1.5-q6_K",
"3.2GB"
],
[
"4b-text-v1.5-q8_0",
"4.2GB"
],
[
"1.8b-chat",
"1.1GB"
],
[
"1.8b-chat-v1.5-fp16",
"3.7GB"
],
[
"1.8b-chat-v1.5-q2_K",
"863MB"
],
[
"1.8b-chat-v1.5-q3_K_L",
"1.1GB"
],
[
"1.8b-chat-v1.5-q3_K_M",
"1.0GB"
],
[
"1.8b-chat-v1.5-q3_K_S",
"970MB"
],
[
"1.8b-chat-v1.5-q4_0",
"1.1GB"
],
[
"1.8b-chat-v1.5-q4_1",
"1.2GB"
],
[
"1.8b-chat-v1.5-q4_K_M",
"1.2GB"
],
[
"1.8b-chat-v1.5-q4_K_S",
"1.2GB"
],
[
"1.8b-chat-q3_K_S",
"970MB"
],
[
"1.8b-chat-v1.5-q5_0",
"1.3GB"
],
[
"1.8b-chat-v1.5-q6_K",
"1.6GB"
],
[
"1.8b-chat-v1.5-q5_1",
"1.4GB"
],
[
"1.8b-chat-v1.5-q8_0",
"2.0GB"
],
[
"1.8b-chat-v1.5-q5_K_S",
"1.3GB"
],
[
"1.8b-chat-q3_K_L",
"1.1GB"
],
[
"1.8b-chat-fp16",
"3.7GB"
],
[
"1.8b-chat-v1.5-q5_K_M",
"1.4GB"
],
[
"1.8b-chat-q2_K",
"853MB"
],
[
"1.8b-chat-q3_K_M",
"1.0GB"
],
[
"1.8b-chat-q4_0",
"1.1GB"
],
[
"1.8b-chat-q4_1",
"1.2GB"
],
[
"1.8b-chat-q4_K_M",
"1.2GB"
],
[
"1.8b-chat-q4_K_S",
"1.2GB"
],
[
"1.8b-chat-q5_0",
"1.3GB"
],
[
"1.8b-chat-q5_1",
"1.4GB"
],
[
"1.8b-chat-q5_K_M",
"1.4GB"
],
[
"1.8b-chat-q5_K_S",
"1.3GB"
],
[
"1.8b-chat-q6_K",
"1.6GB"
],
[
"1.8b-chat-q8_0",
"2.0GB"
],
[
"1.8b-text",
"1.1GB"
],
[
"1.8b-text-fp16",
"3.7GB"
],
[
"1.8b-text-q2_K",
"853MB"
],
[
"1.8b-text-q3_K_L",
"1.1GB"
],
[
"1.8b-text-q3_K_M",
"1.0GB"
],
[
"1.8b-text-q3_K_S",
"970MB"
],
[
"1.8b-text-q4_0",
"1.1GB"
],
[
"1.8b-text-q4_1",
"1.2GB"
],
[
"1.8b-text-q4_K_M",
"1.2GB"
],
[
"1.8b-text-q4_K_S",
"1.2GB"
],
[
"1.8b-text-q5_0",
"1.3GB"
],
[
"1.8b-text-q5_1",
"1.4GB"
],
[
"1.8b-text-q5_K_M",
"1.4GB"
],
[
"1.8b-text-q5_K_S",
"1.3GB"
],
[
"1.8b-text-q6_K",
"1.6GB"
],
[
"1.8b-text-v1.5-q5_K_S",
"1.3GB"
],
[
"1.8b-text-v1.5-q4_K_S",
"1.2GB"
],
[
"1.8b-text-v1.5-q5_0",
"1.3GB"
],
[
"1.8b-text-v1.5-q5_K_M",
"1.4GB"
],
[
"1.8b-text-v1.5-q4_0",
"1.1GB"
],
[
"1.8b-text-v1.5-q2_K",
"863MB"
],
[
"1.8b-text-v1.5-q4_K_M",
"1.2GB"
],
[
"1.8b-text-q8_0",
"2.0GB"
],
[
"1.8b-text-v1.5-q3_K_S",
"970MB"
],
[
"1.8b-text-v1.5-q4_1",
"1.2GB"
],
[
"1.8b-text-v1.5-q5_1",
"1.4GB"
],
[
"1.8b-text-v1.5-q3_K_L",
"1.1GB"
],
[
"1.8b-text-v1.5-fp16",
"3.7GB"
],
[
"1.8b-text-v1.5-q3_K_M",
"1.0GB"
],
[
"1.8b-text-v1.5-q6_K",
"1.6GB"
],
[
"1.8b-text-v1.5-q8_0",
"2.0GB"
],
[
"0.5b-chat",
"395MB"
],
[
"0.5b-chat-v1.5-fp16",
"1.2GB"
],
[
"0.5b-chat-v1.5-q2_K",
"298MB"
],
[
"0.5b-chat-v1.5-q3_K_L",
"364MB"
],
[
"0.5b-chat-v1.5-q3_K_M",
"350MB"
],
[
"0.5b-chat-v1.5-q3_K_S",
"333MB"
],
[
"0.5b-chat-v1.5-q4_0",
"395MB"
],
[
"0.5b-chat-v1.5-q4_1",
"424MB"
],
[
"0.5b-chat-v1.5-q4_K_M",
"407MB"
],
[
"0.5b-chat-v1.5-q4_K_S",
"397MB"
],
[
"0.5b-chat-v1.5-q5_0",
"453MB"
],
[
"0.5b-chat-v1.5-q5_1",
"482MB"
],
[
"0.5b-chat-v1.5-q5_K_M",
"459MB"
],
[
"0.5b-chat-v1.5-q5_K_S",
"453MB"
],
[
"0.5b-chat-v1.5-q6_K",
"515MB"
],
[
"0.5b-chat-v1.5-q8_0",
"665MB"
],
[
"0.5b-text",
"395MB"
],
[
"0.5b-text-v1.5-fp16",
"1.2GB"
],
[
"0.5b-text-v1.5-q2_K",
"298MB"
],
[
"0.5b-text-v1.5-q3_K_L",
"364MB"
],
[
"0.5b-text-v1.5-q3_K_M",
"350MB"
],
[
"0.5b-text-v1.5-q3_K_S",
"333MB"
],
[
"0.5b-text-v1.5-q4_0",
"395MB"
],
[
"0.5b-text-v1.5-q4_1",
"424MB"
],
[
"0.5b-text-v1.5-q4_K_M",
"407MB"
],
[
"0.5b-text-v1.5-q4_K_S",
"397MB"
],
[
"0.5b-text-v1.5-q5_0",
"453MB"
],
[
"0.5b-text-v1.5-q5_1",
"482MB"
],
[
"0.5b-text-v1.5-q5_K_M",
"459MB"
],
[
"0.5b-text-v1.5-q5_K_S",
"453MB"
],
[
"0.5b-text-v1.5-q6_K",
"515MB"
],
[
"0.5b-text-v1.5-q8_0",
"665MB"
]
],
"image": false,
"author": "Alibaba"
},
"llama2": {
"url": "https://ollama.com/library/llama2",
"description": "Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters.",
"tags": [
[
"latest",
"3.8GB"
],
[
"70b",
"39GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"chat",
"3.8GB"
],
[
"text",
"3.8GB"
],
[
"70b-chat",
"39GB"
],
[
"70b-chat-fp16",
"138GB"
],
[
"70b-chat-q2_K",
"29GB"
],
[
"70b-chat-q3_K_L",
"36GB"
],
[
"70b-chat-q3_K_M",
"33GB"
],
[
"70b-chat-q3_K_S",
"30GB"
],
[
"70b-chat-q4_0",
"39GB"
],
[
"70b-chat-q4_1",
"43GB"
],
[
"70b-chat-q4_K_M",
"41GB"
],
[
"70b-chat-q4_K_S",
"39GB"
],
[
"70b-chat-q5_0",
"47GB"
],
[
"70b-chat-q5_1",
"52GB"
],
[
"70b-chat-q5_K_M",
"49GB"
],
[
"70b-chat-q5_K_S",
"47GB"
],
[
"70b-chat-q6_K",
"57GB"
],
[
"70b-chat-q8_0",
"73GB"
],
[
"70b-text",
"39GB"
],
[
"70b-text-fp16",
"138GB"
],
[
"70b-text-q2_K",
"29GB"
],
[
"70b-text-q3_K_L",
"36GB"
],
[
"70b-text-q3_K_M",
"33GB"
],
[
"70b-text-q3_K_S",
"30GB"
],
[
"70b-text-q4_0",
"39GB"
],
[
"70b-text-q4_1",
"43GB"
],
[
"70b-text-q4_K_M",
"41GB"
],
[
"70b-text-q4_K_S",
"39GB"
],
[
"70b-text-q5_0",
"47GB"
],
[
"70b-text-q5_1",
"52GB"
],
[
"70b-text-q5_K_M",
"49GB"
],
[
"70b-text-q5_K_S",
"47GB"
],
[
"70b-text-q6_K",
"57GB"
],
[
"70b-text-q8_0",
"73GB"
],
[
"13b-chat",
"7.4GB"
],
[
"13b-chat-fp16",
"26GB"
],
[
"13b-chat-q2_K",
"5.4GB"
],
[
"13b-chat-q3_K_L",
"6.9GB"
],
[
"13b-chat-q3_K_M",
"6.3GB"
],
[
"13b-chat-q3_K_S",
"5.7GB"
],
[
"13b-chat-q4_0",
"7.4GB"
],
[
"13b-chat-q4_1",
"8.2GB"
],
[
"13b-chat-q4_K_M",
"7.9GB"
],
[
"13b-chat-q4_K_S",
"7.4GB"
],
[
"13b-chat-q5_0",
"9.0GB"
],
[
"13b-chat-q5_1",
"9.8GB"
],
[
"13b-chat-q5_K_M",
"9.2GB"
],
[
"13b-chat-q5_K_S",
"9.0GB"
],
[
"13b-chat-q6_K",
"11GB"
],
[
"13b-chat-q8_0",
"14GB"
],
[
"13b-text",
"7.4GB"
],
[
"13b-text-fp16",
"26GB"
],
[
"13b-text-q2_K",
"5.4GB"
],
[
"13b-text-q3_K_L",
"6.9GB"
],
[
"13b-text-q3_K_M",
"6.3GB"
],
[
"13b-text-q3_K_S",
"5.7GB"
],
[
"13b-text-q4_0",
"7.4GB"
],
[
"13b-text-q4_1",
"8.2GB"
],
[
"13b-text-q4_K_M",
"7.9GB"
],
[
"13b-text-q4_K_S",
"7.4GB"
],
[
"13b-text-q5_0",
"9.0GB"
],
[
"13b-text-q5_1",
"9.8GB"
],
[
"13b-text-q5_K_M",
"9.2GB"
],
[
"13b-text-q5_K_S",
"9.0GB"
],
[
"13b-text-q6_K",
"11GB"
],
[
"13b-text-q8_0",
"14GB"
],
[
"7b-chat",
"3.8GB"
],
[
"7b-chat-fp16",
"13GB"
],
[
"7b-chat-q2_K",
"2.8GB"
],
[
"7b-chat-q3_K_L",
"3.6GB"
],
[
"7b-chat-q3_K_M",
"3.3GB"
],
[
"7b-chat-q3_K_S",
"2.9GB"
],
[
"7b-chat-q4_0",
"3.8GB"
],
[
"7b-chat-q4_1",
"4.2GB"
],
[
"7b-chat-q4_K_M",
"4.1GB"
],
[
"7b-chat-q4_K_S",
"3.9GB"
],
[
"7b-chat-q5_0",
"4.7GB"
],
[
"7b-chat-q5_1",
"5.1GB"
],
[
"7b-chat-q5_K_M",
"4.8GB"
],
[
"7b-chat-q5_K_S",
"4.7GB"
],
[
"7b-chat-q6_K",
"5.5GB"
],
[
"7b-chat-q8_0",
"7.2GB"
],
[
"7b-text",
"3.8GB"
],
[
"7b-text-fp16",
"13GB"
],
[
"7b-text-q2_K",
"2.8GB"
],
[
"7b-text-q3_K_L",
"3.6GB"
],
[
"7b-text-q3_K_M",
"3.3GB"
],
[
"7b-text-q3_K_S",
"2.9GB"
],
[
"7b-text-q4_0",
"3.8GB"
],
[
"7b-text-q4_1",
"4.2GB"
],
[
"7b-text-q4_K_M",
"4.1GB"
],
[
"7b-text-q4_K_S",
"3.9GB"
],
[
"7b-text-q5_0",
"4.7GB"
],
[
"7b-text-q5_1",
"5.1GB"
],
[
"7b-text-q5_K_M",
"4.8GB"
],
[
"7b-text-q5_K_S",
"4.7GB"
],
[
"7b-text-q6_K",
"5.5GB"
],
[
"7b-text-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Meta"
},
"codellama": {
"url": "https://ollama.com/library/codellama",
"description": "A large language model that can use text prompts to generate and discuss code.",
"tags": [
[
"latest",
"3.8GB"
],
[
"70b",
"39GB"
],
[
"34b",
"19GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"code",
"3.8GB"
],
[
"instruct",
"3.8GB"
],
[
"python",
"3.8GB"
],
[
"70b-code",
"39GB"
],
[
"70b-code-fp16",
"138GB"
],
[
"70b-code-q2_K",
"25GB"
],
[
"70b-code-q3_K_L",
"36GB"
],
[
"70b-code-q3_K_M",
"33GB"
],
[
"70b-code-q3_K_S",
"30GB"
],
[
"70b-code-q4_0",
"39GB"
],
[
"70b-code-q4_1",
"43GB"
],
[
"70b-code-q4_K_M",
"41GB"
],
[
"70b-code-q4_K_S",
"39GB"
],
[
"70b-code-q5_0",
"47GB"
],
[
"70b-code-q5_1",
"52GB"
],
[
"70b-code-q5_K_M",
"49GB"
],
[
"70b-code-q5_K_S",
"47GB"
],
[
"70b-code-q6_K",
"57GB"
],
[
"70b-code-q8_0",
"73GB"
],
[
"70b-instruct",
"39GB"
],
[
"70b-instruct-fp16",
"138GB"
],
[
"70b-instruct-q2_K",
"25GB"
],
[
"70b-instruct-q3_K_L",
"36GB"
],
[
"70b-instruct-q3_K_M",
"33GB"
],
[
"70b-instruct-q3_K_S",
"30GB"
],
[
"70b-instruct-q4_0",
"39GB"
],
[
"70b-instruct-q4_1",
"43GB"
],
[
"70b-instruct-q4_K_M",
"41GB"
],
[
"70b-instruct-q4_K_S",
"39GB"
],
[
"70b-instruct-q5_0",
"47GB"
],
[
"70b-instruct-q5_1",
"52GB"
],
[
"70b-instruct-q5_K_M",
"49GB"
],
[
"70b-instruct-q5_K_S",
"47GB"
],
[
"70b-instruct-q6_K",
"57GB"
],
[
"70b-instruct-q8_0",
"73GB"
],
[
"70b-python",
"39GB"
],
[
"70b-python-fp16",
"138GB"
],
[
"70b-python-q2_K",
"25GB"
],
[
"70b-python-q3_K_L",
"36GB"
],
[
"70b-python-q3_K_M",
"33GB"
],
[
"70b-python-q3_K_S",
"30GB"
],
[
"70b-python-q4_0",
"39GB"
],
[
"70b-python-q4_1",
"43GB"
],
[
"70b-python-q4_K_M",
"41GB"
],
[
"70b-python-q4_K_S",
"39GB"
],
[
"70b-python-q5_0",
"47GB"
],
[
"70b-python-q5_1",
"52GB"
],
[
"70b-python-q5_K_M",
"49GB"
],
[
"70b-python-q5_K_S",
"47GB"
],
[
"70b-python-q6_K",
"57GB"
],
[
"70b-python-q8_0",
"73GB"
],
[
"34b-code",
"19GB"
],
[
"34b-code-q2_K",
"14GB"
],
[
"34b-code-q3_K_L",
"18GB"
],
[
"34b-code-q3_K_M",
"16GB"
],
[
"34b-code-q3_K_S",
"15GB"
],
[
"34b-code-q4_0",
"19GB"
],
[
"34b-code-q4_1",
"21GB"
],
[
"34b-code-q4_K_M",
"20GB"
],
[
"34b-code-q4_K_S",
"19GB"
],
[
"34b-code-q5_0",
"23GB"
],
[
"34b-code-q5_1",
"25GB"
],
[
"34b-code-q5_K_M",
"24GB"
],
[
"34b-code-q5_K_S",
"23GB"
],
[
"34b-code-q6_K",
"28GB"
],
[
"34b-code-q8_0",
"36GB"
],
[
"34b-instruct",
"19GB"
],
[
"34b-instruct-fp16",
"67GB"
],
[
"34b-instruct-q2_K",
"14GB"
],
[
"34b-instruct-q3_K_L",
"18GB"
],
[
"34b-instruct-q3_K_M",
"16GB"
],
[
"34b-instruct-q3_K_S",
"15GB"
],
[
"34b-instruct-q4_0",
"19GB"
],
[
"34b-instruct-q4_1",
"21GB"
],
[
"34b-instruct-q4_K_M",
"20GB"
],
[
"34b-instruct-q4_K_S",
"19GB"
],
[
"34b-instruct-q5_0",
"23GB"
],
[
"34b-instruct-q5_1",
"25GB"
],
[
"34b-instruct-q5_K_M",
"24GB"
],
[
"34b-instruct-q5_K_S",
"23GB"
],
[
"34b-instruct-q6_K",
"28GB"
],
[
"34b-instruct-q8_0",
"36GB"
],
[
"34b-python",
"19GB"
],
[
"34b-python-fp16",
"67GB"
],
[
"34b-python-q2_K",
"14GB"
],
[
"34b-python-q3_K_L",
"18GB"
],
[
"34b-python-q3_K_M",
"16GB"
],
[
"34b-python-q3_K_S",
"15GB"
],
[
"34b-python-q4_0",
"19GB"
],
[
"34b-python-q4_1",
"21GB"
],
[
"34b-python-q4_K_M",
"20GB"
],
[
"34b-python-q4_K_S",
"19GB"
],
[
"34b-python-q5_0",
"23GB"
],
[
"34b-python-q5_1",
"25GB"
],
[
"34b-python-q5_K_M",
"24GB"
],
[
"34b-python-q5_K_S",
"23GB"
],
[
"34b-python-q6_K",
"28GB"
],
[
"34b-python-q8_0",
"36GB"
],
[
"13b-code",
"7.4GB"
],
[
"13b-code-fp16",
"26GB"
],
[
"13b-code-q2_K",
"5.4GB"
],
[
"13b-code-q3_K_L",
"6.9GB"
],
[
"13b-code-q3_K_M",
"6.3GB"
],
[
"13b-code-q3_K_S",
"5.7GB"
],
[
"13b-code-q4_0",
"7.4GB"
],
[
"13b-code-q4_1",
"8.2GB"
],
[
"13b-code-q4_K_M",
"7.9GB"
],
[
"13b-code-q4_K_S",
"7.4GB"
],
[
"13b-code-q5_0",
"9.0GB"
],
[
"13b-code-q5_1",
"9.8GB"
],
[
"13b-code-q5_K_M",
"9.2GB"
],
[
"13b-code-q5_K_S",
"9.0GB"
],
[
"13b-code-q6_K",
"11GB"
],
[
"13b-code-q8_0",
"14GB"
],
[
"13b-instruct",
"7.4GB"
],
[
"13b-instruct-fp16",
"26GB"
],
[
"13b-instruct-q2_K",
"5.4GB"
],
[
"13b-instruct-q3_K_L",
"6.9GB"
],
[
"13b-instruct-q3_K_M",
"6.3GB"
],
[
"13b-instruct-q3_K_S",
"5.7GB"
],
[
"13b-instruct-q4_0",
"7.4GB"
],
[
"13b-instruct-q4_1",
"8.2GB"
],
[
"13b-instruct-q4_K_M",
"7.9GB"
],
[
"13b-instruct-q4_K_S",
"7.4GB"
],
[
"13b-instruct-q5_0",
"9.0GB"
],
[
"13b-instruct-q5_1",
"9.8GB"
],
[
"13b-instruct-q5_K_M",
"9.2GB"
],
[
"13b-instruct-q5_K_S",
"9.0GB"
],
[
"13b-instruct-q6_K",
"11GB"
],
[
"13b-instruct-q8_0",
"14GB"
],
[
"13b-python",
"7.4GB"
],
[
"13b-python-fp16",
"26GB"
],
[
"13b-python-q2_K",
"5.4GB"
],
[
"13b-python-q3_K_L",
"6.9GB"
],
[
"13b-python-q3_K_M",
"6.3GB"
],
[
"13b-python-q3_K_S",
"5.7GB"
],
[
"13b-python-q4_0",
"7.4GB"
],
[
"13b-python-q4_1",
"8.2GB"
],
[
"13b-python-q4_K_M",
"7.9GB"
],
[
"13b-python-q4_K_S",
"7.4GB"
],
[
"13b-python-q5_0",
"9.0GB"
],
[
"13b-python-q5_1",
"9.8GB"
],
[
"13b-python-q5_K_M",
"9.2GB"
],
[
"13b-python-q5_K_S",
"9.0GB"
],
[
"13b-python-q6_K",
"11GB"
],
[
"13b-python-q8_0",
"14GB"
],
[
"7b-code",
"3.8GB"
],
[
"7b-code-fp16",
"13GB"
],
[
"7b-code-q2_K",
"2.8GB"
],
[
"7b-code-q3_K_L",
"3.6GB"
],
[
"7b-code-q3_K_M",
"3.3GB"
],
[
"7b-code-q3_K_S",
"2.9GB"
],
[
"7b-code-q4_0",
"3.8GB"
],
[
"7b-code-q4_1",
"4.2GB"
],
[
"7b-code-q4_K_M",
"4.1GB"
],
[
"7b-code-q4_K_S",
"3.9GB"
],
[
"7b-code-q5_0",
"4.7GB"
],
[
"7b-code-q5_1",
"5.1GB"
],
[
"7b-code-q5_K_M",
"4.8GB"
],
[
"7b-code-q5_K_S",
"4.7GB"
],
[
"7b-code-q6_K",
"5.5GB"
],
[
"7b-code-q8_0",
"7.2GB"
],
[
"7b-instruct",
"3.8GB"
],
[
"7b-instruct-fp16",
"13GB"
],
[
"7b-instruct-q2_K",
"2.8GB"
],
[
"7b-instruct-q3_K_L",
"3.6GB"
],
[
"7b-instruct-q3_K_M",
"3.3GB"
],
[
"7b-instruct-q3_K_S",
"2.9GB"
],
[
"7b-instruct-q4_0",
"3.8GB"
],
[
"7b-instruct-q4_1",
"4.2GB"
],
[
"7b-instruct-q4_K_M",
"4.1GB"
],
[
"7b-instruct-q4_K_S",
"3.9GB"
],
[
"7b-instruct-q5_0",
"4.7GB"
],
[
"7b-instruct-q5_1",
"5.1GB"
],
[
"7b-instruct-q5_K_M",
"4.8GB"
],
[
"7b-instruct-q5_K_S",
"4.7GB"
],
[
"7b-instruct-q6_K",
"5.5GB"
],
[
"7b-instruct-q8_0",
"7.2GB"
],
[
"7b-python",
"3.8GB"
],
[
"7b-python-fp16",
"13GB"
],
[
"7b-python-q2_K",
"2.8GB"
],
[
"7b-python-q3_K_L",
"3.6GB"
],
[
"7b-python-q3_K_M",
"3.3GB"
],
[
"7b-python-q3_K_S",
"2.9GB"
],
[
"7b-python-q4_0",
"3.8GB"
],
[
"7b-python-q4_1",
"4.2GB"
],
[
"7b-python-q4_K_M",
"4.1GB"
],
[
"7b-python-q4_K_S",
"3.9GB"
],
[
"7b-python-q5_0",
"4.7GB"
],
[
"7b-python-q5_1",
"5.1GB"
],
[
"7b-python-q5_K_M",
"4.8GB"
],
[
"7b-python-q5_K_S",
"4.7GB"
],
[
"7b-python-q6_K",
"5.5GB"
],
[
"7b-python-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Meta"
},
"dolphin-mixtral": {
"url": "https://ollama.com/library/dolphin-mixtral",
"description": "Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of experts models that excels at coding tasks. Created by Eric Hartford.",
"tags": [
[
"latest",
"26GB"
],
[
"8x7b",
"26GB"
],
[
"8x22b",
"80GB"
],
[
"v2.5",
"26GB"
],
[
"v2.6",
"26GB"
],
[
"v2.6.1",
"26GB"
],
[
"v2.7",
"26GB"
],
[
"8x7b-v2.5",
"26GB"
],
[
"8x7b-v2.5-fp16",
"93GB"
],
[
"8x7b-v2.5-q2_K",
"16GB"
],
[
"8x7b-v2.5-q3_K_L",
"20GB"
],
[
"8x7b-v2.5-q3_K_M",
"20GB"
],
[
"8x7b-v2.5-q3_K_S",
"20GB"
],
[
"8x7b-v2.5-q4_0",
"26GB"
],
[
"8x7b-v2.5-q4_1",
"29GB"
],
[
"8x7b-v2.5-q4_K_M",
"26GB"
],
[
"8x7b-v2.5-q4_K_S",
"26GB"
],
[
"8x7b-v2.5-q5_0",
"32GB"
],
[
"8x7b-v2.5-q5_1",
"35GB"
],
[
"8x7b-v2.5-q5_K_M",
"32GB"
],
[
"8x7b-v2.5-q5_K_S",
"32GB"
],
[
"8x7b-v2.5-q6_K",
"38GB"
],
[
"8x7b-v2.5-q8_0",
"50GB"
],
[
"8x7b-v2.6",
"26GB"
],
[
"8x7b-v2.6-fp16",
"93GB"
],
[
"8x7b-v2.6-q2_K",
"16GB"
],
[
"8x7b-v2.6-q3_K_L",
"20GB"
],
[
"8x7b-v2.6-q3_K_M",
"20GB"
],
[
"8x7b-v2.6-q3_K_S",
"20GB"
],
[
"8x7b-v2.6-q4_0",
"26GB"
],
[
"8x7b-v2.6-q4_1",
"29GB"
],
[
"8x7b-v2.6-q4_K_M",
"26GB"
],
[
"8x7b-v2.6-q4_K_S",
"26GB"
],
[
"8x7b-v2.6-q5_0",
"32GB"
],
[
"8x7b-v2.6-q5_1",
"35GB"
],
[
"8x7b-v2.6-q5_K_M",
"32GB"
],
[
"8x7b-v2.6-q5_K_S",
"32GB"
],
[
"8x7b-v2.6-q6_K",
"38GB"
],
[
"8x7b-v2.6-q8_0",
"50GB"
],
[
"8x7b-v2.6.1",
"26GB"
],
[
"8x7b-v2.6.1-fp16",
"93GB"
],
[
"8x7b-v2.6.1-q2_K",
"16GB"
],
[
"8x7b-v2.6.1-q3_K_L",
"20GB"
],
[
"8x7b-v2.6.1-q3_K_M",
"20GB"
],
[
"8x7b-v2.6.1-q3_K_S",
"20GB"
],
[
"8x7b-v2.6.1-q4_0",
"26GB"
],
[
"8x7b-v2.6.1-q4_1",
"29GB"
],
[
"8x7b-v2.6.1-q4_K_M",
"26GB"
],
[
"8x7b-v2.6.1-q4_K_S",
"26GB"
],
[
"8x7b-v2.6.1-q5_0",
"32GB"
],
[
"8x7b-v2.6.1-q5_1",
"35GB"
],
[
"8x7b-v2.6.1-q5_K_M",
"32GB"
],
[
"8x7b-v2.6.1-q5_K_S",
"32GB"
],
[
"8x7b-v2.6.1-q6_K",
"38GB"
],
[
"8x7b-v2.6.1-q8_0",
"50GB"
],
[
"8x7b-v2.7",
"26GB"
],
[
"8x7b-v2.7-fp16",
"93GB"
],
[
"8x7b-v2.7-q2_K",
"16GB"
],
[
"8x7b-v2.7-q3_K_L",
"20GB"
],
[
"8x7b-v2.7-q3_K_M",
"20GB"
],
[
"8x7b-v2.7-q3_K_S",
"20GB"
],
[
"8x7b-v2.7-q4_0",
"26GB"
],
[
"8x7b-v2.7-q4_1",
"29GB"
],
[
"8x7b-v2.7-q4_K_M",
"26GB"
],
[
"8x7b-v2.7-q4_K_S",
"26GB"
],
[
"8x7b-v2.7-q5_0",
"32GB"
],
[
"8x7b-v2.7-q5_1",
"35GB"
],
[
"8x7b-v2.7-q5_K_M",
"32GB"
],
[
"8x7b-v2.7-q5_K_S",
"32GB"
],
[
"8x7b-v2.7-q6_K",
"38GB"
],
[
"8x7b-v2.7-q8_0",
"50GB"
],
[
"8x22b-v2.9",
"80GB"
],
[
"8x22b-v2.9-fp16",
"281GB"
],
[
"8x22b-v2.9-q2_K",
"52GB"
],
[
"8x22b-v2.9-q3_K_L",
"73GB"
],
[
"8x22b-v2.9-q3_K_M",
"68GB"
],
[
"8x22b-v2.9-q3_K_S",
"61GB"
],
[
"8x22b-v2.9-q4_0",
"80GB"
],
[
"8x22b-v2.9-q4_1",
"88GB"
],
[
"8x22b-v2.9-q4_K_M",
"86GB"
],
[
"8x22b-v2.9-q4_K_S",
"80GB"
],
[
"8x22b-v2.9-q5_0",
"97GB"
],
[
"8x22b-v2.9-q5_1",
"106GB"
],
[
"8x22b-v2.9-q5_K_M",
"100GB"
],
[
"8x22b-v2.9-q5_K_S",
"97GB"
],
[
"8x22b-v2.9-q6_K",
"116GB"
],
[
"8x22b-v2.9-q8_0",
"149GB"
]
],
"image": false,
"author": "Eric Hartford"
},
"llama2-uncensored": {
"url": "https://ollama.com/library/llama2-uncensored",
"description": "Uncensored Llama 2 model by George Sung and Jarrad Hope.",
"tags": [
[
"latest",
"3.8GB"
],
[
"70b",
"39GB"
],
[
"7b",
"3.8GB"
],
[
"70b-chat",
"39GB"
],
[
"70b-chat-q2_K",
"29GB"
],
[
"70b-chat-q3_K_L",
"36GB"
],
[
"70b-chat-q3_K_M",
"33GB"
],
[
"70b-chat-q3_K_S",
"30GB"
],
[
"70b-chat-q4_0",
"39GB"
],
[
"70b-chat-q4_1",
"43GB"
],
[
"70b-chat-q4_K_M",
"41GB"
],
[
"70b-chat-q4_K_S",
"39GB"
],
[
"70b-chat-q5_0",
"47GB"
],
[
"70b-chat-q5_1",
"52GB"
],
[
"70b-chat-q5_K_M",
"49GB"
],
[
"70b-chat-q5_K_S",
"47GB"
],
[
"70b-chat-q6_K",
"57GB"
],
[
"70b-chat-q8_0",
"73GB"
],
[
"7b-chat",
"3.8GB"
],
[
"7b-chat-fp16",
"13GB"
],
[
"7b-chat-q2_K",
"2.8GB"
],
[
"7b-chat-q3_K_L",
"3.6GB"
],
[
"7b-chat-q3_K_M",
"3.3GB"
],
[
"7b-chat-q3_K_S",
"2.9GB"
],
[
"7b-chat-q4_0",
"3.8GB"
],
[
"7b-chat-q4_1",
"4.2GB"
],
[
"7b-chat-q4_K_M",
"4.1GB"
],
[
"7b-chat-q4_K_S",
"3.9GB"
],
[
"7b-chat-q5_0",
"4.7GB"
],
[
"7b-chat-q5_1",
"5.1GB"
],
[
"7b-chat-q5_K_M",
"4.8GB"
],
[
"7b-chat-q5_K_S",
"4.7GB"
],
[
"7b-chat-q6_K",
"5.5GB"
],
[
"7b-chat-q8_0",
"7.2GB"
]
],
"image": false,
"author": "George Sung, Jarrad Hope"
},
"deepseek-coder": {
"url": "https://ollama.com/library/deepseek-coder",
"description": "DeepSeek Coder is a capable coding model trained on two trillion code and natural language tokens.",
"tags": [
[
"latest",
"776MB"
],
[
"33b",
"19GB"
],
[
"6.7b",
"3.8GB"
],
[
"1.3b",
"776MB"
],
[
"base",
"776MB"
],
[
"instruct",
"776MB"
],
[
"33b-base",
"19GB"
],
[
"33b-base-fp16",
"67GB"
],
[
"33b-base-q2_K",
"14GB"
],
[
"33b-base-q3_K_L",
"18GB"
],
[
"33b-base-q3_K_M",
"16GB"
],
[
"33b-base-q3_K_S",
"14GB"
],
[
"33b-base-q4_0",
"19GB"
],
[
"33b-base-q4_1",
"21GB"
],
[
"33b-base-q4_K_M",
"20GB"
],
[
"33b-base-q4_K_S",
"19GB"
],
[
"33b-base-q5_0",
"23GB"
],
[
"33b-base-q5_1",
"25GB"
],
[
"33b-base-q5_K_M",
"24GB"
],
[
"33b-base-q5_K_S",
"23GB"
],
[
"33b-base-q6_K",
"27GB"
],
[
"33b-base-q8_0",
"35GB"
],
[
"33b-instruct",
"19GB"
],
[
"33b-instruct-fp16",
"67GB"
],
[
"33b-instruct-q2_K",
"14GB"
],
[
"33b-instruct-q3_K_L",
"18GB"
],
[
"33b-instruct-q3_K_M",
"16GB"
],
[
"33b-instruct-q3_K_S",
"14GB"
],
[
"33b-instruct-q4_0",
"19GB"
],
[
"33b-instruct-q4_1",
"21GB"
],
[
"33b-instruct-q4_K_M",
"20GB"
],
[
"33b-instruct-q4_K_S",
"19GB"
],
[
"33b-instruct-q5_0",
"23GB"
],
[
"33b-instruct-q5_1",
"25GB"
],
[
"33b-instruct-q5_K_M",
"24GB"
],
[
"33b-instruct-q5_K_S",
"23GB"
],
[
"33b-instruct-q6_K",
"27GB"
],
[
"33b-instruct-q8_0",
"35GB"
],
[
"6.7b-base",
"3.8GB"
],
[
"6.7b-base-fp16",
"13GB"
],
[
"6.7b-base-q2_K",
"2.8GB"
],
[
"6.7b-base-q3_K_L",
"3.6GB"
],
[
"6.7b-base-q3_K_M",
"3.3GB"
],
[
"6.7b-base-q3_K_S",
"3.0GB"
],
[
"6.7b-base-q4_0",
"3.8GB"
],
[
"6.7b-base-q4_1",
"4.2GB"
],
[
"6.7b-base-q4_K_M",
"4.1GB"
],
[
"6.7b-base-q4_K_S",
"3.9GB"
],
[
"6.7b-base-q5_0",
"4.7GB"
],
[
"6.7b-base-q5_1",
"5.1GB"
],
[
"6.7b-base-q5_K_M",
"4.8GB"
],
[
"6.7b-base-q5_K_S",
"4.7GB"
],
[
"6.7b-base-q6_K",
"5.5GB"
],
[
"6.7b-base-q8_0",
"7.2GB"
],
[
"6.7b-instruct",
"3.8GB"
],
[
"6.7b-instruct-fp16",
"13GB"
],
[
"6.7b-instruct-q2_K",
"2.8GB"
],
[
"6.7b-instruct-q3_K_L",
"3.6GB"
],
[
"6.7b-instruct-q3_K_M",
"3.3GB"
],
[
"6.7b-instruct-q3_K_S",
"3.0GB"
],
[
"6.7b-instruct-q4_0",
"3.8GB"
],
[
"6.7b-instruct-q4_1",
"4.2GB"
],
[
"6.7b-instruct-q4_K_M",
"4.1GB"
],
[
"6.7b-instruct-q4_K_S",
"3.9GB"
],
[
"6.7b-instruct-q5_0",
"4.7GB"
],
[
"6.7b-instruct-q5_1",
"5.1GB"
],
[
"6.7b-instruct-q5_K_M",
"4.8GB"
],
[
"6.7b-instruct-q5_K_S",
"4.7GB"
],
[
"6.7b-instruct-q6_K",
"5.5GB"
],
[
"6.7b-instruct-q8_0",
"7.2GB"
],
[
"1.3b-base",
"776MB"
],
[
"1.3b-base-fp16",
"2.7GB"
],
[
"1.3b-base-q2_K",
"632MB"
],
[
"1.3b-base-q3_K_L",
"745MB"
],
[
"1.3b-base-q3_K_M",
"705MB"
],
[
"1.3b-base-q3_K_S",
"659MB"
],
[
"1.3b-base-q4_0",
"776MB"
],
[
"1.3b-base-q4_1",
"856MB"
],
[
"1.3b-base-q4_K_M",
"874MB"
],
[
"1.3b-base-q4_K_S",
"815MB"
],
[
"1.3b-base-q5_0",
"936MB"
],
[
"1.3b-base-q5_1",
"1.0GB"
],
[
"1.3b-base-q5_K_M",
"1.0GB"
],
[
"1.3b-base-q5_K_S",
"953MB"
],
[
"1.3b-base-q6_K",
"1.2GB"
],
[
"1.3b-base-q8_0",
"1.4GB"
],
[
"1.3b-instruct",
"776MB"
],
[
"1.3b-instruct-fp16",
"2.7GB"
],
[
"1.3b-instruct-q2_K",
"632MB"
],
[
"1.3b-instruct-q3_K_L",
"745MB"
],
[
"1.3b-instruct-q3_K_M",
"705MB"
],
[
"1.3b-instruct-q3_K_S",
"659MB"
],
[
"1.3b-instruct-q4_0",
"776MB"
],
[
"1.3b-instruct-q4_1",
"856MB"
],
[
"1.3b-instruct-q4_K_M",
"874MB"
],
[
"1.3b-instruct-q4_K_S",
"815MB"
],
[
"1.3b-instruct-q5_0",
"936MB"
],
[
"1.3b-instruct-q5_1",
"1.0GB"
],
[
"1.3b-instruct-q5_K_M",
"1.0GB"
],
[
"1.3b-instruct-q5_K_S",
"953MB"
],
[
"1.3b-instruct-q6_K",
"1.2GB"
],
[
"1.3b-instruct-q8_0",
"1.4GB"
]
],
"image": false,
"author": "DeepSeek Team"
},
"nomic-embed-text": {
"url": "https://ollama.com/library/nomic-embed-text",
"description": "A high-performing open embedding model with a large token context window.",
"tags": [
[
"latest",
"274MB"
],
[
"v1.5",
"274MB"
],
[
"137m-v1.5-fp16",
"274MB"
]
],
"image": false,
"author": "Nomic AI"
},
"phi": {
"url": "https://ollama.com/library/phi",
"description": "Phi-2: a 2.7B language model by Microsoft Research that demonstrates outstanding reasoning and language understanding capabilities.",
"tags": [
[
"latest",
"1.6GB"
],
[
"2.7b",
"1.6GB"
],
[
"chat",
"1.6GB"
],
[
"2.7b-chat-v2-fp16",
"5.6GB"
],
[
"2.7b-chat-v2-q2_K",
"1.2GB"
],
[
"2.7b-chat-v2-q3_K_L",
"1.6GB"
],
[
"2.7b-chat-v2-q3_K_M",
"1.5GB"
],
[
"2.7b-chat-v2-q3_K_S",
"1.3GB"
],
[
"2.7b-chat-v2-q4_0",
"1.6GB"
],
[
"2.7b-chat-v2-q4_1",
"1.8GB"
],
[
"2.7b-chat-v2-q4_K_M",
"1.8GB"
],
[
"2.7b-chat-v2-q4_K_S",
"1.6GB"
],
[
"2.7b-chat-v2-q5_0",
"1.9GB"
],
[
"2.7b-chat-v2-q5_1",
"2.1GB"
],
[
"2.7b-chat-v2-q5_K_M",
"2.1GB"
],
[
"2.7b-chat-v2-q5_K_S",
"1.9GB"
],
[
"2.7b-chat-v2-q6_K",
"2.3GB"
],
[
"2.7b-chat-v2-q8_0",
"3.0GB"
]
],
"image": false,
"author": "Microsoft"
},
"dolphin-mistral": {
"url": "https://ollama.com/library/dolphin-mistral",
"description": "The uncensored Dolphin model based on Mistral that excels at coding tasks. Updated to version 2.8.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"v2",
"4.1GB"
],
[
"v2.1",
"4.1GB"
],
[
"v2.2",
"4.1GB"
],
[
"v2.2.1",
"4.1GB"
],
[
"v2.6",
"4.1GB"
],
[
"v2.8",
"4.1GB"
],
[
"7b-v2",
"4.1GB"
],
[
"7b-v2-fp16",
"14GB"
],
[
"7b-v2-q2_K",
"3.1GB"
],
[
"7b-v2-q3_K_L",
"3.8GB"
],
[
"7b-v2-q3_K_M",
"3.5GB"
],
[
"7b-v2-q3_K_S",
"3.2GB"
],
[
"7b-v2-q4_0",
"4.1GB"
],
[
"7b-v2-q4_1",
"4.6GB"
],
[
"7b-v2-q4_K_M",
"4.4GB"
],
[
"7b-v2-q4_K_S",
"4.1GB"
],
[
"7b-v2-q5_0",
"5.0GB"
],
[
"7b-v2-q5_1",
"5.4GB"
],
[
"7b-v2-q5_K_M",
"5.1GB"
],
[
"7b-v2-q5_K_S",
"5.0GB"
],
[
"7b-v2-q6_K",
"5.9GB"
],
[
"7b-v2-q8_0",
"7.7GB"
],
[
"7b-v2.1",
"4.1GB"
],
[
"7b-v2.1-fp16",
"14GB"
],
[
"7b-v2.1-q2_K",
"3.1GB"
],
[
"7b-v2.1-q3_K_L",
"3.8GB"
],
[
"7b-v2.1-q3_K_M",
"3.5GB"
],
[
"7b-v2.1-q3_K_S",
"3.2GB"
],
[
"7b-v2.1-q4_0",
"4.1GB"
],
[
"7b-v2.1-q4_1",
"4.6GB"
],
[
"7b-v2.1-q4_K_M",
"4.4GB"
],
[
"7b-v2.1-q4_K_S",
"4.1GB"
],
[
"7b-v2.1-q5_0",
"5.0GB"
],
[
"7b-v2.1-q5_1",
"5.4GB"
],
[
"7b-v2.1-q5_K_M",
"5.1GB"
],
[
"7b-v2.1-q5_K_S",
"5.0GB"
],
[
"7b-v2.1-q6_K",
"5.9GB"
],
[
"7b-v2.1-q8_0",
"7.7GB"
],
[
"7b-v2.2",
"4.1GB"
],
[
"7b-v2.2-fp16",
"14GB"
],
[
"7b-v2.2-q2_K",
"3.1GB"
],
[
"7b-v2.2-q3_K_L",
"3.8GB"
],
[
"7b-v2.2-q3_K_M",
"3.5GB"
],
[
"7b-v2.2-q3_K_S",
"3.2GB"
],
[
"7b-v2.2-q4_0",
"4.1GB"
],
[
"7b-v2.2-q4_1",
"4.6GB"
],
[
"7b-v2.2-q4_K_M",
"4.4GB"
],
[
"7b-v2.2-q4_K_S",
"4.1GB"
],
[
"7b-v2.2-q5_0",
"5.0GB"
],
[
"7b-v2.2-q5_1",
"5.4GB"
],
[
"7b-v2.2-q5_K_M",
"5.1GB"
],
[
"7b-v2.2-q5_K_S",
"5.0GB"
],
[
"7b-v2.2-q6_K",
"5.9GB"
],
[
"7b-v2.2-q8_0",
"7.7GB"
],
[
"7b-v2.2.1",
"4.1GB"
],
[
"7b-v2.2.1-fp16",
"14GB"
],
[
"7b-v2.2.1-q2_K",
"3.1GB"
],
[
"7b-v2.2.1-q3_K_L",
"3.8GB"
],
[
"7b-v2.2.1-q3_K_M",
"3.5GB"
],
[
"7b-v2.2.1-q3_K_S",
"3.2GB"
],
[
"7b-v2.2.1-q4_0",
"4.1GB"
],
[
"7b-v2.2.1-q4_1",
"4.6GB"
],
[
"7b-v2.2.1-q4_K_M",
"4.4GB"
],
[
"7b-v2.2.1-q4_K_S",
"4.1GB"
],
[
"7b-v2.2.1-q5_0",
"5.0GB"
],
[
"7b-v2.2.1-q5_1",
"5.4GB"
],
[
"7b-v2.2.1-q5_K_M",
"5.1GB"
],
[
"7b-v2.2.1-q5_K_S",
"5.0GB"
],
[
"7b-v2.2.1-q6_K",
"5.9GB"
],
[
"7b-v2.2.1-q8_0",
"7.7GB"
],
[
"7b-v2.6",
"4.1GB"
],
[
"7b-v2.6-dpo-laser",
"4.1GB"
],
[
"7b-v2.6-fp16",
"14GB"
],
[
"7b-v2.6-q2_K",
"3.1GB"
],
[
"7b-v2.6-q3_K_L",
"3.8GB"
],
[
"7b-v2.6-q3_K_M",
"3.5GB"
],
[
"7b-v2.6-q3_K_S",
"3.2GB"
],
[
"7b-v2.6-q4_0",
"4.1GB"
],
[
"7b-v2.6-q4_1",
"4.6GB"
],
[
"7b-v2.6-q4_K_M",
"4.4GB"
],
[
"7b-v2.6-q4_K_S",
"4.1GB"
],
[
"7b-v2.6-q5_0",
"5.0GB"
],
[
"7b-v2.6-q5_1",
"5.4GB"
],
[
"7b-v2.6-q5_K_M",
"5.1GB"
],
[
"7b-v2.6-q5_K_S",
"5.0GB"
],
[
"7b-v2.6-q6_K",
"5.9GB"
],
[
"7b-v2.6-dpo-laser-q4_K_M",
"4.4GB"
],
[
"7b-v2.6-dpo-laser-q3_K_L",
"3.8GB"
],
[
"7b-v2.6-q8_0",
"7.7GB"
],
[
"7b-v2.6-dpo-laser-q3_K_M",
"3.5GB"
],
[
"7b-v2.6-dpo-laser-q2_K",
"3.1GB"
],
[
"7b-v2.6-dpo-laser-q4_0",
"4.1GB"
],
[
"7b-v2.6-dpo-laser-q3_K_S",
"3.2GB"
],
[
"7b-v2.6-dpo-laser-fp16",
"14GB"
],
[
"7b-v2.6-dpo-laser-q4_1",
"4.6GB"
],
[
"7b-v2.6-dpo-laser-q4_K_S",
"4.1GB"
],
[
"7b-v2.6-dpo-laser-q5_0",
"5.0GB"
],
[
"7b-v2.6-dpo-laser-q5_1",
"5.4GB"
],
[
"7b-v2.6-dpo-laser-q5_K_M",
"5.1GB"
],
[
"7b-v2.6-dpo-laser-q5_K_S",
"5.0GB"
],
[
"7b-v2.6-dpo-laser-q6_K",
"5.9GB"
],
[
"7b-v2.6-dpo-laser-q8_0",
"7.7GB"
],
[
"7b-v2.8",
"4.1GB"
],
[
"7b-v2.8-fp16",
"14GB"
],
[
"7b-v2.8-q2_K",
"2.7GB"
],
[
"7b-v2.8-q3_K_L",
"3.8GB"
],
[
"7b-v2.8-q3_K_M",
"3.5GB"
],
[
"7b-v2.8-q3_K_S",
"3.2GB"
],
[
"7b-v2.8-q4_0",
"4.1GB"
],
[
"7b-v2.8-q4_1",
"4.6GB"
],
[
"7b-v2.8-q4_K_M",
"4.4GB"
],
[
"7b-v2.8-q4_K_S",
"4.1GB"
],
[
"7b-v2.8-q5_0",
"5.0GB"
],
[
"7b-v2.8-q5_1",
"5.4GB"
],
[
"7b-v2.8-q5_K_M",
"5.1GB"
],
[
"7b-v2.8-q5_K_S",
"5.0GB"
],
[
"7b-v2.8-q6_K",
"5.9GB"
],
[
"7b-v2.8-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Eric Hartford"
},
"mistral-openorca": {
"url": "https://ollama.com/library/mistral-openorca",
"description": "Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"7b-fp16",
"14GB"
],
[
"7b-q2_K",
"3.1GB"
],
[
"7b-q3_K_L",
"3.8GB"
],
[
"7b-q3_K_M",
"3.5GB"
],
[
"7b-q3_K_S",
"3.2GB"
],
[
"7b-q4_0",
"4.1GB"
],
[
"7b-q4_1",
"4.6GB"
],
[
"7b-q4_K_M",
"4.4GB"
],
[
"7b-q4_K_S",
"4.1GB"
],
[
"7b-q5_0",
"5.0GB"
],
[
"7b-q5_1",
"5.4GB"
],
[
"7b-q5_K_M",
"5.1GB"
],
[
"7b-q5_K_S",
"5.0GB"
],
[
"7b-q6_K",
"5.9GB"
],
[
"7b-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Open Orca"
},
"orca-mini": {
"url": "https://ollama.com/library/orca-mini",
"description": "A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware.",
"tags": [
[
"latest",
"2.0GB"
],
[
"70b",
"39GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"3b",
"2.0GB"
],
[
"70b-v3",
"39GB"
],
[
"70b-v3-fp16",
"138GB"
],
[
"70b-v3-q2_K",
"29GB"
],
[
"70b-v3-q3_K_L",
"36GB"
],
[
"70b-v3-q3_K_M",
"33GB"
],
[
"70b-v3-q3_K_S",
"30GB"
],
[
"70b-v3-q4_0",
"39GB"
],
[
"70b-v3-q4_1",
"43GB"
],
[
"70b-v3-q4_K_M",
"41GB"
],
[
"70b-v3-q4_K_S",
"39GB"
],
[
"70b-v3-q5_0",
"47GB"
],
[
"70b-v3-q5_1",
"52GB"
],
[
"70b-v3-q5_K_M",
"49GB"
],
[
"70b-v3-q5_K_S",
"47GB"
],
[
"70b-v3-q6_K",
"57GB"
],
[
"70b-v3-q8_0",
"73GB"
],
[
"13b-v2-fp16",
"26GB"
],
[
"13b-v2-q2_K",
"5.4GB"
],
[
"13b-v2-q3_K_L",
"6.9GB"
],
[
"13b-v2-q3_K_M",
"6.3GB"
],
[
"13b-v2-q3_K_S",
"5.7GB"
],
[
"13b-v2-q4_0",
"7.4GB"
],
[
"13b-v2-q4_1",
"8.2GB"
],
[
"13b-v2-q4_K_M",
"7.9GB"
],
[
"13b-v2-q4_K_S",
"7.4GB"
],
[
"13b-v2-q5_0",
"9.0GB"
],
[
"13b-v2-q5_1",
"9.8GB"
],
[
"13b-v2-q5_K_M",
"9.2GB"
],
[
"13b-v2-q5_K_S",
"9.0GB"
],
[
"13b-v2-q6_K",
"11GB"
],
[
"13b-v2-q8_0",
"14GB"
],
[
"13b-v3",
"7.4GB"
],
[
"13b-v3-fp16",
"26GB"
],
[
"13b-v3-q2_K",
"5.4GB"
],
[
"13b-v3-q3_K_L",
"6.9GB"
],
[
"13b-v3-q3_K_M",
"6.3GB"
],
[
"13b-v3-q3_K_S",
"5.7GB"
],
[
"13b-v3-q4_0",
"7.4GB"
],
[
"13b-v3-q4_1",
"8.2GB"
],
[
"13b-v3-q4_K_M",
"7.9GB"
],
[
"13b-v3-q4_K_S",
"7.4GB"
],
[
"13b-v3-q5_0",
"9.0GB"
],
[
"13b-v3-q5_1",
"9.8GB"
],
[
"13b-v3-q5_K_M",
"9.2GB"
],
[
"13b-v3-q5_K_S",
"9.0GB"
],
[
"13b-v3-q6_K",
"11GB"
],
[
"13b-v3-q8_0",
"14GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
],
[
"7b-v2-fp16",
"13GB"
],
[
"7b-v2-q2_K",
"2.8GB"
],
[
"7b-v2-q3_K_L",
"3.6GB"
],
[
"7b-v2-q3_K_M",
"3.3GB"
],
[
"7b-v2-q3_K_S",
"2.9GB"
],
[
"7b-v2-q4_0",
"3.8GB"
],
[
"7b-v2-q4_1",
"4.2GB"
],
[
"7b-v2-q4_K_M",
"4.1GB"
],
[
"7b-v2-q4_K_S",
"3.9GB"
],
[
"7b-v2-q5_0",
"4.7GB"
],
[
"7b-v2-q5_1",
"5.1GB"
],
[
"7b-v2-q5_K_M",
"4.8GB"
],
[
"7b-v2-q5_K_S",
"4.7GB"
],
[
"7b-v2-q6_K",
"5.5GB"
],
[
"7b-v2-q8_0",
"7.2GB"
],
[
"7b-v3",
"3.8GB"
],
[
"7b-v3-fp16",
"13GB"
],
[
"7b-v3-q2_K",
"2.8GB"
],
[
"7b-v3-q3_K_L",
"3.6GB"
],
[
"7b-v3-q3_K_M",
"3.3GB"
],
[
"7b-v3-q3_K_S",
"2.9GB"
],
[
"7b-v3-q4_0",
"3.8GB"
],
[
"7b-v3-q4_1",
"4.2GB"
],
[
"7b-v3-q4_K_M",
"4.1GB"
],
[
"7b-v3-q4_K_S",
"3.9GB"
],
[
"7b-v3-q5_0",
"4.7GB"
],
[
"7b-v3-q5_1",
"5.1GB"
],
[
"7b-v3-q5_K_M",
"4.8GB"
],
[
"7b-v3-q5_K_S",
"4.7GB"
],
[
"7b-v3-q6_K",
"5.5GB"
],
[
"7b-v3-q8_0",
"7.2GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
],
[
"3b-fp16",
"6.9GB"
],
[
"3b-q4_0",
"2.0GB"
],
[
"3b-q4_1",
"2.2GB"
],
[
"3b-q5_0",
"2.4GB"
],
[
"3b-q5_1",
"2.6GB"
],
[
"3b-q8_0",
"3.6GB"
]
],
"image": false,
"author": "Orca Mini Team"
},
"mxbai-embed-large": {
"url": "https://ollama.com/library/mxbai-embed-large",
"description": "State-of-the-art large embedding model from mixedbread.ai",
"tags": [
[
"latest",
"670MB"
],
[
"335m",
"670MB"
],
[
"v1",
"670MB"
],
[
"335m-v1-fp16",
"670MB"
]
],
"image": false,
"author": "Mixedbread.ai"
},
"dolphin-llama3": {
"url": "https://ollama.com/library/dolphin-llama3",
"description": "Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on Llama 3 that has a variety of instruction, conversational, and coding skills.",
"tags": [
[
"latest",
"4.7GB"
],
[
"70b",
"40GB"
],
[
"8b",
"4.7GB"
],
[
"256k",
"4.7GB"
],
[
"v2.9",
"4.7GB"
],
[
"70b-v2.9",
"40GB"
],
[
"70b-v2.9-fp16",
"141GB"
],
[
"70b-v2.9-q2_K",
"26GB"
],
[
"70b-v2.9-q3_K_L",
"37GB"
],
[
"70b-v2.9-q3_K_M",
"34GB"
],
[
"70b-v2.9-q3_K_S",
"31GB"
],
[
"70b-v2.9-q4_0",
"40GB"
],
[
"70b-v2.9-q4_1",
"44GB"
],
[
"70b-v2.9-q4_K_M",
"43GB"
],
[
"70b-v2.9-q4_K_S",
"40GB"
],
[
"70b-v2.9-q5_0",
"49GB"
],
[
"70b-v2.9-q5_1",
"53GB"
],
[
"70b-v2.9-q5_K_M",
"50GB"
],
[
"70b-v2.9-q5_K_S",
"49GB"
],
[
"70b-v2.9-q6_K",
"58GB"
],
[
"70b-v2.9-q8_0",
"75GB"
],
[
"8b-256k-v2.9",
"4.7GB"
],
[
"8b-256k",
"4.7GB"
],
[
"8b-256k-v2.9-fp16",
"16GB"
],
[
"8b-256k-v2.9-q2_K",
"3.2GB"
],
[
"8b-256k-v2.9-q3_K_L",
"4.3GB"
],
[
"8b-256k-v2.9-q3_K_M",
"4.0GB"
],
[
"8b-256k-v2.9-q3_K_S",
"3.7GB"
],
[
"8b-256k-v2.9-q4_0",
"4.7GB"
],
[
"8b-256k-v2.9-q4_1",
"5.1GB"
],
[
"8b-256k-v2.9-q4_K_M",
"4.9GB"
],
[
"8b-256k-v2.9-q4_K_S",
"4.7GB"
],
[
"8b-256k-v2.9-q5_0",
"5.6GB"
],
[
"8b-256k-v2.9-q5_1",
"6.1GB"
],
[
"8b-256k-v2.9-q5_K_M",
"5.7GB"
],
[
"8b-256k-v2.9-q5_K_S",
"5.6GB"
],
[
"8b-256k-v2.9-q6_K",
"6.6GB"
],
[
"8b-256k-v2.9-q8_0",
"8.5GB"
],
[
"8b-v2.9",
"4.7GB"
],
[
"8b-v2.9-fp16",
"16GB"
],
[
"8b-v2.9-q2_K",
"3.2GB"
],
[
"8b-v2.9-q3_K_L",
"4.3GB"
],
[
"8b-v2.9-q3_K_M",
"4.0GB"
],
[
"8b-v2.9-q3_K_S",
"3.7GB"
],
[
"8b-v2.9-q4_0",
"4.7GB"
],
[
"8b-v2.9-q4_1",
"5.1GB"
],
[
"8b-v2.9-q4_K_M",
"4.9GB"
],
[
"8b-v2.9-q4_K_S",
"4.7GB"
],
[
"8b-v2.9-q5_0",
"5.6GB"
],
[
"8b-v2.9-q5_1",
"6.1GB"
],
[
"8b-v2.9-q5_K_M",
"5.7GB"
],
[
"8b-v2.9-q5_K_S",
"5.6GB"
],
[
"8b-v2.9-q6_K",
"6.6GB"
],
[
"8b-v2.9-q8_0",
"8.5GB"
]
],
"image": false,
"author": "Eric Hartford"
},
"starcoder2": {
"url": "https://ollama.com/library/starcoder2",
"description": "StarCoder2 is the next generation of transparently trained open code LLMs that comes in three sizes: 3B, 7B and 15B parameters.",
"tags": [
[
"latest",
"1.7GB"
],
[
"15b",
"9.1GB"
],
[
"7b",
"4.0GB"
],
[
"3b",
"1.7GB"
],
[
"instruct",
"9.1GB"
],
[
"15b-instruct",
"9.1GB"
],
[
"15b-instruct-v0.1-fp16",
"32GB"
],
[
"15b-instruct-v0.1-q2_K",
"6.2GB"
],
[
"15b-instruct-v0.1-q3_K_L",
"9.0GB"
],
[
"15b-instruct-v0.1-q3_K_M",
"8.0GB"
],
[
"15b-instruct-v0.1-q3_K_S",
"7.0GB"
],
[
"15b-instruct-v0.1-q4_0",
"9.1GB"
],
[
"15b-instruct-v0.1-q4_1",
"10GB"
],
[
"15b-instruct-v0.1-q4_K_M",
"9.9GB"
],
[
"15b-instruct-v0.1-q4_K_S",
"9.2GB"
],
[
"15b-instruct-v0.1-q5_0",
"11GB"
],
[
"15b-instruct-v0.1-q5_1",
"12GB"
],
[
"15b-instruct-v0.1-q5_K_M",
"11GB"
],
[
"15b-instruct-v0.1-q5_K_S",
"11GB"
],
[
"15b-instruct-v0.1-q6_K",
"13GB"
],
[
"15b-instruct-v0.1-q8_0",
"17GB"
],
[
"15b-instruct-q4_0",
"9.1GB"
],
[
"15b-fp16",
"32GB"
],
[
"15b-q2_K",
"6.2GB"
],
[
"15b-q3_K_L",
"9.0GB"
],
[
"15b-q3_K_M",
"8.1GB"
],
[
"15b-q3_K_S",
"7.0GB"
],
[
"15b-q4_0",
"9.1GB"
],
[
"15b-q4_1",
"10GB"
],
[
"15b-q4_K_M",
"9.9GB"
],
[
"15b-q4_K_S",
"9.3GB"
],
[
"15b-q5_0",
"11GB"
],
[
"15b-q5_1",
"12GB"
],
[
"15b-q5_K_M",
"11GB"
],
[
"15b-q5_K_S",
"11GB"
],
[
"15b-q6_K",
"13GB"
],
[
"15b-q8_0",
"17GB"
],
[
"7b-fp16",
"14GB"
],
[
"7b-q2_K",
"2.7GB"
],
[
"7b-q3_K_L",
"4.0GB"
],
[
"7b-q3_K_M",
"3.6GB"
],
[
"7b-q3_K_S",
"3.1GB"
],
[
"7b-q4_0",
"4.0GB"
],
[
"7b-q4_1",
"4.5GB"
],
[
"7b-q4_K_M",
"4.4GB"
],
[
"7b-q4_K_S",
"4.1GB"
],
[
"7b-q5_0",
"4.9GB"
],
[
"7b-q5_1",
"5.4GB"
],
[
"7b-q5_K_M",
"5.1GB"
],
[
"7b-q5_K_S",
"4.9GB"
],
[
"7b-q6_K",
"5.9GB"
],
[
"7b-q8_0",
"7.6GB"
],
[
"3b-fp16",
"6.1GB"
],
[
"3b-q2_K",
"1.1GB"
],
[
"3b-q3_K_L",
"1.7GB"
],
[
"3b-q3_K_M",
"1.5GB"
],
[
"3b-q3_K_S",
"1.3GB"
],
[
"3b-q4_0",
"1.7GB"
],
[
"3b-q4_1",
"1.9GB"
],
[
"3b-q4_K_M",
"1.8GB"
],
[
"3b-q4_K_S",
"1.7GB"
],
[
"3b-q5_0",
"2.1GB"
],
[
"3b-q5_1",
"2.3GB"
],
[
"3b-q5_K_M",
"2.2GB"
],
[
"3b-q5_K_S",
"2.1GB"
],
[
"3b-q6_K",
"2.5GB"
],
[
"3b-q8_0",
"3.2GB"
]
],
"image": false,
"author": "BigCode"
},
"llama2-chinese": {
"url": "https://ollama.com/library/llama2-chinese",
"description": "Llama 2 based model fine tuned to improve Chinese dialogue ability.",
"tags": [
[
"latest",
"3.8GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"13b-chat",
"7.4GB"
],
[
"13b-chat-fp16",
"26GB"
],
[
"13b-chat-q2_K",
"5.4GB"
],
[
"13b-chat-q3_K_L",
"6.9GB"
],
[
"13b-chat-q3_K_M",
"6.3GB"
],
[
"13b-chat-q3_K_S",
"5.7GB"
],
[
"13b-chat-q4_0",
"7.4GB"
],
[
"13b-chat-q4_1",
"8.2GB"
],
[
"13b-chat-q4_K_M",
"7.9GB"
],
[
"13b-chat-q4_K_S",
"7.4GB"
],
[
"13b-chat-q5_0",
"9.0GB"
],
[
"13b-chat-q5_1",
"9.8GB"
],
[
"13b-chat-q5_K_M",
"9.2GB"
],
[
"13b-chat-q5_K_S",
"9.0GB"
],
[
"13b-chat-q6_K",
"11GB"
],
[
"13b-chat-q8_0",
"14GB"
],
[
"7b-chat",
"3.8GB"
],
[
"7b-chat-fp16",
"13GB"
],
[
"7b-chat-q2_K",
"2.8GB"
],
[
"7b-chat-q3_K_L",
"3.6GB"
],
[
"7b-chat-q3_K_M",
"3.3GB"
],
[
"7b-chat-q3_K_S",
"2.9GB"
],
[
"7b-chat-q4_0",
"3.8GB"
],
[
"7b-chat-q4_1",
"4.2GB"
],
[
"7b-chat-q4_K_M",
"4.1GB"
],
[
"7b-chat-q4_K_S",
"3.9GB"
],
[
"7b-chat-q5_0",
"4.7GB"
],
[
"7b-chat-q5_1",
"5.1GB"
],
[
"7b-chat-q5_K_M",
"4.8GB"
],
[
"7b-chat-q5_K_S",
"4.7GB"
],
[
"7b-chat-q6_K",
"5.5GB"
],
[
"7b-chat-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Meta"
},
"zephyr": {
"url": "https://ollama.com/library/zephyr",
"description": "Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants.",
"tags": [
[
"latest",
"4.1GB"
],
[
"141b",
"80GB"
],
[
"7b",
"4.1GB"
],
[
"141b-v0.1",
"80GB"
],
[
"141b-v0.1-fp16",
"281GB"
],
[
"141b-v0.1-q2_K",
"52GB"
],
[
"141b-v0.1-q4_0",
"80GB"
],
[
"141b-v0.1-q8_0",
"149GB"
],
[
"7b-alpha",
"4.1GB"
],
[
"7b-alpha-fp16",
"14GB"
],
[
"7b-alpha-q2_K",
"3.1GB"
],
[
"7b-alpha-q3_K_L",
"3.8GB"
],
[
"7b-alpha-q3_K_M",
"3.5GB"
],
[
"7b-alpha-q3_K_S",
"3.2GB"
],
[
"7b-alpha-q4_0",
"4.1GB"
],
[
"7b-alpha-q4_1",
"4.6GB"
],
[
"7b-alpha-q4_K_M",
"4.4GB"
],
[
"7b-alpha-q4_K_S",
"4.1GB"
],
[
"7b-alpha-q5_0",
"5.0GB"
],
[
"7b-alpha-q5_1",
"5.4GB"
],
[
"7b-alpha-q5_K_M",
"5.1GB"
],
[
"7b-alpha-q5_K_S",
"5.0GB"
],
[
"7b-alpha-q6_K",
"5.9GB"
],
[
"7b-alpha-q8_0",
"7.7GB"
],
[
"7b-beta",
"4.1GB"
],
[
"7b-beta-fp16",
"14GB"
],
[
"7b-beta-q2_K",
"3.1GB"
],
[
"7b-beta-q3_K_L",
"3.8GB"
],
[
"7b-beta-q3_K_M",
"3.5GB"
],
[
"7b-beta-q3_K_S",
"3.2GB"
],
[
"7b-beta-q4_0",
"4.1GB"
],
[
"7b-beta-q4_1",
"4.6GB"
],
[
"7b-beta-q4_K_M",
"4.4GB"
],
[
"7b-beta-q4_K_S",
"4.1GB"
],
[
"7b-beta-q5_0",
"5.0GB"
],
[
"7b-beta-q5_1",
"5.4GB"
],
[
"7b-beta-q5_K_M",
"5.1GB"
],
[
"7b-beta-q5_K_S",
"5.0GB"
],
[
"7b-beta-q6_K",
"5.9GB"
],
[
"7b-beta-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Hugging Face H4"
},
"yi": {
"url": "https://ollama.com/library/yi",
"description": "Yi 1.5 is a high-performing, bilingual language model.",
"tags": [
[
"latest",
"3.5GB"
],
[
"34b",
"19GB"
],
[
"9b",
"5.0GB"
],
[
"6b",
"3.5GB"
],
[
"v1.5",
"3.5GB"
],
[
"34b-chat",
"19GB"
],
[
"34b-chat-v1.5-fp16",
"69GB"
],
[
"34b-chat-v1.5-q2_K",
"13GB"
],
[
"34b-chat-v1.5-q3_K_L",
"18GB"
],
[
"34b-chat-v1.5-q3_K_M",
"17GB"
],
[
"34b-chat-v1.5-q3_K_S",
"15GB"
],
[
"34b-chat-v1.5-q4_0",
"19GB"
],
[
"34b-chat-v1.5-q4_1",
"22GB"
],
[
"34b-chat-v1.5-q4_K_M",
"21GB"
],
[
"34b-chat-v1.5-q4_K_S",
"20GB"
],
[
"34b-chat-v1.5-q5_0",
"24GB"
],
[
"34b-chat-v1.5-q5_1",
"26GB"
],
[
"34b-chat-v1.5-q5_K_M",
"24GB"
],
[
"34b-chat-v1.5-q5_K_S",
"24GB"
],
[
"34b-chat-q4_K_S",
"20GB"
],
[
"34b-chat-q4_1",
"22GB"
],
[
"34b-chat-fp16",
"69GB"
],
[
"34b-chat-q3_K_M",
"17GB"
],
[
"34b-chat-q3_K_L",
"18GB"
],
[
"34b-chat-q4_0",
"19GB"
],
[
"34b-chat-q3_K_S",
"15GB"
],
[
"34b-chat-q4_K_M",
"21GB"
],
[
"34b-chat-v1.5-q6_K",
"28GB"
],
[
"34b-chat-v1.5-q8_0",
"37GB"
],
[
"34b-chat-q2_K",
"15GB"
],
[
"34b-chat-q5_0",
"24GB"
],
[
"34b-chat-q5_1",
"26GB"
],
[
"34b-chat-q5_K_M",
"24GB"
],
[
"34b-chat-q5_K_S",
"24GB"
],
[
"34b-chat-q6_K",
"28GB"
],
[
"34b-chat-q8_0",
"37GB"
],
[
"34b-v1.5",
"19GB"
],
[
"34b-v1.5-fp16",
"69GB"
],
[
"34b-v1.5-q2_K",
"13GB"
],
[
"34b-v1.5-q3_K_L",
"18GB"
],
[
"34b-v1.5-q3_K_M",
"17GB"
],
[
"34b-v1.5-q3_K_S",
"15GB"
],
[
"34b-v1.5-q4_0",
"19GB"
],
[
"34b-v1.5-q4_1",
"22GB"
],
[
"34b-v1.5-q4_K_M",
"21GB"
],
[
"34b-v1.5-q4_K_S",
"20GB"
],
[
"34b-v1.5-q5_0",
"24GB"
],
[
"34b-v1.5-q5_1",
"26GB"
],
[
"34b-v1.5-q5_K_M",
"24GB"
],
[
"34b-v1.5-q5_K_S",
"24GB"
],
[
"34b-v1.5-q6_K",
"28GB"
],
[
"34b-v1.5-q8_0",
"37GB"
],
[
"9b-chat",
"5.0GB"
],
[
"9b-v1.5",
"5.0GB"
],
[
"6b-200k",
"3.5GB"
],
[
"34b-q2_K",
"15GB"
],
[
"34b-q3_K_L",
"18GB"
],
[
"34b-q3_K_M",
"17GB"
],
[
"34b-q3_K_S",
"15GB"
],
[
"34b-q4_0",
"19GB"
],
[
"34b-q4_1",
"22GB"
],
[
"34b-q4_K_M",
"21GB"
],
[
"34b-q4_K_S",
"20GB"
],
[
"34b-q5_0",
"24GB"
],
[
"34b-q5_1",
"26GB"
],
[
"34b-q5_K_S",
"24GB"
],
[
"34b-q6_K",
"28GB"
],
[
"9b-chat-v1.5-fp16",
"18GB"
],
[
"9b-chat-v1.5-q2_K",
"3.4GB"
],
[
"9b-chat-v1.5-q3_K_L",
"4.7GB"
],
[
"9b-chat-v1.5-q3_K_M",
"4.3GB"
],
[
"9b-chat-v1.5-q3_K_S",
"3.9GB"
],
[
"9b-chat-v1.5-q4_0",
"5.0GB"
],
[
"9b-chat-v1.5-q4_1",
"5.6GB"
],
[
"9b-chat-v1.5-q4_K_M",
"5.3GB"
],
[
"9b-chat-v1.5-q4_K_S",
"5.1GB"
],
[
"9b-chat-v1.5-q5_0",
"6.1GB"
],
[
"9b-chat-v1.5-q5_1",
"6.6GB"
],
[
"9b-chat-v1.5-q5_K_M",
"6.3GB"
],
[
"9b-chat-v1.5-q5_K_S",
"6.1GB"
],
[
"9b-chat-v1.5-q6_K",
"7.2GB"
],
[
"9b-chat-v1.5-q8_0",
"9.4GB"
],
[
"9b-v1.5-fp16",
"18GB"
],
[
"9b-v1.5-q2_K",
"3.4GB"
],
[
"9b-v1.5-q3_K_L",
"4.7GB"
],
[
"9b-v1.5-q3_K_M",
"4.3GB"
],
[
"9b-v1.5-q3_K_S",
"3.9GB"
],
[
"9b-v1.5-q4_0",
"5.0GB"
],
[
"9b-v1.5-q4_1",
"5.6GB"
],
[
"9b-v1.5-q4_K_M",
"5.3GB"
],
[
"9b-v1.5-q4_K_S",
"5.1GB"
],
[
"9b-v1.5-q5_0",
"6.1GB"
],
[
"9b-v1.5-q5_1",
"6.6GB"
],
[
"9b-v1.5-q5_K_M",
"6.3GB"
],
[
"9b-v1.5-q5_K_S",
"6.1GB"
],
[
"9b-v1.5-q6_K",
"7.2GB"
],
[
"9b-v1.5-q8_0",
"9.4GB"
],
[
"6b-200k-fp16",
"12GB"
],
[
"6b-200k-q2_K",
"2.6GB"
],
[
"6b-200k-q3_K_L",
"3.2GB"
],
[
"6b-200k-q3_K_M",
"3.0GB"
],
[
"6b-200k-q3_K_S",
"2.7GB"
],
[
"6b-200k-q4_0",
"3.5GB"
],
[
"6b-200k-q4_1",
"3.8GB"
],
[
"6b-200k-q4_K_M",
"3.7GB"
],
[
"6b-200k-q4_K_S",
"3.5GB"
],
[
"6b-200k-q5_0",
"4.2GB"
],
[
"6b-200k-q5_1",
"4.6GB"
],
[
"6b-200k-q5_K_M",
"4.3GB"
],
[
"6b-200k-q5_K_S",
"4.2GB"
],
[
"6b-200k-q6_K",
"5.0GB"
],
[
"6b-200k-q8_0",
"6.4GB"
],
[
"6b-chat",
"3.5GB"
],
[
"6b-chat-fp16",
"12GB"
],
[
"6b-chat-q2_K",
"2.6GB"
],
[
"6b-chat-q3_K_L",
"3.2GB"
],
[
"6b-chat-q3_K_M",
"3.0GB"
],
[
"6b-chat-q3_K_S",
"2.7GB"
],
[
"6b-chat-q4_0",
"3.5GB"
],
[
"6b-chat-q4_1",
"3.8GB"
],
[
"6b-chat-q4_K_M",
"3.7GB"
],
[
"6b-chat-q4_K_S",
"3.5GB"
],
[
"6b-chat-q5_0",
"4.2GB"
],
[
"6b-chat-q5_1",
"4.6GB"
],
[
"6b-chat-v1.5-q4_K_M",
"3.7GB"
],
[
"6b-chat-q5_K_S",
"4.2GB"
],
[
"6b-chat-q5_K_M",
"4.3GB"
],
[
"6b-chat-v1.5-fp16",
"12GB"
],
[
"6b-chat-v1.5-q4_1",
"3.8GB"
],
[
"6b-chat-v1.5-q2_K",
"2.3GB"
],
[
"6b-chat-v1.5-q3_K_S",
"2.7GB"
],
[
"6b-chat-q8_0",
"6.4GB"
],
[
"6b-chat-v1.5-q4_0",
"3.5GB"
],
[
"6b-chat-v1.5-q3_K_M",
"3.0GB"
],
[
"6b-chat-v1.5-q3_K_L",
"3.2GB"
],
[
"6b-chat-q6_K",
"5.0GB"
],
[
"6b-chat-v1.5-q4_K_S",
"3.5GB"
],
[
"6b-chat-v1.5-q5_0",
"4.2GB"
],
[
"6b-chat-v1.5-q5_1",
"4.6GB"
],
[
"6b-chat-v1.5-q5_K_M",
"4.3GB"
],
[
"6b-chat-v1.5-q5_K_S",
"4.2GB"
],
[
"6b-chat-v1.5-q6_K",
"5.0GB"
],
[
"6b-chat-v1.5-q8_0",
"6.4GB"
],
[
"6b-v1.5",
"3.5GB"
],
[
"6b-v1.5-fp16",
"12GB"
],
[
"6b-v1.5-q2_K",
"2.3GB"
],
[
"6b-v1.5-q3_K_L",
"3.2GB"
],
[
"6b-v1.5-q3_K_M",
"3.0GB"
],
[
"6b-v1.5-q3_K_S",
"2.7GB"
],
[
"6b-v1.5-q4_0",
"3.5GB"
],
[
"6b-v1.5-q4_1",
"3.8GB"
],
[
"6b-v1.5-q4_K_M",
"3.7GB"
],
[
"6b-v1.5-q4_K_S",
"3.5GB"
],
[
"6b-v1.5-q5_0",
"4.2GB"
],
[
"6b-v1.5-q5_1",
"4.6GB"
],
[
"6b-v1.5-q5_K_M",
"4.3GB"
],
[
"6b-v1.5-q5_K_S",
"4.2GB"
],
[
"6b-v1.5-q6_K",
"5.0GB"
],
[
"6b-v1.5-q8_0",
"6.4GB"
],
[
"6b-fp16",
"12GB"
],
[
"6b-q2_K",
"2.6GB"
],
[
"6b-q3_K_L",
"3.2GB"
],
[
"6b-q3_K_M",
"3.0GB"
],
[
"6b-q3_K_S",
"2.7GB"
],
[
"6b-q4_0",
"3.5GB"
],
[
"6b-q4_1",
"3.8GB"
],
[
"6b-q4_K_M",
"3.7GB"
],
[
"6b-q4_K_S",
"3.5GB"
],
[
"6b-q5_0",
"4.2GB"
],
[
"6b-q5_1",
"4.6GB"
],
[
"6b-q5_K_M",
"4.3GB"
],
[
"6b-q5_K_S",
"4.2GB"
],
[
"6b-q6_K",
"5.0GB"
],
[
"6b-q8_0",
"6.4GB"
]
],
"image": false,
"author": "01.AI"
},
"nous-hermes2": {
"url": "https://ollama.com/library/nous-hermes2",
"description": "The powerful family of models by Nous Research that excels at scientific discussion and coding tasks.",
"tags": [
[
"latest",
"6.1GB"
],
[
"34b",
"19GB"
],
[
"10.7b",
"6.1GB"
],
[
"34b-yi-fp16",
"69GB"
],
[
"34b-yi-q2_K",
"15GB"
],
[
"34b-yi-q3_K_L",
"18GB"
],
[
"34b-yi-q3_K_M",
"17GB"
],
[
"34b-yi-q3_K_S",
"15GB"
],
[
"34b-yi-q4_0",
"19GB"
],
[
"34b-yi-q4_1",
"22GB"
],
[
"34b-yi-q4_K_M",
"21GB"
],
[
"34b-yi-q4_K_S",
"20GB"
],
[
"34b-yi-q5_0",
"24GB"
],
[
"34b-yi-q5_1",
"26GB"
],
[
"34b-yi-q5_K_M",
"24GB"
],
[
"34b-yi-q5_K_S",
"24GB"
],
[
"34b-yi-q6_K",
"28GB"
],
[
"34b-yi-q8_0",
"37GB"
],
[
"10.7b-solar-fp16",
"21GB"
],
[
"10.7b-solar-q2_K",
"4.5GB"
],
[
"10.7b-solar-q3_K_L",
"5.7GB"
],
[
"10.7b-solar-q3_K_M",
"5.2GB"
],
[
"10.7b-solar-q3_K_S",
"4.7GB"
],
[
"10.7b-solar-q4_0",
"6.1GB"
],
[
"10.7b-solar-q4_1",
"6.7GB"
],
[
"10.7b-solar-q4_K_M",
"6.5GB"
],
[
"10.7b-solar-q4_K_S",
"6.1GB"
],
[
"10.7b-solar-q5_0",
"7.4GB"
],
[
"10.7b-solar-q5_1",
"8.1GB"
],
[
"10.7b-solar-q5_K_M",
"7.6GB"
],
[
"10.7b-solar-q5_K_S",
"7.4GB"
],
[
"10.7b-solar-q6_K",
"8.8GB"
],
[
"10.7b-solar-q8_0",
"11GB"
]
],
"image": false,
"author": "Nous Research"
},
"vicuna": {
"url": "https://ollama.com/library/vicuna",
"description": "General use chat model based on Llama and Llama 2 with 2K to 16K context sizes.",
"tags": [
[
"latest",
"3.8GB"
],
[
"33b",
"18GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"13b-16k",
"7.4GB"
],
[
"33b-fp16",
"65GB"
],
[
"33b-q2_K",
"14GB"
],
[
"33b-q3_K_L",
"17GB"
],
[
"33b-q3_K_M",
"16GB"
],
[
"33b-q3_K_S",
"14GB"
],
[
"33b-q4_0",
"18GB"
],
[
"33b-q4_1",
"20GB"
],
[
"33b-q4_K_M",
"20GB"
],
[
"33b-q4_K_S",
"18GB"
],
[
"33b-q5_0",
"22GB"
],
[
"33b-q5_1",
"24GB"
],
[
"33b-q5_K_M",
"23GB"
],
[
"33b-q5_K_S",
"22GB"
],
[
"33b-q6_K",
"27GB"
],
[
"33b-q8_0",
"35GB"
],
[
"13b-v1.5-16k-fp16",
"26GB"
],
[
"13b-v1.5-16k-q2_K",
"5.4GB"
],
[
"13b-v1.5-16k-q3_K_L",
"6.9GB"
],
[
"13b-v1.5-16k-q3_K_M",
"6.3GB"
],
[
"13b-v1.5-16k-q3_K_S",
"5.7GB"
],
[
"13b-v1.5-16k-q4_0",
"7.4GB"
],
[
"13b-v1.5-16k-q4_1",
"8.2GB"
],
[
"13b-v1.5-q3_K_L",
"6.9GB"
],
[
"13b-v1.5-16k-q5_1",
"9.8GB"
],
[
"13b-v1.5-16k-q5_0",
"9.0GB"
],
[
"13b-v1.5-16k-q5_K_M",
"9.2GB"
],
[
"13b-v1.5-16k-q4_K_M",
"7.9GB"
],
[
"13b-v1.5-16k-q6_K",
"11GB"
],
[
"13b-v1.5-16k-q8_0",
"14GB"
],
[
"13b-v1.5-16k-q5_K_S",
"9.0GB"
],
[
"13b-v1.5-fp16",
"26GB"
],
[
"13b-v1.5-16k-q4_K_S",
"7.4GB"
],
[
"13b-v1.5-q2_K",
"5.4GB"
],
[
"13b-v1.5-q3_K_M",
"6.3GB"
],
[
"13b-v1.5-q3_K_S",
"5.7GB"
],
[
"13b-v1.5-q4_0",
"7.4GB"
],
[
"13b-v1.5-q4_1",
"8.2GB"
],
[
"13b-v1.5-q4_K_M",
"7.9GB"
],
[
"13b-v1.5-q4_K_S",
"7.4GB"
],
[
"13b-v1.5-q5_0",
"9.0GB"
],
[
"13b-v1.5-q5_1",
"9.8GB"
],
[
"13b-v1.5-q5_K_M",
"9.2GB"
],
[
"13b-v1.5-q5_K_S",
"9.0GB"
],
[
"13b-v1.5-q6_K",
"11GB"
],
[
"13b-v1.5-q8_0",
"14GB"
],
[
"7b-16k",
"3.8GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
],
[
"7b-v1.5-16k-fp16",
"13GB"
],
[
"7b-v1.5-16k-q2_K",
"2.8GB"
],
[
"7b-v1.5-16k-q3_K_L",
"3.6GB"
],
[
"7b-v1.5-16k-q3_K_M",
"3.3GB"
],
[
"7b-v1.5-16k-q3_K_S",
"2.9GB"
],
[
"7b-v1.5-16k-q4_0",
"3.8GB"
],
[
"7b-v1.5-16k-q4_1",
"4.2GB"
],
[
"7b-v1.5-16k-q4_K_M",
"4.1GB"
],
[
"7b-v1.5-16k-q4_K_S",
"3.9GB"
],
[
"7b-v1.5-16k-q5_0",
"4.7GB"
],
[
"7b-v1.5-16k-q5_1",
"5.1GB"
],
[
"7b-v1.5-16k-q5_K_M",
"4.8GB"
],
[
"7b-v1.5-16k-q5_K_S",
"4.7GB"
],
[
"7b-v1.5-16k-q6_K",
"5.5GB"
],
[
"7b-v1.5-16k-q8_0",
"7.2GB"
],
[
"7b-v1.5-q4_0",
"3.8GB"
],
[
"7b-v1.5-q3_K_L",
"3.6GB"
],
[
"7b-v1.5-fp16",
"13GB"
],
[
"7b-v1.5-q3_K_S",
"2.9GB"
],
[
"7b-v1.5-q2_K",
"2.8GB"
],
[
"7b-v1.5-q3_K_M",
"3.3GB"
],
[
"7b-v1.5-q4_1",
"4.2GB"
],
[
"7b-v1.5-q4_K_M",
"4.1GB"
],
[
"7b-v1.5-q4_K_S",
"3.9GB"
],
[
"7b-v1.5-q5_0",
"4.7GB"
],
[
"7b-v1.5-q5_1",
"5.1GB"
],
[
"7b-v1.5-q5_K_M",
"4.8GB"
],
[
"7b-v1.5-q5_K_S",
"4.7GB"
],
[
"7b-v1.5-q6_K",
"5.5GB"
],
[
"7b-v1.5-q8_0",
"7.2GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "lmsys.org"
},
"wizard-vicuna-uncensored": {
"url": "https://ollama.com/library/wizard-vicuna-uncensored",
"description": "Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford.",
"tags": [
[
"latest",
"3.8GB"
],
[
"30b",
"18GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"30b-fp16",
"65GB"
],
[
"30b-q2_K",
"14GB"
],
[
"30b-q3_K_L",
"17GB"
],
[
"30b-q3_K_M",
"16GB"
],
[
"30b-q3_K_S",
"14GB"
],
[
"30b-q4_0",
"18GB"
],
[
"30b-q4_1",
"20GB"
],
[
"30b-q4_K_M",
"20GB"
],
[
"30b-q4_K_S",
"18GB"
],
[
"30b-q5_0",
"22GB"
],
[
"30b-q5_1",
"24GB"
],
[
"30b-q5_K_M",
"23GB"
],
[
"30b-q5_K_S",
"22GB"
],
[
"30b-q6_K",
"27GB"
],
[
"30b-q8_0",
"35GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Eric Hartford"
},
"tinyllama": {
"url": "https://ollama.com/library/tinyllama",
"description": "The TinyLlama project is an open endeavor to train a compact 1.1B Llama model on 3 trillion tokens.",
"tags": [
[
"latest",
"638MB"
],
[
"1.1b",
"638MB"
],
[
"chat",
"638MB"
],
[
"v0.6",
"638MB"
],
[
"v1",
"638MB"
],
[
"1.1b-chat",
"638MB"
],
[
"1.1b-chat-v0.6-fp16",
"2.2GB"
],
[
"1.1b-chat-v0.6-q2_K",
"483MB"
],
[
"1.1b-chat-v0.6-q3_K_L",
"593MB"
],
[
"1.1b-chat-v0.6-q3_K_M",
"551MB"
],
[
"1.1b-chat-v0.6-q3_K_S",
"500MB"
],
[
"1.1b-chat-v0.6-q4_0",
"638MB"
],
[
"1.1b-chat-v0.6-q4_1",
"702MB"
],
[
"1.1b-chat-v0.6-q4_K_M",
"669MB"
],
[
"1.1b-chat-v0.6-q4_K_S",
"644MB"
],
[
"1.1b-chat-v0.6-q5_0",
"767MB"
],
[
"1.1b-chat-v0.6-q5_1",
"832MB"
],
[
"1.1b-chat-v0.6-q5_K_M",
"783MB"
],
[
"1.1b-chat-v0.6-q5_K_S",
"767MB"
],
[
"1.1b-chat-v0.6-q6_K",
"904MB"
],
[
"1.1b-chat-v0.6-q8_0",
"1.2GB"
],
[
"1.1b-chat-v1-fp16",
"2.2GB"
],
[
"1.1b-chat-v1-q2_K",
"483MB"
],
[
"1.1b-chat-v1-q3_K_L",
"593MB"
],
[
"1.1b-chat-v1-q3_K_M",
"551MB"
],
[
"1.1b-chat-v1-q3_K_S",
"500MB"
],
[
"1.1b-chat-v1-q4_0",
"638MB"
],
[
"1.1b-chat-v1-q4_1",
"702MB"
],
[
"1.1b-chat-v1-q4_K_M",
"669MB"
],
[
"1.1b-chat-v1-q4_K_S",
"644MB"
],
[
"1.1b-chat-v1-q5_0",
"767MB"
],
[
"1.1b-chat-v1-q5_1",
"832MB"
],
[
"1.1b-chat-v1-q5_K_M",
"783MB"
],
[
"1.1b-chat-v1-q5_K_S",
"767MB"
],
[
"1.1b-chat-v1-q6_K",
"904MB"
],
[
"1.1b-chat-v1-q8_0",
"1.2GB"
]
],
"image": false,
"author": "TinyLlama Team"
},
"wizardlm2": {
"url": "https://ollama.com/library/wizardlm2",
"description": "State of the art large language model from Microsoft AI with improved performance on complex chat, multilingual, reasoning and agent use cases.",
"tags": [
[
"latest",
"4.1GB"
],
[
"8x22b",
"80GB"
],
[
"7b",
"4.1GB"
],
[
"8x22b-fp16",
"281GB"
],
[
"8x22b-q2_K",
"52GB"
],
[
"8x22b-q4_0",
"80GB"
],
[
"8x22b-q8_0",
"149GB"
],
[
"7b-fp16",
"14GB"
],
[
"7b-q2_K",
"2.7GB"
],
[
"7b-q3_K_L",
"3.8GB"
],
[
"7b-q3_K_M",
"3.5GB"
],
[
"7b-q3_K_S",
"3.2GB"
],
[
"7b-q4_0",
"4.1GB"
],
[
"7b-q4_1",
"4.6GB"
],
[
"7b-q4_K_M",
"4.4GB"
],
[
"7b-q4_K_S",
"4.1GB"
],
[
"7b-q5_0",
"5.0GB"
],
[
"7b-q5_1",
"5.4GB"
],
[
"7b-q5_K_M",
"5.1GB"
],
[
"7b-q5_K_S",
"5.0GB"
],
[
"7b-q6_K",
"5.9GB"
],
[
"7b-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Microsoft"
},
"starcoder": {
"url": "https://ollama.com/library/starcoder",
"description": "StarCoder is a code generation model trained on 80+ programming languages.",
"tags": [
[
"latest",
"1.8GB"
],
[
"15b",
"9.0GB"
],
[
"7b",
"4.3GB"
],
[
"3b",
"1.8GB"
],
[
"1b",
"726MB"
],
[
"15b-base",
"9.0GB"
],
[
"15b-base-fp16",
"32GB"
],
[
"15b-base-q2_K",
"6.7GB"
],
[
"15b-base-q3_K_L",
"9.1GB"
],
[
"15b-base-q3_K_M",
"8.2GB"
],
[
"15b-base-q3_K_S",
"6.9GB"
],
[
"15b-base-q4_0",
"9.0GB"
],
[
"15b-base-q4_1",
"10.0GB"
],
[
"15b-base-q4_K_M",
"10.0GB"
],
[
"15b-base-q4_K_S",
"9.1GB"
],
[
"15b-base-q5_0",
"11GB"
],
[
"15b-base-q5_1",
"12GB"
],
[
"15b-base-q5_K_M",
"12GB"
],
[
"15b-base-q5_K_S",
"11GB"
],
[
"15b-base-q6_K",
"13GB"
],
[
"15b-base-q8_0",
"17GB"
],
[
"15b-plus",
"9.0GB"
],
[
"15b-plus-fp16",
"32GB"
],
[
"15b-plus-q2_K",
"6.7GB"
],
[
"15b-plus-q3_K_L",
"9.1GB"
],
[
"15b-plus-q3_K_M",
"8.2GB"
],
[
"15b-plus-q3_K_S",
"6.9GB"
],
[
"15b-plus-q4_0",
"9.0GB"
],
[
"15b-plus-q4_1",
"10.0GB"
],
[
"15b-plus-q4_K_M",
"10.0GB"
],
[
"15b-plus-q4_K_S",
"9.1GB"
],
[
"15b-plus-q5_0",
"11GB"
],
[
"15b-plus-q5_1",
"12GB"
],
[
"15b-plus-q5_K_M",
"12GB"
],
[
"15b-plus-q5_K_S",
"11GB"
],
[
"15b-plus-q6_K",
"13GB"
],
[
"15b-plus-q8_0",
"17GB"
],
[
"7b-base",
"4.3GB"
],
[
"15b-fp16",
"32GB"
],
[
"15b-q2_K",
"6.7GB"
],
[
"15b-q3_K_L",
"9.1GB"
],
[
"15b-q3_K_M",
"8.2GB"
],
[
"15b-q3_K_S",
"6.9GB"
],
[
"15b-q4_0",
"9.0GB"
],
[
"15b-q4_1",
"10.0GB"
],
[
"15b-q4_K_M",
"10.0GB"
],
[
"15b-q4_K_S",
"9.1GB"
],
[
"15b-q5_0",
"11GB"
],
[
"15b-q5_1",
"12GB"
],
[
"15b-q5_K_M",
"12GB"
],
[
"15b-q5_K_S",
"11GB"
],
[
"15b-q6_K",
"13GB"
],
[
"15b-q8_0",
"17GB"
],
[
"7b-base-fp16",
"15GB"
],
[
"7b-base-q2_K",
"3.2GB"
],
[
"7b-base-q3_K_L",
"4.3GB"
],
[
"7b-base-q3_K_M",
"3.9GB"
],
[
"7b-base-q3_K_S",
"3.3GB"
],
[
"7b-base-q4_0",
"4.3GB"
],
[
"7b-base-q4_1",
"4.8GB"
],
[
"7b-base-q4_K_M",
"4.8GB"
],
[
"7b-base-q4_K_S",
"4.3GB"
],
[
"7b-base-q5_0",
"5.2GB"
],
[
"7b-base-q5_1",
"5.7GB"
],
[
"7b-base-q5_K_M",
"5.5GB"
],
[
"7b-base-q5_K_S",
"5.2GB"
],
[
"7b-base-q6_K",
"6.2GB"
],
[
"7b-base-q8_0",
"8.0GB"
],
[
"3b-base",
"1.8GB"
],
[
"3b-base-fp16",
"6.4GB"
],
[
"3b-base-q2_K",
"1.4GB"
],
[
"3b-base-q3_K_L",
"1.8GB"
],
[
"3b-base-q3_K_M",
"1.7GB"
],
[
"3b-base-q3_K_S",
"1.4GB"
],
[
"3b-base-q4_0",
"1.8GB"
],
[
"3b-base-q4_1",
"2.0GB"
],
[
"3b-base-q4_K_M",
"2.0GB"
],
[
"3b-base-q4_K_S",
"1.8GB"
],
[
"3b-base-q5_0",
"2.2GB"
],
[
"3b-base-q5_1",
"2.4GB"
],
[
"3b-base-q5_K_M",
"2.3GB"
],
[
"3b-base-q5_K_S",
"2.2GB"
],
[
"3b-base-q6_K",
"2.6GB"
],
[
"3b-base-q8_0",
"3.4GB"
],
[
"1b-base",
"726MB"
],
[
"1b-base-fp16",
"2.5GB"
],
[
"1b-base-q2_K",
"552MB"
],
[
"1b-base-q3_K_L",
"720MB"
],
[
"1b-base-q3_K_M",
"661MB"
],
[
"1b-base-q3_K_S",
"575MB"
],
[
"1b-base-q4_0",
"726MB"
],
[
"1b-base-q4_1",
"797MB"
],
[
"1b-base-q4_K_M",
"792MB"
],
[
"1b-base-q4_K_S",
"734MB"
],
[
"1b-base-q5_0",
"868MB"
],
[
"1b-base-q5_1",
"939MB"
],
[
"1b-base-q5_K_M",
"910MB"
],
[
"1b-base-q5_K_S",
"868MB"
],
[
"1b-base-q6_K",
"1.0GB"
],
[
"1b-base-q8_0",
"1.3GB"
]
],
"image": false,
"author": "BigCode"
},
"codestral": {
"url": "https://ollama.com/library/codestral",
"description": "Codestral is Mistral AI\u2019s first-ever code model designed for code generation tasks.",
"tags": [
[
"latest",
"13GB"
],
[
"22b",
"13GB"
],
[
"v0.1",
"13GB"
],
[
"22b-v0.1-f16",
"44GB"
],
[
"22b-v0.1-q2_K",
"8.3GB"
],
[
"22b-v0.1-q3_K_L",
"12GB"
],
[
"22b-v0.1-q3_K_M",
"11GB"
],
[
"22b-v0.1-q3_K_S",
"9.6GB"
],
[
"22b-v0.1-q4_0",
"13GB"
],
[
"22b-v0.1-q4_1",
"14GB"
],
[
"22b-v0.1-q4_K_M",
"13GB"
],
[
"22b-v0.1-q4_K_S",
"13GB"
],
[
"22b-v0.1-q5_0",
"15GB"
],
[
"22b-v0.1-q5_1",
"17GB"
],
[
"22b-v0.1-q5_K_M",
"16GB"
],
[
"22b-v0.1-q5_K_S",
"15GB"
],
[
"22b-v0.1-q6_K",
"18GB"
],
[
"22b-v0.1-q8_0",
"24GB"
]
],
"image": false,
"author": "Mistral AI"
},
"openchat": {
"url": "https://ollama.com/library/openchat",
"description": "A family of open-source models trained on a wide variety of data, surpassing ChatGPT on various benchmarks. Updated to version 3.5-0106.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"7b-v3.5-0106",
"4.1GB"
],
[
"7b-v3.5",
"4.1GB"
],
[
"7b-v3.5-1210",
"4.1GB"
],
[
"7b-v3.5-0106-fp16",
"14GB"
],
[
"7b-v3.5-0106-q2_K",
"3.1GB"
],
[
"7b-v3.5-0106-q3_K_L",
"3.8GB"
],
[
"7b-v3.5-0106-q3_K_M",
"3.5GB"
],
[
"7b-v3.5-0106-q3_K_S",
"3.2GB"
],
[
"7b-v3.5-0106-q4_0",
"4.1GB"
],
[
"7b-v3.5-0106-q4_1",
"4.6GB"
],
[
"7b-v3.5-0106-q4_K_M",
"4.4GB"
],
[
"7b-v3.5-0106-q4_K_S",
"4.1GB"
],
[
"7b-v3.5-0106-q5_0",
"5.0GB"
],
[
"7b-v3.5-0106-q5_1",
"5.4GB"
],
[
"7b-v3.5-0106-q5_K_M",
"5.1GB"
],
[
"7b-v3.5-0106-q5_K_S",
"5.0GB"
],
[
"7b-v3.5-0106-q6_K",
"5.9GB"
],
[
"7b-v3.5-0106-q8_0",
"7.7GB"
],
[
"7b-v3.5-q6_K",
"5.9GB"
],
[
"7b-v3.5-1210-q2_K",
"3.1GB"
],
[
"7b-v3.5-fp16",
"14GB"
],
[
"7b-v3.5-q4_K_M",
"4.4GB"
],
[
"7b-v3.5-q2_K",
"3.1GB"
],
[
"7b-v3.5-1210-q6_K",
"5.9GB"
],
[
"7b-v3.5-1210-q4_K_S",
"4.1GB"
],
[
"7b-v3.5-q3_K_L",
"3.8GB"
],
[
"7b-v3.5-q3_K_S",
"3.2GB"
],
[
"7b-v3.5-1210-q5_0",
"5.0GB"
],
[
"7b-v3.5-1210-q4_0",
"4.1GB"
],
[
"7b-v3.5-1210-fp16",
"14GB"
],
[
"7b-v3.5-1210-q4_1",
"4.6GB"
],
[
"7b-v3.5-1210-q5_K_M",
"5.1GB"
],
[
"7b-v3.5-1210-q4_K_M",
"4.4GB"
],
[
"7b-v3.5-1210-q5_K_S",
"5.0GB"
],
[
"7b-v3.5-1210-q3_K_S",
"3.2GB"
],
[
"7b-v3.5-1210-q8_0",
"7.7GB"
],
[
"7b-v3.5-1210-q5_1",
"5.4GB"
],
[
"7b-v3.5-q4_0",
"4.1GB"
],
[
"7b-v3.5-q5_1",
"5.4GB"
],
[
"7b-v3.5-1210-q3_K_L",
"3.8GB"
],
[
"7b-v3.5-1210-q3_K_M",
"3.5GB"
],
[
"7b-v3.5-q4_K_S",
"4.1GB"
],
[
"7b-v3.5-q5_K_M",
"5.1GB"
],
[
"7b-v3.5-q5_K_S",
"5.0GB"
],
[
"7b-v3.5-q3_K_M",
"3.5GB"
],
[
"7b-v3.5-q5_0",
"5.0GB"
],
[
"7b-v3.5-q4_1",
"4.6GB"
],
[
"7b-v3.5-q8_0",
"7.7GB"
]
],
"image": false,
"author": "OpenChat Team"
},
"tinydolphin": {
"url": "https://ollama.com/library/tinydolphin",
"description": "An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset by Eric Hartford and based on TinyLlama.",
"tags": [
[
"latest",
"637MB"
],
[
"1.1b",
"637MB"
],
[
"v2.8",
"637MB"
],
[
"1.1b-v2.8-fp16",
"2.2GB"
],
[
"1.1b-v2.8-q2_K",
"432MB"
],
[
"1.1b-v2.8-q3_K_L",
"592MB"
],
[
"1.1b-v2.8-q3_K_M",
"548MB"
],
[
"1.1b-v2.8-q3_K_S",
"499MB"
],
[
"1.1b-v2.8-q4_0",
"637MB"
],
[
"1.1b-v2.8-q4_1",
"701MB"
],
[
"1.1b-v2.8-q4_K_M",
"668MB"
],
[
"1.1b-v2.8-q4_K_S",
"640MB"
],
[
"1.1b-v2.8-q5_0",
"766MB"
],
[
"1.1b-v2.8-q5_1",
"831MB"
],
[
"1.1b-v2.8-q5_K_M",
"782MB"
],
[
"1.1b-v2.8-q5_K_S",
"766MB"
],
[
"1.1b-v2.8-q6_K",
"903MB"
],
[
"1.1b-v2.8-q8_0",
"1.2GB"
]
],
"image": false,
"author": "Eric Hartford"
},
"openhermes": {
"url": "https://ollama.com/library/openhermes",
"description": "OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully open datasets.",
"tags": [
[
"latest",
"4.1GB"
],
[
"v2",
"4.1GB"
],
[
"v2.5",
"4.1GB"
],
[
"7b-mistral-v2-fp16",
"14GB"
],
[
"7b-mistral-v2-q2_K",
"3.1GB"
],
[
"7b-mistral-v2-q3_K_L",
"3.8GB"
],
[
"7b-mistral-v2-q3_K_M",
"3.5GB"
],
[
"7b-mistral-v2-q3_K_S",
"3.2GB"
],
[
"7b-mistral-v2-q4_0",
"4.1GB"
],
[
"7b-mistral-v2-q4_1",
"4.6GB"
],
[
"7b-mistral-v2-q4_K_M",
"4.4GB"
],
[
"7b-mistral-v2-q4_K_S",
"4.1GB"
],
[
"7b-mistral-v2-q5_0",
"5.0GB"
],
[
"7b-mistral-v2-q5_1",
"5.4GB"
],
[
"7b-mistral-v2-q5_K_M",
"5.1GB"
],
[
"7b-mistral-v2-q5_K_S",
"5.0GB"
],
[
"7b-mistral-v2-q6_K",
"5.9GB"
],
[
"7b-mistral-v2-q8_0",
"7.7GB"
],
[
"7b-mistral-v2.5-fp16",
"14GB"
],
[
"7b-mistral-v2.5-q2_K",
"3.1GB"
],
[
"7b-mistral-v2.5-q3_K_L",
"3.8GB"
],
[
"7b-mistral-v2.5-q3_K_M",
"3.5GB"
],
[
"7b-mistral-v2.5-q3_K_S",
"3.2GB"
],
[
"7b-mistral-v2.5-q4_0",
"4.1GB"
],
[
"7b-mistral-v2.5-q4_1",
"4.6GB"
],
[
"7b-mistral-v2.5-q4_K_M",
"4.4GB"
],
[
"7b-mistral-v2.5-q4_K_S",
"4.1GB"
],
[
"7b-mistral-v2.5-q5_0",
"5.0GB"
],
[
"7b-mistral-v2.5-q5_1",
"5.4GB"
],
[
"7b-mistral-v2.5-q5_K_M",
"5.1GB"
],
[
"7b-mistral-v2.5-q5_K_S",
"5.0GB"
],
[
"7b-mistral-v2.5-q6_K",
"5.9GB"
],
[
"7b-mistral-v2.5-q8_0",
"7.7GB"
],
[
"7b-v2",
"4.1GB"
],
[
"7b-v2.5",
"4.1GB"
]
],
"image": false,
"author": "Teknium"
},
"wizardcoder": {
"url": "https://ollama.com/library/wizardcoder",
"description": "State-of-the-art code generation model",
"tags": [
[
"latest",
"3.8GB"
],
[
"33b",
"19GB"
],
[
"python",
"3.8GB"
],
[
"34b-python",
"19GB"
],
[
"34b-python-fp16",
"67GB"
],
[
"34b-python-q2_K",
"14GB"
],
[
"34b-python-q3_K_L",
"18GB"
],
[
"34b-python-q3_K_M",
"16GB"
],
[
"34b-python-q3_K_S",
"15GB"
],
[
"34b-python-q4_0",
"19GB"
],
[
"34b-python-q4_1",
"21GB"
],
[
"34b-python-q4_K_M",
"20GB"
],
[
"34b-python-q4_K_S",
"19GB"
],
[
"34b-python-q5_0",
"23GB"
],
[
"34b-python-q5_1",
"25GB"
],
[
"34b-python-q5_K_M",
"24GB"
],
[
"34b-python-q5_K_S",
"23GB"
],
[
"34b-python-q6_K",
"28GB"
],
[
"34b-python-q8_0",
"36GB"
],
[
"33b-v1.1",
"19GB"
],
[
"33b-v1.1-fp16",
"67GB"
],
[
"33b-v1.1-q2_K",
"14GB"
],
[
"33b-v1.1-q3_K_L",
"18GB"
],
[
"33b-v1.1-q3_K_M",
"16GB"
],
[
"33b-v1.1-q3_K_S",
"14GB"
],
[
"33b-v1.1-q4_0",
"19GB"
],
[
"33b-v1.1-q4_1",
"21GB"
],
[
"33b-v1.1-q4_K_M",
"20GB"
],
[
"33b-v1.1-q4_K_S",
"19GB"
],
[
"33b-v1.1-q5_0",
"23GB"
],
[
"33b-v1.1-q5_1",
"25GB"
],
[
"33b-v1.1-q5_K_M",
"24GB"
],
[
"33b-v1.1-q5_K_S",
"23GB"
],
[
"33b-v1.1-q6_K",
"27GB"
],
[
"33b-v1.1-q8_0",
"35GB"
],
[
"13b-python",
"7.4GB"
],
[
"13b-python-fp16",
"26GB"
],
[
"13b-python-q2_K",
"5.4GB"
],
[
"13b-python-q3_K_L",
"6.9GB"
],
[
"13b-python-q3_K_M",
"6.3GB"
],
[
"13b-python-q3_K_S",
"5.7GB"
],
[
"13b-python-q4_0",
"7.4GB"
],
[
"13b-python-q4_1",
"8.2GB"
],
[
"13b-python-q4_K_M",
"7.9GB"
],
[
"13b-python-q4_K_S",
"7.4GB"
],
[
"13b-python-q5_0",
"9.0GB"
],
[
"13b-python-q5_1",
"9.8GB"
],
[
"13b-python-q5_K_M",
"9.2GB"
],
[
"13b-python-q5_K_S",
"9.0GB"
],
[
"13b-python-q6_K",
"11GB"
],
[
"13b-python-q8_0",
"14GB"
],
[
"7b-python",
"3.8GB"
],
[
"7b-python-fp16",
"13GB"
],
[
"7b-python-q2_K",
"2.8GB"
],
[
"7b-python-q3_K_L",
"3.6GB"
],
[
"7b-python-q3_K_M",
"3.3GB"
],
[
"7b-python-q3_K_S",
"2.9GB"
],
[
"7b-python-q4_0",
"3.8GB"
],
[
"7b-python-q4_1",
"4.2GB"
],
[
"7b-python-q4_K_M",
"4.1GB"
],
[
"7b-python-q4_K_S",
"3.9GB"
],
[
"7b-python-q5_0",
"4.7GB"
],
[
"7b-python-q5_1",
"5.1GB"
],
[
"7b-python-q5_K_M",
"4.8GB"
],
[
"7b-python-q5_K_S",
"4.7GB"
],
[
"7b-python-q6_K",
"5.5GB"
],
[
"7b-python-q8_0",
"7.2GB"
]
],
"image": false,
"author": "WizardLM Team"
},
"stable-code": {
"url": "https://ollama.com/library/stable-code",
"description": "Stable Code 3B is a coding model with instruct and code completion variants on par with models such as Code Llama 7B that are 2.5x larger.",
"tags": [
[
"latest",
"1.6GB"
],
[
"3b",
"1.6GB"
],
[
"code",
"1.6GB"
],
[
"instruct",
"1.6GB"
],
[
"3b-code",
"1.6GB"
],
[
"3b-code-fp16",
"5.6GB"
],
[
"3b-code-q2_K",
"1.1GB"
],
[
"3b-code-q3_K_L",
"1.5GB"
],
[
"3b-code-q3_K_M",
"1.4GB"
],
[
"3b-code-q3_K_S",
"1.3GB"
],
[
"3b-code-q4_0",
"1.6GB"
],
[
"3b-code-q4_1",
"1.8GB"
],
[
"3b-code-q4_K_M",
"1.7GB"
],
[
"3b-code-q4_K_S",
"1.6GB"
],
[
"3b-code-q5_0",
"1.9GB"
],
[
"3b-code-q5_1",
"2.1GB"
],
[
"3b-code-q5_K_M",
"2.0GB"
],
[
"3b-code-q5_K_S",
"1.9GB"
],
[
"3b-code-q6_K",
"2.3GB"
],
[
"3b-code-q8_0",
"3.0GB"
],
[
"3b-instruct",
"1.6GB"
],
[
"3b-instruct-fp16",
"5.6GB"
],
[
"3b-instruct-q2_K",
"1.1GB"
],
[
"3b-instruct-q3_K_L",
"1.5GB"
],
[
"3b-instruct-q3_K_M",
"1.4GB"
],
[
"3b-instruct-q3_K_S",
"1.3GB"
],
[
"3b-instruct-q4_0",
"1.6GB"
],
[
"3b-instruct-q4_1",
"1.8GB"
],
[
"3b-instruct-q4_K_M",
"1.7GB"
],
[
"3b-instruct-q4_K_S",
"1.6GB"
],
[
"3b-instruct-q5_0",
"1.9GB"
],
[
"3b-instruct-q5_1",
"2.1GB"
],
[
"3b-instruct-q5_K_M",
"2.0GB"
],
[
"3b-instruct-q5_K_S",
"1.9GB"
],
[
"3b-instruct-q6_K",
"2.3GB"
],
[
"3b-instruct-q8_0",
"3.0GB"
]
],
"image": false,
"author": "Stability AI"
},
"neural-chat": {
"url": "https://ollama.com/library/neural-chat",
"description": "A fine-tuned model based on Mistral with good coverage of domain and language.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"7b-v3.1",
"4.1GB"
],
[
"7b-v3.1-fp16",
"14GB"
],
[
"7b-v3.1-q2_K",
"3.1GB"
],
[
"7b-v3.1-q3_K_L",
"3.8GB"
],
[
"7b-v3.1-q3_K_M",
"3.5GB"
],
[
"7b-v3.1-q3_K_S",
"3.2GB"
],
[
"7b-v3.1-q4_0",
"4.1GB"
],
[
"7b-v3.1-q4_1",
"4.6GB"
],
[
"7b-v3.1-q4_K_M",
"4.4GB"
],
[
"7b-v3.1-q4_K_S",
"4.1GB"
],
[
"7b-v3.1-q5_0",
"5.0GB"
],
[
"7b-v3.1-q5_1",
"5.4GB"
],
[
"7b-v3.1-q5_K_M",
"5.1GB"
],
[
"7b-v3.1-q5_K_S",
"5.0GB"
],
[
"7b-v3.1-q6_K",
"5.9GB"
],
[
"7b-v3.1-q8_0",
"7.7GB"
],
[
"7b-v3.2",
"4.1GB"
],
[
"7b-v3.2-fp16",
"14GB"
],
[
"7b-v3.2-q2_K",
"3.1GB"
],
[
"7b-v3.2-q3_K_L",
"3.8GB"
],
[
"7b-v3.2-q3_K_M",
"3.5GB"
],
[
"7b-v3.2-q3_K_S",
"3.2GB"
],
[
"7b-v3.2-q4_0",
"4.1GB"
],
[
"7b-v3.2-q4_1",
"4.6GB"
],
[
"7b-v3.2-q4_K_M",
"4.4GB"
],
[
"7b-v3.2-q4_K_S",
"4.1GB"
],
[
"7b-v3.2-q5_0",
"5.0GB"
],
[
"7b-v3.2-q5_1",
"5.4GB"
],
[
"7b-v3.2-q5_K_M",
"5.1GB"
],
[
"7b-v3.2-q5_K_S",
"5.0GB"
],
[
"7b-v3.2-q6_K",
"5.9GB"
],
[
"7b-v3.2-q8_0",
"7.7GB"
],
[
"7b-v3.3",
"4.1GB"
],
[
"7b-v3.3-fp16",
"14GB"
],
[
"7b-v3.3-q2_K",
"3.1GB"
],
[
"7b-v3.3-q3_K_L",
"3.8GB"
],
[
"7b-v3.3-q3_K_M",
"3.5GB"
],
[
"7b-v3.3-q3_K_S",
"3.2GB"
],
[
"7b-v3.3-q4_0",
"4.1GB"
],
[
"7b-v3.3-q4_1",
"4.6GB"
],
[
"7b-v3.3-q4_K_M",
"4.4GB"
],
[
"7b-v3.3-q4_K_S",
"4.1GB"
],
[
"7b-v3.3-q5_0",
"5.0GB"
],
[
"7b-v3.3-q5_1",
"5.4GB"
],
[
"7b-v3.3-q5_K_M",
"5.1GB"
],
[
"7b-v3.3-q5_K_S",
"5.0GB"
],
[
"7b-v3.3-q6_K",
"5.9GB"
],
[
"7b-v3.3-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Intel"
},
"wizard-math": {
"url": "https://ollama.com/library/wizard-math",
"description": "Model focused on math and logic problems",
"tags": [
[
"latest",
"4.1GB"
],
[
"70b",
"39GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"4.1GB"
],
[
"70b-fp16",
"138GB"
],
[
"70b-q2_K",
"29GB"
],
[
"70b-q3_K_L",
"36GB"
],
[
"70b-q3_K_M",
"33GB"
],
[
"70b-q3_K_S",
"30GB"
],
[
"70b-q4_0",
"39GB"
],
[
"70b-q4_1",
"43GB"
],
[
"70b-q4_K_M",
"41GB"
],
[
"70b-q4_K_S",
"39GB"
],
[
"70b-q5_0",
"47GB"
],
[
"70b-q5_1",
"52GB"
],
[
"70b-q5_K_M",
"49GB"
],
[
"70b-q5_K_S",
"47GB"
],
[
"70b-q6_K",
"57GB"
],
[
"70b-q8_0",
"73GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
],
[
"7b-v1.1-fp16",
"14GB"
],
[
"7b-v1.1-q2_K",
"3.1GB"
],
[
"7b-v1.1-q3_K_L",
"3.8GB"
],
[
"7b-v1.1-q3_K_M",
"3.5GB"
],
[
"7b-v1.1-q3_K_S",
"3.2GB"
],
[
"7b-v1.1-q4_0",
"4.1GB"
],
[
"7b-v1.1-q4_1",
"4.6GB"
],
[
"7b-v1.1-q4_K_M",
"4.4GB"
],
[
"7b-v1.1-q4_K_S",
"4.1GB"
],
[
"7b-v1.1-q5_0",
"5.0GB"
],
[
"7b-v1.1-q5_1",
"5.4GB"
],
[
"7b-v1.1-q5_K_M",
"5.1GB"
],
[
"7b-v1.1-q5_K_S",
"5.0GB"
],
[
"7b-v1.1-q6_K",
"5.9GB"
],
[
"7b-v1.1-q8_0",
"7.7GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "WizardLM Team"
},
"codeqwen": {
"url": "https://ollama.com/library/codeqwen",
"description": "CodeQwen1.5 is a large language model pretrained on a large amount of code data.",
"tags": [
[
"latest",
"4.2GB"
],
[
"7b",
"4.2GB"
],
[
"chat",
"4.2GB"
],
[
"code",
"4.2GB"
],
[
"v1.5",
"4.2GB"
],
[
"7b-chat",
"4.2GB"
],
[
"7b-chat-v1.5-fp16",
"15GB"
],
[
"7b-chat-v1.5-q2_K",
"3.1GB"
],
[
"7b-chat-v1.5-q3_K_L",
"4.0GB"
],
[
"7b-chat-v1.5-q3_K_M",
"3.8GB"
],
[
"7b-chat-v1.5-q3_K_S",
"3.5GB"
],
[
"7b-chat-v1.5-q4_0",
"4.2GB"
],
[
"7b-chat-v1.5-q4_1",
"4.6GB"
],
[
"7b-chat-v1.5-q4_K_M",
"4.7GB"
],
[
"7b-chat-v1.5-q4_K_S",
"4.4GB"
],
[
"7b-chat-v1.5-q5_0",
"5.0GB"
],
[
"7b-chat-v1.5-q5_1",
"5.5GB"
],
[
"7b-chat-v1.5-q5_K_M",
"5.4GB"
],
[
"7b-chat-v1.5-q5_K_S",
"5.1GB"
],
[
"7b-chat-v1.5-q6_K",
"6.4GB"
],
[
"7b-chat-v1.5-q8_0",
"7.7GB"
],
[
"7b-code",
"4.2GB"
],
[
"7b-code-v1.5-fp16",
"15GB"
],
[
"7b-code-v1.5-q4_0",
"4.2GB"
],
[
"7b-code-v1.5-q4_1",
"4.6GB"
],
[
"7b-code-v1.5-q5_0",
"5.0GB"
],
[
"7b-code-v1.5-q5_1",
"5.5GB"
],
[
"7b-code-v1.5-q8_0",
"7.7GB"
],
[
"v1.5-chat",
"4.2GB"
],
[
"v1.5-code",
"4.2GB"
]
],
"image": false,
"author": "Alibaba"
},
"phind-codellama": {
"url": "https://ollama.com/library/phind-codellama",
"description": "Code generation model based on Code Llama.",
"tags": [
[
"latest",
"19GB"
],
[
"34b",
"19GB"
],
[
"34b-python",
"19GB"
],
[
"34b-python-fp16",
"67GB"
],
[
"34b-python-q2_K",
"14GB"
],
[
"34b-python-q3_K_L",
"18GB"
],
[
"34b-python-q3_K_M",
"16GB"
],
[
"34b-python-q3_K_S",
"15GB"
],
[
"34b-python-q4_0",
"19GB"
],
[
"34b-python-q4_1",
"21GB"
],
[
"34b-python-q4_K_M",
"20GB"
],
[
"34b-python-q4_K_S",
"19GB"
],
[
"34b-python-q5_0",
"23GB"
],
[
"34b-python-q5_1",
"25GB"
],
[
"34b-python-q5_K_M",
"24GB"
],
[
"34b-python-q5_K_S",
"23GB"
],
[
"34b-python-q6_K",
"28GB"
],
[
"34b-python-q8_0",
"36GB"
],
[
"34b-v2",
"19GB"
],
[
"34b-v2-fp16",
"67GB"
],
[
"34b-v2-q2_K",
"14GB"
],
[
"34b-v2-q3_K_L",
"18GB"
],
[
"34b-v2-q3_K_M",
"16GB"
],
[
"34b-v2-q3_K_S",
"15GB"
],
[
"34b-v2-q4_0",
"19GB"
],
[
"34b-v2-q4_1",
"21GB"
],
[
"34b-v2-q4_K_M",
"20GB"
],
[
"34b-v2-q4_K_S",
"19GB"
],
[
"34b-v2-q5_0",
"23GB"
],
[
"34b-v2-q5_1",
"25GB"
],
[
"34b-v2-q5_K_M",
"24GB"
],
[
"34b-v2-q5_K_S",
"23GB"
],
[
"34b-v2-q6_K",
"28GB"
],
[
"34b-v2-q8_0",
"36GB"
],
[
"34b-fp16",
"67GB"
],
[
"34b-q2_K",
"14GB"
],
[
"34b-q3_K_L",
"18GB"
],
[
"34b-q3_K_M",
"16GB"
],
[
"34b-q3_K_S",
"15GB"
],
[
"34b-q4_0",
"19GB"
],
[
"34b-q4_1",
"21GB"
],
[
"34b-q4_K_M",
"20GB"
],
[
"34b-q4_K_S",
"19GB"
],
[
"34b-q5_0",
"23GB"
],
[
"34b-q5_1",
"25GB"
],
[
"34b-q5_K_M",
"24GB"
],
[
"34b-q5_K_S",
"23GB"
],
[
"34b-q6_K",
"28GB"
],
[
"34b-q8_0",
"36GB"
]
],
"image": false,
"author": "Phind"
},
"stablelm2": {
"url": "https://ollama.com/library/stablelm2",
"description": "Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model trained on multilingual data in English, Spanish, German, Italian, French, Portuguese, and Dutch.",
"tags": [
[
"latest",
"983MB"
],
[
"12b",
"7.0GB"
],
[
"1.6b",
"983MB"
],
[
"chat",
"983MB"
],
[
"zephyr",
"983MB"
],
[
"12b-chat",
"7.0GB"
],
[
"12b-chat-fp16",
"24GB"
],
[
"12b-chat-q2_K",
"4.7GB"
],
[
"12b-chat-q3_K_L",
"6.5GB"
],
[
"12b-chat-q3_K_M",
"6.0GB"
],
[
"12b-chat-q3_K_S",
"5.4GB"
],
[
"12b-chat-q4_0",
"7.0GB"
],
[
"12b-chat-q4_1",
"7.7GB"
],
[
"12b-chat-q4_K_M",
"7.4GB"
],
[
"12b-chat-q4_K_S",
"7.0GB"
],
[
"12b-chat-q5_0",
"8.4GB"
],
[
"12b-chat-q5_1",
"9.1GB"
],
[
"12b-chat-q5_K_M",
"8.6GB"
],
[
"12b-chat-q5_K_S",
"8.4GB"
],
[
"12b-chat-q6_K",
"10.0GB"
],
[
"12b-chat-q8_0",
"13GB"
],
[
"12b-text",
"7.0GB"
],
[
"1.6b-chat",
"983MB"
],
[
"1.6b-zephyr",
"983MB"
],
[
"12b-fp16",
"24GB"
],
[
"12b-q2_K",
"4.7GB"
],
[
"12b-q3_K_L",
"6.5GB"
],
[
"12b-q3_K_M",
"6.0GB"
],
[
"12b-q3_K_S",
"5.4GB"
],
[
"12b-q4_0",
"7.0GB"
],
[
"12b-q4_1",
"7.7GB"
],
[
"12b-q4_K_M",
"7.4GB"
],
[
"12b-q4_K_S",
"7.0GB"
],
[
"12b-q5_0",
"8.4GB"
],
[
"12b-q5_1",
"9.1GB"
],
[
"12b-q5_K_M",
"8.6GB"
],
[
"12b-q5_K_S",
"8.4GB"
],
[
"12b-q6_K",
"10.0GB"
],
[
"12b-q8_0",
"13GB"
],
[
"1.6b-chat-fp16",
"3.3GB"
],
[
"1.6b-chat-q2_K",
"694MB"
],
[
"1.6b-chat-q3_K_L",
"915MB"
],
[
"1.6b-chat-q3_K_M",
"858MB"
],
[
"1.6b-chat-q3_K_S",
"792MB"
],
[
"1.6b-chat-q4_0",
"983MB"
],
[
"1.6b-chat-q4_1",
"1.1GB"
],
[
"1.6b-chat-q4_K_M",
"1.0GB"
],
[
"1.6b-chat-q4_K_S",
"989MB"
],
[
"1.6b-chat-q5_0",
"1.2GB"
],
[
"1.6b-chat-q5_1",
"1.3GB"
],
[
"1.6b-chat-q5_K_M",
"1.2GB"
],
[
"1.6b-chat-q5_K_S",
"1.2GB"
],
[
"1.6b-chat-q6_K",
"1.4GB"
],
[
"1.6b-chat-q8_0",
"1.8GB"
],
[
"1.6b-zephyr-fp16",
"3.3GB"
],
[
"1.6b-zephyr-q2_K",
"694MB"
],
[
"1.6b-zephyr-q3_K_L",
"915MB"
],
[
"1.6b-zephyr-q3_K_M",
"858MB"
],
[
"1.6b-zephyr-q3_K_S",
"792MB"
],
[
"1.6b-zephyr-q4_0",
"983MB"
],
[
"1.6b-zephyr-q4_1",
"1.1GB"
],
[
"1.6b-zephyr-q4_K_M",
"1.0GB"
],
[
"1.6b-zephyr-q4_K_S",
"989MB"
],
[
"1.6b-zephyr-q5_0",
"1.2GB"
],
[
"1.6b-zephyr-q5_1",
"1.3GB"
],
[
"1.6b-zephyr-q5_K_M",
"1.2GB"
],
[
"1.6b-zephyr-q5_K_S",
"1.2GB"
],
[
"1.6b-zephyr-q6_K",
"1.4GB"
],
[
"1.6b-zephyr-q8_0",
"1.8GB"
],
[
"1.6b-fp16",
"3.3GB"
],
[
"1.6b-q2_K",
"694MB"
],
[
"1.6b-q3_K_L",
"915MB"
],
[
"1.6b-q3_K_M",
"858MB"
],
[
"1.6b-q3_K_S",
"792MB"
],
[
"1.6b-q4_0",
"983MB"
],
[
"1.6b-q4_1",
"1.1GB"
],
[
"1.6b-q4_K_M",
"1.0GB"
],
[
"1.6b-q4_K_S",
"989MB"
],
[
"1.6b-q5_0",
"1.2GB"
],
[
"1.6b-q5_1",
"1.3GB"
],
[
"1.6b-q5_K_M",
"1.2GB"
],
[
"1.6b-q5_K_S",
"1.2GB"
],
[
"1.6b-q6_K",
"1.4GB"
],
[
"1.6b-q8_0",
"1.8GB"
]
],
"image": false,
"author": "Stability AI"
},
"dolphincoder": {
"url": "https://ollama.com/library/dolphincoder",
"description": "A 7B and 15B uncensored variant of the Dolphin model family that excels at coding, based on StarCoder2.",
"tags": [
[
"latest",
"4.2GB"
],
[
"15b",
"9.1GB"
],
[
"7b",
"4.2GB"
],
[
"15b-starcoder2",
"9.1GB"
],
[
"15b-starcoder2-fp16",
"32GB"
],
[
"15b-starcoder2-q2_K",
"6.2GB"
],
[
"15b-starcoder2-q3_K_L",
"9.0GB"
],
[
"15b-starcoder2-q3_K_M",
"8.1GB"
],
[
"15b-starcoder2-q3_K_S",
"7.0GB"
],
[
"15b-starcoder2-q4_0",
"9.1GB"
],
[
"15b-starcoder2-q4_1",
"10GB"
],
[
"15b-starcoder2-q4_K_M",
"9.9GB"
],
[
"15b-starcoder2-q4_K_S",
"9.3GB"
],
[
"15b-starcoder2-q5_0",
"11GB"
],
[
"15b-starcoder2-q5_1",
"12GB"
],
[
"15b-starcoder2-q5_K_M",
"11GB"
],
[
"15b-starcoder2-q5_K_S",
"11GB"
],
[
"15b-starcoder2-q6_K",
"13GB"
],
[
"15b-starcoder2-q8_0",
"17GB"
],
[
"7b-starcoder2",
"4.2GB"
],
[
"7b-starcoder2-fp16",
"15GB"
],
[
"7b-starcoder2-q2_K",
"2.9GB"
],
[
"7b-starcoder2-q3_K_L",
"4.2GB"
],
[
"7b-starcoder2-q3_K_M",
"3.8GB"
],
[
"7b-starcoder2-q3_K_S",
"3.3GB"
],
[
"7b-starcoder2-q4_0",
"4.2GB"
],
[
"7b-starcoder2-q4_1",
"4.7GB"
],
[
"7b-starcoder2-q4_K_M",
"4.6GB"
],
[
"7b-starcoder2-q4_K_S",
"4.3GB"
],
[
"7b-starcoder2-q5_0",
"5.1GB"
],
[
"7b-starcoder2-q5_1",
"5.6GB"
],
[
"7b-starcoder2-q5_K_M",
"5.3GB"
],
[
"7b-starcoder2-q5_K_S",
"5.1GB"
],
[
"7b-starcoder2-q6_K",
"6.1GB"
],
[
"7b-starcoder2-q8_0",
"7.9GB"
]
],
"image": false,
"author": "Cognitive Computations"
},
"all-minilm": {
"url": "https://ollama.com/library/all-minilm",
"description": "Embedding models on very large sentence level datasets.",
"tags": [
[
"latest",
"46MB"
],
[
"33m",
"67MB"
],
[
"22m",
"46MB"
],
[
"l12",
"67MB"
],
[
"l6",
"46MB"
],
[
"v2",
"46MB"
],
[
"33m-l12-v2-fp16",
"67MB"
],
[
"22m-l6-v2-fp16",
"46MB"
],
[
"l12-v2",
"67MB"
],
[
"l6-v2",
"46MB"
]
],
"image": false,
"author": "Sentence Transformers"
},
"nous-hermes": {
"url": "https://ollama.com/library/nous-hermes",
"description": "General use models based on Llama and Llama 2 from Nous Research.",
"tags": [
[
"latest",
"3.8GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"70b-llama2-fp16",
"138GB"
],
[
"70b-llama2-q2_K",
"29GB"
],
[
"70b-llama2-q3_K_L",
"36GB"
],
[
"70b-llama2-q3_K_M",
"33GB"
],
[
"70b-llama2-q3_K_S",
"30GB"
],
[
"70b-llama2-q4_0",
"39GB"
],
[
"70b-llama2-q4_1",
"43GB"
],
[
"70b-llama2-q4_K_M",
"41GB"
],
[
"70b-llama2-q4_K_S",
"39GB"
],
[
"70b-llama2-q5_0",
"47GB"
],
[
"70b-llama2-q5_1",
"52GB"
],
[
"70b-llama2-q5_K_M",
"49GB"
],
[
"70b-llama2-q6_K",
"57GB"
],
[
"13b-llama2",
"7.4GB"
],
[
"13b-llama2-fp16",
"26GB"
],
[
"13b-llama2-q2_K",
"5.4GB"
],
[
"13b-llama2-q3_K_L",
"6.9GB"
],
[
"13b-llama2-q3_K_M",
"6.3GB"
],
[
"13b-llama2-q3_K_S",
"5.7GB"
],
[
"13b-llama2-q4_0",
"7.4GB"
],
[
"13b-llama2-q4_1",
"8.2GB"
],
[
"13b-llama2-q4_K_M",
"7.9GB"
],
[
"13b-llama2-q4_K_S",
"7.4GB"
],
[
"13b-llama2-q5_0",
"9.0GB"
],
[
"13b-llama2-q5_1",
"9.8GB"
],
[
"13b-llama2-q5_K_M",
"9.2GB"
],
[
"13b-llama2-q5_K_S",
"9.0GB"
],
[
"13b-llama2-q6_K",
"11GB"
],
[
"13b-llama2-q8_0",
"14GB"
],
[
"7b-llama2",
"3.8GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
],
[
"7b-llama2-fp16",
"13GB"
],
[
"7b-llama2-q2_K",
"2.8GB"
],
[
"7b-llama2-q3_K_L",
"3.6GB"
],
[
"7b-llama2-q3_K_M",
"3.3GB"
],
[
"7b-llama2-q3_K_S",
"2.9GB"
],
[
"7b-llama2-q4_0",
"3.8GB"
],
[
"7b-llama2-q4_1",
"4.2GB"
],
[
"7b-llama2-q4_K_M",
"4.1GB"
],
[
"7b-llama2-q4_K_S",
"3.9GB"
],
[
"7b-llama2-q5_0",
"4.7GB"
],
[
"7b-llama2-q5_1",
"5.1GB"
],
[
"7b-llama2-q5_K_M",
"4.8GB"
],
[
"7b-llama2-q5_K_S",
"4.7GB"
],
[
"7b-llama2-q6_K",
"5.5GB"
],
[
"7b-llama2-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Nous Research"
},
"starling-lm": {
"url": "https://ollama.com/library/starling-lm",
"description": "Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"alpha",
"4.1GB"
],
[
"beta",
"4.1GB"
],
[
"7b-alpha",
"4.1GB"
],
[
"7b-alpha-fp16",
"14GB"
],
[
"7b-alpha-q2_K",
"2.7GB"
],
[
"7b-alpha-q3_K_L",
"3.8GB"
],
[
"7b-alpha-q3_K_M",
"3.5GB"
],
[
"7b-alpha-q3_K_S",
"3.2GB"
],
[
"7b-alpha-q4_0",
"4.1GB"
],
[
"7b-alpha-q4_1",
"4.6GB"
],
[
"7b-alpha-q4_K_M",
"4.4GB"
],
[
"7b-alpha-q4_K_S",
"4.1GB"
],
[
"7b-alpha-q5_0",
"5.0GB"
],
[
"7b-alpha-q5_1",
"5.4GB"
],
[
"7b-alpha-q5_K_M",
"5.1GB"
],
[
"7b-alpha-q5_K_S",
"5.0GB"
],
[
"7b-alpha-q6_K",
"5.9GB"
],
[
"7b-alpha-q8_0",
"7.7GB"
],
[
"7b-beta",
"4.1GB"
],
[
"7b-beta-fp16",
"14GB"
],
[
"7b-beta-q2_K",
"2.7GB"
],
[
"7b-beta-q3_K_L",
"3.8GB"
],
[
"7b-beta-q3_K_M",
"3.5GB"
],
[
"7b-beta-q3_K_S",
"3.2GB"
],
[
"7b-beta-q4_0",
"4.1GB"
],
[
"7b-beta-q4_1",
"4.6GB"
],
[
"7b-beta-q4_K_M",
"4.4GB"
],
[
"7b-beta-q4_K_S",
"4.1GB"
],
[
"7b-beta-q5_0",
"5.0GB"
],
[
"7b-beta-q5_1",
"5.4GB"
],
[
"7b-beta-q5_K_M",
"5.1GB"
],
[
"7b-beta-q5_K_S",
"5.0GB"
],
[
"7b-beta-q6_K",
"5.9GB"
],
[
"7b-beta-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Berkeley Nest"
},
"sqlcoder": {
"url": "https://ollama.com/library/sqlcoder",
"description": "SQLCoder is a code completion model fined-tuned on StarCoder for SQL generation tasks",
"tags": [
[
"latest",
"4.1GB"
],
[
"15b",
"9.0GB"
],
[
"7b",
"4.1GB"
],
[
"70b-alpha-fp16",
"138GB"
],
[
"70b-alpha-q2_K",
"25GB"
],
[
"70b-alpha-q3_K_L",
"36GB"
],
[
"70b-alpha-q3_K_M",
"33GB"
],
[
"70b-alpha-q3_K_S",
"30GB"
],
[
"70b-alpha-q4_0",
"39GB"
],
[
"70b-alpha-q4_1",
"43GB"
],
[
"70b-alpha-q4_K_M",
"41GB"
],
[
"70b-alpha-q4_K_S",
"39GB"
],
[
"70b-alpha-q5_0",
"47GB"
],
[
"70b-alpha-q5_1",
"52GB"
],
[
"70b-alpha-q5_K_M",
"49GB"
],
[
"70b-alpha-q5_K_S",
"47GB"
],
[
"70b-alpha-q6_K",
"57GB"
],
[
"70b-alpha-q8_0",
"73GB"
],
[
"15b-fp16",
"32GB"
],
[
"15b-q2_K",
"6.7GB"
],
[
"15b-q3_K_L",
"9.1GB"
],
[
"15b-q3_K_M",
"8.2GB"
],
[
"15b-q3_K_S",
"6.9GB"
],
[
"15b-q4_0",
"9.0GB"
],
[
"15b-q4_1",
"10.0GB"
],
[
"15b-q4_K_M",
"10.0GB"
],
[
"15b-q4_K_S",
"9.1GB"
],
[
"15b-q5_0",
"11GB"
],
[
"15b-q5_1",
"12GB"
],
[
"15b-q5_K_M",
"12GB"
],
[
"15b-q5_K_S",
"11GB"
],
[
"15b-q6_K",
"13GB"
],
[
"15b-q8_0",
"17GB"
],
[
"7b-fp16",
"14GB"
],
[
"7b-q2_K",
"3.1GB"
],
[
"7b-q3_K_L",
"3.8GB"
],
[
"7b-q3_K_M",
"3.5GB"
],
[
"7b-q3_K_S",
"3.2GB"
],
[
"7b-q4_0",
"4.1GB"
],
[
"7b-q4_1",
"4.6GB"
],
[
"7b-q4_K_M",
"4.4GB"
],
[
"7b-q4_K_S",
"4.1GB"
],
[
"7b-q5_0",
"5.0GB"
],
[
"7b-q5_1",
"5.4GB"
],
[
"7b-q5_K_M",
"5.1GB"
],
[
"7b-q5_K_S",
"5.0GB"
],
[
"7b-q6_K",
"5.9GB"
],
[
"7b-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Defog.ai"
},
"orca2": {
"url": "https://ollama.com/library/orca2",
"description": "Orca 2 is built by Microsoft research, and are a fine-tuned version of Meta's Llama 2 models. The model is designed to excel particularly in reasoning.",
"tags": [
[
"latest",
"3.8GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Microsoft Research"
},
"llama3-gradient": {
"url": "https://ollama.com/library/llama3-gradient",
"description": "This model extends LLama-3 8B's context length from 8k to over 1m tokens.",
"tags": [
[
"latest",
"4.7GB"
],
[
"70b",
"40GB"
],
[
"8b",
"4.7GB"
],
[
"1048k",
"4.7GB"
],
[
"instruct",
"4.7GB"
],
[
"70b-instruct-1048k-fp16",
"141GB"
],
[
"70b-instruct-1048k-q2_K",
"26GB"
],
[
"70b-instruct-1048k-q3_K_L",
"37GB"
],
[
"70b-instruct-1048k-q3_K_M",
"34GB"
],
[
"70b-instruct-1048k-q3_K_S",
"31GB"
],
[
"70b-instruct-1048k-q4_0",
"40GB"
],
[
"70b-instruct-1048k-q4_1",
"44GB"
],
[
"70b-instruct-1048k-q4_K_M",
"43GB"
],
[
"70b-instruct-1048k-q4_K_S",
"40GB"
],
[
"70b-instruct-1048k-q5_0",
"49GB"
],
[
"70b-instruct-1048k-q5_1",
"53GB"
],
[
"70b-instruct-1048k-q5_K_M",
"50GB"
],
[
"70b-instruct-1048k-q5_K_S",
"49GB"
],
[
"70b-instruct-1048k-q6_K",
"58GB"
],
[
"70b-instruct-1048k-q8_0",
"75GB"
],
[
"8b-instruct-1048k-fp16",
"16GB"
],
[
"8b-instruct-1048k-q2_K",
"3.2GB"
],
[
"8b-instruct-1048k-q3_K_L",
"4.3GB"
],
[
"8b-instruct-1048k-q3_K_M",
"4.0GB"
],
[
"8b-instruct-1048k-q3_K_S",
"3.7GB"
],
[
"8b-instruct-1048k-q4_0",
"4.7GB"
],
[
"8b-instruct-1048k-q4_1",
"5.1GB"
],
[
"8b-instruct-1048k-q4_K_M",
"4.9GB"
],
[
"8b-instruct-1048k-q4_K_S",
"4.7GB"
],
[
"8b-instruct-1048k-q5_0",
"5.6GB"
],
[
"8b-instruct-1048k-q5_1",
"6.1GB"
],
[
"8b-instruct-1048k-q5_K_M",
"5.7GB"
],
[
"8b-instruct-1048k-q5_K_S",
"5.6GB"
],
[
"8b-instruct-1048k-q6_K",
"6.6GB"
],
[
"8b-instruct-1048k-q8_0",
"8.5GB"
]
],
"image": false,
"author": "Gradient AI"
},
"deepseek-llm": {
"url": "https://ollama.com/library/deepseek-llm",
"description": "An advanced language model crafted with 2 trillion bilingual tokens.",
"tags": [
[
"latest",
"4.0GB"
],
[
"67b",
"38GB"
],
[
"7b",
"4.0GB"
],
[
"67b-base",
"38GB"
],
[
"67b-base-fp16",
"135GB"
],
[
"67b-base-q2_K",
"28GB"
],
[
"67b-base-q3_K_L",
"36GB"
],
[
"67b-base-q3_K_M",
"33GB"
],
[
"67b-base-q3_K_S",
"29GB"
],
[
"67b-base-q4_0",
"38GB"
],
[
"67b-base-q4_1",
"42GB"
],
[
"67b-base-q4_K_M",
"40GB"
],
[
"67b-base-q4_K_S",
"38GB"
],
[
"67b-base-q5_0",
"46GB"
],
[
"67b-base-q5_1",
"51GB"
],
[
"67b-base-q5_K_M",
"48GB"
],
[
"67b-base-q5_K_S",
"46GB"
],
[
"67b-base-q6_K",
"55GB"
],
[
"67b-base-q8_0",
"72GB"
],
[
"67b-chat",
"38GB"
],
[
"67b-chat-fp16",
"135GB"
],
[
"67b-chat-q2_K",
"28GB"
],
[
"67b-chat-q3_K_L",
"36GB"
],
[
"67b-chat-q3_K_M",
"33GB"
],
[
"67b-chat-q3_K_S",
"29GB"
],
[
"67b-chat-q4_0",
"38GB"
],
[
"67b-chat-q4_1",
"42GB"
],
[
"67b-chat-q4_K_M",
"40GB"
],
[
"67b-chat-q4_K_S",
"38GB"
],
[
"67b-chat-q5_0",
"46GB"
],
[
"67b-chat-q5_1",
"51GB"
],
[
"67b-chat-q5_K_S",
"46GB"
],
[
"7b-base",
"4.0GB"
],
[
"7b-base-fp16",
"14GB"
],
[
"7b-base-q2_K",
"3.0GB"
],
[
"7b-base-q3_K_L",
"3.7GB"
],
[
"7b-base-q3_K_M",
"3.5GB"
],
[
"7b-base-q3_K_S",
"3.1GB"
],
[
"7b-base-q4_0",
"4.0GB"
],
[
"7b-base-q4_1",
"4.4GB"
],
[
"7b-base-q4_K_M",
"4.2GB"
],
[
"7b-base-q4_K_S",
"4.0GB"
],
[
"7b-base-q5_0",
"4.8GB"
],
[
"7b-base-q5_1",
"5.2GB"
],
[
"7b-base-q5_K_M",
"4.9GB"
],
[
"7b-base-q5_K_S",
"4.8GB"
],
[
"7b-base-q6_K",
"5.7GB"
],
[
"7b-base-q8_0",
"7.3GB"
],
[
"7b-chat",
"4.0GB"
],
[
"7b-chat-fp16",
"14GB"
],
[
"7b-chat-q2_K",
"3.0GB"
],
[
"7b-chat-q3_K_L",
"3.7GB"
],
[
"7b-chat-q3_K_M",
"3.5GB"
],
[
"7b-chat-q3_K_S",
"3.1GB"
],
[
"7b-chat-q4_0",
"4.0GB"
],
[
"7b-chat-q4_1",
"4.4GB"
],
[
"7b-chat-q4_K_M",
"4.2GB"
],
[
"7b-chat-q4_K_S",
"4.0GB"
],
[
"7b-chat-q5_0",
"4.8GB"
],
[
"7b-chat-q5_1",
"5.2GB"
],
[
"7b-chat-q5_K_M",
"4.9GB"
],
[
"7b-chat-q5_K_S",
"4.8GB"
],
[
"7b-chat-q6_K",
"5.7GB"
],
[
"7b-chat-q8_0",
"7.3GB"
]
],
"image": false,
"author": "DeepSeek Team"
},
"yarn-llama2": {
"url": "https://ollama.com/library/yarn-llama2",
"description": "An extension of Llama 2 that supports a context of up to 128k tokens.",
"tags": [
[
"latest",
"3.8GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"13b-128k",
"7.4GB"
],
[
"13b-128k-fp16",
"26GB"
],
[
"13b-128k-q2_K",
"5.4GB"
],
[
"13b-128k-q3_K_L",
"6.9GB"
],
[
"13b-128k-q3_K_M",
"6.3GB"
],
[
"13b-128k-q3_K_S",
"5.7GB"
],
[
"13b-128k-q4_0",
"7.4GB"
],
[
"13b-128k-q4_1",
"8.2GB"
],
[
"13b-128k-q4_K_M",
"7.9GB"
],
[
"13b-128k-q4_K_S",
"7.4GB"
],
[
"13b-128k-q5_0",
"9.0GB"
],
[
"13b-128k-q5_1",
"9.8GB"
],
[
"13b-128k-q5_K_M",
"9.2GB"
],
[
"13b-128k-q5_K_S",
"9.0GB"
],
[
"13b-128k-q6_K",
"11GB"
],
[
"13b-128k-q8_0",
"14GB"
],
[
"13b-64k",
"7.4GB"
],
[
"13b-64k-fp16",
"26GB"
],
[
"13b-64k-q2_K",
"5.4GB"
],
[
"13b-64k-q3_K_L",
"6.9GB"
],
[
"13b-64k-q3_K_M",
"6.3GB"
],
[
"13b-64k-q3_K_S",
"5.7GB"
],
[
"13b-64k-q4_0",
"7.4GB"
],
[
"13b-64k-q4_1",
"8.2GB"
],
[
"13b-64k-q4_K_M",
"7.9GB"
],
[
"13b-64k-q4_K_S",
"7.4GB"
],
[
"13b-64k-q5_0",
"9.0GB"
],
[
"13b-64k-q5_1",
"9.8GB"
],
[
"13b-64k-q5_K_M",
"9.2GB"
],
[
"13b-64k-q5_K_S",
"9.0GB"
],
[
"13b-64k-q6_K",
"11GB"
],
[
"13b-64k-q8_0",
"14GB"
],
[
"7b-128k",
"3.8GB"
],
[
"7b-128k-fp16",
"13GB"
],
[
"7b-128k-q2_K",
"2.8GB"
],
[
"7b-128k-q3_K_L",
"3.6GB"
],
[
"7b-128k-q3_K_M",
"3.3GB"
],
[
"7b-128k-q3_K_S",
"2.9GB"
],
[
"7b-128k-q4_0",
"3.8GB"
],
[
"7b-128k-q4_1",
"4.2GB"
],
[
"7b-128k-q4_K_M",
"4.1GB"
],
[
"7b-128k-q4_K_S",
"3.9GB"
],
[
"7b-128k-q5_0",
"4.7GB"
],
[
"7b-128k-q5_1",
"5.1GB"
],
[
"7b-128k-q5_K_M",
"4.8GB"
],
[
"7b-128k-q5_K_S",
"4.7GB"
],
[
"7b-128k-q6_K",
"5.5GB"
],
[
"7b-128k-q8_0",
"7.2GB"
],
[
"7b-64k",
"3.8GB"
],
[
"7b-64k-fp16",
"13GB"
],
[
"7b-64k-q2_K",
"2.8GB"
],
[
"7b-64k-q3_K_L",
"3.6GB"
],
[
"7b-64k-q3_K_M",
"3.3GB"
],
[
"7b-64k-q3_K_S",
"2.9GB"
],
[
"7b-64k-q4_0",
"3.8GB"
],
[
"7b-64k-q4_1",
"4.2GB"
],
[
"7b-64k-q4_K_M",
"4.1GB"
],
[
"7b-64k-q4_K_S",
"3.9GB"
],
[
"7b-64k-q5_0",
"4.7GB"
],
[
"7b-64k-q5_1",
"5.1GB"
],
[
"7b-64k-q5_K_M",
"4.8GB"
],
[
"7b-64k-q5_K_S",
"4.7GB"
],
[
"7b-64k-q6_K",
"5.5GB"
],
[
"7b-64k-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Nous Research"
},
"llama3-chatqa": {
"url": "https://ollama.com/library/llama3-chatqa",
"description": "A model from NVIDIA based on Llama 3 that excels at conversational question answering (QA) and retrieval-augmented generation (RAG).",
"tags": [
[
"latest",
"4.7GB"
],
[
"70b",
"40GB"
],
[
"8b",
"4.7GB"
],
[
"70b-v1.5",
"40GB"
],
[
"70b-v1.5-fp16",
"141GB"
],
[
"70b-v1.5-q2_K",
"26GB"
],
[
"70b-v1.5-q3_K_L",
"37GB"
],
[
"70b-v1.5-q3_K_M",
"34GB"
],
[
"70b-v1.5-q3_K_S",
"31GB"
],
[
"70b-v1.5-q4_0",
"40GB"
],
[
"70b-v1.5-q4_1",
"44GB"
],
[
"70b-v1.5-q4_K_M",
"43GB"
],
[
"70b-v1.5-q4_K_S",
"40GB"
],
[
"70b-v1.5-q5_0",
"49GB"
],
[
"70b-v1.5-q5_1",
"53GB"
],
[
"70b-v1.5-q5_K_M",
"50GB"
],
[
"70b-v1.5-q5_K_S",
"49GB"
],
[
"70b-v1.5-q6_K",
"58GB"
],
[
"70b-v1.5-q8_0",
"75GB"
],
[
"8b-v1.5",
"4.7GB"
],
[
"8b-v1.5-fp16",
"16GB"
],
[
"8b-v1.5-q2_K",
"3.2GB"
],
[
"8b-v1.5-q3_K_L",
"4.3GB"
],
[
"8b-v1.5-q3_K_M",
"4.0GB"
],
[
"8b-v1.5-q3_K_S",
"3.7GB"
],
[
"8b-v1.5-q4_0",
"4.7GB"
],
[
"8b-v1.5-q4_1",
"5.1GB"
],
[
"8b-v1.5-q4_K_M",
"4.9GB"
],
[
"8b-v1.5-q4_K_S",
"4.7GB"
],
[
"8b-v1.5-q5_0",
"5.6GB"
],
[
"8b-v1.5-q5_1",
"6.1GB"
],
[
"8b-v1.5-q5_K_M",
"5.7GB"
],
[
"8b-v1.5-q5_K_S",
"5.6GB"
],
[
"8b-v1.5-q6_K",
"6.6GB"
],
[
"8b-v1.5-q8_0",
"8.5GB"
]
],
"image": false,
"author": "Nvidia"
},
"solar": {
"url": "https://ollama.com/library/solar",
"description": "A compact, yet powerful 10.7B large language model designed for single-turn conversation.",
"tags": [
[
"latest",
"6.1GB"
],
[
"10.7b",
"6.1GB"
],
[
"10.7b-instruct-v1-fp16",
"21GB"
],
[
"10.7b-instruct-v1-q2_K",
"4.5GB"
],
[
"10.7b-instruct-v1-q3_K_L",
"5.7GB"
],
[
"10.7b-instruct-v1-q3_K_M",
"5.2GB"
],
[
"10.7b-instruct-v1-q3_K_S",
"4.7GB"
],
[
"10.7b-instruct-v1-q4_0",
"6.1GB"
],
[
"10.7b-instruct-v1-q4_1",
"6.7GB"
],
[
"10.7b-instruct-v1-q4_K_M",
"6.5GB"
],
[
"10.7b-instruct-v1-q4_K_S",
"6.1GB"
],
[
"10.7b-instruct-v1-q5_0",
"7.4GB"
],
[
"10.7b-instruct-v1-q5_1",
"8.1GB"
],
[
"10.7b-instruct-v1-q5_K_M",
"7.6GB"
],
[
"10.7b-instruct-v1-q5_K_S",
"7.4GB"
],
[
"10.7b-instruct-v1-q6_K",
"8.8GB"
],
[
"10.7b-instruct-v1-q8_0",
"11GB"
],
[
"10.7b-text-v1-fp16",
"21GB"
],
[
"10.7b-text-v1-q2_K",
"4.5GB"
],
[
"10.7b-text-v1-q3_K_L",
"5.7GB"
],
[
"10.7b-text-v1-q3_K_M",
"5.2GB"
],
[
"10.7b-text-v1-q3_K_S",
"4.7GB"
],
[
"10.7b-text-v1-q4_0",
"6.1GB"
],
[
"10.7b-text-v1-q4_1",
"6.7GB"
],
[
"10.7b-text-v1-q4_K_M",
"6.5GB"
],
[
"10.7b-text-v1-q4_K_S",
"6.1GB"
],
[
"10.7b-text-v1-q5_0",
"7.4GB"
],
[
"10.7b-text-v1-q5_1",
"8.1GB"
],
[
"10.7b-text-v1-q5_K_M",
"7.6GB"
],
[
"10.7b-text-v1-q5_K_S",
"7.4GB"
],
[
"10.7b-text-v1-q6_K",
"8.8GB"
],
[
"10.7b-text-v1-q8_0",
"11GB"
]
],
"image": false,
"author": "Upstage"
},
"xwinlm": {
"url": "https://ollama.com/library/xwinlm",
"description": "Conversational model based on Llama 2 that performs competitively on various benchmarks.",
"tags": [
[
"latest",
"3.8GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"70b-v0.1",
"39GB"
],
[
"70b-v0.1-fp16",
"138GB"
],
[
"70b-v0.1-q2_K",
"29GB"
],
[
"70b-v0.1-q3_K_L",
"36GB"
],
[
"70b-v0.1-q3_K_M",
"33GB"
],
[
"70b-v0.1-q3_K_S",
"30GB"
],
[
"70b-v0.1-q4_0",
"39GB"
],
[
"70b-v0.1-q4_1",
"43GB"
],
[
"70b-v0.1-q4_K_M",
"41GB"
],
[
"70b-v0.1-q4_K_S",
"39GB"
],
[
"70b-v0.1-q5_0",
"47GB"
],
[
"70b-v0.1-q5_1",
"52GB"
],
[
"70b-v0.1-q5_K_S",
"47GB"
],
[
"70b-v0.1-q6_K",
"57GB"
],
[
"70b-v0.1-q8_0",
"73GB"
],
[
"13b-v0.1",
"7.4GB"
],
[
"13b-v0.1-fp16",
"26GB"
],
[
"13b-v0.1-q2_K",
"5.4GB"
],
[
"13b-v0.1-q3_K_L",
"6.9GB"
],
[
"13b-v0.1-q3_K_M",
"6.3GB"
],
[
"13b-v0.1-q3_K_S",
"5.7GB"
],
[
"13b-v0.1-q4_0",
"7.4GB"
],
[
"13b-v0.1-q4_1",
"8.2GB"
],
[
"13b-v0.1-q4_K_M",
"7.9GB"
],
[
"13b-v0.1-q4_K_S",
"7.4GB"
],
[
"13b-v0.1-q5_0",
"9.0GB"
],
[
"13b-v0.1-q5_1",
"9.8GB"
],
[
"13b-v0.1-q5_K_M",
"9.2GB"
],
[
"13b-v0.1-q5_K_S",
"9.0GB"
],
[
"13b-v0.1-q6_K",
"11GB"
],
[
"13b-v0.1-q8_0",
"14GB"
],
[
"13b-v0.2",
"7.4GB"
],
[
"13b-v0.2-fp16",
"26GB"
],
[
"13b-v0.2-q2_K",
"5.4GB"
],
[
"13b-v0.2-q3_K_L",
"6.9GB"
],
[
"13b-v0.2-q3_K_M",
"6.3GB"
],
[
"13b-v0.2-q3_K_S",
"5.7GB"
],
[
"13b-v0.2-q4_0",
"7.4GB"
],
[
"13b-v0.2-q4_1",
"8.2GB"
],
[
"13b-v0.2-q4_K_M",
"7.9GB"
],
[
"13b-v0.2-q4_K_S",
"7.4GB"
],
[
"13b-v0.2-q5_0",
"9.0GB"
],
[
"13b-v0.2-q5_1",
"9.8GB"
],
[
"13b-v0.2-q5_K_M",
"9.2GB"
],
[
"13b-v0.2-q5_K_S",
"9.0GB"
],
[
"13b-v0.2-q6_K",
"11GB"
],
[
"13b-v0.2-q8_0",
"14GB"
],
[
"7b-v0.1",
"3.8GB"
],
[
"7b-v0.1-fp16",
"13GB"
],
[
"7b-v0.1-q2_K",
"2.8GB"
],
[
"7b-v0.1-q3_K_L",
"3.6GB"
],
[
"7b-v0.1-q3_K_M",
"3.3GB"
],
[
"7b-v0.1-q3_K_S",
"2.9GB"
],
[
"7b-v0.1-q4_0",
"3.8GB"
],
[
"7b-v0.1-q4_1",
"4.2GB"
],
[
"7b-v0.1-q4_K_M",
"4.1GB"
],
[
"7b-v0.1-q4_K_S",
"3.9GB"
],
[
"7b-v0.1-q5_0",
"4.7GB"
],
[
"7b-v0.1-q5_1",
"5.1GB"
],
[
"7b-v0.1-q5_K_M",
"4.8GB"
],
[
"7b-v0.1-q5_K_S",
"4.7GB"
],
[
"7b-v0.1-q6_K",
"5.5GB"
],
[
"7b-v0.1-q8_0",
"7.2GB"
],
[
"7b-v0.2",
"3.8GB"
],
[
"7b-v0.2-fp16",
"13GB"
],
[
"7b-v0.2-q2_K",
"2.8GB"
],
[
"7b-v0.2-q3_K_L",
"3.6GB"
],
[
"7b-v0.2-q3_K_S",
"2.9GB"
],
[
"7b-v0.2-q4_0",
"3.8GB"
],
[
"7b-v0.2-q4_1",
"4.2GB"
],
[
"7b-v0.2-q4_K_M",
"4.1GB"
],
[
"7b-v0.2-q4_K_S",
"3.9GB"
],
[
"7b-v0.2-q5_0",
"4.7GB"
],
[
"7b-v0.2-q5_K_M",
"4.8GB"
],
[
"7b-v0.2-q5_K_S",
"4.7GB"
],
[
"7b-v0.2-q6_K",
"5.5GB"
],
[
"7b-v0.2-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Xwin LM"
},
"granite-code": {
"url": "https://ollama.com/library/granite-code",
"description": "A family of open foundation models by IBM for Code Intelligence",
"tags": [
[
"latest",
"2.0GB"
],
[
"34b",
"19GB"
],
[
"20b",
"12GB"
],
[
"8b",
"4.6GB"
],
[
"3b",
"2.0GB"
],
[
"34b-base",
"19GB"
],
[
"34b-base-f16",
"68GB"
],
[
"34b-base-q2_K",
"13GB"
],
[
"34b-base-q3_K_L",
"20GB"
],
[
"34b-base-q3_K_M",
"18GB"
],
[
"34b-base-q3_K_S",
"15GB"
],
[
"34b-base-q4_0",
"19GB"
],
[
"34b-base-q4_1",
"21GB"
],
[
"34b-base-q4_K_M",
"21GB"
],
[
"34b-base-q4_K_S",
"19GB"
],
[
"34b-base-q5_0",
"23GB"
],
[
"34b-base-q5_1",
"25GB"
],
[
"34b-base-q5_K_M",
"25GB"
],
[
"34b-base-q5_K_S",
"23GB"
],
[
"34b-base-q6_K",
"28GB"
],
[
"34b-base-q8_0",
"36GB"
],
[
"34b-instruct-f16",
"68GB"
],
[
"34b-instruct",
"19GB"
],
[
"34b-instruct-q2_K",
"13GB"
],
[
"34b-instruct-q3_K_L",
"20GB"
],
[
"34b-instruct-q3_K_M",
"18GB"
],
[
"34b-instruct-q3_K_S",
"15GB"
],
[
"34b-instruct-q4_0",
"19GB"
],
[
"34b-instruct-q4_1",
"21GB"
],
[
"34b-instruct-q4_K_M",
"21GB"
],
[
"34b-instruct-q4_K_S",
"19GB"
],
[
"34b-instruct-q5_0",
"23GB"
],
[
"34b-instruct-q5_1",
"25GB"
],
[
"34b-instruct-q5_K_M",
"25GB"
],
[
"34b-instruct-q5_K_S",
"23GB"
],
[
"34b-instruct-q6_K",
"28GB"
],
[
"34b-instruct-q8_0",
"36GB"
],
[
"20b-base",
"12GB"
],
[
"20b-base-f16",
"40GB"
],
[
"20b-base-fp16",
"40GB"
],
[
"20b-base-q2_K",
"7.9GB"
],
[
"20b-base-q3_K_L",
"12GB"
],
[
"20b-base-q3_K_M",
"11GB"
],
[
"20b-base-q3_K_S",
"8.9GB"
],
[
"20b-base-q4_0",
"12GB"
],
[
"20b-base-q4_1",
"13GB"
],
[
"20b-base-q4_K_M",
"13GB"
],
[
"20b-base-q4_K_S",
"12GB"
],
[
"20b-base-q5_0",
"14GB"
],
[
"20b-base-q5_1",
"15GB"
],
[
"20b-base-q5_K_M",
"15GB"
],
[
"20b-base-q5_K_S",
"14GB"
],
[
"20b-base-q6_K",
"17GB"
],
[
"20b-base-q8_0",
"21GB"
],
[
"20b-instruct-f16",
"40GB"
],
[
"20b-instruct",
"12GB"
],
[
"20b-instruct-q2_K",
"7.9GB"
],
[
"20b-instruct-q3_K_L",
"12GB"
],
[
"20b-instruct-q3_K_M",
"11GB"
],
[
"20b-instruct-q3_K_S",
"8.9GB"
],
[
"20b-instruct-q4_0",
"12GB"
],
[
"20b-instruct-q4_1",
"13GB"
],
[
"20b-instruct-q4_K_M",
"13GB"
],
[
"20b-instruct-q4_K_S",
"12GB"
],
[
"20b-instruct-q5_0",
"14GB"
],
[
"20b-instruct-q5_1",
"15GB"
],
[
"20b-instruct-q5_K_M",
"15GB"
],
[
"20b-instruct-q5_K_S",
"14GB"
],
[
"20b-instruct-q6_K",
"17GB"
],
[
"20b-instruct-q8_0",
"21GB"
],
[
"8b-base",
"4.6GB"
],
[
"8b-base-f16",
"16GB"
],
[
"8b-base-fp16",
"16GB"
],
[
"8b-base-q2_K",
"3.1GB"
],
[
"8b-base-q3_K_L",
"4.3GB"
],
[
"8b-base-q3_K_M",
"3.9GB"
],
[
"8b-base-q3_K_S",
"3.5GB"
],
[
"8b-base-q4_0",
"4.6GB"
],
[
"8b-base-q4_1",
"5.1GB"
],
[
"8b-base-q4_K_M",
"4.9GB"
],
[
"8b-base-q4_K_S",
"4.6GB"
],
[
"8b-base-q5_0",
"5.6GB"
],
[
"8b-base-q5_1",
"6.1GB"
],
[
"8b-base-q5_K_M",
"5.7GB"
],
[
"8b-base-q5_K_S",
"5.6GB"
],
[
"8b-base-q6_K",
"6.6GB"
],
[
"8b-base-q8_0",
"8.6GB"
],
[
"8b-instruct-f16",
"16GB"
],
[
"8b-instruct",
"4.6GB"
],
[
"8b-instruct-fp16",
"16GB"
],
[
"8b-instruct-q2_K",
"3.1GB"
],
[
"8b-instruct-q3_K_L",
"4.3GB"
],
[
"8b-instruct-q3_K_M",
"3.9GB"
],
[
"8b-instruct-q3_K_S",
"3.5GB"
],
[
"8b-instruct-q4_0",
"4.6GB"
],
[
"8b-instruct-q4_1",
"5.1GB"
],
[
"8b-instruct-q4_K_M",
"4.9GB"
],
[
"8b-instruct-q4_K_S",
"4.6GB"
],
[
"8b-instruct-q5_0",
"5.6GB"
],
[
"8b-instruct-q5_1",
"6.1GB"
],
[
"8b-instruct-q5_K_M",
"5.7GB"
],
[
"8b-instruct-q5_K_S",
"5.6GB"
],
[
"8b-instruct-q6_K",
"6.6GB"
],
[
"8b-instruct-q8_0",
"8.6GB"
],
[
"3b-base-f16",
"7.0GB"
],
[
"3b-base",
"2.0GB"
],
[
"3b-base-fp16",
"7.0GB"
],
[
"3b-base-q2_K",
"1.3GB"
],
[
"3b-base-q3_K_L",
"1.9GB"
],
[
"3b-base-q3_K_M",
"1.7GB"
],
[
"3b-base-q3_K_S",
"1.6GB"
],
[
"3b-base-q4_0",
"2.0GB"
],
[
"3b-base-q4_1",
"2.2GB"
],
[
"3b-base-q4_K_M",
"2.1GB"
],
[
"3b-base-q4_K_S",
"2.0GB"
],
[
"3b-base-q5_0",
"2.4GB"
],
[
"3b-base-q5_1",
"2.6GB"
],
[
"3b-base-q5_K_M",
"2.5GB"
],
[
"3b-base-q5_K_S",
"2.4GB"
],
[
"3b-base-q6_K",
"2.9GB"
],
[
"3b-base-q8_0",
"3.7GB"
],
[
"3b-instruct-f16",
"7.0GB"
],
[
"3b-instruct",
"2.0GB"
],
[
"3b-instruct-fp16",
"7.0GB"
],
[
"3b-instruct-q2_K",
"1.3GB"
],
[
"3b-instruct-q3_K_L",
"1.9GB"
],
[
"3b-instruct-q3_K_M",
"1.7GB"
],
[
"3b-instruct-q3_K_S",
"1.6GB"
],
[
"3b-instruct-q4_0",
"2.0GB"
],
[
"3b-instruct-q4_1",
"2.2GB"
],
[
"3b-instruct-q4_K_M",
"2.1GB"
],
[
"3b-instruct-q4_K_S",
"2.0GB"
],
[
"3b-instruct-q5_0",
"2.4GB"
],
[
"3b-instruct-q5_1",
"2.6GB"
],
[
"3b-instruct-q5_K_M",
"2.5GB"
],
[
"3b-instruct-q5_K_S",
"2.4GB"
],
[
"3b-instruct-q6_K",
"2.9GB"
],
[
"3b-instruct-q8_0",
"3.7GB"
]
],
"image": false,
"author": "IBM for Code Intelligence"
},
"dolphin-phi": {
"url": "https://ollama.com/library/dolphin-phi",
"description": "2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language model by Microsoft Research.",
"tags": [
[
"latest",
"1.6GB"
],
[
"2.7b",
"1.6GB"
],
[
"2.7b-v2.6",
"1.6GB"
],
[
"2.7b-v2.6-q2_K",
"1.2GB"
],
[
"2.7b-v2.6-q3_K_L",
"1.6GB"
],
[
"2.7b-v2.6-q3_K_M",
"1.5GB"
],
[
"2.7b-v2.6-q3_K_S",
"1.3GB"
],
[
"2.7b-v2.6-q4_0",
"1.6GB"
],
[
"2.7b-v2.6-q4_K_M",
"1.8GB"
],
[
"2.7b-v2.6-q4_K_S",
"1.6GB"
],
[
"2.7b-v2.6-q5_0",
"1.9GB"
],
[
"2.7b-v2.6-q5_K_M",
"2.1GB"
],
[
"2.7b-v2.6-q5_K_S",
"1.9GB"
],
[
"2.7b-v2.6-q6_K",
"2.3GB"
],
[
"2.7b-v2.6-q8_0",
"3.0GB"
]
],
"image": false,
"author": "Eric Hartford"
},
"wizardlm": {
"url": "https://ollama.com/library/wizardlm",
"description": "General use model based on Llama 2.",
"tags": [
[
"70b-llama2-q2_K",
"29GB"
],
[
"70b-llama2-q3_K_L",
"36GB"
],
[
"70b-llama2-q3_K_M",
"33GB"
],
[
"70b-llama2-q3_K_S",
"30GB"
],
[
"70b-llama2-q4_0",
"39GB"
],
[
"70b-llama2-q4_1",
"43GB"
],
[
"70b-llama2-q4_K_M",
"41GB"
],
[
"70b-llama2-q4_K_S",
"39GB"
],
[
"70b-llama2-q5_0",
"47GB"
],
[
"70b-llama2-q5_K_M",
"49GB"
],
[
"70b-llama2-q5_K_S",
"47GB"
],
[
"70b-llama2-q6_K",
"57GB"
],
[
"70b-llama2-q8_0",
"73GB"
],
[
"30b-fp16",
"65GB"
],
[
"30b-q2_K",
"14GB"
],
[
"30b-q3_K_L",
"17GB"
],
[
"30b-q3_K_M",
"16GB"
],
[
"30b-q3_K_S",
"14GB"
],
[
"30b-q4_0",
"18GB"
],
[
"30b-q4_1",
"20GB"
],
[
"30b-q4_K_M",
"20GB"
],
[
"30b-q4_K_S",
"18GB"
],
[
"30b-q5_0",
"22GB"
],
[
"30b-q5_1",
"24GB"
],
[
"30b-q5_K_M",
"23GB"
],
[
"30b-q5_K_S",
"22GB"
],
[
"30b-q6_K",
"27GB"
],
[
"30b-q8_0",
"35GB"
],
[
"13b-llama2-fp16",
"26GB"
],
[
"13b-llama2-q2_K",
"5.4GB"
],
[
"13b-llama2-q3_K_L",
"6.9GB"
],
[
"13b-llama2-q3_K_M",
"6.3GB"
],
[
"13b-llama2-q3_K_S",
"5.7GB"
],
[
"13b-llama2-q4_0",
"7.4GB"
],
[
"13b-llama2-q4_1",
"8.2GB"
],
[
"13b-llama2-q4_K_M",
"7.9GB"
],
[
"13b-llama2-q4_K_S",
"7.4GB"
],
[
"13b-llama2-q5_0",
"9.0GB"
],
[
"13b-llama2-q5_1",
"9.8GB"
],
[
"13b-llama2-q5_K_M",
"9.2GB"
],
[
"13b-llama2-q5_K_S",
"9.0GB"
],
[
"13b-llama2-q6_K",
"11GB"
],
[
"13b-llama2-q8_0",
"14GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "WizardLM Team"
},
"samantha-mistral": {
"url": "https://ollama.com/library/samantha-mistral",
"description": "A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"7b-instruct-fp16",
"14GB"
],
[
"7b-instruct-q2_K",
"3.1GB"
],
[
"7b-instruct-q3_K_L",
"3.8GB"
],
[
"7b-instruct-q3_K_M",
"3.5GB"
],
[
"7b-instruct-q3_K_S",
"3.2GB"
],
[
"7b-instruct-q4_0",
"4.1GB"
],
[
"7b-instruct-q4_1",
"4.6GB"
],
[
"7b-instruct-q4_K_M",
"4.4GB"
],
[
"7b-instruct-q4_K_S",
"4.1GB"
],
[
"7b-instruct-q5_0",
"5.0GB"
],
[
"7b-instruct-q5_1",
"5.4GB"
],
[
"7b-instruct-q5_K_M",
"5.1GB"
],
[
"7b-instruct-q5_K_S",
"5.0GB"
],
[
"7b-instruct-q6_K",
"5.9GB"
],
[
"7b-instruct-q8_0",
"7.7GB"
],
[
"7b-text",
"4.1GB"
],
[
"7b-text-fp16",
"14GB"
],
[
"7b-text-q2_K",
"3.1GB"
],
[
"7b-text-q3_K_L",
"3.8GB"
],
[
"7b-text-q3_K_M",
"3.5GB"
],
[
"7b-text-q3_K_S",
"3.2GB"
],
[
"7b-text-q4_0",
"4.1GB"
],
[
"7b-text-q4_1",
"4.6GB"
],
[
"7b-text-q4_K_M",
"4.4GB"
],
[
"7b-text-q4_K_S",
"4.1GB"
],
[
"7b-text-q5_0",
"5.0GB"
],
[
"7b-text-q5_1",
"5.4GB"
],
[
"7b-text-q5_K_M",
"5.1GB"
],
[
"7b-text-q5_K_S",
"5.0GB"
],
[
"7b-text-q6_K",
"5.9GB"
],
[
"7b-text-q8_0",
"7.7GB"
],
[
"7b-v1.2-text",
"4.1GB"
],
[
"7b-v1.2-text-fp16",
"14GB"
],
[
"7b-v1.2-text-q2_K",
"3.1GB"
],
[
"7b-v1.2-text-q3_K_L",
"3.8GB"
],
[
"7b-v1.2-text-q3_K_M",
"3.5GB"
],
[
"7b-v1.2-text-q3_K_S",
"3.2GB"
],
[
"7b-v1.2-text-q4_0",
"4.1GB"
],
[
"7b-v1.2-text-q4_1",
"4.6GB"
],
[
"7b-v1.2-text-q4_K_M",
"4.4GB"
],
[
"7b-v1.2-text-q4_K_S",
"4.1GB"
],
[
"7b-v1.2-text-q5_0",
"5.0GB"
],
[
"7b-v1.2-text-q5_1",
"5.4GB"
],
[
"7b-v1.2-text-q5_K_M",
"5.1GB"
],
[
"7b-v1.2-text-q5_K_S",
"5.0GB"
],
[
"7b-v1.2-text-q6_K",
"5.9GB"
],
[
"7b-v1.2-text-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Eric Hartford"
},
"stable-beluga": {
"url": "https://ollama.com/library/stable-beluga",
"description": "Llama 2 based model fine tuned on an Orca-style dataset. Originally called Free Willy.",
"tags": [
[
"latest",
"3.8GB"
],
[
"70b",
"39GB"
],
[
"13b",
"7.4GB"
],
[
"7b",
"3.8GB"
],
[
"70b-fp16",
"138GB"
],
[
"70b-q2_K",
"29GB"
],
[
"70b-q3_K_L",
"36GB"
],
[
"70b-q3_K_M",
"33GB"
],
[
"70b-q3_K_S",
"30GB"
],
[
"70b-q4_0",
"39GB"
],
[
"70b-q4_1",
"43GB"
],
[
"70b-q4_K_M",
"41GB"
],
[
"70b-q4_K_S",
"39GB"
],
[
"70b-q5_0",
"47GB"
],
[
"70b-q5_1",
"52GB"
],
[
"70b-q5_K_M",
"49GB"
],
[
"70b-q5_K_S",
"47GB"
],
[
"70b-q6_K",
"57GB"
],
[
"70b-q8_0",
"73GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Stability AI"
},
"bakllava": {
"url": "https://ollama.com/library/bakllava",
"description": "BakLLaVA is a multimodal model consisting of the Mistral 7B base model augmented with the LLaVA architecture.",
"tags": [
[
"latest",
"4.7GB"
],
[
"7b",
"4.7GB"
],
[
"7b-v1-fp16",
"15GB"
],
[
"7b-v1-q2_K",
"3.7GB"
],
[
"7b-v1-q3_K_L",
"4.4GB"
],
[
"7b-v1-q3_K_M",
"4.1GB"
],
[
"7b-v1-q3_K_S",
"3.8GB"
],
[
"7b-v1-q4_0",
"4.7GB"
],
[
"7b-v1-q4_1",
"5.2GB"
],
[
"7b-v1-q4_K_M",
"5.0GB"
],
[
"7b-v1-q4_K_S",
"4.8GB"
],
[
"7b-v1-q5_0",
"5.6GB"
],
[
"7b-v1-q5_1",
"6.1GB"
],
[
"7b-v1-q5_K_M",
"5.8GB"
],
[
"7b-v1-q5_K_S",
"5.6GB"
],
[
"7b-v1-q6_K",
"6.6GB"
],
[
"7b-v1-q8_0",
"8.3GB"
]
],
"image": true,
"author": "Skunkworks AI"
},
"llava-llama3": {
"url": "https://ollama.com/library/llava-llama3",
"description": "A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several benchmarks.",
"tags": [
[
"latest",
"5.5GB"
],
[
"8b",
"5.5GB"
],
[
"8b-v1.1-fp16",
"17GB"
],
[
"8b-v1.1-q4_0",
"5.5GB"
]
],
"image": true,
"author": "Xtuner"
},
"wizardlm-uncensored": {
"url": "https://ollama.com/library/wizardlm-uncensored",
"description": "Uncensored version of Wizard LM model",
"tags": [
[
"latest",
"7.4GB"
],
[
"13b",
"7.4GB"
],
[
"13b-llama2",
"7.4GB"
],
[
"13b-llama2-fp16",
"26GB"
],
[
"13b-llama2-q2_K",
"5.4GB"
],
[
"13b-llama2-q3_K_L",
"6.9GB"
],
[
"13b-llama2-q3_K_M",
"6.3GB"
],
[
"13b-llama2-q3_K_S",
"5.7GB"
],
[
"13b-llama2-q4_0",
"7.4GB"
],
[
"13b-llama2-q4_1",
"8.2GB"
],
[
"13b-llama2-q4_K_M",
"7.9GB"
],
[
"13b-llama2-q4_K_S",
"7.4GB"
],
[
"13b-llama2-q5_0",
"9.0GB"
],
[
"13b-llama2-q5_1",
"9.8GB"
],
[
"13b-llama2-q5_K_M",
"9.2GB"
],
[
"13b-llama2-q5_K_S",
"9.0GB"
],
[
"13b-llama2-q6_K",
"11GB"
],
[
"13b-llama2-q8_0",
"14GB"
]
],
"image": false,
"author": "TheBloke AI"
},
"medllama2": {
"url": "https://ollama.com/library/medllama2",
"description": "Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset.",
"tags": [
[
"latest",
"3.8GB"
],
[
"7b",
"3.8GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "Siraj Raval"
},
"nous-hermes2-mixtral": {
"url": "https://ollama.com/library/nous-hermes2-mixtral",
"description": "The Nous Hermes 2 model from Nous Research, now trained over Mixtral.",
"tags": [
[
"latest",
"26GB"
],
[
"8x7b",
"26GB"
],
[
"dpo",
"26GB"
],
[
"8x7b-dpo-fp16",
"93GB"
],
[
"8x7b-dpo-q2_K",
"16GB"
],
[
"8x7b-dpo-q3_K_L",
"20GB"
],
[
"8x7b-dpo-q3_K_M",
"20GB"
],
[
"8x7b-dpo-q3_K_S",
"20GB"
],
[
"8x7b-dpo-q4_0",
"26GB"
],
[
"8x7b-dpo-q4_1",
"29GB"
],
[
"8x7b-dpo-q4_K_M",
"26GB"
],
[
"8x7b-dpo-q4_K_S",
"26GB"
],
[
"8x7b-dpo-q5_0",
"32GB"
],
[
"8x7b-dpo-q5_1",
"35GB"
],
[
"8x7b-dpo-q5_K_M",
"32GB"
],
[
"8x7b-dpo-q5_K_S",
"32GB"
],
[
"8x7b-dpo-q6_K",
"38GB"
],
[
"8x7b-dpo-q8_0",
"50GB"
]
],
"image": false,
"author": "Nous Research"
},
"yarn-mistral": {
"url": "https://ollama.com/library/yarn-mistral",
"description": "An extension of Mistral to support context windows of 64K or 128K.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"7b-128k",
"4.1GB"
],
[
"7b-128k-fp16",
"14GB"
],
[
"7b-128k-q2_K",
"3.1GB"
],
[
"7b-128k-q3_K_L",
"3.8GB"
],
[
"7b-128k-q3_K_M",
"3.5GB"
],
[
"7b-128k-q3_K_S",
"3.2GB"
],
[
"7b-128k-q4_0",
"4.1GB"
],
[
"7b-128k-q4_1",
"4.6GB"
],
[
"7b-128k-q4_K_M",
"4.4GB"
],
[
"7b-128k-q4_K_S",
"4.1GB"
],
[
"7b-128k-q5_0",
"5.0GB"
],
[
"7b-128k-q5_1",
"5.4GB"
],
[
"7b-128k-q5_K_M",
"5.1GB"
],
[
"7b-128k-q5_K_S",
"5.0GB"
],
[
"7b-128k-q6_K",
"5.9GB"
],
[
"7b-128k-q8_0",
"7.7GB"
],
[
"7b-64k",
"4.1GB"
],
[
"7b-64k-q2_K",
"3.1GB"
],
[
"7b-64k-q3_K_L",
"3.8GB"
],
[
"7b-64k-q3_K_M",
"3.5GB"
],
[
"7b-64k-q3_K_S",
"3.2GB"
],
[
"7b-64k-q4_0",
"4.1GB"
],
[
"7b-64k-q4_1",
"4.6GB"
],
[
"7b-64k-q4_K_M",
"4.4GB"
],
[
"7b-64k-q4_K_S",
"4.1GB"
],
[
"7b-64k-q5_0",
"5.0GB"
],
[
"7b-64k-q5_1",
"5.4GB"
],
[
"7b-64k-q5_K_M",
"5.1GB"
],
[
"7b-64k-q5_K_S",
"5.0GB"
],
[
"7b-64k-q6_K",
"5.9GB"
],
[
"7b-64k-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Nous Research"
},
"snowflake-arctic-embed": {
"url": "https://ollama.com/library/snowflake-arctic-embed",
"description": "A suite of text embedding models by Snowflake, optimized for performance.",
"tags": [
[
"latest",
"669MB"
],
[
"335m",
"669MB"
],
[
"137m",
"274MB"
],
[
"110m",
"219MB"
],
[
"33m",
"67MB"
],
[
"22m",
"46MB"
],
[
"l",
"669MB"
],
[
"m",
"219MB"
],
[
"s",
"67MB"
],
[
"xs",
"46MB"
],
[
"335m-l-fp16",
"669MB"
],
[
"137m-m-long-fp16",
"274MB"
],
[
"110m-m-fp16",
"219MB"
],
[
"33m-s-fp16",
"67MB"
],
[
"22m-xs-fp16",
"46MB"
],
[
"m-long",
"274MB"
]
],
"image": false,
"author": "Snowflake"
},
"llama-pro": {
"url": "https://ollama.com/library/llama-pro",
"description": "An expansion of Llama 2 that specializes in integrating both general language understanding and domain-specific knowledge, particularly in programming and mathematics.",
"tags": [
[
"latest",
"4.7GB"
],
[
"instruct",
"4.7GB"
],
[
"text",
"4.7GB"
],
[
"8b-instruct-fp16",
"17GB"
],
[
"8b-instruct-q2_K",
"3.5GB"
],
[
"8b-instruct-q3_K_L",
"4.5GB"
],
[
"8b-instruct-q3_K_M",
"4.1GB"
],
[
"8b-instruct-q3_K_S",
"3.6GB"
],
[
"8b-instruct-q4_0",
"4.7GB"
],
[
"8b-instruct-q4_1",
"5.3GB"
],
[
"8b-instruct-q4_K_M",
"5.1GB"
],
[
"8b-instruct-q4_K_S",
"4.8GB"
],
[
"8b-instruct-q5_0",
"5.8GB"
],
[
"8b-instruct-q5_1",
"6.3GB"
],
[
"8b-instruct-q5_K_M",
"5.9GB"
],
[
"8b-instruct-q5_K_S",
"5.8GB"
],
[
"8b-instruct-q6_K",
"6.9GB"
],
[
"8b-instruct-q8_0",
"8.9GB"
],
[
"8b-text-fp16",
"17GB"
],
[
"8b-text-q2_K",
"3.5GB"
],
[
"8b-text-q3_K_L",
"4.5GB"
],
[
"8b-text-q3_K_M",
"4.1GB"
],
[
"8b-text-q3_K_S",
"3.6GB"
],
[
"8b-text-q4_0",
"4.7GB"
],
[
"8b-text-q4_1",
"5.3GB"
],
[
"8b-text-q4_K_M",
"5.1GB"
],
[
"8b-text-q4_K_S",
"4.8GB"
],
[
"8b-text-q5_0",
"5.8GB"
],
[
"8b-text-q5_1",
"6.3GB"
],
[
"8b-text-q5_K_M",
"5.9GB"
],
[
"8b-text-q5_K_S",
"5.8GB"
],
[
"8b-text-q6_K",
"6.9GB"
],
[
"8b-text-q8_0",
"8.9GB"
]
],
"image": false,
"author": "Tencent"
},
"codeup": {
"url": "https://ollama.com/library/codeup",
"description": "Great code generation model based on Llama2.",
"tags": [
[
"latest",
"7.4GB"
],
[
"13b",
"7.4GB"
],
[
"13b-llama2",
"7.4GB"
],
[
"13b-llama2-chat",
"7.4GB"
],
[
"13b-llama2-chat-fp16",
"26GB"
],
[
"13b-llama2-chat-q2_K",
"5.4GB"
],
[
"13b-llama2-chat-q3_K_L",
"6.9GB"
],
[
"13b-llama2-chat-q3_K_M",
"6.3GB"
],
[
"13b-llama2-chat-q3_K_S",
"5.7GB"
],
[
"13b-llama2-chat-q4_0",
"7.4GB"
],
[
"13b-llama2-chat-q4_1",
"8.2GB"
],
[
"13b-llama2-chat-q4_K_M",
"7.9GB"
],
[
"13b-llama2-chat-q4_K_S",
"7.4GB"
],
[
"13b-llama2-chat-q5_0",
"9.0GB"
],
[
"13b-llama2-chat-q5_1",
"9.8GB"
],
[
"13b-llama2-chat-q5_K_M",
"9.2GB"
],
[
"13b-llama2-chat-q5_K_S",
"9.0GB"
],
[
"13b-llama2-chat-q6_K",
"11GB"
],
[
"13b-llama2-chat-q8_0",
"14GB"
]
],
"image": false,
"author": "DeepSE"
},
"meditron": {
"url": "https://ollama.com/library/meditron",
"description": "Open-source medical large language model adapted from Llama 2 to the medical domain.",
"tags": [
[
"latest",
"3.8GB"
],
[
"70b",
"39GB"
],
[
"7b",
"3.8GB"
],
[
"70b-q4_0",
"39GB"
],
[
"70b-q4_1",
"43GB"
],
[
"70b-q4_K_S",
"39GB"
],
[
"70b-q5_1",
"52GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.8GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "EPFL LLM Team"
},
"moondream": {
"url": "https://ollama.com/library/moondream",
"description": "moondream2 is a small vision language model designed to run efficiently on edge devices.",
"tags": [
[
"latest",
"1.7GB"
],
[
"1.8b",
"1.7GB"
],
[
"v2",
"1.7GB"
],
[
"1.8b-v2-fp16",
"3.7GB"
],
[
"1.8b-v2-q2_K",
"1.5GB"
],
[
"1.8b-v2-q3_K_L",
"1.7GB"
],
[
"1.8b-v2-q3_K_M",
"1.7GB"
],
[
"1.8b-v2-q3_K_S",
"1.6GB"
],
[
"1.8b-v2-q4_0",
"1.7GB"
],
[
"1.8b-v2-q4_1",
"1.8GB"
],
[
"1.8b-v2-q4_K_M",
"1.8GB"
],
[
"1.8b-v2-q4_K_S",
"1.7GB"
],
[
"1.8b-v2-q5_0",
"1.9GB"
],
[
"1.8b-v2-q5_1",
"2.0GB"
],
[
"1.8b-v2-q5_K_M",
"2.0GB"
],
[
"1.8b-v2-q5_K_S",
"1.9GB"
],
[
"1.8b-v2-q6_K",
"2.1GB"
],
[
"1.8b-v2-q8_0",
"2.4GB"
]
],
"image": true,
"author": "Vikhyatk"
},
"everythinglm": {
"url": "https://ollama.com/library/everythinglm",
"description": "Uncensored Llama2 based model with support for a 16K context window.",
"tags": [
[
"latest",
"7.4GB"
],
[
"13b",
"7.4GB"
],
[
"13b-16k",
"7.4GB"
],
[
"13b-16k-fp16",
"26GB"
],
[
"13b-16k-q2_K",
"5.4GB"
],
[
"13b-16k-q3_K_L",
"6.9GB"
],
[
"13b-16k-q3_K_M",
"6.3GB"
],
[
"13b-16k-q3_K_S",
"5.7GB"
],
[
"13b-16k-q4_0",
"7.4GB"
],
[
"13b-16k-q4_1",
"8.2GB"
],
[
"13b-16k-q4_K_M",
"7.9GB"
],
[
"13b-16k-q4_K_S",
"7.4GB"
],
[
"13b-16k-q5_0",
"9.0GB"
],
[
"13b-16k-q5_1",
"9.8GB"
],
[
"13b-16k-q5_K_M",
"9.2GB"
],
[
"13b-16k-q5_K_S",
"9.0GB"
],
[
"13b-16k-q6_K",
"11GB"
],
[
"13b-16k-q8_0",
"14GB"
]
],
"image": false,
"author": "Totally Not An LLM"
},
"nexusraven": {
"url": "https://ollama.com/library/nexusraven",
"description": "Nexus Raven is a 13B instruction tuned model for function calling tasks.",
"tags": [
[
"latest",
"7.4GB"
],
[
"13b",
"7.4GB"
],
[
"13b-v2-fp16",
"26GB"
],
[
"13b-v2-q2_K",
"5.4GB"
],
[
"13b-v2-q3_K_L",
"6.9GB"
],
[
"13b-v2-q3_K_M",
"6.3GB"
],
[
"13b-v2-q3_K_S",
"5.7GB"
],
[
"13b-v2-q4_0",
"7.4GB"
],
[
"13b-v2-q4_1",
"8.2GB"
],
[
"13b-v2-q4_K_M",
"7.9GB"
],
[
"13b-v2-q4_K_S",
"7.4GB"
],
[
"13b-v2-q5_0",
"9.0GB"
],
[
"13b-v2-q5_1",
"9.8GB"
],
[
"13b-v2-q5_K_M",
"9.2GB"
],
[
"13b-v2-q5_K_S",
"9.0GB"
],
[
"13b-v2-q6_K",
"11GB"
],
[
"13b-v2-q8_0",
"14GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
]
],
"image": false,
"author": "NexusFlow AI"
},
"magicoder": {
"url": "https://ollama.com/library/magicoder",
"description": "\ud83c\udfa9 Magicoder is a family of 7B parameter models trained on 75K synthetic instruction data using OSS-Instruct, a novel approach to enlightening LLMs with open-source code snippets.",
"tags": [
[
"latest",
"3.8GB"
],
[
"7b",
"3.8GB"
],
[
"7b-s-cl",
"3.8GB"
],
[
"7b-s-cl-fp16",
"13GB"
],
[
"7b-s-cl-q2_K",
"2.8GB"
],
[
"7b-s-cl-q3_K_L",
"3.6GB"
],
[
"7b-s-cl-q3_K_M",
"3.3GB"
],
[
"7b-s-cl-q3_K_S",
"2.9GB"
],
[
"7b-s-cl-q4_0",
"3.8GB"
],
[
"7b-s-cl-q4_1",
"4.2GB"
],
[
"7b-s-cl-q4_K_M",
"4.1GB"
],
[
"7b-s-cl-q4_K_S",
"3.9GB"
],
[
"7b-s-cl-q5_0",
"4.7GB"
],
[
"7b-s-cl-q5_1",
"5.1GB"
],
[
"7b-s-cl-q5_K_M",
"4.8GB"
],
[
"7b-s-cl-q5_K_S",
"4.7GB"
],
[
"7b-s-cl-q6_K",
"5.5GB"
],
[
"7b-s-cl-q8_0",
"7.2GB"
]
],
"image": false,
"author": "iSE"
},
"deepseek-v2": {
"url": "https://ollama.com/library/deepseek-v2",
"description": "A strong, economical, and efficient Mixture-of-Experts language model.",
"tags": [
[
"latest",
"8.9GB"
],
[
"236b",
"133GB"
],
[
"16b",
"8.9GB"
],
[
"lite",
"8.9GB"
],
[
"236b-chat-f16",
"472GB"
],
[
"236b-chat-fp16",
"472GB"
],
[
"236b-chat-q2_K",
"86GB"
],
[
"236b-chat-q3_K_L",
"122GB"
],
[
"236b-chat-q3_K_M",
"113GB"
],
[
"236b-chat-q3_K_S",
"102GB"
],
[
"236b-chat-q4_0",
"133GB"
],
[
"236b-chat-q4_1",
"148GB"
],
[
"236b-chat-q4_K_M",
"142GB"
],
[
"236b-chat-q4_K_S",
"134GB"
],
[
"236b-chat-q5_0",
"162GB"
],
[
"236b-chat-q5_1",
"177GB"
],
[
"236b-chat-q5_K_M",
"167GB"
],
[
"236b-chat-q5_K_S",
"162GB"
],
[
"236b-chat-q6_K",
"194GB"
],
[
"236b-chat-q8_0",
"251GB"
],
[
"16b-lite-chat-f16",
"31GB"
],
[
"16b-lite-chat-fp16",
"31GB"
],
[
"16b-lite-chat-q2_K",
"6.4GB"
],
[
"16b-lite-chat-q3_K_L",
"8.5GB"
],
[
"16b-lite-chat-q3_K_M",
"8.1GB"
],
[
"16b-lite-chat-q3_K_S",
"7.5GB"
],
[
"16b-lite-chat-q4_0",
"8.9GB"
],
[
"16b-lite-chat-q4_1",
"9.9GB"
],
[
"16b-lite-chat-q4_K_M",
"10GB"
],
[
"16b-lite-chat-q4_K_S",
"9.5GB"
],
[
"16b-lite-chat-q5_0",
"11GB"
],
[
"16b-lite-chat-q5_1",
"12GB"
],
[
"16b-lite-chat-q5_K_M",
"12GB"
],
[
"16b-lite-chat-q5_K_S",
"11GB"
],
[
"16b-lite-chat-q6_K",
"14GB"
],
[
"16b-lite-chat-q8_0",
"17GB"
]
],
"image": false,
"author": "DeepSeek Team"
},
"stablelm-zephyr": {
"url": "https://ollama.com/library/stablelm-zephyr",
"description": "A lightweight chat model allowing accurate, and responsive output without requiring high-end hardware.",
"tags": [
[
"latest",
"1.6GB"
],
[
"3b",
"1.6GB"
],
[
"3b-fp16",
"5.6GB"
],
[
"3b-q2_K",
"1.2GB"
],
[
"3b-q3_K_L",
"1.5GB"
],
[
"3b-q3_K_M",
"1.4GB"
],
[
"3b-q3_K_S",
"1.3GB"
],
[
"3b-q4_0",
"1.6GB"
],
[
"3b-q4_1",
"1.8GB"
],
[
"3b-q4_K_M",
"1.7GB"
],
[
"3b-q4_K_S",
"1.6GB"
],
[
"3b-q5_0",
"1.9GB"
],
[
"3b-q5_1",
"2.1GB"
],
[
"3b-q5_K_M",
"2.0GB"
],
[
"3b-q5_K_S",
"1.9GB"
],
[
"3b-q6_K",
"2.3GB"
],
[
"3b-q8_0",
"3.0GB"
]
],
"image": false,
"author": "Stability AI"
},
"codebooga": {
"url": "https://ollama.com/library/codebooga",
"description": "A high-performing code instruct model created by merging two existing code models.",
"tags": [
[
"latest",
"19GB"
],
[
"34b",
"19GB"
],
[
"34b-v0.1-fp16",
"67GB"
],
[
"34b-v0.1-q2_K",
"14GB"
],
[
"34b-v0.1-q3_K_L",
"18GB"
],
[
"34b-v0.1-q3_K_M",
"16GB"
],
[
"34b-v0.1-q3_K_S",
"15GB"
],
[
"34b-v0.1-q4_0",
"19GB"
],
[
"34b-v0.1-q4_1",
"21GB"
],
[
"34b-v0.1-q4_K_M",
"20GB"
],
[
"34b-v0.1-q5_0",
"23GB"
],
[
"34b-v0.1-q5_1",
"25GB"
],
[
"34b-v0.1-q5_K_M",
"24GB"
],
[
"34b-v0.1-q5_K_S",
"23GB"
],
[
"34b-v0.1-q6_K",
"28GB"
],
[
"34b-v0.1-q8_0",
"36GB"
]
],
"image": false,
"author": "Oobabooga"
},
"llava-phi3": {
"url": "https://ollama.com/library/llava-phi3",
"description": "A new small LLaVA model fine-tuned from Phi 3 Mini.",
"tags": [
[
"latest",
"2.9GB"
],
[
"3.8b",
"2.9GB"
],
[
"3.8b-mini-fp16",
"8.3GB"
],
[
"3.8b-mini-q4_0",
"2.9GB"
]
],
"image": false,
"author": "Xtuner"
},
"mistrallite": {
"url": "https://ollama.com/library/mistrallite",
"description": "MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"7b-v0.1-fp16",
"14GB"
],
[
"7b-v0.1-q2_K",
"3.1GB"
],
[
"7b-v0.1-q3_K_L",
"3.8GB"
],
[
"7b-v0.1-q3_K_M",
"3.5GB"
],
[
"7b-v0.1-q3_K_S",
"3.2GB"
],
[
"7b-v0.1-q4_0",
"4.1GB"
],
[
"7b-v0.1-q4_1",
"4.6GB"
],
[
"7b-v0.1-q4_K_M",
"4.4GB"
],
[
"7b-v0.1-q4_K_S",
"4.1GB"
],
[
"7b-v0.1-q5_0",
"5.0GB"
],
[
"7b-v0.1-q5_1",
"5.4GB"
],
[
"7b-v0.1-q5_K_M",
"5.1GB"
],
[
"7b-v0.1-q5_K_S",
"5.0GB"
],
[
"7b-v0.1-q6_K",
"5.9GB"
],
[
"7b-v0.1-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Amazon Web Services"
},
"wizard-vicuna": {
"url": "https://ollama.com/library/wizard-vicuna",
"description": "Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj.",
"tags": [
[
"latest",
"7.4GB"
],
[
"13b",
"7.4GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
]
],
"image": false,
"author": "MelodysDreamj"
},
"duckdb-nsql": {
"url": "https://ollama.com/library/duckdb-nsql",
"description": "7B parameter text-to-SQL model made by MotherDuck and Numbers Station.",
"tags": [
[
"latest",
"3.8GB"
],
[
"7b",
"3.8GB"
],
[
"7b-fp16",
"13GB"
],
[
"7b-q2_K",
"2.5GB"
],
[
"7b-q3_K_L",
"3.6GB"
],
[
"7b-q3_K_M",
"3.3GB"
],
[
"7b-q3_K_S",
"2.9GB"
],
[
"7b-q4_0",
"3.8GB"
],
[
"7b-q4_1",
"4.2GB"
],
[
"7b-q4_K_M",
"4.1GB"
],
[
"7b-q4_K_S",
"3.9GB"
],
[
"7b-q5_0",
"4.7GB"
],
[
"7b-q5_1",
"5.1GB"
],
[
"7b-q5_K_M",
"4.8GB"
],
[
"7b-q5_K_S",
"4.7GB"
],
[
"7b-q6_K",
"5.5GB"
],
[
"7b-q8_0",
"7.2GB"
]
],
"image": false,
"author": "MotherDuck, Numbers Station"
},
"goliath": {
"url": "https://ollama.com/library/goliath",
"description": "A language model created by combining two fine-tuned Llama 2 70B models into one.",
"tags": [
[
"latest",
"66GB"
],
[
"120b-fp16",
"236GB"
],
[
"120b-q2_K",
"50GB"
],
[
"120b-q3_K_L",
"62GB"
],
[
"120b-q3_K_M",
"56GB"
],
[
"120b-q3_K_S",
"51GB"
],
[
"120b-q4_0",
"66GB"
],
[
"120b-q4_1",
"74GB"
],
[
"120b-q4_K_M",
"71GB"
],
[
"120b-q4_K_S",
"66GB"
],
[
"120b-q5_0",
"81GB"
],
[
"120b-q5_1",
"88GB"
],
[
"120b-q5_K_M",
"83GB"
],
[
"120b-q5_K_S",
"81GB"
],
[
"120b-q6_K",
"97GB"
],
[
"120b-q8_0",
"125GB"
]
],
"image": false,
"author": "Alpindale"
},
"megadolphin": {
"url": "https://ollama.com/library/megadolphin",
"description": "MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by interleaving the model with itself.",
"tags": [
[
"latest",
"68GB"
],
[
"120b",
"68GB"
],
[
"v2.2",
"68GB"
],
[
"120b-v2.2",
"68GB"
],
[
"120b-v2.2-fp16",
"241GB"
],
[
"120b-v2.2-q2_K",
"51GB"
],
[
"120b-v2.2-q3_K_L",
"63GB"
],
[
"120b-v2.2-q3_K_M",
"58GB"
],
[
"120b-v2.2-q3_K_S",
"52GB"
],
[
"120b-v2.2-q4_0",
"68GB"
],
[
"120b-v2.2-q4_1",
"75GB"
],
[
"120b-v2.2-q4_K_M",
"72GB"
],
[
"120b-v2.2-q4_K_S",
"68GB"
],
[
"120b-v2.2-q5_0",
"83GB"
],
[
"120b-v2.2-q5_1",
"90GB"
],
[
"120b-v2.2-q5_K_M",
"85GB"
],
[
"120b-v2.2-q5_K_S",
"83GB"
],
[
"120b-v2.2-q6_K",
"99GB"
],
[
"120b-v2.2-q8_0",
"128GB"
]
],
"image": false,
"author": "Cognitive Computations"
},
"open-orca-platypus2": {
"url": "https://ollama.com/library/open-orca-platypus2",
"description": "Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. Designed for chat and code generation.",
"tags": [
[
"latest",
"7.4GB"
],
[
"13b",
"7.4GB"
],
[
"13b-fp16",
"26GB"
],
[
"13b-q2_K",
"5.4GB"
],
[
"13b-q3_K_L",
"6.9GB"
],
[
"13b-q3_K_M",
"6.3GB"
],
[
"13b-q3_K_S",
"5.7GB"
],
[
"13b-q4_0",
"7.4GB"
],
[
"13b-q4_1",
"8.2GB"
],
[
"13b-q4_K_M",
"7.9GB"
],
[
"13b-q4_K_S",
"7.4GB"
],
[
"13b-q5_0",
"9.0GB"
],
[
"13b-q5_1",
"9.8GB"
],
[
"13b-q5_K_M",
"9.2GB"
],
[
"13b-q5_K_S",
"9.0GB"
],
[
"13b-q6_K",
"11GB"
],
[
"13b-q8_0",
"14GB"
]
],
"image": false,
"author": "Open Orca"
},
"notux": {
"url": "https://ollama.com/library/notux",
"description": "A top-performing mixture of experts model, fine-tuned with high-quality data.",
"tags": [
[
"latest",
"26GB"
],
[
"8x7b",
"26GB"
],
[
"8x7b-v1",
"26GB"
],
[
"8x7b-v1-fp16",
"93GB"
],
[
"8x7b-v1-q2_K",
"16GB"
],
[
"8x7b-v1-q3_K_L",
"20GB"
],
[
"8x7b-v1-q3_K_M",
"20GB"
],
[
"8x7b-v1-q3_K_S",
"20GB"
],
[
"8x7b-v1-q4_0",
"26GB"
],
[
"8x7b-v1-q4_1",
"29GB"
],
[
"8x7b-v1-q4_K_M",
"26GB"
],
[
"8x7b-v1-q4_K_S",
"26GB"
],
[
"8x7b-v1-q5_0",
"32GB"
],
[
"8x7b-v1-q5_1",
"35GB"
],
[
"8x7b-v1-q5_K_M",
"32GB"
],
[
"8x7b-v1-q5_K_S",
"32GB"
],
[
"8x7b-v1-q6_K",
"38GB"
],
[
"8x7b-v1-q8_0",
"50GB"
]
],
"image": false,
"author": "Argilla"
},
"notus": {
"url": "https://ollama.com/library/notus",
"description": "A 7B chat model fine-tuned with high-quality data and based on Zephyr.",
"tags": [
[
"latest",
"4.1GB"
],
[
"7b",
"4.1GB"
],
[
"7b-v1",
"4.1GB"
],
[
"7b-v1-fp16",
"14GB"
],
[
"7b-v1-q2_K",
"3.1GB"
],
[
"7b-v1-q3_K_L",
"3.8GB"
],
[
"7b-v1-q3_K_M",
"3.5GB"
],
[
"7b-v1-q3_K_S",
"3.2GB"
],
[
"7b-v1-q4_0",
"4.1GB"
],
[
"7b-v1-q4_1",
"4.6GB"
],
[
"7b-v1-q4_K_M",
"4.4GB"
],
[
"7b-v1-q4_K_S",
"4.1GB"
],
[
"7b-v1-q5_0",
"5.0GB"
],
[
"7b-v1-q5_1",
"5.4GB"
],
[
"7b-v1-q5_K_M",
"5.1GB"
],
[
"7b-v1-q5_K_S",
"5.0GB"
],
[
"7b-v1-q6_K",
"5.9GB"
],
[
"7b-v1-q8_0",
"7.7GB"
]
],
"image": false,
"author": "Argilla"
},
"dbrx": {
"url": "https://ollama.com/library/dbrx",
"description": "DBRX is an open, general-purpose LLM created by Databricks.",
"tags": [
[
"latest",
"74GB"
],
[
"132b",
"74GB"
],
[
"instruct",
"74GB"
],
[
"132b-instruct-fp16",
"263GB"
],
[
"132b-instruct-q2_K",
"48GB"
],
[
"132b-instruct-q4_0",
"74GB"
],
[
"132b-instruct-q8_0",
"140GB"
]
],
"image": false,
"author": "Databricks"
},
"falcon2": {
"url": "https://ollama.com/library/falcon2",
"description": "Falcon2 is an 11B parameters causal decoder-only model built by TII and trained over 5T tokens.",
"tags": [
[
"latest",
"6.4GB"
],
[
"11b",
"6.4GB"
],
[
"11b-fp16",
"22GB"
],
[
"11b-q2_K",
"4.3GB"
],
[
"11b-q3_K_L",
"5.8GB"
],
[
"11b-q3_K_M",
"5.4GB"
],
[
"11b-q3_K_S",
"4.9GB"
],
[
"11b-q4_0",
"6.4GB"
],
[
"11b-q4_1",
"7.1GB"
],
[
"11b-q4_K_M",
"6.8GB"
],
[
"11b-q4_K_S",
"6.4GB"
],
[
"11b-q5_0",
"7.7GB"
],
[
"11b-q5_1",
"8.4GB"
],
[
"11b-q5_K_M",
"8.2GB"
],
[
"11b-q5_K_S",
"7.7GB"
],
[
"11b-q6_K",
"9.2GB"
],
[
"11b-q8_0",
"12GB"
]
],
"image": false,
"author": "Technology Innovation Institute"
},
"alfred": {
"url": "https://ollama.com/library/alfred",
"description": "A robust conversational model designed to be used for both chat and instruct use cases.",
"tags": [
[
"latest",
"24GB"
],
[
"40b",
"24GB"
],
[
"40b-1023-q4_0",
"24GB"
],
[
"40b-1023-q4_1",
"26GB"
],
[
"40b-1023-q5_0",
"29GB"
],
[
"40b-1023-q5_1",
"32GB"
],
[
"40b-1023-q8_0",
"44GB"
]
],
"image": false,
"author": "LightOn AI"
}
}