Updated Spanish translation

jeffser 2024-07-07 18:21:10 -06:00
parent 6c851784e2
commit 57fbf397c3

po/es.po: 205 lines changed

@@ -111,6 +111,8 @@ msgid ""
"Removed DOCX compatibility temporally due to error with python-lxml "
"dependency"
msgstr ""
+"Removida compatibilidad con DOCX temporalmente debido a un error con la "
+"dependencia python-lxml"
#: data/com.jeffser.Alpaca.metainfo.xml.in:77
#: data/com.jeffser.Alpaca.metainfo.xml.in:107
@@ -763,11 +765,11 @@ msgstr "Añadido reconocimiento de imagenes (modelo llava)"
#: src/window.py:137
msgid "This video does not have any transcriptions"
-msgstr ""
+msgstr "Este video no tiene transcripciones"
#: src/window.py:138
msgid "This video is not available"
-msgstr ""
+msgstr "Este video no está disponible"
#: src/window.py:141
msgid "Please select a model before chatting"
@@ -834,7 +836,7 @@ msgstr "Ejecutar en el fondo"
#: src/window.py:384
msgid "Stop Creating '{}'"
-msgstr ""
+msgstr "Parar la creación de '{}'"
#: src/window.py:421
#, fuzzy
@@ -858,15 +860,15 @@ msgstr "Enviar Mensaje"
#: src/window.py:661
msgid "Missing Image"
-msgstr ""
+msgstr "Imagen no Encontrada"
#: src/window.py:677
msgid "Missing image"
-msgstr ""
+msgstr "Imagen no encontrada"
#: src/window.py:757
msgid "Remove '{} ({})'"
-msgstr ""
+msgstr "Remover '{} ({})'"
#: src/window.py:969
msgid "Task Complete"
@@ -886,7 +888,7 @@ msgstr "No se pudo descargar el modelo '{}' debido a un error de red"
#: src/window.py:1008
msgid "Stop Pulling '{} ({})'"
-msgstr ""
+msgstr "Parar Descarga de '{} ({})'"
#: src/window.ui:52
msgid "Menu"
@@ -935,7 +937,7 @@ msgstr "URL de la instancia remota"
#: src/window.ui:312
msgid "Bearer Token (Optional)"
-msgstr ""
+msgstr "Bearer Token (Opcional)"
#: src/window.ui:322
#, fuzzy
@@ -1043,21 +1045,23 @@ msgstr "Modelos Destacados"
#: src/window.ui:664
msgid "No Models Found"
-msgstr ""
+msgstr "Ningún modelo encontrado"
#: src/window.ui:665
msgid "Try a different search"
-msgstr ""
+msgstr "Intenta una búsqueda distinta"
#: src/window.ui:708
msgid ""
"By downloading this model you accept the license agreement available on the "
"model's website."
msgstr ""
+"Al descargar este modelo aceptas la licencia disponible en el sitio web del "
+"modelo."
#: src/window.ui:745
msgid "Open with Default App"
-msgstr ""
+msgstr "Abrir con Aplicación Predeterminada"
#: src/window.ui:797
msgid "Previous"
@@ -1195,43 +1199,51 @@ msgstr "Saltar línea"
#: src/available_models_descriptions.py:2
msgid "Google Gemma 2 is now available in 2 sizes, 9B and 27B."
-msgstr ""
+msgstr "Google Gemma 2 ahora está disponible en 2 tamaños, 9B y 27B."
#: src/available_models_descriptions.py:3
msgid "Meta Llama 3: The most capable openly available LLM to date"
-msgstr ""
+msgstr "Meta Llama 3: El LLM abierto más capaz a esta fecha."
#: src/available_models_descriptions.py:4
msgid "Qwen2 is a new series of large language models from Alibaba group"
-msgstr ""
+msgstr "Qwen2 es una nueva serie de LLM del grupo Alibaba."
#: src/available_models_descriptions.py:5
msgid ""
"An open-source Mixture-of-Experts code language model that achieves "
"performance comparable to GPT4-Turbo in code-specific tasks."
msgstr ""
+"Un modelo de lenguaje Mixture-of-Experts abierto que consigue un rendimiento "
+"comparable a GPT4-Turbo en tareas específicas de código."
#: src/available_models_descriptions.py:6
msgid ""
"Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art "
"open models by Microsoft."
msgstr ""
+"Phi-3 es una familia de los últimos modelos livianos de Microsoft, 3B (Mini) "
+"y 14B (Medium)."
#: src/available_models_descriptions.py:7
msgid ""
"Aya 23, released by Cohere, is a new family of state-of-the-art, "
"multilingual models that support 23 languages."
msgstr ""
+"Aya 23, lanzado por Cohere, es una familia de los últimos modelos "
+"multilingües que soportan 23 idiomas."
#: src/available_models_descriptions.py:8
msgid "The 7B model released by Mistral AI, updated to version 0.3."
-msgstr ""
+msgstr "El modelo 7B lanzado por Mistral AI, actualizado a la versión 0.3."
#: src/available_models_descriptions.py:9
msgid ""
"A set of Mixture of Experts (MoE) model with open weights by Mistral AI in "
"8x7b and 8x22b parameter sizes."
msgstr ""
+"Un set de modelos Mixture-of-Experts (MoE) con pesos abiertos por Mistral AI "
+"disponible en tamaños de parámetros 8x7b y 8x22b."
#: src/available_models_descriptions.py:10
msgid ""
@@ -1240,18 +1252,26 @@ msgid ""
"generation, natural language understanding, mathematical reasoning, and "
"instruction following."
msgstr ""
+"CodeGemma es una colección de modelos poderosos y livianos que pueden hacer "
+"una variedad de tareas de código como completado de código "
+"fill-in-the-middle, generación de código, comprensión de lenguaje natural, "
+"razonamiento matemático y seguimiento de instrucciones."
#: src/available_models_descriptions.py:11
msgid ""
"Command R is a Large Language Model optimized for conversational interaction "
"and long context tasks."
msgstr ""
+"Command R es un LLM optimizado para interacciones conversacionales y "
+"tareas que requieren un contexto largo."
#: src/available_models_descriptions.py:12
msgid ""
"Command R+ is a powerful, scalable large language model purpose-built to "
"excel at real-world enterprise use cases."
msgstr ""
+"Command R+ es un LLM poderoso y escalable construido con el propósito de "
+"sobresalir en usos empresariales del mundo real."
#: src/available_models_descriptions.py:13
msgid ""
@@ -1259,194 +1279,262 @@ msgid ""
"a vision encoder and Vicuna for general-purpose visual and language "
"understanding. Updated to version 1.6."
msgstr ""
+"🌋 LLaVA es un nuevo LLM entrenado end-to-end que combina un codificador "
+"visual y Vicuna para entendimiento general en lenguaje y visión. Actualizado "
+"a la versión 1.6."
#: src/available_models_descriptions.py:14
msgid ""
"Gemma is a family of lightweight, state-of-the-art open models built by "
"Google DeepMind. Updated to version 1.1"
msgstr ""
+"Gemma es una familia de nuevos modelos abiertos livianos construidos por "
+"Google DeepMind. Actualizado a la versión 1.1."
#: src/available_models_descriptions.py:15
msgid ""
"Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from "
"0.5B to 110B parameters"
msgstr ""
+"Qwen 1.5 es una serie de LLM por Alibaba Cloud que cubren parámetros entre "
+"0.5B y 110B."
#: src/available_models_descriptions.py:16
msgid ""
"Llama 2 is a collection of foundation language models ranging from 7B to 70B "
"parameters."
msgstr ""
+"Llama 2 es una colección de modelos base que cubren parámetros entre 7B y "
+"70B."
#: src/available_models_descriptions.py:17
msgid ""
"A large language model that can use text prompts to generate and discuss "
"code."
msgstr ""
+"Un LLM que puede usar texto para generar y discutir sobre código."
#: src/available_models_descriptions.py:18
msgid ""
"Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of "
"experts models that excels at coding tasks. Created by Eric Hartford."
msgstr ""
+"Descensurados, 8x7b y 8x22b, modelos afinados basados en una mezcla de "
+"modelos expertos de Mixtral especializados en tareas de código. Creados por "
+"Eric Hartford."
#: src/available_models_descriptions.py:19
msgid "Uncensored Llama 2 model by George Sung and Jarrad Hope."
-msgstr ""
+msgstr "Modelo Llama 2 descensurado por George Sung y Jarrad Hope."
#: src/available_models_descriptions.py:20
msgid ""
"DeepSeek Coder is a capable coding model trained on two trillion code and "
"natural language tokens."
msgstr ""
+"DeepSeek Coder es un modelo especializado en código, entrenado en 2 billones "
+"de tokens de código y lenguaje natural."
#: src/available_models_descriptions.py:21
msgid ""
"A high-performing open embedding model with a large token context window."
msgstr ""
+"Un modelo de integración abierto de alto rendimiento con una gran ventana de "
+"contexto de tokens."
#: src/available_models_descriptions.py:22
msgid ""
"Phi-2: a 2.7B language model by Microsoft Research that demonstrates "
"outstanding reasoning and language understanding capabilities."
msgstr ""
+"Phi-2: un modelo de lenguaje de 2.700 millones de Microsoft Research que "
+"demuestra excelentes capacidades de razonamiento y comprensión del lenguaje."
#: src/available_models_descriptions.py:23
msgid ""
"The uncensored Dolphin model based on Mistral that excels at coding tasks. "
"Updated to version 2.8."
msgstr ""
+"El modelo descensurado Dolphin, basado en Mistral, que sobresale en tareas "
+"de código. Actualizado a la versión 2.8."
#: src/available_models_descriptions.py:24
msgid ""
"Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the "
"Mistral 7B model using the OpenOrca dataset."
msgstr ""
+"Mistral OpenOrca es un modelo de 7 mil millones de parámetros, afinado con "
+"base en el modelo Mistral 7B usando el dataset de OpenOrca."
#: src/available_models_descriptions.py:25
msgid ""
"A general-purpose model ranging from 3 billion parameters to 70 billion, "
"suitable for entry-level hardware."
msgstr ""
+"Un modelo de uso general que oscila entre 3 mil millones y 70 mil millones "
+"de parámetros, adecuado para hardware básico."
#: src/available_models_descriptions.py:26
msgid "State-of-the-art large embedding model from mixedbread.ai"
-msgstr ""
+msgstr "Modelo de integración grande de última generación de Mixedbread.ai"
#: src/available_models_descriptions.py:27
msgid ""
"Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on "
"Llama 3 that has a variety of instruction, conversational, and coding skills."
msgstr ""
+"Dolphin 2.9 es un modelo nuevo con tamaños de 8B y 70B hecho por "
+"Eric Hartford basado en Llama 3, que tiene una variedad de habilidades de "
+"instrucción, conversación y código."
#: src/available_models_descriptions.py:28
msgid ""
"StarCoder2 is the next generation of transparently trained open code LLMs "
"that comes in three sizes: 3B, 7B and 15B parameters."
msgstr ""
+"StarCoder2 es la próxima generación de modelos de lenguaje abiertos "
+"entrenados de manera transparente, que vienen en tres tamaños: 3B, 7B y 15B "
+"parámetros."
#: src/available_models_descriptions.py:29
msgid "Llama 2 based model fine tuned to improve Chinese dialogue ability."
msgstr ""
+"Modelo basado en Llama 2 ajustado para mejorar la capacidad de diálogo en "
+"chino."
#: src/available_models_descriptions.py:30
msgid ""
"Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models "
"that are trained to act as helpful assistants."
msgstr ""
+"Zephyr es una serie de versiones ajustadas de los modelos Mistral y Mixtral "
+"que están entrenados para actuar como asistentes útiles."
#: src/available_models_descriptions.py:31
msgid "Yi 1.5 is a high-performing, bilingual language model."
msgstr ""
+"Yi 1.5 es un modelo de lenguaje bilingüe de alto rendimiento."
#: src/available_models_descriptions.py:32
msgid ""
"The powerful family of models by Nous Research that excels at scientific "
"discussion and coding tasks."
msgstr ""
+"La poderosa familia de modelos de Nous Research que sobresale en discusiones "
+"científicas y tareas de programación."
#: src/available_models_descriptions.py:33
msgid ""
"General use chat model based on Llama and Llama 2 with 2K to 16K context "
"sizes."
msgstr ""
+"Modelo de chat de uso general basado en Llama y Llama 2 con tamaños de "
+"contexto de 2K a 16K."
#: src/available_models_descriptions.py:34
msgid ""
"Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on "
"Llama 2 uncensored by Eric Hartford."
msgstr ""
+"Wizard Vicuna Uncensored es un modelo de 7B, 13B y 30B parámetros basado en "
+"Llama 2 sin censura por Eric Hartford."
#: src/available_models_descriptions.py:35
msgid ""
"The TinyLlama project is an open endeavor to train a compact 1.1B Llama "
"model on 3 trillion tokens."
msgstr ""
+"El proyecto TinyLlama es un esfuerzo abierto para entrenar un modelo "
+"compacto de Llama de 1.1B en 3 billones de tokens."
#: src/available_models_descriptions.py:36
msgid ""
"State of the art large language model from Microsoft AI with improved "
"performance on complex chat, multilingual, reasoning and agent use cases."
msgstr ""
+"Modelo de lenguaje grande de vanguardia de Microsoft AI con rendimiento "
+"mejorado en chat complejo, multilingüe, razonamiento y casos de uso de "
+"agentes."
#: src/available_models_descriptions.py:37
msgid ""
"StarCoder is a code generation model trained on 80+ programming languages."
msgstr ""
+"StarCoder es un modelo de generación de código entrenado en más de 80 "
+"lenguajes de programación."
#: src/available_models_descriptions.py:38
msgid ""
"Codestral is Mistral AIs first-ever code model designed for code generation "
"tasks."
msgstr ""
+"Codestral es el primer modelo de código de Mistral AI diseñado para tareas "
+"de generación de código."
#: src/available_models_descriptions.py:39
msgid ""
"A family of open-source models trained on a wide variety of data, surpassing "
"ChatGPT on various benchmarks. Updated to version 3.5-0106."
msgstr ""
+"Una familia de modelos de código abierto entrenados en una amplia variedad "
+"de datos, superando a ChatGPT en varios benchmarks. Actualizado a la "
+"versión 3.5-0106."
#: src/available_models_descriptions.py:40
msgid ""
"An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset "
"by Eric Hartford and based on TinyLlama."
msgstr ""
+"Un modelo experimental de 1.1B parámetros entrenado en el nuevo conjunto "
+"de datos Dolphin 2.8 por Eric Hartford y basado en TinyLlama."
#: src/available_models_descriptions.py:41
msgid ""
"OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully "
"open datasets."
msgstr ""
+"OpenHermes 2.5 es un modelo de 7B ajustado por Teknium en Mistral con "
+"conjuntos de datos completamente abiertos."
#: src/available_models_descriptions.py:42
msgid "State-of-the-art code generation model"
msgstr ""
+"Modelo de generación de código de vanguardia."
#: src/available_models_descriptions.py:43
msgid ""
"Stable Code 3B is a coding model with instruct and code completion variants "
"on par with models such as Code Llama 7B that are 2.5x larger."
msgstr ""
+"Stable Code 3B es un modelo de codificación con variantes de instrucción y "
+"completado de código a la par con modelos como Code Llama 7B que son 2.5 "
+"veces más grandes."
#: src/available_models_descriptions.py:44
msgid ""
"A fine-tuned model based on Mistral with good coverage of domain and "
"language."
msgstr ""
+"Un modelo ajustado basado en Mistral con buena cobertura de dominio y "
+"lenguaje."
#: src/available_models_descriptions.py:45
msgid "Model focused on math and logic problems"
msgstr ""
+"Modelo enfocado en problemas de matemáticas y lógica."
#: src/available_models_descriptions.py:46
msgid ""
"CodeQwen1.5 is a large language model pretrained on a large amount of code "
"data."
msgstr ""
+"CodeQwen1.5 es un modelo de lenguaje grande preentrenado con una gran "
+"cantidad de datos de código."
#: src/available_models_descriptions.py:47
msgid "Code generation model based on Code Llama."
msgstr ""
+"Modelo de generación de código basado en Code Llama."
#: src/available_models_descriptions.py:48
msgid ""
@@ -1454,32 +1542,44 @@ msgid ""
"trained on multilingual data in English, Spanish, German, Italian, French, "
"Portuguese, and Dutch."
msgstr ""
+"Stable LM 2 es un modelo de lenguaje de vanguardia de 1.6B y 12B parámetros "
+"entrenado en datos multilingües en inglés, español, alemán, italiano, "
+"francés, portugués y neerlandés."
#: src/available_models_descriptions.py:49
msgid ""
"A 7B and 15B uncensored variant of the Dolphin model family that excels at "
"coding, based on StarCoder2."
msgstr ""
+"Una variante sin censura de 7B y 15B de la familia de modelos Dolphin que "
+"sobresale en codificación, basada en StarCoder2."
#: src/available_models_descriptions.py:50
msgid "Embedding models on very large sentence level datasets."
msgstr ""
+"Modelos de incrustación en conjuntos de datos de nivel de oración muy grandes."
#: src/available_models_descriptions.py:51
msgid "General use models based on Llama and Llama 2 from Nous Research."
msgstr ""
+"Modelos de uso general basados en Llama y Llama 2 de Nous Research."
#: src/available_models_descriptions.py:52
msgid ""
"Starling is a large language model trained by reinforcement learning from AI "
"feedback focused on improving chatbot helpfulness."
msgstr ""
+"Starling es un modelo de lenguaje grande entrenado mediante aprendizaje por "
+"refuerzo a partir de retroalimentación de IA enfocado en mejorar la "
+"utilidad de los chatbots."
#: src/available_models_descriptions.py:53
msgid ""
"SQLCoder is a code completion model fined-tuned on StarCoder for SQL "
"generation tasks"
msgstr ""
+"SQLCoder es un modelo de completado de código ajustado en StarCoder para "
+"tareas de generación de SQL."
#: src/available_models_descriptions.py:54
msgid ""
@@ -1487,98 +1587,130 @@ msgid ""
"Meta's Llama 2 models. The model is designed to excel particularly in "
"reasoning."
msgstr ""
+"Orca 2 es construido por Microsoft Research, y es una versión ajustada de "
+"los modelos Llama 2 de Meta. El modelo está diseñado para sobresalir "
+"particularmente en razonamiento."
#: src/available_models_descriptions.py:55
msgid ""
"This model extends LLama-3 8B's context length from 8k to over 1m tokens."
msgstr ""
+"Este modelo extiende la longitud del contexto de LLama-3 8B de 8k a más de "
+"1m tokens."
#: src/available_models_descriptions.py:56
msgid "An advanced language model crafted with 2 trillion bilingual tokens."
msgstr ""
+"Un modelo de lenguaje avanzado creado con 2 billones de tokens bilingües."
#: src/available_models_descriptions.py:57
msgid "An extension of Llama 2 that supports a context of up to 128k tokens."
msgstr ""
+"Una extensión de Llama 2 que soporta un contexto de hasta 128k tokens."
#: src/available_models_descriptions.py:58
msgid ""
"A model from NVIDIA based on Llama 3 that excels at conversational question "
"answering (QA) and retrieval-augmented generation (RAG)."
msgstr ""
+"Un modelo de NVIDIA basado en Llama 3 que sobresale en respuesta a preguntas "
+"conversacionales (QA) y generación aumentada por recuperación (RAG)."
#: src/available_models_descriptions.py:59
msgid ""
"A compact, yet powerful 10.7B large language model designed for single-turn "
"conversation."
msgstr ""
+"Un modelo de lenguaje grande compacto pero poderoso de 10.7B diseñado para "
+"conversación de un solo turno."
#: src/available_models_descriptions.py:60
msgid ""
"Conversational model based on Llama 2 that performs competitively on various "
"benchmarks."
msgstr ""
+"Modelo conversacional basado en Llama 2 que tiene un rendimiento "
+"competitivo en varios benchmarks."
#: src/available_models_descriptions.py:61
msgid "A family of open foundation models by IBM for Code Intelligence"
msgstr ""
+"Una familia de modelos de base abiertos por IBM para Code Intelligence."
#: src/available_models_descriptions.py:62
msgid ""
"2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language "
"model by Microsoft Research."
msgstr ""
+"Modelo Dolphin sin censura de 2.7B por Eric Hartford, basado en el modelo "
+"de lenguaje Phi por Microsoft Research."
#: src/available_models_descriptions.py:63
msgid "General use model based on Llama 2."
msgstr ""
+"Modelo de uso general basado en Llama 2."
#: src/available_models_descriptions.py:64
msgid ""
"A companion assistant trained in philosophy, psychology, and personal "
"relationships. Based on Mistral."
msgstr ""
+"Un asistente compañero entrenado en filosofía, psicología y relaciones "
+"personales. Basado en Mistral."
#: src/available_models_descriptions.py:65
msgid ""
"Llama 2 based model fine tuned on an Orca-style dataset. Originally called "
"Free Willy."
msgstr ""
+"Modelo basado en Llama 2 ajustado en un conjunto de datos estilo Orca. "
+"Originalmente llamado Free Willy."
#: src/available_models_descriptions.py:66
msgid ""
"BakLLaVA is a multimodal model consisting of the Mistral 7B base model "
"augmented with the LLaVA architecture."
msgstr ""
+"BakLLaVA es un modelo multimodal que consiste en el modelo base Mistral 7B "
+"aumentado con la arquitectura LLaVA."
#: src/available_models_descriptions.py:67
msgid ""
"A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several "
"benchmarks."
msgstr ""
+"Un modelo LLaVA ajustado a partir de Llama 3 Instruct con mejores "
+"puntuaciones en varios benchmarks."
#: src/available_models_descriptions.py:68
msgid "Uncensored version of Wizard LM model"
msgstr ""
+"Versión sin censura del modelo Wizard LM."
#: src/available_models_descriptions.py:69
msgid ""
"Fine-tuned Llama 2 model to answer medical questions based on an open source "
"medical dataset."
msgstr ""
+"Modelo Llama 2 ajustado para responder preguntas médicas basado en un "
+"conjunto de datos médicos de código abierto."
#: src/available_models_descriptions.py:70
msgid "The Nous Hermes 2 model from Nous Research, now trained over Mixtral."
msgstr ""
+"El modelo Nous Hermes 2 de Nous Research, ahora entrenado sobre Mixtral."
#: src/available_models_descriptions.py:71
msgid "An extension of Mistral to support context windows of 64K or 128K."
msgstr ""
+"Una extensión de Mistral para soportar ventanas de contexto de 64K o 128K."
#: src/available_models_descriptions.py:72
msgid ""
"A suite of text embedding models by Snowflake, optimized for performance."
msgstr ""
+"Un conjunto de modelos de incrustación de texto por Snowflake, "
+"optimizados para el rendimiento."
#: src/available_models_descriptions.py:73
msgid ""
@@ -1586,31 +1718,42 @@ msgid ""
"language understanding and domain-specific knowledge, particularly in "
"programming and mathematics."
msgstr ""
+"Una expansión de Llama 2 que se especializa en integrar tanto la "
+"comprensión general del lenguaje como el conocimiento específico del "
+"dominio, particularmente en programación y matemáticas."
#: src/available_models_descriptions.py:74
msgid "Great code generation model based on Llama2."
msgstr ""
+"Gran modelo de generación de código basado en Llama2."
#: src/available_models_descriptions.py:75
msgid ""
"Open-source medical large language model adapted from Llama 2 to the medical "
"domain."
msgstr ""
+"Modelo de lenguaje grande médico de código abierto adaptado de Llama 2 "
+"al dominio médico."
#: src/available_models_descriptions.py:76
msgid ""
"moondream2 is a small vision language model designed to run efficiently on "
"edge devices."
msgstr ""
+"moondream2 es un pequeño modelo de lenguaje de visión diseñado para "
+"funcionar eficientemente en dispositivos edge."
#: src/available_models_descriptions.py:77
msgid "Uncensored Llama2 based model with support for a 16K context window."
msgstr ""
+"Modelo sin censura basado en Llama2 con soporte para una ventana de "
+"contexto de 16K."
#: src/available_models_descriptions.py:78
msgid ""
"Nexus Raven is a 13B instruction tuned model for function calling tasks."
msgstr ""
+"Nexus Raven es un modelo ajustado de 13B para tareas de llamada de funciones."
#: src/available_models_descriptions.py:79
msgid ""
@@ -1618,85 +1761,113 @@ msgid ""
"instruction data using OSS-Instruct, a novel approach to enlightening LLMs "
"with open-source code snippets."
msgstr ""
+"🎩 Magicoder es una familia de modelos de 7B parámetros entrenados en 75K "
+"datos de instrucción sintética utilizando OSS-Instruct, un enfoque novedoso "
+"para iluminar a los LLMs con fragmentos de código de código abierto."
#: src/available_models_descriptions.py:80
msgid "A strong, economical, and efficient Mixture-of-Experts language model."
msgstr ""
+"Un modelo de lenguaje Mixture-of-Experts fuerte, económico y eficiente."
#: src/available_models_descriptions.py:81
msgid ""
"A lightweight chat model allowing accurate, and responsive output without "
"requiring high-end hardware."
msgstr ""
+"Un modelo de chat ligero que permite una salida precisa y receptiva sin "
+"requerir hardware de alta gama."
#: src/available_models_descriptions.py:82
msgid ""
"A high-performing code instruct model created by merging two existing code "
"models."
msgstr ""
+"Un modelo de instrucción de código de alto rendimiento creado mediante la "
+"fusión de dos modelos de código existentes."
#: src/available_models_descriptions.py:83
msgid "A new small LLaVA model fine-tuned from Phi 3 Mini."
msgstr ""
+"Un nuevo pequeño modelo LLaVA ajustado a partir de Phi 3 Mini."
#: src/available_models_descriptions.py:84
msgid ""
"MistralLite is a fine-tuned model based on Mistral with enhanced "
"capabilities of processing long contexts."
msgstr ""
+"MistralLite es un modelo ajustado basado en Mistral con capacidades "
+"mejoradas de procesamiento de contextos largos."
#: src/available_models_descriptions.py:85
msgid ""
"Wizard Vicuna is a 13B parameter model based on Llama 2 trained by "
"MelodysDreamj."
msgstr ""
+"Wizard Vicuna es un modelo de 13B parámetros basado en Llama 2 entrenado "
+"por MelodysDreamj."
#: src/available_models_descriptions.py:86
msgid "7B parameter text-to-SQL model made by MotherDuck and Numbers Station."
msgstr ""
+"Modelo de texto a SQL de 7B parámetros hecho por MotherDuck y Numbers Station."
#: src/available_models_descriptions.py:87
msgid ""
"A language model created by combining two fine-tuned Llama 2 70B models into "
"one."
msgstr ""
+"Un modelo de lenguaje creado combinando dos modelos ajustados de Llama 2 "
+"70B en uno."
#: src/available_models_descriptions.py:88
msgid ""
"MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by "
"interleaving the model with itself."
msgstr ""
+"MegaDolphin-2.2-120b es una transformación de Dolphin-2.2-70b creada al "
+"entrelazar el modelo consigo mismo."
#: src/available_models_descriptions.py:89
msgid ""
"Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. "
"Designed for chat and code generation."
msgstr ""
+"Fusión del modelo Open Orca OpenChat y el modelo Garage-bAInd Platypus 2. "
+"Diseñado para chat y generación de código."
#: src/available_models_descriptions.py:90
msgid ""
"A top-performing mixture of experts model, fine-tuned with high-quality data."
msgstr ""
+"Un modelo de mezcla de expertos de alto rendimiento, ajustado con datos "
+"de alta calidad."
#: src/available_models_descriptions.py:91
msgid "A 7B chat model fine-tuned with high-quality data and based on Zephyr."
msgstr ""
+"Un modelo de chat de 7B ajustado con datos de alta calidad y basado en Zephyr."
#: src/available_models_descriptions.py:92
msgid "DBRX is an open, general-purpose LLM created by Databricks."
msgstr ""
+"DBRX es un LLM abierto de propósito general creado por Databricks."
#: src/available_models_descriptions.py:93
msgid ""
"Falcon2 is an 11B parameters causal decoder-only model built by TII and "
"trained over 5T tokens."
msgstr ""
+"Falcon2 es un modelo causal de 11B parámetros solo decodificador construido "
+"por TII y entrenado en más de 5T tokens."
#: src/available_models_descriptions.py:94
msgid ""
"A robust conversational model designed to be used for both chat and instruct "
"use cases."
msgstr ""
+"Un modelo conversacional robusto diseñado para ser utilizado tanto en "
+"casos de uso de chat como de instrucción."
#, fuzzy
#~ msgid "New message from '{}'"