# Spanish translations for Alpaca package.
# Copyright (C) 2024 Jeffser
# This file is distributed under the same license as the Alpaca package.
# Jeffry Samuel Eduarte Rojas <jeffrysamuer@gmail.com>, 2024.
#
msgid ""
msgstr ""
"Project-Id-Version: 1.0.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2024-07-21 18:18-0600\n"
"PO-Revision-Date: 2024-05-19 19:44-0600\n"
"Last-Translator: Jeffry Samuel Eduarte Rojas <jeffrysamuer@gmail.com>\n"
"Language-Team: Spanish\n"
"Language: es\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"

#: data/com.jeffser.Alpaca.desktop.in:3
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:7
|
||
msgid "Alpaca"
|
||
msgstr "Alpaca"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:8
|
||
msgid "Chat with local AI models"
|
||
msgstr "Chatea con modelos de IA"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:10
|
||
msgid "An Ollama client"
|
||
msgstr "Un cliente de Ollama"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:11
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:416
|
||
msgid "Features"
|
||
msgstr "Funcionalidades"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:13
|
||
msgid "Built in Ollama instance"
|
||
msgstr "Instancia de Ollama incluida"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:14
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:418
|
||
msgid "Talk to multiple models in the same conversation"
|
||
msgstr "Habla con multiples modelos en la misma conversación"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:15
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:419
|
||
msgid "Pull and delete models from the app"
|
||
msgstr "Descarga y elimina modelos desde la app"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:16
|
||
msgid "Have multiple conversations"
|
||
msgstr "Multiples conversaciones"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:17
|
||
msgid "Image recognition (Only available with compatible models)"
|
||
msgstr "Reconocimiento de imagenes (Solo disponible con modelos compatibles)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:18
|
||
msgid "Plain text documents recognition"
|
||
msgstr "Reconocimiento de documentos de texto plano"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:19
|
||
msgid "Import and export chats"
|
||
msgstr "Importa y exporta chats"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:21 src/window.ui:883
|
||
msgid "Disclaimer"
|
||
msgstr "Aviso Legal"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:22
msgid ""
"This project is not affiliated at all with Ollama, I'm not responsible for "
"any damages to your device or software caused by running code given by any "
"models."
msgstr ""
"Este proyecto no está afiliado de ninguna manera con Ollama, no soy "
"responsable de ningún daño a tu dispositivo o software causado por ejecutar "
"código proporcionado por cualquier modelo."

#: data/com.jeffser.Alpaca.metainfo.xml.in:25
|
||
msgid "Jeffry Samuel Eduarte Rojas"
|
||
msgstr "Jeffry Samuel Eduarte Rojas"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:51
msgid "A conversation showing code highlight"
msgstr "Una conversación mostrando resaltado de código"

#: data/com.jeffser.Alpaca.metainfo.xml.in:55
|
||
msgid "A conversation involving multiple models"
|
||
msgstr "Una conversación incluyendo multiples modelos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:59
|
||
msgid "Managing models"
|
||
msgstr "Gestionando modelos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:70
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:119
|
||
msgid "Fix"
|
||
msgstr "Arreglo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:71
|
||
msgid ""
|
||
"Removed DOCX compatibility temporally due to error with python-lxml "
|
||
"dependency"
|
||
msgstr ""
|
||
"Removida compatibilidad con DOCX temporalmente debido a un error con la "
|
||
"dependencia python-lxml"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:77
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:107
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:128
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:333
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:390
|
||
msgid "Big Update"
|
||
msgstr "Gran Actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:79
|
||
msgid "Added compatibility for PDF"
|
||
msgstr "Añadida compatibilidad para PDF"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:80
|
||
msgid "Added compatibility for DOCX"
|
||
msgstr "Añadida compatibilidad para DOCX"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:81
|
||
msgid "Merged 'file attachment' menu into one button"
|
||
msgstr "Combinado menu 'subir archivos' en un botón"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:88
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:281
|
||
msgid "Quick Fix"
|
||
msgstr "Arreglo rápido"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:89
msgid ""
"There were some errors when transitioning from the old version of chats to "
"the new version. I apologize if this caused any corruption in your chat "
"history. This should be the only time such a transition is needed."
msgstr ""
"Hubo algunos errores al migrar los chats de la versión antigua a la nueva. "
"Pido disculpas si esto causó alguna corrupción en tu historial de chats. "
"Esta debería ser la única vez que una transición así sea necesaria."

#: data/com.jeffser.Alpaca.metainfo.xml.in:95
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:247
|
||
msgid "Huge Update"
|
||
msgstr "Gran Actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:97
|
||
msgid "Added: Support for plain text files"
|
||
msgstr "Añadido: Soporte para archivos de texto plano"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:98
|
||
msgid "Added: New backend system for storing messages"
|
||
msgstr "Añadido: Nuevo sistema en el backend para guardar mensajes"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:99
|
||
msgid "Added: Support for changing Ollama's overrides"
|
||
msgstr "Añadido: Soporte para cambiar overrides de Ollama"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:100
|
||
msgid "General Optimization"
|
||
msgstr "Optimización general"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:109
|
||
msgid "Added: Support for GGUF models (experimental)"
|
||
msgstr "Añadido: Soporte de modelos GGUF (experimental)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:110
msgid "Added: Support for customization and creation of models"
msgstr "Añadido: Soporte para personalización y creación de modelos"

#: data/com.jeffser.Alpaca.metainfo.xml.in:111
|
||
msgid "Fixed: Icons don't appear on non Gnome systems"
|
||
msgstr "Arreglado: Iconos no se mostraban en sistemas que no usan Gnome"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:112
|
||
msgid "Update Ollama to v0.1.39"
|
||
msgstr "Ollama actualizado a v0.1.39"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:121
|
||
msgid ""
|
||
"Fixed: app didn't open if models tweaks wasn't present in the config files"
|
||
msgstr ""
|
||
"Arreglado: La aplicación no abre si 'models tweaks' no esta presente en los "
|
||
"archivos de configuración"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:130
|
||
msgid "Changed multiple icons (paper airplane for the send button)"
|
||
msgstr ""
|
||
"Multiples iconos cambiados (avion de papel para el boton de enviar mensaje)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:131
|
||
msgid "Combined export / import chat buttons into a menu"
|
||
msgstr "Botones importar / exportar chat combinados en un menu"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:132
|
||
msgid "Added 'model tweaks' (temperature, seed, keep_alive)"
|
||
msgstr "Añadidos ajustes de modelo (temperatura, semilla, mantener vivo)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:133
|
||
msgid "Fixed send / stop button"
|
||
msgstr "Arreglado boton enviar / parar"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:134
|
||
msgid "Fixed app not checking if remote connection works when starting"
|
||
msgstr ""
|
||
"Arreglado: Aplicación no chequea si la conexión remota funciona cuando inicia"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:141
msgid "Daily Update"
msgstr "Actualización Diaria"

#: data/com.jeffser.Alpaca.metainfo.xml.in:143
|
||
msgid "Added text ellipsis to chat name so it doesn't change the button width"
|
||
msgstr ""
|
||
"Añadido elipsis a el nombre del chat para que no afecte el largo del boton"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:144
|
||
msgid "New shortcut for creating a chat (CTRL+N)"
|
||
msgstr "Nuevo atajo de teclado para crear chat (CTRL+N)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:145
|
||
msgid "New message entry design"
|
||
msgstr "Nuevo diseño para el entry de mensaje"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:146
|
||
msgid "Fixed: Can't rename the same chat multiple times"
|
||
msgstr "Arreglado: No se puede renombrar el mismo chat multiples veces"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:153
|
||
msgid "The fix"
|
||
msgstr "Arreglos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:155
|
||
msgid ""
|
||
"Fixed: Ollama instance keeps running on the background even when it is "
|
||
"disabled"
|
||
msgstr ""
|
||
"Arreglado: Instancia de Ollama sigue siendo ejecutada en el fondo aunque sea "
|
||
"desactivada"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:156
|
||
msgid "Fixed: Can't pull models on the integrated instance"
|
||
msgstr "Arreglado: No se puede descargar modelos en la instancia integrada"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:163
msgid "Quick tweaks"
msgstr "Ajustes rápidos"

#: data/com.jeffser.Alpaca.metainfo.xml.in:165
|
||
msgid "Added progress bar to models that are being pulled"
|
||
msgstr "Añadida barra de progreso a modelos que estan siendo descargados"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:166
|
||
msgid "Added size to tags when pulling a model"
|
||
msgstr "Añadido tamaño de tags cuando se descarga un modelo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:167
msgid "General optimizations on the background"
msgstr "Optimizaciones generales en el fondo"

#: data/com.jeffser.Alpaca.metainfo.xml.in:174
|
||
msgid "Quick fixes"
|
||
msgstr "Arreglos rápidos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:176
|
||
msgid "Fixed: Scroll when message is received"
|
||
msgstr "Arreglado: Desplazamiento automatico cuando un mensaje es recibido"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:177
msgid "Fixed: Content doesn't change when creating a new chat"
msgstr "Arreglado: Contenido no cambia cuando se crea un nuevo chat"

#: data/com.jeffser.Alpaca.metainfo.xml.in:178
|
||
msgid "Added 'Featured Models' page on welcome dialog"
|
||
msgstr "Añadida sección 'Modelos Destacados' en el dialogo de bienvenida"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:185
|
||
msgid "Nice Update"
|
||
msgstr "Buena Actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:187
|
||
msgid "UI tweaks (Thanks Nokse22)"
|
||
msgstr "Mejor UI en general (Gracias Nokse22)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:188
|
||
msgid "General optimizations"
|
||
msgstr "Optimización general"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:189
msgid "Metadata fixes"
msgstr "Correcciones de metadatos"

#: data/com.jeffser.Alpaca.metainfo.xml.in:196
|
||
msgid "Quick fix"
|
||
msgstr "Arreglo rápido"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:198
msgid "Updated Spanish translation"
msgstr "Actualizada la traducción a Español"

#: data/com.jeffser.Alpaca.metainfo.xml.in:199
|
||
msgid "Added compatibility for PNG"
|
||
msgstr "Añadida compatibilidad para PNG"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:206
|
||
msgid "New Update"
|
||
msgstr "Nueva Actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:208
|
||
msgid "Updated model list"
|
||
msgstr "Lista de modelos actualizada"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:209
|
||
msgid "Added image recognition to more models"
|
||
msgstr "Añadido reconocimiento de imagenes a más modelos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:210
msgid "Added Brazilian Portuguese translation (Thanks Daimaar Stein)"
msgstr "Añadida traducción a Portugués Brasileño (Gracias Daimaar Stein)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:211
|
||
msgid "Refined the general UI (Thanks Nokse22)"
|
||
msgstr "Mejor UI en general (Gracias Nokse22)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:212
|
||
msgid "Added 'delete message' feature"
|
||
msgstr "Añadida función 'eliminar mensaje'"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:213
|
||
msgid ""
|
||
"Added metadata so that software distributors know that the app is compatible "
|
||
"with mobile"
|
||
msgstr ""
|
||
"Añadida metadata para que distribuidores de software puedan saber que la "
|
||
"aplicación es compatible con celulares"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:214
|
||
msgid ""
|
||
"Changed 'send' shortcut to just the return/enter key (to add a new line use "
|
||
"shift+return)"
|
||
msgstr ""
|
||
"Cambiado el atajo para enviar mensaje a solo la tecla enter (para hacer "
|
||
"salto de linea usa shift+enter)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:221
|
||
msgid "Bug Fixes"
|
||
msgstr "Arreglo de errores"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:223
msgid "Fixed: Minor spelling mistake"
msgstr "Arreglado: Pequeña falta de ortografía"

#: data/com.jeffser.Alpaca.metainfo.xml.in:224
|
||
msgid "Added 'mobile' as a supported form factor"
|
||
msgstr "Añadido soporte para celulares"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:225
|
||
msgid "Fixed: 'Connection Error' dialog not working properly"
|
||
msgstr "Arreglado: Dialogo 'Error de conexión' no funcionando correctamente"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:226
|
||
msgid "Fixed: App might freeze randomly on startup"
|
||
msgstr "Arreglado: Aplicación se congela al azar cuando inicia"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:227
|
||
msgid "Changed 'chats' label on sidebar for 'Alpaca'"
|
||
msgstr "Cambiado label 'chats' en la barra del lado por 'Alpaca'"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:234
|
||
msgid "Cool Update"
|
||
msgstr "Actualización Potente"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:236
|
||
msgid "Better design for chat window"
|
||
msgstr "Mejor diseño para la ventana de chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:237
|
||
msgid "Better design for chat sidebar"
|
||
msgstr "Mejor interfaz para la barra de lado de chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:238
|
||
msgid "Fixed remote connections"
|
||
msgstr "Conexión remota arreglada"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:239
|
||
msgid "Fixed Ollama restarting in loop"
|
||
msgstr "Arreglado, Ollama reiniciandose en bucle"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:240
|
||
msgid "Other cool backend stuff"
|
||
msgstr "Otras cosas geniales en el backend"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:249
|
||
msgid "Added Ollama as part of Alpaca, Ollama will run in a sandbox"
|
||
msgstr "Añadido Ollama como parte de Alpaca, Ollama se ejecutara en un sandbox"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:250
msgid "Added option to connect to remote instances (how it worked before)"
msgstr ""
"Añadida la opcion de conectarse a instancias remotas (como funcionaba antes)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:251
|
||
msgid "Added option to import and export chats"
|
||
msgstr "Añadida la opcion de importar y exportar chats"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:252
|
||
msgid "Added option to run Alpaca with Ollama in the background"
|
||
msgstr "Añadida la opcion de ejecutar Alpaca y Ollama en el fondo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:253
|
||
msgid "Added preferences dialog"
|
||
msgstr "Añadido dialogo de preferencias"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:254
|
||
msgid "Changed the welcome dialog"
|
||
msgstr "Nuevo dialogo de bienvenida"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:256
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:273
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:285
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:304
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:325
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:341
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:357
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:371
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:381
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:399
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:421
|
||
msgid "Please report any errors to the issues page, thank you."
|
||
msgstr "Por favor reporta cualquier error a la página de problemas, gracias."
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:264
msgid "Yet Another Daily Update"
msgstr "Otra Actualización Diaria"

#: data/com.jeffser.Alpaca.metainfo.xml.in:266
|
||
msgid "Added better UI for 'Manage Models' dialog"
|
||
msgstr "Añadida mejor interfaz para el dialogo 'gestión de modelos'"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:267
|
||
msgid "Added better UI for the chat sidebar"
|
||
msgstr "Añadida mejor interfaz para la barra de lado de chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:268
|
||
msgid ""
|
||
"Replaced model description with a button to open Ollama's website for the "
|
||
"model"
|
||
msgstr ""
|
||
"Remplazada la descripción de modelo por un botón para abrir la página web de "
|
||
"Ollama para el modelo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:269
msgid "Added myself to the credits as the spanish translator"
msgstr "Me agregué a los créditos como el traductor a Español"

#: data/com.jeffser.Alpaca.metainfo.xml.in:270
|
||
msgid "Using XDG properly to get config folder"
|
||
msgstr "Usando XDG apropiadamente para obtener el folder de configuración"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:271
|
||
msgid "Update for translations"
|
||
msgstr "Actualización para traducciones"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:283
|
||
msgid "The last update had some mistakes in the description of the update"
|
||
msgstr ""
|
||
"La última actualización tenía unos errores en la descripción de la "
|
||
"actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:293
msgid "Another Daily Update"
msgstr "Otra Actualización Diaria"

#: data/com.jeffser.Alpaca.metainfo.xml.in:295
|
||
msgid "Added full Spanish translation"
|
||
msgstr "Añadida traducción completa a Español"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:296
|
||
msgid "Added support for background pulling of multiple models"
|
||
msgstr "Añadido soporte para descargar multiples modelos en el fondo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:297
|
||
msgid "Added interrupt button"
|
||
msgstr "Añadido botón de interrupción"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:298
|
||
msgid "Added basic shortcuts"
|
||
msgstr "Añadidos atajos de teclado basicos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:299
|
||
msgid "Better translation support"
|
||
msgstr "Mejor soporte para traducciones"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:300
|
||
msgid ""
|
||
"User can now leave chat name empty when creating a new one, it will add a "
|
||
"placeholder name"
|
||
msgstr ""
|
||
"El usuario ahora puede dejar el nombre del chat vacio durante la creación, "
|
||
"la aplicación añadira un placeholder"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:301
|
||
msgid "Better scalling for different window sizes"
|
||
msgstr "Mejor escalado para distintos tamaños de ventana"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:302
msgid "Fixed: Can't close app if first time setup fails"
msgstr ""
"Arreglado: No se puede cerrar la aplicación si falla la configuración inicial"

#: data/com.jeffser.Alpaca.metainfo.xml.in:312
|
||
msgid "Really Big Update"
|
||
msgstr "Actualización Bastante Grande"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:314
|
||
msgid "Added multiple chats support!"
|
||
msgstr "Añadido soporte para multiples chats!"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:315
|
||
msgid "Added Pango Markup support (bold, list, title, subtitle, monospace)"
|
||
msgstr ""
|
||
"Añadido soporte para Pango Markup (negrita, lista, titulo, subtitulo, "
|
||
"monoespaciado)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:316
|
||
msgid "Added autoscroll if the user is at the bottom of the chat"
|
||
msgstr ""
|
||
"Añadido autoscroll si el usuario se encuentra en la parte inferior del chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:317
|
||
msgid "Added support for multiple tags on a single model"
|
||
msgstr "Añadido soporte para multiples etiquetas con un solo modelo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:318
msgid "Added better model management dialog"
msgstr "Añadido mejor dialogo de gestión de modelos"

#: data/com.jeffser.Alpaca.metainfo.xml.in:319
|
||
msgid "Added loading spinner when sending message"
|
||
msgstr "Añadido spinner de carga cuando se envia un mensaje"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:320
|
||
msgid "Added notifications if app is not active and a model pull finishes"
|
||
msgstr ""
|
||
"Añadidas notificaciones si la aplicación no está activa y la descarga de un "
|
||
"modelo finaliza"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:321
|
||
msgid "Added new symbolic icon"
|
||
msgstr "Añadido nuevo icono simbolico"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:322
|
||
msgid "Added frame to message textview widget"
|
||
msgstr "Añadido borde al objeto textview del mensaje"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:323
|
||
msgid "Fixed \"code blocks shouldn't be editable\""
|
||
msgstr "Arreglado \"bloques de codigo no deberían de ser editables\""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:335
|
||
msgid "Added code highlighting"
|
||
msgstr "Añadido resaltado de código"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:336
|
||
msgid "Added image recognition (llava model)"
|
||
msgstr "Añadido reconocimiento de imagenes (modelo llava)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:337
msgid "Added multiline prompt"
msgstr "Añadida caja de texto de multiples lineas"

#: data/com.jeffser.Alpaca.metainfo.xml.in:338
|
||
msgid "Fixed some small bugs"
|
||
msgstr "Arreglados unos pequeños errores"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:339
|
||
msgid "General optimization"
|
||
msgstr "Optimización general"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:349
|
||
msgid "Fixes and features"
|
||
msgstr "Arreglos y funcionalidades"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:351
|
||
msgid "Russian translation (thanks github/alexkdeveloper)"
|
||
msgstr "Traducción a Ruso (gracias github/alexkdeveloper)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:352
|
||
msgid "Fixed: Cannot close app on first setup"
|
||
msgstr "Arreglado: No se puede cerrar la aplicación en el primer setup"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:353
|
||
msgid "Fixed: Brand colors for Flathub"
|
||
msgstr "Arreglado: Colores de marca para Flathub"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:354
|
||
msgid "Fixed: App description"
|
||
msgstr "Arreglado: Descripción de aplicación"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:355
|
||
msgid "Fixed: Only show 'save changes dialog' when you actually change the url"
|
||
msgstr ""
|
||
"Arreglado: Solo mostrar el dialogo 'guardar cambios' cuando se cambia el url"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:365
|
||
msgid "0.2.2 Bug fixes"
|
||
msgstr "0.2.2 Arreglo de errores"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:367
|
||
msgid "Toast messages appearing behind dialogs"
|
||
msgstr "Mensajes toast apareciendo detrás de dialogos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:368
|
||
msgid "Local model list not updating when changing servers"
|
||
msgstr ""
|
||
"Lista de modelos locales no es actualizada cuando se cambia el servidor"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:369
|
||
msgid "Closing the setup dialog closes the whole app"
|
||
msgstr "Cerrar el dialogo de setup cierra toda la aplicación"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:379
|
||
msgid "0.2.1 Data saving fix"
|
||
msgstr "0.2.1 Arreglo en el guardado de datos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:380
msgid ""
"The app didn't save the config files and chat history to the right "
"directory, this is now fixed"
msgstr ""
"La aplicación no guardaba los archivos de configuración ni el historial de "
"chats en el directorio correcto, esto ya ha sido arreglado"

#: data/com.jeffser.Alpaca.metainfo.xml.in:389
|
||
msgid "0.2.0"
|
||
msgstr "0.2.0"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:391
|
||
msgid "New Features"
|
||
msgstr "Nuevas funcionalidades"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:393
|
||
msgid "Restore chat after closing the app"
|
||
msgstr "Restaurar chat despues de cerrar la aplicación"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:394
|
||
msgid "A button to clear the chat"
|
||
msgstr "Un botón para limpiar el chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:395
|
||
msgid "Fixed multiple bugs involving how messages are shown"
|
||
msgstr "Arreglados multiples errores acerca de como los mensajes son mostrados"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:396
|
||
msgid "Added welcome dialog"
|
||
msgstr "Añadido dialogo de bienvenida"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:397
|
||
msgid "More stability"
|
||
msgstr "Más estabilidad"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:407
|
||
msgid "0.1.2 Quick fixes"
|
||
msgstr "0.1.2 Arreglos rápidos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:408
msgid ""
"This release fixes some metadata needed to have a proper Flatpak application"
msgstr ""
"Esta versión arregla metadatos necesarios para tener una aplicación de "
"Flatpak apropiada"

#: data/com.jeffser.Alpaca.metainfo.xml.in:414
msgid "0.1.1 Stable Release"
msgstr "0.1.1 Versión Estable"

#: data/com.jeffser.Alpaca.metainfo.xml.in:415
|
||
msgid "This is the first public version of Alpaca"
|
||
msgstr "Esta es la primera versión publica de Alpaca"
|
||
|
||
#: src/window.py:62 src/window.py:1100 src/window.py:1166 src/dialogs.py:84
|
||
#: src/window.ui:43
|
||
msgid "New Chat"
|
||
msgstr "Nuevo Chat"
|
||
|
||
#: src/window.py:174
msgid "Message edited successfully"
msgstr "Mensaje editado exitosamente"

#: src/window.py:189
|
||
msgid "Please select a model before chatting"
|
||
msgstr "Por favor selecciona un modelo antes de enviar un mensaje"
|
||
|
||
#: src/window.py:260 src/window.py:261
|
||
msgid "Close"
|
||
msgstr "Cerrar"
|
||
|
||
#: src/window.py:263 src/window.py:264 src/window.ui:836
|
||
msgid "Next"
|
||
msgstr "Siguiente"
|
||
|
||
#: src/window.py:299 src/window.py:310
|
||
msgid "Failed to connect to server"
|
||
msgstr "No se pudo conectar al servidor"
|
||
|
||
#: src/window.py:317
|
||
msgid "Pulling in the background..."
|
||
msgstr "Descargando en el fondo..."
|
||
|
||
#: src/window.py:369
|
||
msgid "Stop Creating '{}'"
|
||
msgstr "Parar la creación de '{}'"
|
||
|
||
#: src/window.py:406
msgid "image"
msgstr "imagen"

#: src/window.py:478
|
||
msgid "Message copied to the clipboard"
|
||
msgstr "Mensaje copiado"
|
||
|
||
#: src/window.py:603
|
||
msgid "Remove Message"
|
||
msgstr "Remover Mensaje"
|
||
|
||
#: src/window.py:608 src/window.py:876
|
||
msgid "Copy Message"
|
||
msgstr "Copiar Mensaje"
|
||
|
||
#: src/window.py:613
|
||
msgid "Edit Message"
|
||
msgstr "Editar Mensaje"
|
||
|
||
#: src/window.py:666
|
||
msgid "Missing Image"
|
||
msgstr "Imagen no Encontrada"
|
||
|
||
#: src/window.py:682 src/window.py:684
|
||
msgid "Missing image"
|
||
msgstr "Imagen no Encontrada"
|
||
|
||
#: src/window.py:764
|
||
msgid "Remove '{} ({})'"
|
||
msgstr "Remover '{} ({})'"
|
||
|
||
#: src/window.py:904
|
||
msgid "Code copied to the clipboard"
|
||
msgstr "Codigo copiado"
|
||
|
||
#: src/window.py:985
|
||
msgid "Task Complete"
|
||
msgstr "Tarea completada"
|
||
|
||
#: src/window.py:985 src/window.py:986
msgid "Model '{}' pulled successfully."
msgstr "El modelo '{}' fue descargado exitosamente."

#: src/window.py:990
msgid "Pull Model Error"
msgstr "Error Descargando Modelo"

#: src/window.py:990
msgid "Failed to pull model '{}' due to network error."
msgstr "No se pudo descargar el modelo '{}' debido a un error de red."

#: src/window.py:1022
|
||
msgid "Stop Pulling '{} ({})'"
|
||
msgstr "Parar Descarga de '{} ({})'"
|
||
|
||
#: src/window.py:1065
|
||
msgid "Image Recognition"
|
||
msgstr "Reconocimiento de Imagenes"
|
||
|
||
#: src/window.py:1182
|
||
msgid "Model deleted successfully"
|
||
msgstr "Modelo eliminado exitosamente"
|
||
|
||
#: src/window.py:1260
msgid "There was an error with the local Ollama instance, so it has been reset"
msgstr ""
"Ha ocurrido un error con la instancia local de Ollama, ha sido reiniciada"

#: src/window.py:1280
|
||
msgid "Chat exported successfully"
|
||
msgstr "Chat exportado exitosamente"
|
||
|
||
#: src/window.py:1349
|
||
msgid "Chat imported successfully"
|
||
msgstr "Chat importado exitosamente"
|
||
|
||
#: src/window.py:1382
|
||
msgid "Cannot open image"
|
||
msgstr "No se pudo abrir la imagen"
|
||
|
||
#: src/window.py:1464
|
||
msgid "This video is not available"
|
||
msgstr "Este video no está disponible"
|
||
|
||
#: src/window.py:1482 src/dialogs.py:264
|
||
msgid "Image recognition is only available on specific models"
|
||
msgstr ""
|
||
"Reconocimiento de imagenes esta disponible solamente en modelos compatibles"
|
||
|
||
#: src/available_models_descriptions.py:2
|
||
msgid "Google Gemma 2 is now available in 2 sizes, 9B and 27B."
|
||
msgstr "Google Gemma 2 ahora esta disponible en 2 tamaños, 9B y 27B."
|
||
|
||
#: src/available_models_descriptions.py:3
|
||
msgid "Meta Llama 3: The most capable openly available LLM to date"
|
||
msgstr "Meta Llama 3: El LLM abierto más capaz a esta fecha."
|
||
|
||
#: src/available_models_descriptions.py:4
|
||
msgid "Qwen2 is a new series of large language models from Alibaba group"
|
||
msgstr "Qwen2 es una nueva serie de LLM del grupo Alibaba."
|
||
|
||
#: src/available_models_descriptions.py:5
msgid ""
"An open-source Mixture-of-Experts code language model that achieves "
"performance comparable to GPT4-Turbo in code-specific tasks."
msgstr ""
"Un modelo de lenguaje Mixture-of-Experts de código abierto que consigue un "
"rendimiento comparable a GPT4-Turbo en tareas específicas de código."

#: src/available_models_descriptions.py:6
|
||
msgid ""
|
||
"Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art "
|
||
"open models by Microsoft."
|
||
msgstr ""
|
||
"Phi-3 es una familia de los ultimos modelos livianos de Microsoft, 3B (Mini) "
|
||
"y 14B (Medium)."
|
||
|
||
#: src/available_models_descriptions.py:7
|
||
msgid ""
|
||
"Aya 23, released by Cohere, is a new family of state-of-the-art, "
|
||
"multilingual models that support 23 languages."
|
||
msgstr ""
|
||
"Aya 23, lanzado por Cohere, es una familia de los ultimos modelos "
|
||
"multilingües que soportan 23 lenguajes."
|
||
|
||
#: src/available_models_descriptions.py:8
|
||
msgid "The 7B model released by Mistral AI, updated to version 0.3."
|
||
msgstr "El modelo 7B lanzado por Mistral AI, actualizado a la versión 0.3."
|
||
|
||
#: src/available_models_descriptions.py:9
msgid ""
"A set of Mixture of Experts (MoE) model with open weights by Mistral AI in "
"8x7b and 8x22b parameter sizes."
msgstr ""
"Un set de modelos Mixture-of-Experts (MoE) con pesos abiertos por Mistral AI "
"disponible en tamaños de parámetros 8x7b y 8x22b."

#: src/available_models_descriptions.py:10
msgid ""
"CodeGemma is a collection of powerful, lightweight models that can perform a "
"variety of coding tasks like fill-in-the-middle code completion, code "
"generation, natural language understanding, mathematical reasoning, and "
"instruction following."
msgstr ""
"CodeGemma es una colección de modelos potentes y livianos que pueden "
"realizar una variedad de tareas de código como completado de código tipo "
"fill-in-the-middle, generación de código, comprensión de lenguaje natural, "
"razonamiento matemático y seguimiento de instrucciones."

#: src/available_models_descriptions.py:11
|
||
msgid ""
|
||
"Command R is a Large Language Model optimized for conversational interaction "
|
||
"and long context tasks."
|
||
msgstr ""
|
||
"Command R es un LLM optimizado para interacciones conversacionales y tareas "
|
||
"que requieren un contexto largo."
|
||
|
||
#: src/available_models_descriptions.py:12
|
||
msgid ""
|
||
"Command R+ is a powerful, scalable large language model purpose-built to "
|
||
"excel at real-world enterprise use cases."
|
||
msgstr ""
|
||
"Command R+ es un poderoso, escalable LLM construido con el proposito de "
|
||
"sobresalir en usos profesionales del mundo real."
|
||
|
||
#: src/available_models_descriptions.py:13
msgid ""
"🌋 LLaVA is a novel end-to-end trained large multimodal model that combines "
"a vision encoder and Vicuna for general-purpose visual and language "
"understanding. Updated to version 1.6."
msgstr ""
"🌋 LLaVA es un novedoso modelo multimodal grande entrenado de extremo a "
"extremo que combina un codificador de visión y Vicuna para entendimiento "
"general en lenguaje y visión. Actualizado a la versión 1.6."

#: src/available_models_descriptions.py:14
|
||
msgid ""
|
||
"Gemma is a family of lightweight, state-of-the-art open models built by "
|
||
"Google DeepMind. Updated to version 1.1"
|
||
msgstr ""
|
||
"Gemma es una familia de nuevos modelos abiertos livianos construidos por "
|
||
"Google DeepMind. Actualizado a la versión 1.1."
|
||
|
||
#: src/available_models_descriptions.py:15
|
||
msgid ""
|
||
"Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from "
|
||
"0.5B to 110B parameters"
|
||
msgstr ""
|
||
"Qwen 1.5 es una serie de LLM por Alibaba Cloud que cubren parametros entre "
|
||
"0.5B hasta 110B."
|
||
|
||
#: src/available_models_descriptions.py:16
|
||
msgid ""
|
||
"Llama 2 is a collection of foundation language models ranging from 7B to 70B "
|
||
"parameters."
|
||
msgstr ""
|
||
"Llama 2 es una colección de modelos bases que cubren parametros entre 7B y "
|
||
"70B."
|
||
|
||
#: src/available_models_descriptions.py:17
|
||
msgid ""
|
||
"A large language model that can use text prompts to generate and discuss "
|
||
"code."
|
||
msgstr "Un LLM que puede usar texto para generar y discutir sobre codigo."
|
||
|
||
#: src/available_models_descriptions.py:18
msgid ""
"Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of "
"experts models that excels at coding tasks. Created by Eric Hartford."
msgstr ""
"Modelos sin censura de 8x7b y 8x22b, afinados con base en los modelos "
"Mixture-of-Experts de Mixtral, que sobresalen en tareas de código. Creados "
"por Eric Hartford."

#: src/available_models_descriptions.py:19
|
||
msgid "Uncensored Llama 2 model by George Sung and Jarrad Hope."
|
||
msgstr "Modelo Llama 2 descensurado por George Sung y Jarrad Hope."
|
||
|
||
#: src/available_models_descriptions.py:20
msgid ""
"DeepSeek Coder is a capable coding model trained on two trillion code and "
"natural language tokens."
msgstr ""
"DeepSeek Coder es un modelo especializado en código, entrenado en dos "
"billones de tokens de código y lenguaje natural."

#: src/available_models_descriptions.py:21
msgid ""
"A high-performing open embedding model with a large token context window."
msgstr ""
"Un modelo de incrustación abierto de alto rendimiento con una gran ventana "
"de contexto de tokens."

#: src/available_models_descriptions.py:22
|
||
msgid ""
|
||
"Phi-2: a 2.7B language model by Microsoft Research that demonstrates "
|
||
"outstanding reasoning and language understanding capabilities."
|
||
msgstr ""
|
||
"Phi-2: un modelo de lenguaje de 2.700 millones de Microsoft Research que "
|
||
"demuestra excelentes capacidades de razonamiento y comprensión del lenguaje."
|
||
|
||
#: src/available_models_descriptions.py:23
|
||
msgid ""
|
||
"The uncensored Dolphin model based on Mistral that excels at coding tasks. "
|
||
"Updated to version 2.8."
|
||
msgstr ""
|
||
"El modelo descensurado Dolphin, basado en Mistral que sobresale en tareas de "
|
||
"codigo. Actualizado a la versión 2.8."
|
||
|
||
#: src/available_models_descriptions.py:24
msgid ""
"Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the "
"Mistral 7B model using the OpenOrca dataset."
msgstr ""
"Mistral OpenOrca es un modelo de 7 mil millones de parámetros, afinado con "
"base en el modelo Mistral 7B usando el dataset de OpenOrca."

#: src/available_models_descriptions.py:25
msgid ""
"A general-purpose model ranging from 3 billion parameters to 70 billion, "
"suitable for entry-level hardware."
msgstr ""
"Un modelo de uso general que va desde 3 mil millones hasta 70 mil millones "
"de parámetros, adecuado para hardware básico."

#: src/available_models_descriptions.py:26
msgid "State-of-the-art large embedding model from mixedbread.ai"
msgstr "Modelo de incrustación grande de última generación de mixedbread.ai"

#: src/available_models_descriptions.py:27
msgid ""
"Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on "
"Llama 3 that has a variety of instruction, conversational, and coding skills."
msgstr ""
"Dolphin 2.9 es un modelo nuevo con tamaños de 8B y 70B hecho por Eric "
"Hartford, basado en Llama 3, que tiene una variedad de habilidades de "
"instrucción, conversacionales y de código."

#: src/available_models_descriptions.py:28
|
||
msgid ""
|
||
"StarCoder2 is the next generation of transparently trained open code LLMs "
|
||
"that comes in three sizes: 3B, 7B and 15B parameters."
|
||
msgstr ""
|
||
"StarCoder2 es la próxima generación de modelos de lenguaje abiertos "
|
||
"entrenados de manera transparente, que vienen en tres tamaños: 3B, 7B y 15B "
|
||
"parámetros."
|
||
|
||
#: src/available_models_descriptions.py:29
|
||
msgid "Llama 2 based model fine tuned to improve Chinese dialogue ability."
|
||
msgstr ""
|
||
"Modelo basado en Llama 2 ajustado para mejorar la capacidad de diálogo en "
|
||
"chino."
|
||
|
||
#: src/available_models_descriptions.py:30
|
||
msgid ""
|
||
"Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models "
|
||
"that are trained to act as helpful assistants."
|
||
msgstr ""
|
||
"Zephyr es una serie de versiones ajustadas de los modelos Mistral y Mixtral "
|
||
"que están entrenados para actuar como asistentes útiles."
|
||
|
||
#: src/available_models_descriptions.py:31
|
||
msgid "Yi 1.5 is a high-performing, bilingual language model."
|
||
msgstr "Yi 1.5 es un modelo de lenguaje bilingüe de alto rendimiento."
|
||
|
||
#: src/available_models_descriptions.py:32
|
||
msgid ""
|
||
"The powerful family of models by Nous Research that excels at scientific "
|
||
"discussion and coding tasks."
|
||
msgstr ""
|
||
"La poderosa familia de modelos de Nous Research que sobresale en discusiones "
|
||
"científicas y tareas de programación."
|
||
|
||
#: src/available_models_descriptions.py:33
|
||
msgid ""
|
||
"General use chat model based on Llama and Llama 2 with 2K to 16K context "
|
||
"sizes."
|
||
msgstr ""
|
||
"Modelo de chat de uso general basado en Llama y Llama 2 con tamaños de "
|
||
"contexto de 2K a 16K."
|
||
|
||
#: src/available_models_descriptions.py:34
|
||
msgid ""
|
||
"Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on "
|
||
"Llama 2 uncensored by Eric Hartford."
|
||
msgstr ""
|
||
"Wizard Vicuna Uncensored es un modelo de 7B, 13B y 30B parámetros basado en "
|
||
"Llama 2 sin censura por Eric Hartford."
|
||
|
||
#: src/available_models_descriptions.py:35
|
||
msgid ""
|
||
"The TinyLlama project is an open endeavor to train a compact 1.1B Llama "
|
||
"model on 3 trillion tokens."
|
||
msgstr ""
|
||
"El proyecto TinyLlama es un esfuerzo abierto para entrenar un modelo "
|
||
"compacto de Llama de 1.1B en 3 billones de tokens."
|
||
|
||
#: src/available_models_descriptions.py:36
|
||
msgid ""
|
||
"State of the art large language model from Microsoft AI with improved "
|
||
"performance on complex chat, multilingual, reasoning and agent use cases."
|
||
msgstr ""
|
||
"Modelo de lenguaje grande de vanguardia de Microsoft AI con rendimiento "
|
||
"mejorado en chat complejo, multilingüe, razonamiento y casos de uso de "
|
||
"agentes."
|
||
|
||
#: src/available_models_descriptions.py:37
|
||
msgid ""
|
||
"StarCoder is a code generation model trained on 80+ programming languages."
|
||
msgstr ""
|
||
"StarCoder es un modelo de generación de código entrenado en más de 80 "
|
||
"lenguajes de programación."
|
||
|
||
#: src/available_models_descriptions.py:38
|
||
msgid ""
|
||
"Codestral is Mistral AI’s first-ever code model designed for code generation "
|
||
"tasks."
|
||
msgstr ""
|
||
"Codestral es el primer modelo de código de Mistral AI diseñado para tareas "
|
||
"de generación de código."
|
||
|
||
#: src/available_models_descriptions.py:39
|
||
msgid ""
|
||
"A family of open-source models trained on a wide variety of data, surpassing "
|
||
"ChatGPT on various benchmarks. Updated to version 3.5-0106."
|
||
msgstr ""
|
||
"Una familia de modelos de código abierto entrenados en una amplia variedad "
|
||
"de datos, superando a ChatGPT en varios benchmarks. Actualizado a la versión "
|
||
"3.5-0106."
|
||
|
||
#: src/available_models_descriptions.py:40
|
||
msgid ""
|
||
"An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset "
|
||
"by Eric Hartford and based on TinyLlama."
|
||
msgstr ""
|
||
"Un modelo experimental de 1.1B parámetros entrenado en el nuevo conjunto de "
|
||
"datos Dolphin 2.8 por Eric Hartford y basado en TinyLlama."
|
||
|
||
#: src/available_models_descriptions.py:41
|
||
msgid ""
|
||
"OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully "
|
||
"open datasets."
|
||
msgstr ""
|
||
"OpenHermes 2.5 es un modelo de 7B ajustado por Teknium en Mistral con "
|
||
"conjuntos de datos completamente abiertos."
|
||
|
||
#: src/available_models_descriptions.py:42
|
||
msgid "State-of-the-art code generation model"
|
||
msgstr "Modelo de generación de código de vanguardia."
|
||
|
||
#: src/available_models_descriptions.py:43
|
||
msgid ""
|
||
"Stable Code 3B is a coding model with instruct and code completion variants "
|
||
"on par with models such as Code Llama 7B that are 2.5x larger."
|
||
msgstr ""
|
||
"Stable Code 3B es un modelo de codificación con variantes de instrucción y "
|
||
"completado de código a la par con modelos como Code Llama 7B que son 2.5 "
|
||
"veces más grandes."
|
||
|
||
#: src/available_models_descriptions.py:44
|
||
msgid ""
|
||
"A fine-tuned model based on Mistral with good coverage of domain and "
|
||
"language."
|
||
msgstr ""
|
||
"Un modelo ajustado basado en Mistral con buena cobertura de dominio y "
|
||
"lenguaje."
|
||
|
||
#: src/available_models_descriptions.py:45
|
||
msgid "Model focused on math and logic problems"
|
||
msgstr "Modelo enfocado en problemas de matemáticas y lógica."
|
||
|
||
#: src/available_models_descriptions.py:46
|
||
msgid ""
|
||
"CodeQwen1.5 is a large language model pretrained on a large amount of code "
|
||
"data."
|
||
msgstr ""
|
||
"CodeQwen1.5 es un modelo de lenguaje grande preentrenado con una gran "
|
||
"cantidad de datos de código."
|
||
|
||
#: src/available_models_descriptions.py:47
|
||
msgid "Code generation model based on Code Llama."
|
||
msgstr "Modelo de generación de código basado en Code Llama."
|
||
|
||
#: src/available_models_descriptions.py:48
|
||
msgid ""
|
||
"Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model "
|
||
"trained on multilingual data in English, Spanish, German, Italian, French, "
|
||
"Portuguese, and Dutch."
|
||
msgstr ""
|
||
"Stable LM 2 es un modelo de lenguaje de vanguardia de 1.6B y 12B parámetros "
|
||
"entrenado en datos multilingües en inglés, español, alemán, italiano, "
|
||
"francés, portugués y neerlandés."
|
||
|
||
#: src/available_models_descriptions.py:49
|
||
msgid ""
|
||
"A 7B and 15B uncensored variant of the Dolphin model family that excels at "
|
||
"coding, based on StarCoder2."
|
||
msgstr ""
|
||
"Una variante sin censura de 7B y 15B de la familia de modelos Dolphin que "
|
||
"sobresale en codificación, basada en StarCoder2."
|
||
|
||
#: src/available_models_descriptions.py:50
|
||
msgid "Embedding models on very large sentence level datasets."
|
||
msgstr ""
|
||
"Modelos de incrustación en conjuntos de datos de nivel de oración muy "
|
||
"grandes."
|
||
|
||
#: src/available_models_descriptions.py:51
|
||
msgid "General use models based on Llama and Llama 2 from Nous Research."
|
||
msgstr "Modelos de uso general basados en Llama y Llama 2 de Nous Research."
|
||
|
||
#: src/available_models_descriptions.py:52
|
||
msgid ""
|
||
"Starling is a large language model trained by reinforcement learning from AI "
|
||
"feedback focused on improving chatbot helpfulness."
|
||
msgstr ""
|
||
"Starling es un modelo de lenguaje grande entrenado mediante aprendizaje por "
|
||
"refuerzo a partir de retroalimentación de IA enfocado en mejorar la utilidad "
|
||
"de los chatbots."
|
||
|
||
#: src/available_models_descriptions.py:53
|
||
msgid ""
|
||
"SQLCoder is a code completion model fined-tuned on StarCoder for SQL "
|
||
"generation tasks"
|
||
msgstr ""
|
||
"SQLCoder es un modelo de completado de código ajustado en StarCoder para "
|
||
"tareas de generación de SQL."
|
||
|
||
#: src/available_models_descriptions.py:54
|
||
msgid ""
|
||
"Orca 2 is built by Microsoft research, and are a fine-tuned version of "
|
||
"Meta's Llama 2 models. The model is designed to excel particularly in "
|
||
"reasoning."
|
||
msgstr ""
|
||
"Orca 2 es construido por Microsoft Research, y es una versión ajustada de "
|
||
"los modelos Llama 2 de Meta. El modelo está diseñado para sobresalir "
|
||
"particularmente en razonamiento."
|
||
|
||
#: src/available_models_descriptions.py:55
|
||
msgid ""
|
||
"This model extends LLama-3 8B's context length from 8k to over 1m tokens."
|
||
msgstr ""
|
||
"Este modelo extiende la longitud del contexto de LLama-3 8B de 8k a más de "
|
||
"1m tokens."
|
||
|
||
#: src/available_models_descriptions.py:56
|
||
msgid "An advanced language model crafted with 2 trillion bilingual tokens."
|
||
msgstr ""
|
||
"Un modelo de lenguaje avanzado creado con 2 billones de tokens bilingües."
|
||
|
||
#: src/available_models_descriptions.py:57
|
||
msgid "An extension of Llama 2 that supports a context of up to 128k tokens."
|
||
msgstr "Una extensión de Llama 2 que soporta un contexto de hasta 128k tokens."
|
||
|
||
#: src/available_models_descriptions.py:58
|
||
msgid ""
|
||
"A model from NVIDIA based on Llama 3 that excels at conversational question "
|
||
"answering (QA) and retrieval-augmented generation (RAG)."
|
||
msgstr ""
|
||
"Un modelo de NVIDIA basado en Llama 3 que sobresale en respuesta a preguntas "
|
||
"conversacionales (QA) y generación aumentada por recuperación (RAG)."
|
||
|
||
#: src/available_models_descriptions.py:59
|
||
msgid ""
|
||
"A compact, yet powerful 10.7B large language model designed for single-turn "
|
||
"conversation."
|
||
msgstr ""
|
||
"Un modelo de lenguaje grande compacto pero poderoso de 10.7B diseñado para "
|
||
"conversación de un solo turno."
|
||
|
||
#: src/available_models_descriptions.py:60
|
||
msgid ""
|
||
"Conversational model based on Llama 2 that performs competitively on various "
|
||
"benchmarks."
|
||
msgstr ""
|
||
"Modelo conversacional basado en Llama 2 que tiene un rendimiento competitivo "
|
||
"en varios benchmarks."
|
||
|
||
#: src/available_models_descriptions.py:61
|
||
msgid "A family of open foundation models by IBM for Code Intelligence"
|
||
msgstr ""
|
||
"Una familia de modelos de base abiertos por IBM para Code Intelligence."
|
||
|
||
#: src/available_models_descriptions.py:62
|
||
msgid ""
|
||
"2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language "
|
||
"model by Microsoft Research."
|
||
msgstr ""
|
||
"Modelo Dolphin sin censura de 2.7B por Eric Hartford, basado en el modelo de "
|
||
"lenguaje Phi por Microsoft Research."
|
||
|
||
#: src/available_models_descriptions.py:63
|
||
msgid "General use model based on Llama 2."
|
||
msgstr "Modelo de uso general basado en Llama 2."
|
||
|
||
#: src/available_models_descriptions.py:64
|
||
msgid ""
|
||
"A companion assistant trained in philosophy, psychology, and personal "
|
||
"relationships. Based on Mistral."
|
||
msgstr ""
|
||
"Un asistente compañero entrenado en filosofía, psicología y relaciones "
|
||
"personales. Basado en Mistral."
|
||
|
||
#: src/available_models_descriptions.py:65
|
||
msgid ""
|
||
"Llama 2 based model fine tuned on an Orca-style dataset. Originally called "
|
||
"Free Willy."
|
||
msgstr ""
|
||
"Modelo basado en Llama 2 ajustado en un conjunto de datos estilo Orca. "
|
||
"Originalmente llamado Free Willy."
|
||
|
||
#: src/available_models_descriptions.py:66
|
||
msgid ""
|
||
"BakLLaVA is a multimodal model consisting of the Mistral 7B base model "
|
||
"augmented with the LLaVA architecture."
|
||
msgstr ""
|
||
"BakLLaVA es un modelo multimodal que consiste en el modelo base Mistral 7B "
|
||
"aumentado con la arquitectura LLaVA."
|
||
|
||
#: src/available_models_descriptions.py:67
|
||
msgid ""
|
||
"A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several "
|
||
"benchmarks."
|
||
msgstr ""
|
||
"Un modelo LLaVA ajustado a partir de Llama 3 Instruct con mejores "
|
||
"puntuaciones en varios benchmarks."
|
||
|
||
#: src/available_models_descriptions.py:68
|
||
msgid "Uncensored version of Wizard LM model"
|
||
msgstr "Versión sin censura del modelo Wizard LM."
|
||
|
||
#: src/available_models_descriptions.py:69
|
||
msgid ""
|
||
"Fine-tuned Llama 2 model to answer medical questions based on an open source "
|
||
"medical dataset."
|
||
msgstr ""
|
||
"Modelo Llama 2 ajustado para responder preguntas médicas basado en un "
|
||
"conjunto de datos médicos de código abierto."
|
||
|
||
#: src/available_models_descriptions.py:70
|
||
msgid "The Nous Hermes 2 model from Nous Research, now trained over Mixtral."
|
||
msgstr ""
|
||
"El modelo Nous Hermes 2 de Nous Research, ahora entrenado sobre Mixtral."
|
||
|
||
#: src/available_models_descriptions.py:71
|
||
msgid "An extension of Mistral to support context windows of 64K or 128K."
|
||
msgstr ""
|
||
"Una extensión de Mistral para soportar ventanas de contexto de 64K o 128K."
|
||
|
||
#: src/available_models_descriptions.py:72
|
||
msgid ""
|
||
"A suite of text embedding models by Snowflake, optimized for performance."
|
||
msgstr ""
|
||
"Un conjunto de modelos de incrustación de texto por Snowflake, optimizados "
|
||
"para el rendimiento."
|
||
|
||
#: src/available_models_descriptions.py:73
|
||
msgid ""
|
||
"An expansion of Llama 2 that specializes in integrating both general "
|
||
"language understanding and domain-specific knowledge, particularly in "
|
||
"programming and mathematics."
|
||
msgstr ""
|
||
"Una expansión de Llama 2 que se especializa en integrar tanto la comprensión "
|
||
"general del lenguaje como el conocimiento específico del dominio, "
|
||
"particularmente en programación y matemáticas."
|
||
|
||
#: src/available_models_descriptions.py:74
|
||
msgid "Great code generation model based on Llama2."
|
||
msgstr "Gran modelo de generación de código basado en Llama2."
|
||
|
||
#: src/available_models_descriptions.py:75
|
||
msgid ""
|
||
"Open-source medical large language model adapted from Llama 2 to the medical "
|
||
"domain."
|
||
msgstr ""
|
||
"Modelo de lenguaje grande médico de código abierto adaptado de Llama 2 al "
|
||
"dominio médico."
|
||
|
||
#: src/available_models_descriptions.py:76
msgid ""
"moondream2 is a small vision language model designed to run efficiently on "
"edge devices."
msgstr ""
"moondream2 es un pequeño modelo de lenguaje de visión diseñado para "
"funcionar eficientemente en dispositivos edge."

#: src/available_models_descriptions.py:77
|
||
msgid "Uncensored Llama2 based model with support for a 16K context window."
|
||
msgstr ""
|
||
"Modelo sin censura basado en Llama2 con soporte para una ventana de contexto "
|
||
"de 16K."
|
||
|
||
#: src/available_models_descriptions.py:78
|
||
msgid ""
|
||
"Nexus Raven is a 13B instruction tuned model for function calling tasks."
|
||
msgstr ""
|
||
"Nexus Raven es un modelo ajustado de 13B para tareas de llamada de funciones."
|
||
|
||
#: src/available_models_descriptions.py:79
|
||
msgid ""
|
||
"🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic "
|
||
"instruction data using OSS-Instruct, a novel approach to enlightening LLMs "
|
||
"with open-source code snippets."
|
||
msgstr ""
|
||
"🎩 Magicoder es una familia de modelos de 7B parámetros entrenados en 75K "
|
||
"datos de instrucción sintética utilizando OSS-Instruct, un enfoque novedoso "
|
||
"para iluminar a los LLMs con fragmentos de código de código abierto."
|
||
|
||
#: src/available_models_descriptions.py:80
|
||
msgid "A strong, economical, and efficient Mixture-of-Experts language model."
|
||
msgstr ""
|
||
"Un modelo de lenguaje Mixture-of-Experts fuerte, económico y eficiente."
|
||
|
||
#: src/available_models_descriptions.py:81
msgid ""
"A lightweight chat model allowing accurate, and responsive output without "
"requiring high-end hardware."
msgstr ""
"Un modelo de chat ligero que permite una salida precisa y receptiva sin "
"requerir hardware de alta gama."

#: src/available_models_descriptions.py:82
msgid ""
"A high-performing code instruct model created by merging two existing code "
"models."
msgstr ""
"Un modelo de instrucción de código de alto rendimiento creado mediante la "
"fusión de dos modelos de código existentes."

#: src/available_models_descriptions.py:83
msgid "A new small LLaVA model fine-tuned from Phi 3 Mini."
msgstr "Un nuevo pequeño modelo LLaVA ajustado a partir de Phi 3 Mini."

#: src/available_models_descriptions.py:84
msgid ""
"MistralLite is a fine-tuned model based on Mistral with enhanced "
"capabilities of processing long contexts."
msgstr ""
"MistralLite es un modelo ajustado basado en Mistral con capacidades "
"mejoradas de procesamiento de contextos largos."

#: src/available_models_descriptions.py:85
msgid ""
"Wizard Vicuna is a 13B parameter model based on Llama 2 trained by "
"MelodysDreamj."
msgstr ""
"Wizard Vicuna es un modelo de 13B parámetros basado en Llama 2 entrenado por "
"MelodysDreamj."

#: src/available_models_descriptions.py:86
msgid "7B parameter text-to-SQL model made by MotherDuck and Numbers Station."
msgstr ""
"Modelo de texto a SQL de 7B parámetros hecho por MotherDuck y Numbers "
"Station."

#: src/available_models_descriptions.py:87
msgid ""
"A language model created by combining two fine-tuned Llama 2 70B models into "
"one."
msgstr ""
"Un modelo de lenguaje creado combinando dos modelos ajustados de Llama 2 70B "
"en uno."

#: src/available_models_descriptions.py:88
msgid ""
"MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by "
"interleaving the model with itself."
msgstr ""
"MegaDolphin-2.2-120b es una transformación de Dolphin-2.2-70b creada al "
"entrelazar el modelo consigo mismo."

#: src/available_models_descriptions.py:89
msgid ""
"Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. "
"Designed for chat and code generation."
msgstr ""
"Fusión del modelo Open Orca OpenChat y el modelo Garage-bAInd Platypus 2. "
"Diseñado para chat y generación de código."

#: src/available_models_descriptions.py:90
msgid ""
"A top-performing mixture of experts model, fine-tuned with high-quality data."
msgstr ""
"Un modelo de mezcla de expertos de alto rendimiento, ajustado con datos de "
"alta calidad."

#: src/available_models_descriptions.py:91
msgid "A 7B chat model fine-tuned with high-quality data and based on Zephyr."
msgstr ""
"Un modelo de chat de 7B ajustado con datos de alta calidad y basado en "
"Zephyr."

#: src/available_models_descriptions.py:92
msgid "DBRX is an open, general-purpose LLM created by Databricks."
msgstr "DBRX es un LLM abierto de propósito general creado por Databricks."

#: src/available_models_descriptions.py:93
msgid ""
"Falcon2 is an 11B parameters causal decoder-only model built by TII and "
"trained over 5T tokens."
msgstr ""
"Falcon2 es un modelo causal de solo decodificador con 11B parámetros, "
"construido por TII y entrenado con más de 5T tokens."

#: src/available_models_descriptions.py:94
msgid ""
"A robust conversational model designed to be used for both chat and instruct "
"use cases."
msgstr ""
"Un modelo conversacional robusto diseñado para ser utilizado tanto en casos "
"de uso de chat como de instrucción."

#: src/dialogs.py:17
msgid "Chat cannot be cleared while receiving a message"
msgstr "No se puede limpiar el chat mientras se recibe un mensaje"

#: src/dialogs.py:20
msgid "Clear Chat?"
msgstr "¿Limpiar Chat?"

#: src/dialogs.py:21
msgid "Are you sure you want to clear the chat?"
msgstr "¿Estás seguro de que quieres limpiar el chat?"

#: src/dialogs.py:24 src/dialogs.py:45 src/dialogs.py:72 src/dialogs.py:99
#: src/dialogs.py:121 src/dialogs.py:142 src/dialogs.py:164 src/dialogs.py:224
#: src/dialogs.py:314 src/dialogs.py:352
msgid "Cancel"
msgstr "Cancelar"

#: src/dialogs.py:25
msgid "Clear"
msgstr "Limpiar"

#: src/dialogs.py:41
msgid "Delete Chat?"
msgstr "¿Eliminar Chat?"

#: src/dialogs.py:42 src/dialogs.py:139
msgid "Are you sure you want to delete '{}'?"
msgstr "¿Estás seguro de que quieres eliminar '{}'?"

#: src/dialogs.py:46 src/dialogs.py:143
msgid "Delete"
msgstr "Eliminar"

#: src/dialogs.py:66
msgid "Rename Chat?"
msgstr "¿Renombrar Chat?"

#: src/dialogs.py:67
msgid "Renaming '{}'"
msgstr "Renombrando '{}'"

#: src/dialogs.py:73
msgid "Rename"
msgstr "Renombrar"

#: src/dialogs.py:93
msgid "Create Chat?"
msgstr "¿Crear Chat?"

#: src/dialogs.py:94
msgid "Enter name for new chat"
msgstr "Ingrese el nombre para el nuevo chat"

#: src/dialogs.py:100 src/window.ui:472
msgid "Create"
msgstr "Crear"

#: src/dialogs.py:117
msgid "Stop Download?"
msgstr "¿Parar Descarga?"

#: src/dialogs.py:118
msgid "Are you sure you want to stop pulling '{} ({})'?"
msgstr "¿Estás seguro de que quieres parar la descarga de '{} ({})'?"

#: src/dialogs.py:122
msgid "Stop"
msgstr "Parar"

#: src/dialogs.py:138
msgid "Delete Model?"
msgstr "¿Eliminar Modelo?"

#: src/dialogs.py:160
msgid "Remove Attachment?"
msgstr "¿Remover Adjunto?"

#: src/dialogs.py:161
msgid "Are you sure you want to remove attachment?"
msgstr "¿Estás seguro de que quieres remover el adjunto?"

#: src/dialogs.py:165
msgid "Remove"
msgstr "Remover"

#: src/dialogs.py:190
msgid "Connection Error"
msgstr "Error de conexión"

#: src/dialogs.py:191
msgid "The remote instance has disconnected"
msgstr "La instancia remota se ha desconectado"

#: src/dialogs.py:195
msgid "Close Alpaca"
msgstr "Cerrar Alpaca"

#: src/dialogs.py:196
msgid "Use local instance"
msgstr "Usar instancia local"

#: src/dialogs.py:197
msgid "Connect"
msgstr "Conectar"

#: src/dialogs.py:220
msgid "Select Model"
msgstr "Selecciona el Modelo"

#: src/dialogs.py:221
msgid "This model will be used as the base for the new model"
msgstr "Este modelo será usado como base para el nuevo modelo"

#: src/dialogs.py:225 src/dialogs.py:315 src/dialogs.py:353
msgid "Accept"
msgstr "Aceptar"

#: src/dialogs.py:242
msgid "An error occurred while creating the model"
msgstr "Ha ocurrido un error mientras se creaba el modelo"

#: src/dialogs.py:300
msgid "This video does not have any transcriptions"
msgstr "Este video no tiene transcripciones"

#: src/dialogs.py:309
msgid "Attach YouTube Video?"
msgstr "¿Adjuntar Video de YouTube?"

#: src/dialogs.py:310
msgid ""
"{}\n"
"\n"
"Please select a transcript to include"
msgstr ""
"{}\n"
"\n"
"Por favor selecciona la transcripción a incluir"

#: src/dialogs.py:343
msgid "An error occurred while extracting text from the website"
msgstr "Ha ocurrido un error mientras se extraía texto del sitio web"

#: src/dialogs.py:348
msgid "Attach Website? (Experimental)"
msgstr "¿Adjuntar Sitio Web? (Experimental)"

#: src/dialogs.py:349
msgid ""
"Are you sure you want to attach\n"
"'{}'?"
msgstr ""
"¿Estás seguro de que quieres adjuntar\n"
"'{}'?"

#: src/window.ui:54
msgid "Menu"
msgstr "Menú"

#: src/window.ui:85
msgid "Toggle Sidebar"
msgstr "Alternar barra lateral"

#: src/window.ui:110 src/window.ui:590
msgid "Manage Models"
msgstr "Gestionar Modelos"

#: src/window.ui:124
msgid "Chat Menu"
msgstr "Menú de Chat"

#: src/window.ui:200
msgid "Attach File"
msgstr "Adjuntar Archivo"

#: src/window.ui:245 src/window.ui:1193
msgid "Send Message"
msgstr "Enviar Mensaje"

#: src/window.ui:293 src/window.ui:1038 src/window.ui:1152
msgid "Preferences"
msgstr "Preferencias"

#: src/window.ui:296 src/window.ui:1130
msgid "General"
msgstr "General"

#: src/window.ui:302
msgid "Use Remote Connection to Ollama"
msgstr "Usar conexión remota a Ollama"

#: src/window.ui:308
msgid "URL of Remote Instance"
msgstr "URL de la Instancia Remota"

#: src/window.ui:315
msgid "Bearer Token (Optional)"
msgstr "Bearer Token (Opcional)"

#: src/window.ui:325
msgid "Run Alpaca In Background"
msgstr "Ejecutar Alpaca en segundo plano"

#: src/window.ui:336
msgid "Temperature"
msgstr "Temperatura"

#: src/window.ui:337
msgid ""
"The temperature of the model. Increasing the temperature will make the model "
"answer more creatively. (Default: 0.8)"
msgstr ""
"La temperatura del modelo. Incrementar la temperatura hará que el modelo "
"responda de forma más creativa. (Por defecto: 0.8)"

#: src/window.ui:352
msgid "Seed"
msgstr "Semilla"

#: src/window.ui:353
msgid ""
"Sets the random number seed to use for generation. Setting this to a "
"specific number will make the model generate the same text for the same "
"prompt. (Default: 0 (random))"
msgstr ""
"Establece la semilla de números aleatorios que se usa para la generación. "
"Establecer un número específico hará que el modelo genere el mismo texto "
"para la misma pregunta. (Por defecto: 0 (al azar))"

#: src/window.ui:367
msgid "Keep Alive Time"
msgstr "Tiempo Para Mantener Vivo"

#: src/window.ui:368
msgid ""
"Controls how long the model will stay loaded into memory following the "
"request in minutes (Default: 5)"
msgstr ""
"Controla por cuánto tiempo el modelo permanecerá cargado en la memoria "
"después de la última petición, en minutos (Por defecto: 5)"

#: src/window.ui:384
msgid "Ollama Instance"
msgstr "Instancia de Ollama"

#: src/window.ui:388
msgid "Ollama Overrides"
msgstr "Overrides de Ollama"

#: src/window.ui:389
msgid ""
"Manage the arguments used on Ollama, any changes on this page only applies "
"to the integrated instance, the instance will restart if you make changes."
msgstr ""
"Administra los argumentos usados en Ollama, cualquier cambio en esta página "
"solo aplica a la instancia integrada, la instancia se reiniciará si haces "
"algún cambio."

#: src/window.ui:485 src/window.ui:600
msgid "Create Model"
msgstr "Crear Modelo"

#: src/window.ui:511
msgid "Base"
msgstr "Base"

#: src/window.ui:529
msgid "Name"
msgstr "Nombre"

#: src/window.ui:535
msgid "Context"
msgstr "Contexto"

#: src/window.ui:550
msgid "Template"
msgstr "Plantilla"

#: src/window.ui:556
msgid ""
"Some models require a specific template. Please visit the model's website "
"for more information if you're unsure."
msgstr ""
"Algunos modelos requieren una plantilla específica. Por favor visita el "
"sitio web del modelo para más información en caso de que no estés seguro."

#: src/window.ui:607
msgid "Search Model"
msgstr "Buscar Modelo"

#: src/window.ui:620
msgid "Search models"
msgstr "Buscar modelos"

#: src/window.ui:667
msgid "No Models Found"
msgstr "Ningún modelo encontrado"

#: src/window.ui:668
msgid "Try a different search"
msgstr "Intenta una búsqueda distinta"

#: src/window.ui:713
msgid ""
"By downloading this model you accept the license agreement available on the "
"model's website."
msgstr ""
"Al descargar este modelo aceptas el acuerdo de licencia disponible en el "
"sitio web del modelo."

#: src/window.ui:750
msgid "Open With Default App"
msgstr "Abrir con aplicación predeterminada"

#: src/window.ui:758
msgid "Remove Attachment"
msgstr "Remover Adjunto"

#: src/window.ui:820
msgid "Previous"
msgstr "Anterior"

#: src/window.ui:863
msgid "Welcome to Alpaca"
msgstr "Bienvenido a Alpaca"

#: src/window.ui:864
msgid "Powered by Ollama"
msgstr "Impulsado por Ollama"

#: src/window.ui:867
msgid "Ollama Website"
msgstr "Sitio Web de Ollama"

#: src/window.ui:884
msgid ""
"Alpaca and its developers are not liable for any damages to devices or "
"software resulting from the execution of code generated by an AI model. "
"Please exercise caution and review the code carefully before running it."
msgstr ""
"Alpaca y sus desarrolladores no son responsables por ningún daño a "
"dispositivos o software resultante de la ejecución de código generado por "
"un modelo de IA. Por favor sea precavido y revise el código cuidadosamente "
"antes de ejecutarlo."

#: src/window.ui:895
msgid "Featured Models"
msgstr "Modelos Destacados"

#: src/window.ui:896
msgid ""
"Alpaca works locally on your device, to start chatting you'll need an AI "
"model, you can either pull models from this list or the 'Manage Models' menu "
"later.\n"
"\n"
"By downloading any model you accept their license agreement available on the "
"model's website.\n"
" "
msgstr ""
"Alpaca funciona localmente en tu dispositivo, para empezar a conversar "
"necesitarás un modelo de IA, puedes descargar modelos de esta lista o desde "
"el menú 'Gestionar Modelos' después.\n"
"\n"
"Al descargar cualquier modelo aceptas su acuerdo de licencia disponible en "
"el sitio web del modelo.\n"
" "

#: src/window.ui:909
msgid "Built by Meta"
msgstr "Construido por Meta"

#: src/window.ui:937
msgid "Built by Google DeepMind"
msgstr "Construido por Google DeepMind"

#: src/window.ui:965
msgid "Built by Microsoft"
msgstr "Construido por Microsoft"

#: src/window.ui:993
msgid "Multimodal AI with image recognition"
msgstr "IA multimodal con reconocimiento de imágenes"

#: src/window.ui:1032
msgid "Import Chat"
msgstr "Importar chat"

#: src/window.ui:1042
msgid "Keyboard Shortcuts"
msgstr "Atajos de Teclado"

#: src/window.ui:1046
msgid "About Alpaca"
msgstr "Sobre Alpaca"

#: src/window.ui:1053 src/window.ui:1072
msgid "Rename Chat"
msgstr "Renombrar Chat"

#: src/window.ui:1057 src/window.ui:1076
msgid "Export Chat"
msgstr "Exportar chat"

#: src/window.ui:1061
msgid "Clear Chat"
msgstr "Limpiar Chat"

#: src/window.ui:1068
msgid "Delete Chat"
msgstr "Eliminar Chat"

#: src/window.ui:1084
msgid "From Existing Model"
msgstr "Usar modelo existente"

#: src/window.ui:1088
msgid "From GGUF File (Experimental)"
msgstr "Usar archivo GGUF (Experimental)"

#: src/window.ui:1134
msgid "Close application"
msgstr "Cerrar aplicación"

#: src/window.ui:1140
msgid "Import chat"
msgstr "Importar chat"

#: src/window.ui:1146
msgid "Clear chat"
msgstr "Limpiar chat"

#: src/window.ui:1158
msgid "New chat"
msgstr "Nuevo chat"

#: src/window.ui:1164
msgid "Show shortcuts window"
msgstr "Mostrar ventana de atajos"

#: src/window.ui:1171
msgid "Editor"
msgstr "Editor"

#: src/window.ui:1175
msgid "Copy"
msgstr "Copiar"

#: src/window.ui:1181
msgid "Paste"
msgstr "Pegar"

#: src/window.ui:1187
msgid "Insert new line"
msgstr "Saltar línea"

#~ msgid "Open with Default App"
#~ msgstr "Abrir con Aplicación Predeterminada"

#~ msgid ""
#~ "Alpaca works locally on your device, to start chatting you'll need an AI "
#~ "model, you can either pull models from this list or the 'Manage Models' "
#~ "menu later."
#~ msgstr ""
#~ "Alpaca funciona localmente en tu dispositivo, para empezar a chatear "
#~ "necesitas un modelo IA, puedes descargar modelos de esta lista o usando "
#~ "el menu 'Gestionar Modelos' despues"

#~ msgid "An error occurred"
#~ msgstr "Ha ocurrido un error"

#~ msgid "Could not list local models"
#~ msgstr "No se pudieron listar los modelos locales"

#~ msgid "Could not delete model"
#~ msgstr "No se pudo eliminar el modelo"

#~ msgid "Could not pull model"
#~ msgstr "No se pudo descargar el modelo"

#~ msgid "Cannot delete chat because it's the only one left"
#~ msgstr "No se pudo eliminar el chat porque es el único que queda"

#~ msgid "That tag is already being pulled"
#~ msgstr "Esa etiqueta ya se está descargando"

#~ msgid "That tag has been pulled already"
#~ msgstr "Esa etiqueta ya ha sido descargada"

#~ msgid "Model pulled successfully"
#~ msgstr "Modelo descargado exitosamente"