2460 lines
75 KiB
Plaintext
2460 lines
75 KiB
Plaintext
# Spanish translations for Alpaca package.
|
||
# Copyright (C) 2024 Jeffser
|
||
# This file is distributed under the same license as the Alpaca package.
|
||
# Jeffry Samuel Eduarte Rojas <jeffrysamuer@gmail.com>, 2024.
|
||
#
|
||
msgid ""
|
||
msgstr ""
|
||
"Project-Id-Version: 1.0.0\n"
|
||
"Report-Msgid-Bugs-To: \n"
|
||
"POT-Creation-Date: 2024-08-03 00:42-0600\n"
|
||
"PO-Revision-Date: 2024-05-19 19:44-0600\n"
|
||
"Last-Translator: Jeffry Samuel Eduarte Rojas <jeffrysamuer@gmail.com>\n"
|
||
"Language-Team: Spanish\n"
|
||
"Language: es\n"
|
||
"MIME-Version: 1.0\n"
|
||
"Content-Type: text/plain; charset=UTF-8\n"
|
||
"Content-Transfer-Encoding: 8bit\n"
|
||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
||
|
||
#: data/com.jeffser.Alpaca.desktop.in:3
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:7
|
||
msgid "Alpaca"
|
||
msgstr "Alpaca"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:8
|
||
msgid "Chat with local AI models"
|
||
msgstr "Chatea con modelos de IA"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:10
|
||
msgid "An Ollama client"
|
||
msgstr "Un cliente de Ollama"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:11
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:572
|
||
msgid "Features"
|
||
msgstr "Funcionalidades"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:13
|
||
msgid "Built in Ollama instance"
|
||
msgstr "Instancia de Ollama incluida"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:14
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:574
|
||
msgid "Talk to multiple models in the same conversation"
|
||
msgstr "Habla con multiples modelos en la misma conversación"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:15
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:575
|
||
msgid "Pull and delete models from the app"
|
||
msgstr "Descarga y elimina modelos desde la app"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:16
|
||
msgid "Have multiple conversations"
|
||
msgstr "Multiples conversaciones"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:17
|
||
msgid "Image recognition (Only available with compatible models)"
|
||
msgstr "Reconocimiento de imagenes (Solo disponible con modelos compatibles)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:18
|
||
msgid "Plain text documents recognition"
|
||
msgstr "Reconocimiento de documentos de texto plano"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:19
|
||
msgid "Import and export chats"
|
||
msgstr "Importa y exporta chats"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:20
|
||
msgid "Append YouTube transcripts to the prompt"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:21
|
||
msgid "Append text from a website to the prompt"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:22
|
||
msgid "PDF recognition"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:24 src/window.ui:896
|
||
msgid "Disclaimer"
|
||
msgstr "Aviso Legal"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:25
|
||
msgid ""
|
||
"This project is not affiliated at all with Ollama, I'm not responsible for "
|
||
"any damages to your device or software caused by running code given by any "
|
||
"models."
|
||
msgstr ""
|
||
"Este proyecto no está afiliado del todo con Ollama, no soy responsable por "
|
||
"cualquier daño a tu dispositivo o software causado por correr codigo "
|
||
"proveido por cualquier modelo."
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:28
|
||
msgid "Jeffry Samuel Eduarte Rojas"
|
||
msgstr "Jeffry Samuel Eduarte Rojas"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:54
|
||
msgid "A normal conversation with an AI Model"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:58
|
||
msgid "A conversation involving image recognition"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:62
|
||
msgid "A conversation showing code highlighting"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:66
|
||
msgid "A conversation involving a YouTube video transcript"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:70
|
||
msgid "Multiple models being downloaded"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:86
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:107
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:122
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:147
|
||
msgid "New"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:88
|
||
msgid "Regenerate any response, even if they are incomplete"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:89
|
||
msgid "Support for pulling models by name:tag"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:90
|
||
msgid "Stable support for GGUF model files"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:91
|
||
msgid "Restored sidebar toggle button"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:93
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:111
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:129
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:141
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:157
|
||
msgid "Fixes"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:95
|
||
msgid "Reverted back to standard styles"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:96
|
||
msgid "Fixed generated titles having \"'S\" for some reason"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:97
|
||
msgid "Changed min width for model dropdown"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:98
|
||
msgid "Changed message entry shadow"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:99
|
||
msgid "The last model used is now restored when the user changes chat"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:100
|
||
msgid "Better check for message finishing"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:109
|
||
msgid "Added table rendering (Thanks Nokse)"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:113
|
||
msgid "Made support dialog more common"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:114
|
||
msgid ""
|
||
"Dialog title on tag chooser when downloading models didn't display properly"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:115
|
||
msgid "Prevent chat generation from generating a title with multiple lines"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:124
|
||
msgid "Bearer Token entry on connection error dialog"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:125
|
||
msgid "Small appearance changes"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:126
|
||
msgid "Compatibility with code blocks without explicit language"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:127
|
||
msgid "Rare, optional and dismissible support dialog"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:131
|
||
msgid "Date format for Simplified Chinese translation"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:132
|
||
msgid "Bug with unsupported localizations"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:133
|
||
msgid "Min height being too large to be used on mobile"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:134
|
||
msgid "Remote connection checker bug"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:143
|
||
msgid "Models with capital letters on their tag don't work"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:144
|
||
msgid "Ollama fails to launch on some systems"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:145
|
||
msgid "YouTube transcripts are not being saved in the right TMP directory"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:149
|
||
msgid "Debug messages are now shown on the 'About Alpaca' dialog"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:150
|
||
msgid "Updated Ollama to v0.3.0 (new models)"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:159
|
||
msgid "Models with '-' in their names didn't work properly, this is now fixed"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:160
|
||
msgid "Better connection check for Ollama"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:167
|
||
msgid "Stable Release"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:168
|
||
msgid ""
|
||
"The new icon was made by Tobias Bernard over the Gnome Gitlab, thanks for "
|
||
"the great icon!"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:169
|
||
msgid "Features and fixes"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:171
|
||
msgid "Updated Ollama instance to 0.2.8"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:172
|
||
msgid "Better model selector"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:173
|
||
msgid "Model manager redesign"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:174
|
||
msgid "Better tag selector when pulling a model"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:175
|
||
msgid "Model search"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:176
|
||
msgid "Added support for bearer tokens on remote instances"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:177
|
||
msgid "Preferences dialog redesign"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:178
|
||
msgid "Added context menus to interact with a chat"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:179
|
||
msgid "Redesigned primary and secondary menus"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:180
|
||
msgid ""
|
||
"YouTube integration: Paste the URL of a video with a transcript and it will "
|
||
"be added to the prompt"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:181
|
||
msgid ""
|
||
"Website integration (Experimental): Extract the text from the body of a "
|
||
"website by adding it's URL to the prompt"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:182
|
||
msgid "Chat title generation"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:183
|
||
msgid "Auto resizing of message entry"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:184
|
||
msgid "Chat notifications"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:185
|
||
msgid "Added indicator when an image is missing"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:186
|
||
msgid "Auto rearrange the order of chats when a message is received"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:187
|
||
msgid "Redesigned file preview dialog"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:188
|
||
msgid "Credited new contributors"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:189
|
||
msgid "Better stability and optimization"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:190
|
||
msgid "Edit messages to change the context of a conversation"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:191
|
||
msgid "Added disclaimers when pulling models"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:192
|
||
msgid "Preview files before sending a message"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:193
|
||
msgid "Better format for date and time on messages"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:194
|
||
msgid "Error and debug logging on terminal"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:195
|
||
msgid "Auto-hiding sidebar button"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:196
|
||
msgid "Various UI tweaks"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:198
|
||
msgid "New Models"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:200
|
||
msgid "Gemma2"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:201
|
||
msgid "GLM4"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:202
|
||
msgid "Codegeex4"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:203
|
||
msgid "InternLM2"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:204
|
||
msgid "Llama3-groq-tool-use"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:205
|
||
msgid "Mathstral"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:206
|
||
msgid "Mistral-nemo"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:207
|
||
msgid "Firefunction-v2"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:208
|
||
msgid "Nuextract"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:210
|
||
msgid "Translations"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:211
|
||
msgid ""
|
||
"These are all the available translations on 1.0.0, thanks to all the "
|
||
"contributors!"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:213
|
||
msgid "Russian: Alex K"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:214
|
||
msgid "Spanish: Jeffser"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:215
|
||
msgid "Brazilian Portuguese: Daimar Stein"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:216
|
||
msgid "French: Louis Chauvet-Villaret"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:217
|
||
msgid "Norwegian: CounterFlow64"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:218
|
||
msgid "Bengali: Aritra Saha"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:219
|
||
msgid "Simplified Chinese: Yuehao Sui"
|
||
msgstr ""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:226
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:275
|
||
msgid "Fix"
|
||
msgstr "Arreglo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:227
|
||
msgid ""
|
||
"Removed DOCX compatibility temporally due to error with python-lxml "
|
||
"dependency"
|
||
msgstr ""
|
||
"Removida compatibilidad con DOCX temporalmente debido a un error con la "
|
||
"dependencia python-lxml"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:233
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:263
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:284
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:489
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:546
|
||
msgid "Big Update"
|
||
msgstr "Gran Actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:235
|
||
msgid "Added compatibility for PDF"
|
||
msgstr "Añadida compatibilidad para PDF"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:236
|
||
msgid "Added compatibility for DOCX"
|
||
msgstr "Añadida compatibilidad para DOCX"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:237
|
||
msgid "Merged 'file attachment' menu into one button"
|
||
msgstr "Combinado menu 'subir archivos' en un botón"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:244
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:437
|
||
msgid "Quick Fix"
|
||
msgstr "Arreglo rápido"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:245
|
||
msgid ""
|
||
"There were some errors when transitioning from the old version of chats to "
|
||
"the new version. I apologize if this caused any corruption in your chat "
|
||
"history. This should be the only time such a transition is needed."
|
||
msgstr ""
|
||
"Hubieron unos errores mientras los chats transicionaban a la nueva versión. "
|
||
"Pido disculpas si eso causo alguna corrupción en to historial de chats. Esta "
|
||
"debería de ser la única vez que una transición es necesaria"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:251
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:403
|
||
msgid "Huge Update"
|
||
msgstr "Gran Actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:253
|
||
msgid "Added: Support for plain text files"
|
||
msgstr "Añadido: Soporte para archivos de texto plano"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:254
|
||
msgid "Added: New backend system for storing messages"
|
||
msgstr "Añadido: Nuevo sistema en el backend para guardar mensajes"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:255
|
||
msgid "Added: Support for changing Ollama's overrides"
|
||
msgstr "Añadido: Soporte para cambiar overrides de Ollama"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:256
|
||
msgid "General Optimization"
|
||
msgstr "Optimización general"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:265
|
||
msgid "Added: Support for GGUF models (experimental)"
|
||
msgstr "Añadido: Soporte de modelos GGUF (experimental)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:266
|
||
msgid "Added: Support for customization and creation of models"
|
||
msgstr "Añadido: Soporte para personalización y creración de modelos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:267
|
||
msgid "Fixed: Icons don't appear on non Gnome systems"
|
||
msgstr "Arreglado: Iconos no se mostraban en sistemas que no usan Gnome"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:268
|
||
msgid "Update Ollama to v0.1.39"
|
||
msgstr "Ollama actualizado a v0.1.39"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:277
|
||
msgid ""
|
||
"Fixed: app didn't open if models tweaks wasn't present in the config files"
|
||
msgstr ""
|
||
"Arreglado: La aplicación no abre si 'models tweaks' no esta presente en los "
|
||
"archivos de configuración"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:286
|
||
msgid "Changed multiple icons (paper airplane for the send button)"
|
||
msgstr ""
|
||
"Multiples iconos cambiados (avion de papel para el boton de enviar mensaje)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:287
|
||
msgid "Combined export / import chat buttons into a menu"
|
||
msgstr "Botones importar / exportar chat combinados en un menu"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:288
|
||
msgid "Added 'model tweaks' (temperature, seed, keep_alive)"
|
||
msgstr "Añadidos ajustes de modelo (temperatura, semilla, mantener vivo)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:289
|
||
msgid "Fixed send / stop button"
|
||
msgstr "Arreglado boton enviar / parar"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:290
|
||
msgid "Fixed app not checking if remote connection works when starting"
|
||
msgstr ""
|
||
"Arreglado: Aplicación no chequea si la conexión remota funciona cuando inicia"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:297
|
||
msgid "Daily Update"
|
||
msgstr "Actulización Diaria"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:299
|
||
msgid "Added text ellipsis to chat name so it doesn't change the button width"
|
||
msgstr ""
|
||
"Añadido elipsis a el nombre del chat para que no afecte el largo del boton"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:300
|
||
msgid "New shortcut for creating a chat (CTRL+N)"
|
||
msgstr "Nuevo atajo de teclado para crear chat (CTRL+N)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:301
|
||
msgid "New message entry design"
|
||
msgstr "Nuevo diseño para el entry de mensaje"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:302
|
||
msgid "Fixed: Can't rename the same chat multiple times"
|
||
msgstr "Arreglado: No se puede renombrar el mismo chat multiples veces"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:309
|
||
msgid "The fix"
|
||
msgstr "Arreglos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:311
|
||
msgid ""
|
||
"Fixed: Ollama instance keeps running on the background even when it is "
|
||
"disabled"
|
||
msgstr ""
|
||
"Arreglado: Instancia de Ollama sigue siendo ejecutada en el fondo aunque sea "
|
||
"desactivada"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:312
|
||
msgid "Fixed: Can't pull models on the integrated instance"
|
||
msgstr "Arreglado: No se puede descargar modelos en la instancia integrada"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:319
|
||
msgid "Quick tweaks"
|
||
msgstr "Arreglos rápido"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:321
|
||
msgid "Added progress bar to models that are being pulled"
|
||
msgstr "Añadida barra de progreso a modelos que estan siendo descargados"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:322
|
||
msgid "Added size to tags when pulling a model"
|
||
msgstr "Añadido tamaño de tags cuando se descarga un modelo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:323
|
||
msgid "General optimizations on the background"
|
||
msgstr "Optimizaciones general en el fondo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:330
|
||
msgid "Quick fixes"
|
||
msgstr "Arreglos rápidos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:332
|
||
msgid "Fixed: Scroll when message is received"
|
||
msgstr "Arreglado: Desplazamiento automatico cuando un mensaje es recibido"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:333
|
||
msgid "Fixed: Content doesn't change when creating a new chat"
|
||
msgstr "Arreglad: Contenido no cambia cuando se crea un nuevo chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:334
|
||
msgid "Added 'Featured Models' page on welcome dialog"
|
||
msgstr "Añadida sección 'Modelos Destacados' en el dialogo de bienvenida"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:341
|
||
msgid "Nice Update"
|
||
msgstr "Buena Actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:343
|
||
msgid "UI tweaks (Thanks Nokse22)"
|
||
msgstr "Mejor UI en general (Gracias Nokse22)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:344
|
||
msgid "General optimizations"
|
||
msgstr "Optimización general"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:345
|
||
msgid "Metadata fixes"
|
||
msgstr "Correciones de metadata"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:352
|
||
msgid "Quick fix"
|
||
msgstr "Arreglo rápido"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:354
|
||
msgid "Updated Spanish translation"
|
||
msgstr "Actualización a la traducción a Español"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:355
|
||
msgid "Added compatibility for PNG"
|
||
msgstr "Añadida compatibilidad para PNG"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:362
|
||
msgid "New Update"
|
||
msgstr "Nueva Actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:364
|
||
msgid "Updated model list"
|
||
msgstr "Lista de modelos actualizada"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:365
|
||
msgid "Added image recognition to more models"
|
||
msgstr "Añadido reconocimiento de imagenes a más modelos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:366
|
||
msgid "Added Brazilian Portuguese translation (Thanks Daimaar Stein)"
|
||
msgstr "Añadida tradución a Portugues Brasileño (Gracias Daimaar Stein)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:367
|
||
msgid "Refined the general UI (Thanks Nokse22)"
|
||
msgstr "Mejor UI en general (Gracias Nokse22)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:368
|
||
msgid "Added 'delete message' feature"
|
||
msgstr "Añadida función 'eliminar mensaje'"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:369
|
||
msgid ""
|
||
"Added metadata so that software distributors know that the app is compatible "
|
||
"with mobile"
|
||
msgstr ""
|
||
"Añadida metadata para que distribuidores de software puedan saber que la "
|
||
"aplicación es compatible con celulares"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:370
|
||
msgid ""
|
||
"Changed 'send' shortcut to just the return/enter key (to add a new line use "
|
||
"shift+return)"
|
||
msgstr ""
|
||
"Cambiado el atajo para enviar mensaje a solo la tecla enter (para hacer "
|
||
"salto de linea usa shift+enter)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:377
|
||
msgid "Bug Fixes"
|
||
msgstr "Arreglo de errores"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:379
|
||
msgid "Fixed: Minor spelling mistake"
|
||
msgstr "Arregalada falta de ortografía"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:380
|
||
msgid "Added 'mobile' as a supported form factor"
|
||
msgstr "Añadido soporte para celulares"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:381
|
||
msgid "Fixed: 'Connection Error' dialog not working properly"
|
||
msgstr "Arreglado: Dialogo 'Error de conexión' no funcionando correctamente"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:382
|
||
msgid "Fixed: App might freeze randomly on startup"
|
||
msgstr "Arreglado: Aplicación se congela al azar cuando inicia"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:383
|
||
msgid "Changed 'chats' label on sidebar for 'Alpaca'"
|
||
msgstr "Cambiado label 'chats' en la barra del lado por 'Alpaca'"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:390
|
||
msgid "Cool Update"
|
||
msgstr "Actualización Potente"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:392
|
||
msgid "Better design for chat window"
|
||
msgstr "Mejor diseño para la ventana de chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:393
|
||
msgid "Better design for chat sidebar"
|
||
msgstr "Mejor interfaz para la barra de lado de chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:394
|
||
msgid "Fixed remote connections"
|
||
msgstr "Conexión remota arreglada"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:395
|
||
msgid "Fixed Ollama restarting in loop"
|
||
msgstr "Arreglado, Ollama reiniciandose en bucle"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:396
|
||
msgid "Other cool backend stuff"
|
||
msgstr "Otras cosas geniales en el backend"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:405
|
||
msgid "Added Ollama as part of Alpaca, Ollama will run in a sandbox"
|
||
msgstr "Añadido Ollama como parte de Alpaca, Ollama se ejecutara en un sandbox"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:406
|
||
msgid "Added option to connect to remote instances (how it worked before)"
|
||
msgstr ""
|
||
"Añadida la opcion de conectarse a instancias remotas (como funcionaba) antes"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:407
|
||
msgid "Added option to import and export chats"
|
||
msgstr "Añadida la opcion de importar y exportar chats"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:408
|
||
msgid "Added option to run Alpaca with Ollama in the background"
|
||
msgstr "Añadida la opcion de ejecutar Alpaca y Ollama en el fondo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:409
|
||
msgid "Added preferences dialog"
|
||
msgstr "Añadido dialogo de preferencias"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:410
|
||
msgid "Changed the welcome dialog"
|
||
msgstr "Nuevo dialogo de bienvenida"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:412
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:429
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:441
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:460
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:481
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:497
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:513
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:527
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:537
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:555
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:577
|
||
msgid "Please report any errors to the issues page, thank you."
|
||
msgstr "Por favor reporta cualquier error a la página de problemas, gracias."
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:420
|
||
msgid "Yet Another Daily Update"
|
||
msgstr "Otra Actulización Diaria"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:422
|
||
msgid "Added better UI for 'Manage Models' dialog"
|
||
msgstr "Añadida mejor interfaz para el dialogo 'gestión de modelos'"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:423
|
||
msgid "Added better UI for the chat sidebar"
|
||
msgstr "Añadida mejor interfaz para la barra de lado de chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:424
|
||
msgid ""
|
||
"Replaced model description with a button to open Ollama's website for the "
|
||
"model"
|
||
msgstr ""
|
||
"Remplazada la descripción de modelo por un botón para abrir la página web de "
|
||
"Ollama para el modelo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:425
|
||
msgid "Added myself to the credits as the spanish translator"
|
||
msgstr "Agregue mi nombre en los creditos como el traductor a Español"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:426
|
||
msgid "Using XDG properly to get config folder"
|
||
msgstr "Usando XDG apropiadamente para obtener el folder de configuración"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:427
|
||
msgid "Update for translations"
|
||
msgstr "Actualización para traducciones"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:439
|
||
msgid "The last update had some mistakes in the description of the update"
|
||
msgstr ""
|
||
"La última actualización tenía unos errores en la descripción de la "
|
||
"actualización"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:449
|
||
msgid "Another Daily Update"
|
||
msgstr "Otra Actulización Diaria"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:451
|
||
msgid "Added full Spanish translation"
|
||
msgstr "Añadida traducción completa a Español"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:452
|
||
msgid "Added support for background pulling of multiple models"
|
||
msgstr "Añadido soporte para descargar multiples modelos en el fondo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:453
|
||
msgid "Added interrupt button"
|
||
msgstr "Añadido botón de interrupción"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:454
|
||
msgid "Added basic shortcuts"
|
||
msgstr "Añadidos atajos de teclado basicos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:455
|
||
msgid "Better translation support"
|
||
msgstr "Mejor soporte para traducciones"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:456
|
||
msgid ""
|
||
"User can now leave chat name empty when creating a new one, it will add a "
|
||
"placeholder name"
|
||
msgstr ""
|
||
"El usuario ahora puede dejar el nombre del chat vacio durante la creación, "
|
||
"la aplicación añadira un placeholder"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:457
|
||
msgid "Better scalling for different window sizes"
|
||
msgstr "Mejor escalado para distintos tamaños de ventana"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:458
|
||
msgid "Fixed: Can't close app if first time setup fails"
|
||
msgstr "Arreglado: No se puede cerrar la aplicación en el primer setup"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:468
|
||
msgid "Really Big Update"
|
||
msgstr "Actualización Bastante Grande"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:470
|
||
msgid "Added multiple chats support!"
|
||
msgstr "Añadido soporte para multiples chats!"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:471
|
||
msgid "Added Pango Markup support (bold, list, title, subtitle, monospace)"
|
||
msgstr ""
|
||
"Añadido soporte para Pango Markup (negrita, lista, titulo, subtitulo, "
|
||
"monoespaciado)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:472
|
||
msgid "Added autoscroll if the user is at the bottom of the chat"
|
||
msgstr ""
|
||
"Añadido autoscroll si el usuario se encuentra en la parte inferior del chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:473
|
||
msgid "Added support for multiple tags on a single model"
|
||
msgstr "Añadido soporte para multiples etiquetas con un solo modelo"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:474
|
||
msgid "Added better model management dialog"
|
||
msgstr "Añadido mejor gestión de modelos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:475
|
||
msgid "Added loading spinner when sending message"
|
||
msgstr "Añadido spinner de carga cuando se envia un mensaje"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:476
|
||
msgid "Added notifications if app is not active and a model pull finishes"
|
||
msgstr ""
|
||
"Añadidas notificaciones si la aplicación no está activa y la descarga de un "
|
||
"modelo finaliza"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:477
|
||
msgid "Added new symbolic icon"
|
||
msgstr "Añadido nuevo icono simbolico"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:478
|
||
msgid "Added frame to message textview widget"
|
||
msgstr "Añadido borde al objeto textview del mensaje"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:479
|
||
msgid "Fixed \"code blocks shouldn't be editable\""
|
||
msgstr "Arreglado \"bloques de codigo no deberían de ser editables\""
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:491
|
||
msgid "Added code highlighting"
|
||
msgstr "Añadido resaltado de código"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:492
|
||
msgid "Added image recognition (llava model)"
|
||
msgstr "Añadido reconocimiento de imagenes (modelo llava)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:493
|
||
msgid "Added multiline prompt"
|
||
msgstr "Añadido caja de texto de multiples lineas"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:494
|
||
msgid "Fixed some small bugs"
|
||
msgstr "Arreglados unos pequeños errores"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:495
|
||
msgid "General optimization"
|
||
msgstr "Optimización general"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:505
|
||
msgid "Fixes and features"
|
||
msgstr "Arreglos y funcionalidades"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:507
|
||
msgid "Russian translation (thanks github/alexkdeveloper)"
|
||
msgstr "Traducción a Ruso (gracias github/alexkdeveloper)"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:508
|
||
msgid "Fixed: Cannot close app on first setup"
|
||
msgstr "Arreglado: No se puede cerrar la aplicación en el primer setup"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:509
|
||
msgid "Fixed: Brand colors for Flathub"
|
||
msgstr "Arreglado: Colores de marca para Flathub"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:510
|
||
msgid "Fixed: App description"
|
||
msgstr "Arreglado: Descripción de aplicación"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:511
|
||
msgid "Fixed: Only show 'save changes dialog' when you actually change the url"
|
||
msgstr ""
|
||
"Arreglado: Solo mostrar el dialogo 'guardar cambios' cuando se cambia el url"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:521
|
||
msgid "0.2.2 Bug fixes"
|
||
msgstr "0.2.2 Arreglo de errores"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:523
|
||
msgid "Toast messages appearing behind dialogs"
|
||
msgstr "Mensajes toast apareciendo detrás de dialogos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:524
|
||
msgid "Local model list not updating when changing servers"
|
||
msgstr ""
|
||
"Lista de modelos locales no es actualizada cuando se cambia el servidor"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:525
|
||
msgid "Closing the setup dialog closes the whole app"
|
||
msgstr "Cerrar el dialogo de setup cierra toda la aplicación"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:535
|
||
msgid "0.2.1 Data saving fix"
|
||
msgstr "0.2.1 Arreglo en el guardado de datos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:536
|
||
msgid ""
|
||
"The app didn't save the config files and chat history to the right "
|
||
"directory, this is now fixed"
|
||
msgstr ""
|
||
"La aplicación no guardaba los archivos de configuración o los chats en el "
|
||
"directorio correcto, esto ahora ha sido arreglado"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:545
|
||
msgid "0.2.0"
|
||
msgstr "0.2.0"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:547
|
||
msgid "New Features"
|
||
msgstr "Nuevas funcionalidades"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:549
|
||
msgid "Restore chat after closing the app"
|
||
msgstr "Restaurar chat despues de cerrar la aplicación"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:550
|
||
msgid "A button to clear the chat"
|
||
msgstr "Un botón para limpiar el chat"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:551
|
||
msgid "Fixed multiple bugs involving how messages are shown"
|
||
msgstr "Arreglados multiples errores acerca de como los mensajes son mostrados"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:552
|
||
msgid "Added welcome dialog"
|
||
msgstr "Añadido dialogo de bienvenida"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:553
|
||
msgid "More stability"
|
||
msgstr "Más estabilidad"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:563
|
||
msgid "0.1.2 Quick fixes"
|
||
msgstr "0.1.2 Arreglos rápidos"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:564
|
||
msgid ""
|
||
"This release fixes some metadata needed to have a proper Flatpak application"
|
||
msgstr ""
|
||
"Esta versión arregla metadatos necesarios para tener un aplicación de "
|
||
"Flatpak justa"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:570
|
||
msgid "0.1.1 Stable Release"
|
||
msgstr "0.1.1"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:571
|
||
msgid "This is the first public version of Alpaca"
|
||
msgstr "Esta es la primera versión publica de Alpaca"
|
||
|
||
#: src/window.py:61 src/window.py:1184 src/window.py:1257 src/dialogs.py:84
|
||
#: src/window.ui:40
|
||
msgid "New Chat"
|
||
msgstr "Nuevo Chat"
|
||
|
||
#: src/window.py:174
|
||
msgid "Message edited successfully"
|
||
msgstr "Mensaje eliminado exitosamente"
|
||
|
||
#: src/window.py:188
|
||
msgid "Please select a model before chatting"
|
||
msgstr "Por favor selecciona un modelo antes de enviar un mensaje"
|
||
|
||
#: src/window.py:259 src/window.py:260
|
||
msgid "Close"
|
||
msgstr "Cerrar"
|
||
|
||
#: src/window.py:262 src/window.py:263 src/window.ui:849
|
||
msgid "Next"
|
||
msgstr "Siguiente"
|
||
|
||
#: src/window.py:303 src/window.py:314
|
||
msgid "Failed to connect to server"
|
||
msgstr "No se pudo conectar al servidor"
|
||
|
||
#: src/window.py:321
|
||
msgid "Pulling in the background..."
|
||
msgstr "Descargando en el fondo..."
|
||
|
||
#: src/window.py:372
|
||
msgid "Stop Creating '{}'"
|
||
msgstr "Parar la creación de '{}'"
|
||
|
||
#: src/window.py:409
|
||
msgid "image"
|
||
msgstr "Imagen"
|
||
|
||
#: src/window.py:480
|
||
msgid "Message copied to the clipboard"
|
||
msgstr "Mensaje copiado"
|
||
|
||
#: src/window.py:605
|
||
msgid "Remove Message"
|
||
msgstr "Remover Mensaje"
|
||
|
||
#: src/window.py:610 src/window.py:908
|
||
msgid "Copy Message"
|
||
msgstr "Copiar Mensaje"
|
||
|
||
#: src/window.py:615
|
||
msgid "Edit Message"
|
||
msgstr "Editar Mensaje"
|
||
|
||
#: src/window.py:620
|
||
msgid "Regenerate Message"
|
||
msgstr ""
|
||
|
||
#: src/window.py:673
|
||
msgid "Missing Image"
|
||
msgstr "Imagen no Encontrada"
|
||
|
||
#: src/window.py:689 src/window.py:691
|
||
msgid "Missing image"
|
||
msgstr "Imagen no Encontrada"
|
||
|
||
#: src/window.py:773
|
||
msgid "Remove '{}'"
|
||
msgstr ""
|
||
|
||
#: src/window.py:907
|
||
msgid "Code Block"
|
||
msgstr ""
|
||
|
||
#: src/window.py:939
|
||
msgid "Code copied to the clipboard"
|
||
msgstr "Codigo copiado"
|
||
|
||
#: src/window.py:999
|
||
msgid "Regenerate Response"
|
||
msgstr ""
|
||
|
||
#: src/window.py:1066
|
||
msgid "Task Complete"
|
||
msgstr "Tarea completada"
|
||
|
||
#: src/window.py:1066 src/window.py:1067
|
||
msgid "Model '{}' pulled successfully."
|
||
msgstr "El modelo '{}' fue descargado exitosamente"
|
||
|
||
#: src/window.py:1069 src/window.py:1072
|
||
msgid "Pull Model Error"
|
||
msgstr "Error Descargando Modelo"
|
||
|
||
#: src/window.py:1069
|
||
msgid "Failed to pull model '{}': {}"
|
||
msgstr ""
|
||
|
||
#: src/window.py:1070
|
||
msgid "Error pulling '{}': {}"
|
||
msgstr ""
|
||
|
||
#: src/window.py:1072
|
||
msgid "Failed to pull model '{}' due to network error."
|
||
msgstr "No se pudo descargar el modelo '{}' debido a un error de red"
|
||
|
||
#: src/window.py:1073
|
||
msgid "Error pulling '{}'"
|
||
msgstr ""
|
||
|
||
#: src/window.py:1106
|
||
msgid "Stop Pulling '{}'"
|
||
msgstr ""
|
||
|
||
#: src/window.py:1149
|
||
msgid "Image Recognition"
|
||
msgstr "Reconocimiento de Imagenes"
|
||
|
||
#: src/window.py:1273
|
||
msgid "Model deleted successfully"
|
||
msgstr "Modelo eliminado exitosamente"
|
||
|
||
#: src/window.py:1352
|
||
msgid "There was an error with the local Ollama instance, so it has been reset"
|
||
msgstr ""
|
||
"Ha ocurrido un error con la instancia local de Ollama, ha sido reinicida"
|
||
|
||
#: src/window.py:1372
|
||
msgid "Chat exported successfully"
|
||
msgstr "Chat exportado exitosamente"
|
||
|
||
#: src/window.py:1441
|
||
msgid "Chat imported successfully"
|
||
msgstr "Chat importado exitosamente"
|
||
|
||
#: src/window.py:1474
|
||
msgid "Cannot open image"
|
||
msgstr "No se pudo abrir la imagen"
|
||
|
||
#: src/window.py:1556
|
||
msgid "This video is not available"
|
||
msgstr "Este video no está disponible"
|
||
|
||
#: src/window.py:1574 src/dialogs.py:297
|
||
msgid "Image recognition is only available on specific models"
|
||
msgstr ""
|
||
"Reconocimiento de imagenes esta disponible solamente en modelos compatibles"
|
||
|
||
#: src/available_models_descriptions.py:2
|
||
msgid ""
|
||
"Llama 3.1 is a new state-of-the-art model from Meta available in 8B, 70B and "
|
||
"405B parameter sizes."
|
||
msgstr ""
|
||
|
||
#: src/available_models_descriptions.py:3
|
||
msgid "Google Gemma 2 is now available in 2 sizes, 9B and 27B."
|
||
msgstr "Google Gemma 2 ahora esta disponible en 2 tamaños, 9B y 27B."
|
||
|
||
#: src/available_models_descriptions.py:4
|
||
msgid ""
|
||
"A state-of-the-art 12B model with 128k context length, built by Mistral AI "
|
||
"in collaboration with NVIDIA."
|
||
msgstr ""
|
||
|
||
#: src/available_models_descriptions.py:5
|
||
msgid ""
|
||
"Mistral Large 2 is Mistral's new flagship model that is significantly more "
|
||
"capable in code generation, mathematics, and reasoning with 128k context "
|
||
"window and support for dozens of languages."
|
||
msgstr ""
|
||
|
||
#: src/available_models_descriptions.py:6
|
||
msgid "Qwen2 is a new series of large language models from Alibaba group"
|
||
msgstr "Qwen2 es una nueva serie de LLM del grupo Alibaba."
|
||
|
||
#: src/available_models_descriptions.py:7
|
||
msgid ""
|
||
"An open-source Mixture-of-Experts code language model that achieves "
|
||
"performance comparable to GPT4-Turbo in code-specific tasks."
|
||
msgstr ""
|
||
"Un modelo de lenguaje Mixturer-of-Experts abierto que consigue un "
|
||
"rendimiento comparable a GPT4-Turbo en tareas especificas a codigo."
|
||
|
||
#: src/available_models_descriptions.py:8
|
||
msgid ""
|
||
"Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art "
|
||
"open models by Microsoft."
|
||
msgstr ""
|
||
"Phi-3 es una familia de los ultimos modelos livianos de Microsoft, 3B (Mini) "
|
||
"y 14B (Medium)."
|
||
|
||
#: src/available_models_descriptions.py:9
|
||
msgid "The 7B model released by Mistral AI, updated to version 0.3."
|
||
msgstr "El modelo 7B lanzado por Mistral AI, actualizado a la versión 0.3."
|
||
|
||
#: src/available_models_descriptions.py:10
|
||
msgid ""
|
||
"A set of Mixture of Experts (MoE) model with open weights by Mistral AI in "
|
||
"8x7b and 8x22b parameter sizes."
|
||
msgstr ""
|
||
"Un set de modelos Mixture-of-Experts (MoE) con pesos abiertos por Mistral AI "
|
||
"dispnible en tamaños de parametros 8x7b y 8x22b."
|
||
|
||
#: src/available_models_descriptions.py:11
|
||
msgid ""
|
||
"CodeGemma is a collection of powerful, lightweight models that can perform a "
|
||
"variety of coding tasks like fill-in-the-middle code completion, code "
|
||
"generation, natural language understanding, mathematical reasoning, and "
|
||
"instruction following."
|
||
msgstr ""
|
||
"CodeGemma es una colección de poderosos, modelos livianos que pueden hacer "
|
||
"una variedad de tareas de codigo como fill-in-the-middle completación de "
|
||
"codigo, generación de codigo, comprensión de lenguaje natural, razonamiento "
|
||
"matematico y seguimiento de instrucciones."
|
||
|
||
#: src/available_models_descriptions.py:12
|
||
msgid ""
|
||
"Command R is a Large Language Model optimized for conversational interaction "
|
||
"and long context tasks."
|
||
msgstr ""
|
||
"Command R es un LLM optimizado para interacciones conversacionales y tareas "
|
||
"que requieren un contexto largo."
|
||
|
||
#: src/available_models_descriptions.py:13
|
||
msgid ""
|
||
"Command R+ is a powerful, scalable large language model purpose-built to "
|
||
"excel at real-world enterprise use cases."
|
||
msgstr ""
|
||
"Command R+ es un poderoso, escalable LLM construido con el proposito de "
|
||
"sobresalir en usos profesionales del mundo real."
|
||
|
||
#: src/available_models_descriptions.py:14
|
||
msgid ""
|
||
"🌋 LLaVA is a novel end-to-end trained large multimodal model that combines "
|
||
"a vision encoder and Vicuna for general-purpose visual and language "
|
||
"understanding. Updated to version 1.6."
|
||
msgstr ""
|
||
"🌋 LLaVA es un nuevo LLM entrenado en end-to-end que combina un "
|
||
"encodificador visual y Vicuna para entendimiento general en lenguaje y "
|
||
"visión. Acutalizado a la versión 1.6."
|
||
|
||
#: src/available_models_descriptions.py:15
|
||
msgid "Meta Llama 3: The most capable openly available LLM to date"
|
||
msgstr "Meta Llama 3: El LLM abierto más capaz a esta fecha."
|
||
|
||
#: src/available_models_descriptions.py:16
|
||
msgid ""
|
||
"Gemma is a family of lightweight, state-of-the-art open models built by "
|
||
"Google DeepMind. Updated to version 1.1"
|
||
msgstr ""
|
||
"Gemma es una familia de nuevos modelos abiertos livianos construidos por "
|
||
"Google DeepMind. Actualizado a la versión 1.1."
|
||
|
||
#: src/available_models_descriptions.py:17
|
||
msgid ""
|
||
"Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from "
|
||
"0.5B to 110B parameters"
|
||
msgstr ""
|
||
"Qwen 1.5 es una serie de LLM por Alibaba Cloud que cubren parametros entre "
|
||
"0.5B hasta 110B."
|
||
|
||
#: src/available_models_descriptions.py:18
|
||
msgid ""
|
||
"Llama 2 is a collection of foundation language models ranging from 7B to 70B "
|
||
"parameters."
|
||
msgstr ""
|
||
"Llama 2 es una colección de modelos bases que cubren parametros entre 7B y "
|
||
"70B."
|
||
|
||
#: src/available_models_descriptions.py:19
|
||
msgid ""
|
||
"A large language model that can use text prompts to generate and discuss "
|
||
"code."
|
||
msgstr "Un LLM que puede usar texto para generar y discutir sobre codigo."
|
||
|
||
#: src/available_models_descriptions.py:20
|
||
msgid ""
|
||
"Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of "
|
||
"experts models that excels at coding tasks. Created by Eric Hartford."
|
||
msgstr ""
|
||
"Descensurado, 8x7b y 8x22b, modelos afinados basados enn una mezcla de "
|
||
"modelos expertos de Mixtral especializados en tareas de codigo. Creado por "
|
||
"Eric Hartford."
|
||
|
||
#: src/available_models_descriptions.py:21
|
||
msgid ""
|
||
"A high-performing open embedding model with a large token context window."
|
||
msgstr ""
|
||
"Un modelo de integración abierto de alto rendimiento con una gran ventana de "
|
||
"contexto de token."
|
||
|
||
#: src/available_models_descriptions.py:22
|
||
msgid "Uncensored Llama 2 model by George Sung and Jarrad Hope."
|
||
msgstr "Modelo Llama 2 descensurado por George Sung y Jarrad Hope."
|
||
|
||
#: src/available_models_descriptions.py:23
|
||
msgid ""
|
||
"Phi-2: a 2.7B language model by Microsoft Research that demonstrates "
|
||
"outstanding reasoning and language understanding capabilities."
|
||
msgstr ""
|
||
"Phi-2: un modelo de lenguaje de 2.700 millones de Microsoft Research que "
|
||
"demuestra excelentes capacidades de razonamiento y comprensión del lenguaje."
|
||
|
||
#: src/available_models_descriptions.py:24
|
||
msgid ""
|
||
"DeepSeek Coder is a capable coding model trained on two trillion code and "
|
||
"natural language tokens."
|
||
msgstr ""
|
||
"DeepSeek Coder en un modelo especializado en codigo, entrenado en 2 "
|
||
"trillones de tokens de codigo y lenguaje natural."
|
||
|
||
#: src/available_models_descriptions.py:25
|
||
msgid ""
|
||
"The uncensored Dolphin model based on Mistral that excels at coding tasks. "
|
||
"Updated to version 2.8."
|
||
msgstr ""
|
||
"El modelo descensurado Dolphin, basado en Mistral que sobresale en tareas de "
|
||
"codigo. Actualizado a la versión 2.8."
|
||
|
||
#: src/available_models_descriptions.py:26
|
||
msgid ""
|
||
"A general-purpose model ranging from 3 billion parameters to 70 billion, "
|
||
"suitable for entry-level hardware."
|
||
msgstr ""
|
||
"Un modelo de uso general oscilando entre 3 billones hasta 70 billones de "
|
||
"parametros, adecuado para hardware básico."
|
||
|
||
#: src/available_models_descriptions.py:27
|
||
msgid ""
|
||
"Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on "
|
||
"Llama 3 that has a variety of instruction, conversational, and coding skills."
|
||
msgstr ""
|
||
"Dolphin 2.9 es un modelo nuevo con tamaños de 8B y 70B hecho por Eric "
|
||
"Hartford basado en Llama 3, tiene una variedad de instrucciones "
|
||
"conversacionales y habilidades en código"
|
||
|
||
#: src/available_models_descriptions.py:28
|
||
msgid "State-of-the-art large embedding model from mixedbread.ai"
|
||
msgstr "Modelo de integración grande de última generación de Mixedbread.ai"
|
||
|
||
#: src/available_models_descriptions.py:29
|
||
msgid ""
|
||
"StarCoder2 is the next generation of transparently trained open code LLMs "
|
||
"that comes in three sizes: 3B, 7B and 15B parameters."
|
||
msgstr ""
|
||
"StarCoder2 es la próxima generación de modelos de lenguaje abiertos "
|
||
"entrenados de manera transparente, que vienen en tres tamaños: 3B, 7B y 15B "
|
||
"parámetros."
|
||
|
||
#: src/available_models_descriptions.py:30
|
||
msgid ""
|
||
"Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the "
|
||
"Mistral 7B model using the OpenOrca dataset."
|
||
msgstr ""
|
||
"Mistral OpenOrca es un modelo de 7 billones de parametros, afinado con base "
|
||
"en el modelo Mistral 7B usando el dataset de OpenOrca."
|
||
|
||
#: src/available_models_descriptions.py:31
|
||
msgid "Yi 1.5 is a high-performing, bilingual language model."
|
||
msgstr "Yi 1.5 es un modelo de lenguaje bilingüe de alto rendimiento."
|
||
|
||
#: src/available_models_descriptions.py:32
|
||
msgid ""
|
||
"Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models "
|
||
"that are trained to act as helpful assistants."
|
||
msgstr ""
|
||
"Zephyr es una serie de versiones ajustadas de los modelos Mistral y Mixtral "
|
||
"que están entrenados para actuar como asistentes útiles."
|
||
|
||
#: src/available_models_descriptions.py:33
|
||
msgid "Llama 2 based model fine tuned to improve Chinese dialogue ability."
|
||
msgstr ""
|
||
"Modelo basado en Llama 2 ajustado para mejorar la capacidad de diálogo en "
|
||
"chino."
|
||
|
||
#: src/available_models_descriptions.py:34
|
||
msgid ""
|
||
"A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several "
|
||
"benchmarks."
|
||
msgstr ""
|
||
"Un modelo LLaVA ajustado a partir de Llama 3 Instruct con mejores "
|
||
"puntuaciones en varios benchmarks."
|
||
|
||
#: src/available_models_descriptions.py:35
|
||
msgid ""
|
||
"General use chat model based on Llama and Llama 2 with 2K to 16K context "
|
||
"sizes."
|
||
msgstr ""
|
||
"Modelo de chat de uso general basado en Llama y Llama 2 con tamaños de "
|
||
"contexto de 2K a 16K."
|
||
|
||
#: src/available_models_descriptions.py:36
|
||
msgid ""
|
||
"The powerful family of models by Nous Research that excels at scientific "
|
||
"discussion and coding tasks."
|
||
msgstr ""
|
||
"La poderosa familia de modelos de Nous Research que sobresale en discusiones "
|
||
"científicas y tareas de programación."
|
||
|
||
#: src/available_models_descriptions.py:37
|
||
msgid ""
|
||
"The TinyLlama project is an open endeavor to train a compact 1.1B Llama "
|
||
"model on 3 trillion tokens."
|
||
msgstr ""
|
||
"El proyecto TinyLlama es un esfuerzo abierto para entrenar un modelo "
|
||
"compacto de Llama de 1.1B en 3 billones de tokens."
|
||
|
||
#: src/available_models_descriptions.py:38
|
||
msgid ""
|
||
"Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on "
|
||
"Llama 2 uncensored by Eric Hartford."
|
||
msgstr ""
|
||
"Wizard Vicuna Uncensored es un modelo de 7B, 13B y 30B parámetros basado en "
|
||
"Llama 2 sin censura por Eric Hartford."
|
||
|
||
#: src/available_models_descriptions.py:39
|
||
msgid ""
|
||
"Codestral is Mistral AI’s first-ever code model designed for code generation "
|
||
"tasks."
|
||
msgstr ""
|
||
"Codestral es el primer modelo de código de Mistral AI diseñado para tareas "
|
||
"de generación de código."
|
||
|
||
#: src/available_models_descriptions.py:40
|
||
msgid ""
|
||
"StarCoder is a code generation model trained on 80+ programming languages."
|
||
msgstr ""
|
||
"StarCoder es un modelo de generación de código entrenado en más de 80 "
|
||
"lenguajes de programación."
|
||
|
||
#: src/available_models_descriptions.py:41
|
||
msgid ""
|
||
"State of the art large language model from Microsoft AI with improved "
|
||
"performance on complex chat, multilingual, reasoning and agent use cases."
|
||
msgstr ""
|
||
"Modelo de lenguaje grande de vanguardia de Microsoft AI con rendimiento "
|
||
"mejorado en chat complejo, multilingüe, razonamiento y casos de uso de "
|
||
"agentes."
|
||
|
||
#: src/available_models_descriptions.py:42
|
||
msgid ""
|
||
"A family of open-source models trained on a wide variety of data, surpassing "
|
||
"ChatGPT on various benchmarks. Updated to version 3.5-0106."
|
||
msgstr ""
|
||
"Una familia de modelos de código abierto entrenados en una amplia variedad "
|
||
"de datos, superando a ChatGPT en varios benchmarks. Actualizado a la versión "
|
||
"3.5-0106."
|
||
|
||
#: src/available_models_descriptions.py:43
msgid ""
"Aya 23, released by Cohere, is a new family of state-of-the-art, "
"multilingual models that support 23 languages."
msgstr ""
"Aya 23, lanzado por Cohere, es una nueva familia de modelos multilingües de "
"vanguardia que soportan 23 idiomas."

#: src/available_models_descriptions.py:44
msgid ""
"An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset "
"by Eric Hartford and based on TinyLlama."
msgstr ""
"Un modelo experimental de 1.1B parámetros entrenado en el nuevo conjunto de "
"datos Dolphin 2.8 por Eric Hartford y basado en TinyLlama."

#: src/available_models_descriptions.py:45
msgid ""
"OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully "
"open datasets."
msgstr ""
"OpenHermes 2.5 es un modelo de 7B ajustado por Teknium en Mistral con "
"conjuntos de datos completamente abiertos."

#: src/available_models_descriptions.py:46
msgid "State-of-the-art code generation model"
msgstr "Modelo de generación de código de vanguardia."

#: src/available_models_descriptions.py:47
msgid ""
"Stable Code 3B is a coding model with instruct and code completion variants "
"on par with models such as Code Llama 7B that are 2.5x larger."
msgstr ""
"Stable Code 3B es un modelo de codificación con variantes de instrucción y "
"completado de código a la par con modelos como Code Llama 7B que son 2.5 "
"veces más grandes."

#: src/available_models_descriptions.py:48
msgid ""
"CodeQwen1.5 is a large language model pretrained on a large amount of code "
"data."
msgstr ""
"CodeQwen1.5 es un modelo de lenguaje grande preentrenado con una gran "
"cantidad de datos de código."

#: src/available_models_descriptions.py:49
msgid "Model focused on math and logic problems"
msgstr "Modelo enfocado en problemas de matemáticas y lógica."

#: src/available_models_descriptions.py:50
msgid ""
"A fine-tuned model based on Mistral with good coverage of domain and "
"language."
msgstr ""
"Un modelo ajustado basado en Mistral con buena cobertura de dominio y "
"lenguaje."

#: src/available_models_descriptions.py:51
msgid ""
"Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model "
"trained on multilingual data in English, Spanish, German, Italian, French, "
"Portuguese, and Dutch."
msgstr ""
"Stable LM 2 es un modelo de lenguaje de vanguardia de 1.6B y 12B parámetros "
"entrenado en datos multilingües en inglés, español, alemán, italiano, "
"francés, portugués y neerlandés."

#: src/available_models_descriptions.py:52
msgid "A family of open foundation models by IBM for Code Intelligence"
msgstr ""
"Una familia de modelos de base abiertos por IBM para Code Intelligence."

#: src/available_models_descriptions.py:53
msgid "Embedding models on very large sentence level datasets."
msgstr ""
"Modelos de incrustación en conjuntos de datos de nivel de oración muy "
"grandes."

#: src/available_models_descriptions.py:54
msgid "Code generation model based on Code Llama."
msgstr "Modelo de generación de código basado en Code Llama."

#: src/available_models_descriptions.py:55
msgid ""
"A 7B and 15B uncensored variant of the Dolphin model family that excels at "
"coding, based on StarCoder2."
msgstr ""
"Una variante sin censura de 7B y 15B de la familia de modelos Dolphin que "
"sobresale en codificación, basada en StarCoder2."

#: src/available_models_descriptions.py:56
msgid "General use models based on Llama and Llama 2 from Nous Research."
msgstr "Modelos de uso general basados en Llama y Llama 2 de Nous Research."

#: src/available_models_descriptions.py:57
msgid ""
"SQLCoder is a code completion model fined-tuned on StarCoder for SQL "
"generation tasks"
msgstr ""
"SQLCoder es un modelo de completado de código ajustado en StarCoder para "
"tareas de generación de SQL."

#: src/available_models_descriptions.py:58
msgid ""
"This model extends LLama-3 8B's context length from 8k to over 1m tokens."
msgstr ""
"Este modelo extiende la longitud del contexto de LLama-3 8B de 8k a más de "
"1m tokens."

#: src/available_models_descriptions.py:59
msgid ""
"Starling is a large language model trained by reinforcement learning from AI "
"feedback focused on improving chatbot helpfulness."
msgstr ""
"Starling es un modelo de lenguaje grande entrenado mediante aprendizaje por "
"refuerzo a partir de retroalimentación de IA enfocado en mejorar la utilidad "
"de los chatbots."

#: src/available_models_descriptions.py:60
msgid "An extension of Llama 2 that supports a context of up to 128k tokens."
msgstr "Una extensión de Llama 2 que soporta un contexto de hasta 128k tokens."

#: src/available_models_descriptions.py:61
msgid ""
"Conversational model based on Llama 2 that performs competitively on various "
"benchmarks."
msgstr ""
"Modelo conversacional basado en Llama 2 que tiene un rendimiento competitivo "
"en varios benchmarks."

#: src/available_models_descriptions.py:62
msgid "An advanced language model crafted with 2 trillion bilingual tokens."
msgstr ""
"Un modelo de lenguaje avanzado creado con 2 billones de tokens bilingües."

#: src/available_models_descriptions.py:63
msgid ""
"A model from NVIDIA based on Llama 3 that excels at conversational question "
"answering (QA) and retrieval-augmented generation (RAG)."
msgstr ""
"Un modelo de NVIDIA basado en Llama 3 que sobresale en respuesta a preguntas "
"conversacionales (QA) y generación aumentada por recuperación (RAG)."

#: src/available_models_descriptions.py:64
msgid ""
"Orca 2 is built by Microsoft research, and are a fine-tuned version of "
"Meta's Llama 2 models. The model is designed to excel particularly in "
"reasoning."
msgstr ""
"Orca 2 es construido por Microsoft Research, y es una versión ajustada de "
"los modelos Llama 2 de Meta. El modelo está diseñado para sobresalir "
"particularmente en razonamiento."

#: src/available_models_descriptions.py:65
msgid "General use model based on Llama 2."
msgstr "Modelo de uso general basado en Llama 2."

#: src/available_models_descriptions.py:66
msgid ""
"A compact, yet powerful 10.7B large language model designed for single-turn "
"conversation."
msgstr ""
"Un modelo de lenguaje grande compacto pero poderoso de 10.7B diseñado para "
"conversación de un solo turno."

#: src/available_models_descriptions.py:67
msgid ""
"A companion assistant trained in philosophy, psychology, and personal "
"relationships. Based on Mistral."
msgstr ""
"Un asistente compañero entrenado en filosofía, psicología y relaciones "
"personales. Basado en Mistral."

#: src/available_models_descriptions.py:68
msgid ""
"2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language "
"model by Microsoft Research."
msgstr ""
"Modelo Dolphin sin censura de 2.7B por Eric Hartford, basado en el modelo de "
"lenguaje Phi de Microsoft Research."

#: src/available_models_descriptions.py:69
msgid ""
"Llama 2 based model fine tuned on an Orca-style dataset. Originally called "
"Free Willy."
msgstr ""
"Modelo basado en Llama 2 ajustado en un conjunto de datos estilo Orca. "
"Originalmente llamado Free Willy."

#: src/available_models_descriptions.py:70
msgid ""
"moondream2 is a small vision language model designed to run efficiently on "
"edge devices."
msgstr ""
"moondream2 es un pequeño modelo de lenguaje de visión diseñado para "
"funcionar eficientemente en dispositivos de borde (edge)."

#: src/available_models_descriptions.py:71
msgid ""
"BakLLaVA is a multimodal model consisting of the Mistral 7B base model "
"augmented with the LLaVA architecture."
msgstr ""
"BakLLaVA es un modelo multimodal que consiste en el modelo base Mistral 7B "
"aumentado con la arquitectura LLaVA."

#: src/available_models_descriptions.py:72
msgid "Uncensored version of Wizard LM model"
msgstr "Versión sin censura del modelo Wizard LM."

#: src/available_models_descriptions.py:73
msgid ""
"A suite of text embedding models by Snowflake, optimized for performance."
msgstr ""
"Un conjunto de modelos de incrustación de texto de Snowflake, optimizados "
"para el rendimiento."

#: src/available_models_descriptions.py:74
msgid "A strong, economical, and efficient Mixture-of-Experts language model."
msgstr ""
"Un modelo de lenguaje Mixture-of-Experts fuerte, económico y eficiente."

#: src/available_models_descriptions.py:75
msgid ""
"Fine-tuned Llama 2 model to answer medical questions based on an open source "
"medical dataset."
msgstr ""
"Modelo Llama 2 ajustado para responder preguntas médicas basado en un "
"conjunto de datos médicos de código abierto."

#: src/available_models_descriptions.py:76
msgid "An extension of Mistral to support context windows of 64K or 128K."
msgstr ""
"Una extensión de Mistral para soportar ventanas de contexto de 64K o 128K."

#: src/available_models_descriptions.py:77
msgid ""
"An expansion of Llama 2 that specializes in integrating both general "
"language understanding and domain-specific knowledge, particularly in "
"programming and mathematics."
msgstr ""
"Una expansión de Llama 2 que se especializa en integrar tanto la comprensión "
"general del lenguaje como el conocimiento específico del dominio, "
"particularmente en programación y matemáticas."

#: src/available_models_descriptions.py:78
msgid "The Nous Hermes 2 model from Nous Research, now trained over Mixtral."
msgstr ""
"El modelo Nous Hermes 2 de Nous Research, ahora entrenado sobre Mixtral."

#: src/available_models_descriptions.py:79
msgid ""
"Open-source medical large language model adapted from Llama 2 to the medical "
"domain."
msgstr ""
"Modelo de lenguaje grande médico de código abierto adaptado de Llama 2 al "
"dominio médico."

#: src/available_models_descriptions.py:80
msgid "Great code generation model based on Llama2."
msgstr "Gran modelo de generación de código basado en Llama2."

#: src/available_models_descriptions.py:81
msgid ""
"Nexus Raven is a 13B instruction tuned model for function calling tasks."
msgstr ""
"Nexus Raven es un modelo de 13B ajustado por instrucciones para tareas de "
"llamada de funciones."

#: src/available_models_descriptions.py:82
msgid "Uncensored Llama2 based model with support for a 16K context window."
msgstr ""
"Modelo sin censura basado en Llama2 con soporte para una ventana de contexto "
"de 16K."

#: src/available_models_descriptions.py:83
msgid "A new small LLaVA model fine-tuned from Phi 3 Mini."
msgstr "Un nuevo modelo LLaVA pequeño ajustado a partir de Phi 3 Mini."

#: src/available_models_descriptions.py:84
msgid ""
"A versatile model for AI software development scenarios, including code "
"completion."
msgstr ""
"Un modelo versátil para escenarios de desarrollo de software con IA, "
"incluyendo el completado de código."

#: src/available_models_descriptions.py:85
msgid ""
"A strong multi-lingual general language model with competitive performance "
"to Llama 3."
msgstr ""
"Un modelo de lenguaje general multilingüe robusto con un rendimiento "
"competitivo frente a Llama 3."

#: src/available_models_descriptions.py:86
msgid ""
"🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic "
"instruction data using OSS-Instruct, a novel approach to enlightening LLMs "
"with open-source code snippets."
msgstr ""
"🎩 Magicoder es una familia de modelos de 7B parámetros entrenados en 75K "
"datos de instrucción sintética utilizando OSS-Instruct, un enfoque novedoso "
"para iluminar a los LLMs con fragmentos de código de código abierto."

#: src/available_models_descriptions.py:87
msgid ""
"A lightweight chat model allowing accurate, and responsive output without "
"requiring high-end hardware."
msgstr ""
"Un modelo de chat ligero que permite una salida precisa y receptiva sin "
"requerir hardware de alta gama."

#: src/available_models_descriptions.py:88
msgid ""
"A high-performing code instruct model created by merging two existing code "
"models."
msgstr ""
"Un modelo de instrucción de código de alto rendimiento creado mediante la "
"fusión de dos modelos de código existentes."

#: src/available_models_descriptions.py:89
msgid ""
"MistralLite is a fine-tuned model based on Mistral with enhanced "
"capabilities of processing long contexts."
msgstr ""
"MistralLite es un modelo ajustado basado en Mistral con capacidades "
"mejoradas de procesamiento de contextos largos."

#: src/available_models_descriptions.py:90
msgid ""
"Wizard Vicuna is a 13B parameter model based on Llama 2 trained by "
"MelodysDreamj."
msgstr ""
"Wizard Vicuna es un modelo de 13B parámetros basado en Llama 2 entrenado por "
"MelodysDreamj."

#: src/available_models_descriptions.py:91
msgid "7B parameter text-to-SQL model made by MotherDuck and Numbers Station."
msgstr ""
"Modelo de texto a SQL de 7B parámetros hecho por MotherDuck y Numbers "
"Station."

#: src/available_models_descriptions.py:92
msgid ""
"MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by "
"interleaving the model with itself."
msgstr ""
"MegaDolphin-2.2-120b es una transformación de Dolphin-2.2-70b creada al "
"entrelazar el modelo consigo mismo."

#: src/available_models_descriptions.py:93
msgid ""
"A language model created by combining two fine-tuned Llama 2 70B models into "
"one."
msgstr ""
"Un modelo de lenguaje creado combinando dos modelos ajustados de Llama 2 70B "
"en uno."

#: src/available_models_descriptions.py:94
msgid ""
"A top-performing mixture of experts model, fine-tuned with high-quality data."
msgstr ""
"Un modelo de mezcla de expertos de alto rendimiento, ajustado con datos de "
"alta calidad."

#: src/available_models_descriptions.py:95
msgid ""
"Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. "
"Designed for chat and code generation."
msgstr ""
"Fusión del modelo Open Orca OpenChat y el modelo Garage-bAInd Platypus 2. "
"Diseñada para chat y generación de código."

#: src/available_models_descriptions.py:96
msgid ""
"Falcon2 is an 11B parameters causal decoder-only model built by TII and "
"trained over 5T tokens."
msgstr ""
"Falcon2 es un modelo causal de 11B parámetros solo decodificador construido "
"por TII y entrenado en más de 5T tokens."

#: src/available_models_descriptions.py:97
msgid "A 7B chat model fine-tuned with high-quality data and based on Zephyr."
msgstr ""
"Un modelo de chat de 7B ajustado con datos de alta calidad y basado en "
"Zephyr."

#: src/available_models_descriptions.py:98
msgid "DBRX is an open, general-purpose LLM created by Databricks."
msgstr "DBRX es un LLM abierto de propósito general creado por Databricks."

#: src/available_models_descriptions.py:99
msgid ""
"InternLM2.5 is a 7B parameter model tailored for practical scenarios with "
"outstanding reasoning capability."
msgstr ""
"InternLM2.5 es un modelo de 7B parámetros adaptado a escenarios prácticos "
"con una capacidad de razonamiento sobresaliente."

#: src/available_models_descriptions.py:100
msgid ""
"A robust conversational model designed to be used for both chat and instruct "
"use cases."
msgstr ""
"Un modelo conversacional robusto diseñado para ser utilizado tanto en casos "
"de uso de chat como de instrucción."

#: src/available_models_descriptions.py:101
msgid ""
"A series of models from Groq that represent a significant advancement in "
"open-source AI capabilities for tool use/function calling."
msgstr ""
"Una serie de modelos de Groq que representan un avance significativo en las "
"capacidades de IA de código abierto para el uso de herramientas y la llamada "
"de funciones."

#: src/available_models_descriptions.py:102
msgid ""
"MathΣtral: a 7B model designed for math reasoning and scientific discovery "
"by Mistral AI."
msgstr ""
"MathΣtral: un modelo de 7B diseñado por Mistral AI para el razonamiento "
"matemático y el descubrimiento científico."

#: src/available_models_descriptions.py:103
msgid ""
"An open weights function calling model based on Llama 3, competitive with "
"GPT-4o function calling capabilities."
msgstr ""
"Un modelo de llamada de funciones con pesos abiertos basado en Llama 3, "
"competitivo con las capacidades de llamada de funciones de GPT-4o."

#: src/available_models_descriptions.py:104
msgid ""
"A 3.8B model fine-tuned on a private high-quality synthetic dataset for "
"information extraction, based on Phi-3."
msgstr ""
"Un modelo de 3.8B ajustado en un conjunto de datos sintéticos privado de "
"alta calidad para la extracción de información, basado en Phi-3."

#: src/dialogs.py:17
msgid "Chat cannot be cleared while receiving a message"
msgstr "El chat no puede ser limpiado mientras se recibe un mensaje"

#: src/dialogs.py:20
msgid "Clear Chat?"
msgstr "¿Limpiar Chat?"

#: src/dialogs.py:21
msgid "Are you sure you want to clear the chat?"
msgstr "¿Estás seguro de que quieres limpiar el chat?"

#: src/dialogs.py:24 src/dialogs.py:45 src/dialogs.py:72 src/dialogs.py:99
#: src/dialogs.py:121 src/dialogs.py:142 src/dialogs.py:164 src/dialogs.py:236
#: src/dialogs.py:273 src/dialogs.py:347 src/dialogs.py:385
msgid "Cancel"
msgstr "Cancelar"

#: src/dialogs.py:25
msgid "Clear"
msgstr "Limpiar"

#: src/dialogs.py:41
msgid "Delete Chat?"
msgstr "¿Eliminar Chat?"

#: src/dialogs.py:42 src/dialogs.py:139
msgid "Are you sure you want to delete '{}'?"
msgstr "¿Estás seguro de que quieres eliminar '{}'?"

#: src/dialogs.py:46 src/dialogs.py:143
msgid "Delete"
msgstr "Eliminar"

#: src/dialogs.py:66
msgid "Rename Chat?"
msgstr "¿Renombrar Chat?"

#: src/dialogs.py:67
msgid "Renaming '{}'"
msgstr "Renombrando '{}'"

#: src/dialogs.py:73
msgid "Rename"
msgstr "Renombrar"

#: src/dialogs.py:93
msgid "Create Chat?"
msgstr "¿Crear Chat?"

#: src/dialogs.py:94
msgid "Enter name for new chat"
msgstr "Ingrese el nombre para el nuevo chat"

#: src/dialogs.py:100 src/window.ui:731
msgid "Create"
msgstr "Crear"

#: src/dialogs.py:117
msgid "Stop Download?"
msgstr "¿Parar Descarga?"

#: src/dialogs.py:118
msgid "Are you sure you want to stop pulling '{} ({})'?"
msgstr "¿Estás seguro de que quieres parar la descarga de '{} ({})'?"

#: src/dialogs.py:122
msgid "Stop"
msgstr "Parar"

#: src/dialogs.py:138
msgid "Delete Model?"
msgstr "¿Eliminar Modelo?"

#: src/dialogs.py:160
msgid "Remove Attachment?"
msgstr "¿Remover Adjunto?"

#: src/dialogs.py:161
msgid "Are you sure you want to remove attachment?"
msgstr "¿Estás seguro de que quieres remover el adjunto?"

#: src/dialogs.py:165
msgid "Remove"
msgstr "Remover"

#: src/dialogs.py:202
msgid "Connection Error"
msgstr "Error de conexión"

#: src/dialogs.py:203
msgid "The remote instance has disconnected"
msgstr "La instancia remota se ha desconectado"

#: src/dialogs.py:207
msgid "Close Alpaca"
msgstr "Cerrar Alpaca"

#: src/dialogs.py:208
msgid "Use local instance"
msgstr "Usar instancia local"

#: src/dialogs.py:209
msgid "Connect"
msgstr "Conectar"

#: src/dialogs.py:232
msgid "Select Model"
msgstr "Selecciona el Modelo"

#: src/dialogs.py:233
msgid "This model will be used as the base for the new model"
msgstr "Este modelo será usado como base para el nuevo modelo"

#: src/dialogs.py:237 src/dialogs.py:274 src/dialogs.py:348 src/dialogs.py:386
msgid "Accept"
msgstr "Aceptar"

#: src/dialogs.py:254
msgid "An error occurred while creating the model"
msgstr "Ha ocurrido un error mientras se creaba el modelo"

#: src/dialogs.py:269
msgid "Pull Model"
msgstr "Descargar Modelo"

#: src/dialogs.py:270
msgid ""
"Input the name of the model in this format\n"
"name:tag"
msgstr ""
"Ingresa el nombre del modelo en este formato\n"
"name:tag"

#: src/dialogs.py:333
msgid "This video does not have any transcriptions"
msgstr "Este video no tiene transcripciones"

#: src/dialogs.py:342
msgid "Attach YouTube Video?"
msgstr "¿Adjuntar Video de YouTube?"

#: src/dialogs.py:343
msgid ""
"{}\n"
"\n"
"Please select a transcript to include"
msgstr ""
"{}\n"
"\n"
"Por favor selecciona la transcripción a incluir"

#: src/dialogs.py:376
msgid "An error occurred while extracting text from the website"
msgstr "Ha ocurrido un error mientras se extraía texto del sitio web"

#: src/dialogs.py:381
msgid "Attach Website? (Experimental)"
msgstr "¿Adjuntar Sitio Web? (Experimental)"

#: src/dialogs.py:382
msgid ""
"Are you sure you want to attach\n"
"'{}'?"
msgstr ""
"¿Estás seguro de que quieres adjuntar\n"
"'{}'?"

#: src/dialogs.py:400
msgid "Thank you!"
msgstr "¡Gracias!"

#: src/dialogs.py:403
msgid "Visit Alpaca's website if you change your mind!"
msgstr "¡Visita el sitio web de Alpaca si cambias de opinión!"

#: src/dialogs.py:409 src/dialogs.py:416
msgid "Support"
msgstr "Apoyar"

#: src/dialogs.py:410
msgid "Are you enjoying Alpaca? Consider sponsoring the project!"
msgstr "¿Estás disfrutando Alpaca? ¡Considera patrocinar el proyecto!"

#: src/dialogs.py:413
msgid "Don't show again"
msgstr "No mostrar de nuevo"

#: src/dialogs.py:415
msgid "Later"
msgstr "Más tarde"

#: src/window.ui:51
msgid "Menu"
msgstr "Menú"

#: src/window.ui:81
msgid "Toggle Sidebar"
msgstr "Alternar barra lateral"

#: src/window.ui:106 src/window.ui:466
msgid "Manage Models"
msgstr "Gestionar Modelos"

#: src/window.ui:120
msgid "Chat Menu"
msgstr "Menú de Chat"

#: src/window.ui:196
msgid "Attach File"
msgstr "Adjuntar Archivo"

#: src/window.ui:241 src/window.ui:1210
msgid "Send Message"
msgstr "Enviar Mensaje"

#: src/window.ui:290 src/window.ui:1051 src/window.ui:1169
msgid "Preferences"
msgstr "Preferencias"

#: src/window.ui:293 src/window.ui:1147
msgid "General"
msgstr "General"

#: src/window.ui:299
msgid "Use Remote Connection to Ollama"
msgstr "Usar una conexión remota a Ollama"

#: src/window.ui:305
msgid "URL of Remote Instance"
msgstr "URL de la Instancia Remota"

#: src/window.ui:312
msgid "Bearer Token (Optional)"
msgstr "Bearer Token (Opcional)"

#: src/window.ui:322
msgid "Run Alpaca In Background"
msgstr "Ejecutar Alpaca en segundo plano"

#: src/window.ui:333
msgid "Temperature"
msgstr "Temperatura"

#: src/window.ui:334
msgid ""
"The temperature of the model. Increasing the temperature will make the model "
"answer more creatively. (Default: 0.8)"
msgstr ""
"La temperatura del modelo. Incrementar la temperatura hará que el modelo "
"responda más creativamente. (Por defecto: 0.8)"

#: src/window.ui:349
msgid "Seed"
msgstr "Semilla"

#: src/window.ui:350
msgid ""
"Sets the random number seed to use for generation. Setting this to a "
"specific number will make the model generate the same text for the same "
"prompt. (Default: 0 (random))"
msgstr ""
"Establece la semilla de números aleatorios que se usa para la generación. "
"Establecer un número específico hará que el modelo genere el mismo texto "
"para la misma petición. (Por defecto: 0 (al azar))"

#: src/window.ui:364
msgid "Keep Alive Time"
msgstr "Tiempo Para Mantener Vivo"

#: src/window.ui:365
msgid ""
"Controls how long the model will stay loaded into memory following the "
"request in minutes (Default: 5)"
msgstr ""
"Controla por cuánto tiempo el modelo permanecerá cargado en la memoria "
"después de la última petición en minutos (Por defecto: 5)"

#: src/window.ui:381
msgid "Ollama Instance"
msgstr "Instancia de Ollama"

#: src/window.ui:385
msgid "Ollama Overrides"
msgstr "Overrides de Ollama"

#: src/window.ui:386
msgid ""
"Manage the arguments used on Ollama, any changes on this page only applies "
"to the integrated instance, the instance will restart if you make changes."
msgstr ""
"Administra los argumentos usados en Ollama; cualquier cambio en esta página "
"solo aplica a la instancia integrada. La instancia se reiniciará si haces "
"algún cambio."

#: src/window.ui:476 src/window.ui:611
msgid "Create Model"
msgstr "Crear Modelo"

#: src/window.ui:483
msgid "Search Model"
msgstr "Buscar Modelo"

#: src/window.ui:496
msgid "Search models"
msgstr "Buscar modelos"

#: src/window.ui:543
msgid "No Models Found"
msgstr "Ningún modelo encontrado"

#: src/window.ui:544
msgid "Try a different search"
msgstr "Intenta una búsqueda distinta"

#: src/window.ui:589
msgid ""
"By downloading this model you accept the license agreement available on the "
"model's website."
msgstr ""
"Al descargar este modelo aceptas el acuerdo de licencia disponible en el "
"sitio web del modelo."

#: src/window.ui:646
msgid "Base"
msgstr "Base"

#: src/window.ui:665
msgid "Name"
msgstr "Nombre"

#: src/window.ui:671
msgid "Context"
msgstr "Contexto"

#: src/window.ui:715
msgid ""
"Some models require a modelfile, Alpaca fills FROM and SYSTEM (context) "
"instructions automatically. Please visit the model's website or Ollama "
"documentation for more information if you're unsure."
msgstr ""
"Algunos modelos requieren un modelfile; Alpaca rellena las instrucciones "
"FROM y SYSTEM (contexto) automáticamente. Por favor visita el sitio web del "
"modelo o la documentación de Ollama para más información si no estás seguro."

#: src/window.ui:763
msgid "Open With Default App"
msgstr "Abrir con aplicación predeterminada"

#: src/window.ui:771
msgid "Remove Attachment"
msgstr "Remover Adjunto"

#: src/window.ui:833
msgid "Previous"
msgstr "Anterior"

#: src/window.ui:876
msgid "Welcome to Alpaca"
msgstr "Bienvenido a Alpaca"

#: src/window.ui:877
msgid "Powered by Ollama"
msgstr "Impulsado por Ollama"

#: src/window.ui:880
msgid "Ollama Website"
msgstr "Sitio Web de Ollama"

#: src/window.ui:897
msgid ""
"Alpaca and its developers are not liable for any damages to devices or "
"software resulting from the execution of code generated by an AI model. "
"Please exercise caution and review the code carefully before running it."
msgstr ""
"Alpaca y sus desarrolladores no son responsables de ningún daño a "
"dispositivos o software resultante de la ejecución de código generado por "
"un modelo de IA. Por favor, sea precavido y revise el código cuidadosamente "
"antes de ejecutarlo."

#: src/window.ui:908
msgid "Featured Models"
msgstr "Modelos Destacados"

#: src/window.ui:909
msgid ""
"Alpaca works locally on your device, to start chatting you'll need an AI "
"model, you can either pull models from this list or the 'Manage Models' menu "
"later.\n"
"\n"
"By downloading any model you accept their license agreement available on the "
"model's website.\n"
" "
msgstr ""
"Alpaca funciona localmente en tu dispositivo; para empezar a conversar "
"necesitarás un modelo de IA, puedes descargar modelos de esta lista o desde "
"el menú 'Gestionar Modelos' después.\n"
"\n"
"Al descargar cualquier modelo aceptas su acuerdo de licencia disponible en "
"el sitio web del modelo.\n"
" "

#: src/window.ui:922
msgid "Built by Meta"
msgstr "Construido por Meta"

#: src/window.ui:950
msgid "Built by Google DeepMind"
msgstr "Construido por Google DeepMind"

#: src/window.ui:978
msgid "Built by Microsoft"
msgstr "Construido por Microsoft"

#: src/window.ui:1006
msgid "Multimodal AI with image recognition"
msgstr "IA multimodal con reconocimiento de imágenes"

#: src/window.ui:1045
msgid "Import Chat"
msgstr "Importar chat"

#: src/window.ui:1055
msgid "Keyboard Shortcuts"
msgstr "Atajos de Teclado"

#: src/window.ui:1059
msgid "About Alpaca"
msgstr "Sobre Alpaca"

#: src/window.ui:1066 src/window.ui:1085
msgid "Rename Chat"
msgstr "Renombrar Chat"

#: src/window.ui:1070 src/window.ui:1089
msgid "Export Chat"
msgstr "Exportar chat"

#: src/window.ui:1074
msgid "Clear Chat"
msgstr "Limpiar Chat"

#: src/window.ui:1081
msgid "Delete Chat"
msgstr "Eliminar Chat"

#: src/window.ui:1097
msgid "From Existing Model"
msgstr "Usar modelo existente"

#: src/window.ui:1101
msgid "From GGUF File"
msgstr "Usar archivo GGUF"

#: src/window.ui:1105
msgid "From Name"
msgstr "Usar nombre"

#: src/window.ui:1151
msgid "Close application"
msgstr "Cerrar aplicación"

#: src/window.ui:1157
msgid "Import chat"
msgstr "Importar chat"

#: src/window.ui:1163
msgid "Clear chat"
msgstr "Limpiar chat"

#: src/window.ui:1175
msgid "New chat"
msgstr "Nuevo chat"

#: src/window.ui:1181
msgid "Show shortcuts window"
msgstr "Mostrar ventana de atajos"

#: src/window.ui:1188
msgid "Editor"
msgstr "Editor"

#: src/window.ui:1192
msgid "Copy"
msgstr "Copiar"

#: src/window.ui:1198
msgid "Paste"
msgstr "Pegar"

#: src/window.ui:1204
msgid "Insert new line"
msgstr "Saltar línea"

#~ msgid "Remove '{} ({})'"
|
||
#~ msgstr "Remover '{} ({})'"
|
||
|
||
#~ msgid "Stop Pulling '{} ({})'"
|
||
#~ msgstr "Parar Descarga de '{} ({})'"
|
||
|
||
#~ msgid "Template"
|
||
#~ msgstr "Plantilla"
|
||
|
||
#~ msgid ""
|
||
#~ "Some models require a specific template. Please visit the model's website "
|
||
#~ "for more information if you're unsure."
|
||
#~ msgstr ""
|
||
#~ "Algunos modelos requieren de una plantilla especifica. Por favor visita "
|
||
#~ "el sitio web del modelo para más información en caso de que no estés "
|
||
#~ "seguro"
|
||
|
||
#~ msgid "From GGUF File (Experimental)"
|
||
#~ msgstr "Usar archivo GGUF (Experimental)"
|
||
|
||
#~ msgid "A conversation showing code highlight"
|
||
#~ msgstr "Una conversación mostrando highlight de codigo"
|
||
|
||
#~ msgid "A conversation involving multiple models"
|
||
#~ msgstr "Una conversación incluyendo multiples modelos"
|
||
|
||
#~ msgid "Managing models"
|
||
#~ msgstr "Gestionando modelos"
|
||
|
||
#~ msgid "Open with Default App"
|
||
#~ msgstr "Abrir con Aplicación Predeterminada"
|
||
|
||
#~ msgid ""
|
||
#~ "Alpaca works locally on your device, to start chatting you'll need an AI "
|
||
#~ "model, you can either pull models from this list or the 'Manage Models' "
|
||
#~ "menu later."
|
||
#~ msgstr ""
|
||
#~ "Alpaca funciona localmente en tu dispositivo, para empezar a chatear "
|
||
#~ "necesitas un modelo IA, puedes descargar modelos de esta lista o usando "
|
||
#~ "el menu 'Gestionar Modelos' despues"
|
||
|
||
#~ msgid "An error occurred"
|
||
#~ msgstr "Ha ocurrio un error"
|
||
|
||
#~ msgid "Could not list local models"
|
||
#~ msgstr "No se pudieron listar los modelos locales"
|
||
|
||
#~ msgid "Could not delete model"
|
||
#~ msgstr "No se pudo eliminar el modelo"
|
||
|
||
#~ msgid "Could not pull model"
|
||
#~ msgstr "No se pudo descargar el modelo"
|
||
|
||
#~ msgid "Cannot delete chat because it's the only one left"
|
||
#~ msgstr "No se pudo eliminar el chat por que es el único que queda"
|
||
|
||
#~ msgid "That tag is already being pulled"
|
||
#~ msgstr "Esa etiqueta ya se está descargando"
|
||
|
||
#~ msgid "That tag has been pulled already"
|
||
#~ msgstr "Esa etiqueta ya ha sido descargada"
|
||
|
||
#~ msgid "Model pulled successfully"
|
||
#~ msgstr "Modelo descargado exitosamente"
|