# German translations for Alpaca package.
# Copyright (C) 2024 Jeffry Samuel Eduarte Rojas
# This file is distributed under the same license as the Alpaca package.
# Marcel Margenberg <dev.margenberg@gmail.com>, 2024.
#
msgid ""
msgstr ""
"Project-Id-Version: 2.0.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2024-10-15 21:46-0600\n"
"PO-Revision-Date: 2024-07-30 12:46-0600\n"
"Last-Translator: Marcel Margenberg <dev.margenberg@gmail.com>\n"
"Language-Team: German\n"
"Language: de\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"

#: data/com.jeffser.Alpaca.desktop.in:3
#: data/com.jeffser.Alpaca.metainfo.xml.in:7
msgid "Alpaca"
msgstr "Alpaca"

#: data/com.jeffser.Alpaca.desktop.in:9
msgid "ai;ollama;llm"
msgstr "ki;ollama;llm"

#: data/com.jeffser.Alpaca.metainfo.xml.in:8
msgid "Chat with local AI models"
msgstr "Chatten Sie mit lokalen KI-Modellen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:10
msgid "An Ollama client"
msgstr "Ein Ollama-Client"

#: data/com.jeffser.Alpaca.metainfo.xml.in:11
#: data/com.jeffser.Alpaca.metainfo.xml.in:809
msgid "Features"
msgstr "Funktionen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:13
msgid "Built in Ollama instance"
msgstr "Eingebaute Ollama-Instanz"

#: data/com.jeffser.Alpaca.metainfo.xml.in:14
#: data/com.jeffser.Alpaca.metainfo.xml.in:811
msgid "Talk to multiple models in the same conversation"
msgstr "Sprechen Sie mit mehreren Modellen in derselben Konversation"

#: data/com.jeffser.Alpaca.metainfo.xml.in:15
#: data/com.jeffser.Alpaca.metainfo.xml.in:812
msgid "Pull and delete models from the app"
msgstr "Modelle aus der App abrufen und löschen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:16
msgid "Have multiple conversations"
msgstr "Mehrere Konversationen führen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:17
msgid "Image recognition (Only available with compatible models)"
msgstr "Bilderkennung (Nur mit kompatiblen Modellen verfügbar)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:18
msgid "Plain text documents recognition"
msgstr "Erkennung von Plaintext-Dokumenten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:19
msgid "Import and export chats"
msgstr "Chats importieren und exportieren"

#: data/com.jeffser.Alpaca.metainfo.xml.in:20
msgid "Append YouTube transcripts to the prompt"
msgstr "YouTube-Transkripte zur Eingabeaufforderung hinzufügen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:21
msgid "Append text from a website to the prompt"
msgstr "Text von einer Website zur Eingabeaufforderung hinzufügen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:22
msgid "PDF recognition"
msgstr "PDF-Erkennung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:24 src/window.ui:989
msgid "Disclaimer"
msgstr "Haftungsausschluss"

#: data/com.jeffser.Alpaca.metainfo.xml.in:25
msgid ""
"This project is not affiliated at all with Ollama, I'm not responsible for "
"any damages to your device or software caused by running code given by any "
"models."
msgstr ""
"Dieses Projekt ist in keiner Weise mit Ollama verbunden, ich bin nicht "
"verantwortlich für jegliche Schäden an Ihrem Gerät oder Ihrer Software, die "
"durch die Ausführung von Code entstehen, der von Modellen geliefert wird."

#: data/com.jeffser.Alpaca.metainfo.xml.in:28
msgid "Jeffry Samuel Eduarte Rojas"
msgstr "Jeffry Samuel Eduarte Rojas"

#: data/com.jeffser.Alpaca.metainfo.xml.in:54
msgid "A normal conversation with an AI Model"
msgstr "Eine normale Konversation mit einem KI-Modell"

#: data/com.jeffser.Alpaca.metainfo.xml.in:58
msgid "A conversation involving image recognition"
msgstr "Eine Konversation mit Bilderkennung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:62
msgid "A conversation showing code highlighting"
msgstr "Eine Konversation mit Code-Hervorhebung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:66
msgid "A Python script running inside integrated terminal"
msgstr "Ein Python-Skript, das im integrierten Terminal ausgeführt wird"

#: data/com.jeffser.Alpaca.metainfo.xml.in:70
msgid "A conversation involving a YouTube video transcript"
msgstr "Eine Konversation mit einem YouTube-Video-Transkript"

#: data/com.jeffser.Alpaca.metainfo.xml.in:74
msgid "Multiple models being downloaded"
msgstr "Mehrere Modelle werden heruntergeladen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:88
#: data/com.jeffser.Alpaca.metainfo.xml.in:100
#: data/com.jeffser.Alpaca.metainfo.xml.in:116
#: data/com.jeffser.Alpaca.metainfo.xml.in:131
#: data/com.jeffser.Alpaca.metainfo.xml.in:166
#: data/com.jeffser.Alpaca.metainfo.xml.in:191
#: data/com.jeffser.Alpaca.metainfo.xml.in:222
#: data/com.jeffser.Alpaca.metainfo.xml.in:248
#: data/com.jeffser.Alpaca.metainfo.xml.in:270
#: data/com.jeffser.Alpaca.metainfo.xml.in:301
#: data/com.jeffser.Alpaca.metainfo.xml.in:323
#: data/com.jeffser.Alpaca.metainfo.xml.in:344
#: data/com.jeffser.Alpaca.metainfo.xml.in:359
#: data/com.jeffser.Alpaca.metainfo.xml.in:384
msgid "New"
msgstr "Neu"

#: data/com.jeffser.Alpaca.metainfo.xml.in:90
msgid "Details page for models"
msgstr "Detailseite für Modelle"

#: data/com.jeffser.Alpaca.metainfo.xml.in:91
msgid ""
"Model selector gets replaced with 'manage models' button when there are no "
"models downloaded"
msgstr ""
"Die Modellauswahl wird durch die Schaltfläche 'Modelle verwalten' ersetzt, "
"wenn keine Modelle heruntergeladen sind"

#: data/com.jeffser.Alpaca.metainfo.xml.in:92
msgid "Added warning when model is too big for the device"
msgstr "Warnung hinzugefügt, wenn ein Modell zu groß für das Gerät ist"

#: data/com.jeffser.Alpaca.metainfo.xml.in:93
msgid "Added AMD GPU indicator in preferences"
msgstr "AMD-GPU-Indikator in den Einstellungen hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:102
msgid "Better system for handling dialogs"
msgstr "Besseres System zur Handhabung von Dialogen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:103
msgid "Better system for handling instance switching"
msgstr "Besseres System zur Handhabung des Instanzwechsels"

#: data/com.jeffser.Alpaca.metainfo.xml.in:104
msgid "Remote connection dialog"
msgstr "Dialog für Remote-Verbindungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:106
#: data/com.jeffser.Alpaca.metainfo.xml.in:120
#: data/com.jeffser.Alpaca.metainfo.xml.in:137
#: data/com.jeffser.Alpaca.metainfo.xml.in:148
#: data/com.jeffser.Alpaca.metainfo.xml.in:157
#: data/com.jeffser.Alpaca.metainfo.xml.in:174
#: data/com.jeffser.Alpaca.metainfo.xml.in:184
#: data/com.jeffser.Alpaca.metainfo.xml.in:201
#: data/com.jeffser.Alpaca.metainfo.xml.in:211
#: data/com.jeffser.Alpaca.metainfo.xml.in:258
#: data/com.jeffser.Alpaca.metainfo.xml.in:283
#: data/com.jeffser.Alpaca.metainfo.xml.in:308
#: data/com.jeffser.Alpaca.metainfo.xml.in:330
#: data/com.jeffser.Alpaca.metainfo.xml.in:348
#: data/com.jeffser.Alpaca.metainfo.xml.in:366
#: data/com.jeffser.Alpaca.metainfo.xml.in:378
#: data/com.jeffser.Alpaca.metainfo.xml.in:394
msgid "Fixes"
msgstr "Fehlerbehebungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:108
msgid "Fixed: Models get duplicated when switching remote and local instance"
msgstr ""
"Behoben: Modelle werden beim Wechsel zwischen Remote- und lokaler Instanz "
"dupliziert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:109
msgid "Better internal instance manager"
msgstr "Besserer interner Instanz-Manager"

#: data/com.jeffser.Alpaca.metainfo.xml.in:118
msgid "Added 'Cancel' and 'Save' buttons when editing a message"
msgstr ""
"Schaltflächen 'Abbrechen' und 'Speichern' beim Bearbeiten einer Nachricht "
"hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:122
msgid "Better handling of image recognition"
msgstr "Bessere Handhabung der Bilderkennung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:123
msgid "Remove unused files when canceling a model download"
msgstr "Ungenutzte Dateien beim Abbrechen eines Modell-Downloads entfernen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:124
msgid "Better message blocks rendering"
msgstr "Besseres Rendering von Nachrichtenblöcken"

#: data/com.jeffser.Alpaca.metainfo.xml.in:133
msgid "Run bash and python scripts straight from chat"
msgstr "Bash- und Python-Skripte direkt aus dem Chat ausführen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:134
msgid "Updated Ollama to 0.3.12"
msgstr "Ollama auf 0.3.12 aktualisiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:135
msgid "New models!"
msgstr "Neue Modelle!"

#: data/com.jeffser.Alpaca.metainfo.xml.in:139
msgid "Fixed and made faster the launch sequence"
msgstr "Startsequenz repariert und beschleunigt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:140
msgid "Better detection of code blocks in messages"
msgstr "Bessere Erkennung von Codeblöcken in Nachrichten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:141
msgid "Fixed app not loading in certain setups with Nvidia GPUs"
msgstr ""
"Behoben, dass die App in bestimmten Setups mit Nvidia-GPUs nicht geladen "
"wurde"

#: data/com.jeffser.Alpaca.metainfo.xml.in:150
msgid ""
"Fixed message notification sometimes crashing text rendering because of them "
"running on different threads"
msgstr ""
"Behoben, dass Nachrichtenbenachrichtigungen manchmal das Text-Rendering zum "
"Absturz brachten, weil sie in unterschiedlichen Threads liefen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:159
msgid "Fixed message generation sometimes failing"
msgstr "Behoben, dass die Nachrichtengenerierung manchmal fehlschlug"

#: data/com.jeffser.Alpaca.metainfo.xml.in:168
msgid "Sidebar resizes with the window"
msgstr "Seitenleiste passt ihre Größe an das Fenster an"

#: data/com.jeffser.Alpaca.metainfo.xml.in:169
msgid "New welcome dialog"
msgstr "Neuer Begrüßungsdialog"

#: data/com.jeffser.Alpaca.metainfo.xml.in:170
msgid "Message search"
msgstr "Nachrichtensuche"

#: data/com.jeffser.Alpaca.metainfo.xml.in:171
msgid "Updated Ollama to v0.3.11"
msgstr "Ollama auf v0.3.11 aktualisiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:172
msgid "A lot of new models provided by Ollama repository"
msgstr "Viele neue Modelle aus dem Ollama-Repository"

#: data/com.jeffser.Alpaca.metainfo.xml.in:176
msgid ""
"Fixed text inside model manager when the accessibility option 'large text' "
"is on"
msgstr ""
"Text im Modell-Manager behoben, wenn die Barrierefreiheitsoption 'Große "
"Schrift' aktiviert ist"

#: data/com.jeffser.Alpaca.metainfo.xml.in:177
msgid "Fixed image recognition on unsupported models"
msgstr "Bilderkennung bei nicht unterstützten Modellen behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:186
msgid "Fixed spinner not hiding if the back end fails"
msgstr ""
"Behoben, dass die Ladeanimation nicht ausgeblendet wurde, wenn das Backend "
"fehlschlägt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:187
msgid "Fixed image recognition with local images"
msgstr "Bilderkennung mit lokalen Bildern behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:188
msgid "Changed appearance of delete / stop model buttons"
msgstr ""
"Erscheinungsbild der Schaltflächen zum Löschen/Stoppen von Modellen geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:189
msgid "Fixed stop button crashing the app"
msgstr "Behoben, dass die Stopp-Schaltfläche die App zum Absturz brachte"

#: data/com.jeffser.Alpaca.metainfo.xml.in:193
msgid "Made sidebar resize a little when the window is smaller"
msgstr "Seitenleiste verkleinert sich etwas, wenn das Fenster kleiner wird"

#: data/com.jeffser.Alpaca.metainfo.xml.in:194
msgid "Instant launch"
msgstr "Sofortiger Start"

#: data/com.jeffser.Alpaca.metainfo.xml.in:203
msgid "Fixed error on first run (welcome dialog)"
msgstr "Fehler beim ersten Start behoben (Begrüßungsdialog)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:204
msgid "Fixed checker for Ollama instance (used on system packages)"
msgstr "Prüfung der Ollama-Instanz behoben (bei Systempaketen verwendet)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:213
msgid "Fixed 'clear chat' option"
msgstr "Option 'Chat leeren' behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:214
msgid "Fixed welcome dialog causing the local instance to not launch"
msgstr ""
"Behoben, dass der Begrüßungsdialog den Start der lokalen Instanz verhinderte"

#: data/com.jeffser.Alpaca.metainfo.xml.in:215
msgid "Fixed support for AMD GPUs"
msgstr "Unterstützung für AMD-GPUs repariert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:224
msgid "Model, message and chat systems have been rewritten"
msgstr "Modell-, Nachrichten- und Chatsysteme wurden neu geschrieben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:225
msgid "New models are available"
msgstr "Neue Modelle verfügbar"

#: data/com.jeffser.Alpaca.metainfo.xml.in:226
msgid "Ollama updated to v0.3.9"
msgstr "Ollama auf v0.3.9 aktualisiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:227
msgid "Added support for multiple chat generations simultaneously"
msgstr "Unterstützung für mehrere gleichzeitige Chat-Generierungen hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:228
msgid "Added experimental AMD GPU support"
msgstr "Experimentelle Unterstützung für AMD-GPUs hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:229
msgid "Added message loading spinner and new message indicator to chat tab"
msgstr ""
"Ladeanimation und Indikator für neue Nachrichten zum Chat-Tab hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:230
msgid "Added animations"
msgstr "Animationen hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:231
msgid "Changed model manager / model selector appearance"
msgstr "Erscheinungsbild von Modell-Manager / Modellauswahl geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:232
msgid "Changed message appearance"
msgstr "Erscheinungsbild der Nachrichten geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:233
msgid "Added markdown and code blocks to user messages"
msgstr "Markdown und Codeblöcke für Benutzernachrichten hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:234
msgid "Added loading dialog at launch so the app opens faster"
msgstr "Ladedialog beim Start hinzugefügt, damit sich die App schneller öffnet"

#: data/com.jeffser.Alpaca.metainfo.xml.in:235
msgid "Added warning when device is on 'battery saver' mode"
msgstr ""
"Warnung hinzugefügt, wenn sich das Gerät im 'Batteriesparmodus' befindet"

#: data/com.jeffser.Alpaca.metainfo.xml.in:236
msgid "Added inactivity timer to integrated instance"
msgstr "Inaktivitäts-Timer zur integrierten Instanz hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:239
msgid "The chat is now scrolled to the bottom when it's changed"
msgstr "Der Chat wird jetzt nach unten gescrollt, wenn er gewechselt wird"

#: data/com.jeffser.Alpaca.metainfo.xml.in:240
msgid "Better handling of focus on messages"
msgstr "Bessere Handhabung des Fokus auf Nachrichten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:241
msgid "Better general performance on the app"
msgstr "Bessere allgemeine Performance der App"

#: data/com.jeffser.Alpaca.metainfo.xml.in:250
msgid "New duplicate chat option"
msgstr "Neue Option zum Duplizieren von Chats"

#: data/com.jeffser.Alpaca.metainfo.xml.in:251
msgid "Changed model selector appearance"
msgstr "Erscheinungsbild des Modellselektors geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:252
msgid "Message entry is focused on launch and chat change"
msgstr "Nachrichteneingabe wird beim Start und Chatwechsel fokussiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:253
msgid "Message is focused when it's being edited"
msgstr "Nachricht wird beim Bearbeiten fokussiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:254
msgid "Added loading spinner when regenerating a message"
msgstr "Ladeanimation beim erneuten Generieren einer Nachricht hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:255
msgid "Added Ollama debugging to 'About Alpaca' dialog"
msgstr "Ollama-Debugging zum 'Über Alpaca'-Dialog hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:256
msgid "Changed YouTube transcription dialog appearance and behavior"
msgstr ""
"Erscheinungsbild und Verhalten des YouTube-Transkriptionsdialogs geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:260
msgid "CTRL+W and CTRL+Q stops local instance before closing the app"
msgstr "STRG+W und STRG+Q stoppen die lokale Instanz vor dem Schließen der App"

#: data/com.jeffser.Alpaca.metainfo.xml.in:261
msgid "Changed appearance of 'Open Model Manager' button on welcome screen"
msgstr ""
"Erscheinungsbild der Schaltfläche 'Modell-Manager öffnen' auf dem "
"Begrüßungsbildschirm geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:262
msgid "Fixed message generation not working consistently"
msgstr "Behoben: Nachrichtengenerierung funktionierte nicht konsistent"

#: data/com.jeffser.Alpaca.metainfo.xml.in:263
msgid "Fixed message edition not working consistently"
msgstr "Behoben: Nachrichtenbearbeitung funktionierte nicht konsistent"

#: data/com.jeffser.Alpaca.metainfo.xml.in:272
msgid "Model manager opens faster"
msgstr "Modell-Manager öffnet sich schneller"

#: data/com.jeffser.Alpaca.metainfo.xml.in:273
msgid "Delete chat option in secondary menu"
msgstr "Option 'Chat löschen' im sekundären Menü"

#: data/com.jeffser.Alpaca.metainfo.xml.in:274
msgid "New model selector popup"
msgstr "Neues Popup für die Modellauswahl"

#: data/com.jeffser.Alpaca.metainfo.xml.in:275
msgid "Standard shortcuts"
msgstr "Standard-Tastenkombinationen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:276
msgid "Model manager is navigable with keyboard"
msgstr "Modell-Manager ist mit der Tastatur navigierbar"

#: data/com.jeffser.Alpaca.metainfo.xml.in:277
msgid "Changed sidebar collapsing behavior"
msgstr "Verhalten beim Ausblenden der Seitenleiste geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:278
msgid "Focus indicators on messages"
msgstr "Fokus-Indikatoren auf Nachrichten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:279
msgid "Welcome screen"
msgstr "Begrüßungsbildschirm"

#: data/com.jeffser.Alpaca.metainfo.xml.in:280
msgid "Give message entry focus at launch"
msgstr "Nachrichteneingabe beim Start fokussieren"

#: data/com.jeffser.Alpaca.metainfo.xml.in:281
msgid "Generally better code"
msgstr "Allgemein besserer Code"

#: data/com.jeffser.Alpaca.metainfo.xml.in:285
msgid "Better width for dialogs"
msgstr "Bessere Breite für Dialoge"

#: data/com.jeffser.Alpaca.metainfo.xml.in:286
msgid "Better compatibility with screen readers"
msgstr "Bessere Kompatibilität mit Bildschirmlesern"

#: data/com.jeffser.Alpaca.metainfo.xml.in:287
msgid "Fixed message regenerator"
msgstr "Erneutes Generieren von Nachrichten repariert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:288
msgid "Removed 'Featured models' from welcome dialog"
msgstr "'Empfohlene Modelle' aus dem Begrüßungsdialog entfernt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:289
msgid "Added default buttons to dialogs"
msgstr "Standard-Schaltflächen zu Dialogen hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:290
msgid "Fixed import / export of chats"
msgstr "Import/Export von Chats behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:291
msgid "Changed Python2 title to Python on code blocks"
msgstr "Titel 'Python2' in Codeblöcken zu 'Python' geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:292
msgid ""
"Prevent regeneration of title when the user changed it to a custom title"
msgstr ""
"Erneute Generierung des Titels verhindern, wenn der Benutzer ihn in einen "
"benutzerdefinierten Titel geändert hat"

#: data/com.jeffser.Alpaca.metainfo.xml.in:293
msgid "Show date on stopped messages"
msgstr "Datum bei gestoppten Nachrichten anzeigen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:294
msgid "Fix clear chat error"
msgstr "Fehler beim Leeren des Chats behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:303
msgid "Changed shortcuts to standards"
msgstr "Tastenkombinationen auf Standards geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:304
msgid "Moved 'Manage Models' button to primary menu"
msgstr "Schaltfläche 'Modelle verwalten' in das Hauptmenü verschoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:305
#: data/com.jeffser.Alpaca.metainfo.xml.in:327
msgid "Stable support for GGUF model files"
msgstr "Stabile Unterstützung für GGUF-Modelldateien"

#: data/com.jeffser.Alpaca.metainfo.xml.in:306
#: data/com.jeffser.Alpaca.metainfo.xml.in:581
msgid "General optimizations"
msgstr "Allgemeine Optimierungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:310
msgid "Better handling of enter key (important for Japanese input)"
msgstr ""
"Bessere Behandlung der Eingabetaste (wichtig für die japanische Eingabe)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:311
msgid "Removed sponsor dialog"
msgstr "Sponsorendialog entfernt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:312
msgid "Added sponsor link in about dialog"
msgstr "Sponsorenlink im Info-Dialog hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:313
msgid "Changed window and elements dimensions"
msgstr "Fenster- und Elementabmessungen geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:314
msgid "Selected model changes when entering model manager"
msgstr "Ausgewähltes Modell ändert sich beim Aufrufen des Modell-Managers"

#: data/com.jeffser.Alpaca.metainfo.xml.in:315
msgid "Better image tooltips"
msgstr "Bessere Bild-Tooltips"

#: data/com.jeffser.Alpaca.metainfo.xml.in:316
msgid "GGUF Support"
msgstr "GGUF-Unterstützung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:325
msgid "Regenerate any response, even if they are incomplete"
msgstr "Jede Antwort neu generieren, auch wenn sie unvollständig ist"

#: data/com.jeffser.Alpaca.metainfo.xml.in:326
msgid "Support for pulling models by name:tag"
msgstr "Unterstützung für das Abrufen von Modellen nach Name:Tag"

#: data/com.jeffser.Alpaca.metainfo.xml.in:328
msgid "Restored sidebar toggle button"
msgstr "Schaltfläche zum Umschalten der Seitenleiste wiederhergestellt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:332
msgid "Reverted back to standard styles"
msgstr "Zu Standardstilen zurückgekehrt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:333
msgid "Fixed generated titles having \"'S\" for some reason"
msgstr ""
"Es wurde behoben, dass generierte Titel aus irgendeinem Grund \"'S\" "
"enthalten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:334
msgid "Changed min width for model dropdown"
msgstr "Minimale Breite für Modell-Dropdown geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:335
msgid "Changed message entry shadow"
msgstr "Schatten der Nachrichteneingabe geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:336
msgid "The last model used is now restored when the user changes chat"
msgstr ""
"Das zuletzt verwendete Modell wird nun wiederhergestellt, wenn der Benutzer "
"den Chat wechselt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:337
msgid "Better check for message finishing"
msgstr "Bessere Prüfung auf Nachrichtenende"

#: data/com.jeffser.Alpaca.metainfo.xml.in:346
msgid "Added table rendering (Thanks Nokse)"
msgstr "Tabellen-Rendering hinzugefügt (Danke Nokse)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:350
msgid "Made support dialog more common"
msgstr "Support-Dialog gängiger gemacht"

#: data/com.jeffser.Alpaca.metainfo.xml.in:351
msgid ""
"Dialog title on tag chooser when downloading models didn't display properly"
msgstr ""
"Dialog-Titel auf Tag-Auswahl beim Herunterladen von Modellen wurde nicht "
"richtig angezeigt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:352
msgid "Prevent chat generation from generating a title with multiple lines"
msgstr ""
"Verhindern, dass bei der Chat-Generierung ein mehrzeiliger Titel generiert "
"wird"

#: data/com.jeffser.Alpaca.metainfo.xml.in:361
msgid "Bearer Token entry on connection error dialog"
msgstr "Eingabe des Bearer-Tokens im Verbindungsfehler-Dialog"

#: data/com.jeffser.Alpaca.metainfo.xml.in:362
msgid "Small appearance changes"
msgstr "Kleine Änderungen am Erscheinungsbild"

#: data/com.jeffser.Alpaca.metainfo.xml.in:363
msgid "Compatibility with code blocks without explicit language"
msgstr "Kompatibilität mit Codeblöcken ohne explizite Sprache"

#: data/com.jeffser.Alpaca.metainfo.xml.in:364
msgid "Rare, optional and dismissible support dialog"
msgstr "Seltener, optionaler und entfernbarer Support-Dialog"

#: data/com.jeffser.Alpaca.metainfo.xml.in:368
msgid "Date format for Simplified Chinese translation"
msgstr "Datumsformat für vereinfachte chinesische Übersetzung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:369
msgid "Bug with unsupported localizations"
msgstr "Fehler bei nicht unterstützten Lokalisierungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:370
msgid "Min height being too large to be used on mobile"
msgstr "Mindesthöhe zu groß für die Verwendung auf Mobilgeräten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:371
msgid "Remote connection checker bug"
msgstr "Fehler beim Prüfen der Remoteverbindung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:380
msgid "Models with capital letters on their tag don't work"
msgstr "Modelle mit Großbuchstaben in ihrem Tag funktionieren nicht"

#: data/com.jeffser.Alpaca.metainfo.xml.in:381
msgid "Ollama fails to launch on some systems"
msgstr "Ollama startet auf einigen Systemen nicht"

#: data/com.jeffser.Alpaca.metainfo.xml.in:382
msgid "YouTube transcripts are not being saved in the right TMP directory"
msgstr ""
"YouTube-Transkripte werden nicht im richtigen TMP-Verzeichnis gespeichert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:386
msgid "Debug messages are now shown on the 'About Alpaca' dialog"
msgstr "Debug-Meldungen werden jetzt im 'Über Alpaca'-Dialog angezeigt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:387
msgid "Updated Ollama to v0.3.0 (new models)"
msgstr "Ollama auf v0.3.0 aktualisiert (neue Modelle)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:396
msgid "Models with '-' in their names didn't work properly, this is now fixed"
msgstr ""
"Modelle mit '-' in ihren Namen funktionierten nicht richtig, dies ist jetzt "
"behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:397
msgid "Better connection check for Ollama"
msgstr "Bessere Verbindungsprüfung für Ollama"

#: data/com.jeffser.Alpaca.metainfo.xml.in:404
msgid "Stable Release"
msgstr "Stabiles Release"

#: data/com.jeffser.Alpaca.metainfo.xml.in:405
msgid ""
"The new icon was made by Tobias Bernard over the Gnome Gitlab, thanks for "
"the great icon!"
msgstr ""
"Das neue Icon wurde von Tobias Bernard über Gnome Gitlab erstellt, vielen "
"Dank für das tolle Icon!"

#: data/com.jeffser.Alpaca.metainfo.xml.in:406
msgid "Features and fixes"
msgstr "Funktionen und Fehlerbehebungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:408
msgid "Updated Ollama instance to 0.2.8"
msgstr "Ollama-Instanz auf 0.2.8 aktualisiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:409
msgid "Better model selector"
msgstr "Besserer Modellselektor"

#: data/com.jeffser.Alpaca.metainfo.xml.in:410
msgid "Model manager redesign"
msgstr "Überarbeitung des Modell-Managers"

#: data/com.jeffser.Alpaca.metainfo.xml.in:411
msgid "Better tag selector when pulling a model"
msgstr "Besserer Tag-Selektor beim Abrufen eines Modells"

#: data/com.jeffser.Alpaca.metainfo.xml.in:412
msgid "Model search"
msgstr "Modellsuche"

#: data/com.jeffser.Alpaca.metainfo.xml.in:413
msgid "Added support for bearer tokens on remote instances"
msgstr "Unterstützung für Bearer-Token auf Remote-Instanzen hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:414
msgid "Preferences dialog redesign"
msgstr "Überarbeitung des Einstellungsdialogs"

#: data/com.jeffser.Alpaca.metainfo.xml.in:415
msgid "Added context menus to interact with a chat"
msgstr "Kontextmenüs zur Interaktion mit einem Chat hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:416
msgid "Redesigned primary and secondary menus"
msgstr "Überarbeitete primäre und sekundäre Menüs"

#: data/com.jeffser.Alpaca.metainfo.xml.in:417
msgid ""
"YouTube integration: Paste the URL of a video with a transcript and it will "
"be added to the prompt"
msgstr ""
"YouTube-Integration: Fügen Sie die URL eines Videos mit Transkript ein und "
"es wird zur Eingabeaufforderung hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:418
msgid ""
"Website integration (Experimental): Extract the text from the body of a "
"website by adding it's URL to the prompt"
msgstr ""
"Website-Integration (experimentell): Extrahieren Sie den Text aus dem Body "
"einer Website durch Hinzufügen der URL zur Eingabeaufforderung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:419
msgid "Chat title generation"
msgstr "Generierung von Chat-Titeln"

#: data/com.jeffser.Alpaca.metainfo.xml.in:420
msgid "Auto resizing of message entry"
msgstr "Automatische Größenänderung der Nachrichteneingabe"

#: data/com.jeffser.Alpaca.metainfo.xml.in:421
msgid "Chat notifications"
msgstr "Chat-Benachrichtigungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:422
msgid "Added indicator when an image is missing"
msgstr "Indikator hinzugefügt, wenn ein Bild fehlt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:423
msgid "Auto rearrange the order of chats when a message is received"
msgstr ""
"Automatisches Neuanordnen der Reihenfolge der Chats beim Empfang einer "
"Nachricht"

#: data/com.jeffser.Alpaca.metainfo.xml.in:424
msgid "Redesigned file preview dialog"
msgstr "Überarbeiteter Dateivorschau-Dialog"

#: data/com.jeffser.Alpaca.metainfo.xml.in:425
msgid "Credited new contributors"
msgstr "Neue Mitwirkende aufgeführt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:426
msgid "Better stability and optimization"
msgstr "Bessere Stabilität und Optimierung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:427
msgid "Edit messages to change the context of a conversation"
msgstr "Nachrichten bearbeiten, um den Kontext einer Unterhaltung zu ändern"

#: data/com.jeffser.Alpaca.metainfo.xml.in:428
msgid "Added disclaimers when pulling models"
msgstr "Haftungsausschlüsse beim Abrufen von Modellen hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:429
msgid "Preview files before sending a message"
msgstr "Vorschau von Dateien vor dem Senden einer Nachricht"

#: data/com.jeffser.Alpaca.metainfo.xml.in:430
msgid "Better format for date and time on messages"
msgstr "Besseres Format für Datum und Uhrzeit in Nachrichten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:431
msgid "Error and debug logging on terminal"
msgstr "Fehler- und Debug-Protokollierung im Terminal"

#: data/com.jeffser.Alpaca.metainfo.xml.in:432
msgid "Auto-hiding sidebar button"
msgstr "Automatisch ausblendbare Seitenleisten-Schaltfläche"

#: data/com.jeffser.Alpaca.metainfo.xml.in:433
msgid "Various UI tweaks"
msgstr "Verschiedene Verbesserungen der Benutzeroberfläche"

#: data/com.jeffser.Alpaca.metainfo.xml.in:435
msgid "New Models"
msgstr "Neue Modelle"

#: data/com.jeffser.Alpaca.metainfo.xml.in:437
msgid "Gemma2"
msgstr "Gemma2"

#: data/com.jeffser.Alpaca.metainfo.xml.in:438
msgid "GLM4"
msgstr "GLM4"

#: data/com.jeffser.Alpaca.metainfo.xml.in:439
msgid "Codegeex4"
msgstr "Codegeex4"

#: data/com.jeffser.Alpaca.metainfo.xml.in:440
msgid "InternLM2"
msgstr "InternLM2"

#: data/com.jeffser.Alpaca.metainfo.xml.in:441
msgid "Llama3-groq-tool-use"
msgstr "Llama3-groq-tool-use"

#: data/com.jeffser.Alpaca.metainfo.xml.in:442
msgid "Mathstral"
msgstr "Mathstral"

#: data/com.jeffser.Alpaca.metainfo.xml.in:443
msgid "Mistral-nemo"
msgstr "Mistral-nemo"

#: data/com.jeffser.Alpaca.metainfo.xml.in:444
msgid "Firefunction-v2"
msgstr "Firefunction-v2"

#: data/com.jeffser.Alpaca.metainfo.xml.in:445
msgid "Nuextract"
msgstr "Nuextract"

#: data/com.jeffser.Alpaca.metainfo.xml.in:447
msgid "Translations"
msgstr "Übersetzungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:448
msgid ""
"These are all the available translations on 1.0.0, thanks to all the "
"contributors!"
msgstr ""
"Das sind alle verfügbaren Übersetzungen in Version 1.0.0, vielen Dank an "
"alle Mitwirkenden!"

#: data/com.jeffser.Alpaca.metainfo.xml.in:450
msgid "Russian: Alex K"
msgstr "Russisch: Alex K"

#: data/com.jeffser.Alpaca.metainfo.xml.in:451
msgid "Spanish: Jeffser"
msgstr "Spanisch: Jeffser"

#: data/com.jeffser.Alpaca.metainfo.xml.in:452
msgid "Brazilian Portuguese: Daimar Stein"
msgstr "Brasilianisches Portugiesisch: Daimar Stein"

#: data/com.jeffser.Alpaca.metainfo.xml.in:453
msgid "French: Louis Chauvet-Villaret"
msgstr "Französisch: Louis Chauvet-Villaret"

#: data/com.jeffser.Alpaca.metainfo.xml.in:454
msgid "Norwegian: CounterFlow64"
msgstr "Norwegisch: CounterFlow64"

#: data/com.jeffser.Alpaca.metainfo.xml.in:455
msgid "Bengali: Aritra Saha"
msgstr "Bengalisch: Aritra Saha"

#: data/com.jeffser.Alpaca.metainfo.xml.in:456
msgid "Simplified Chinese: Yuehao Sui"
msgstr "Vereinfachtes Chinesisch: Yuehao Sui"

#: data/com.jeffser.Alpaca.metainfo.xml.in:463
#: data/com.jeffser.Alpaca.metainfo.xml.in:512
msgid "Fix"
msgstr "Fehlerbehebung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:464
msgid ""
"Removed DOCX compatibility temporally due to error with python-lxml "
"dependency"
msgstr ""
"DOCX-Kompatibilität vorübergehend entfernt aufgrund eines Fehlers mit der "
"python-lxml-Abhängigkeit"

#: data/com.jeffser.Alpaca.metainfo.xml.in:470
#: data/com.jeffser.Alpaca.metainfo.xml.in:500
#: data/com.jeffser.Alpaca.metainfo.xml.in:521
#: data/com.jeffser.Alpaca.metainfo.xml.in:726
#: data/com.jeffser.Alpaca.metainfo.xml.in:783
msgid "Big Update"
msgstr "Großes Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:472
msgid "Added compatibility for PDF"
msgstr "Kompatibilität für PDF hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:473
msgid "Added compatibility for DOCX"
msgstr "Kompatibilität für DOCX hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:474
msgid "Merged 'file attachment' menu into one button"
msgstr "Menü 'Dateianhang' in eine Schaltfläche zusammengeführt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:481
#: data/com.jeffser.Alpaca.metainfo.xml.in:674
msgid "Quick Fix"
msgstr "Schnelle Fehlerbehebung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:482
msgid ""
"There were some errors when transitioning from the old version of chats to "
"the new version. I apologize if this caused any corruption in your chat "
"history. This should be the only time such a transition is needed."
msgstr ""
"Es gab einige Fehler beim Übergang von der alten Version der Chats zur neuen "
"Version. Ich entschuldige mich, wenn dies zu einer Beschädigung Ihres Chat-"
"Verlaufs geführt hat. Dies sollte das einzige Mal sein, dass ein solcher "
"Übergang erforderlich ist."

#: data/com.jeffser.Alpaca.metainfo.xml.in:488
#: data/com.jeffser.Alpaca.metainfo.xml.in:640
msgid "Huge Update"
msgstr "Riesiges Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:490
msgid "Added: Support for plain text files"
msgstr "Hinzugefügt: Unterstützung für reine Textdateien"

#: data/com.jeffser.Alpaca.metainfo.xml.in:491
msgid "Added: New backend system for storing messages"
msgstr "Hinzugefügt: Neues Backend-System zum Speichern von Nachrichten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:492
msgid "Added: Support for changing Ollama's overrides"
msgstr "Hinzugefügt: Unterstützung zum Ändern der Überschreibungen von Ollama"

#: data/com.jeffser.Alpaca.metainfo.xml.in:493
msgid "General Optimization"
msgstr "Allgemeine Optimierung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:502
msgid "Added: Support for GGUF models (experimental)"
msgstr "Hinzugefügt: Unterstützung für GGUF-Modelle (experimentell)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:503
msgid "Added: Support for customization and creation of models"
msgstr ""
"Hinzugefügt: Unterstützung für die Anpassung und Erstellung von Modellen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:504
msgid "Fixed: Icons don't appear on non Gnome systems"
msgstr "Behoben: Symbole werden auf Nicht-Gnome-Systemen nicht angezeigt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:505
msgid "Update Ollama to v0.1.39"
msgstr "Ollama auf v0.1.39 aktualisiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:514
msgid ""
"Fixed: app didn't open if models tweaks wasn't present in the config files"
msgstr ""
"Behoben: App öffnete sich nicht, wenn Modellanpassungen nicht in den "
"Konfigurationsdateien vorhanden waren"

#: data/com.jeffser.Alpaca.metainfo.xml.in:523
msgid "Changed multiple icons (paper airplane for the send button)"
msgstr "Mehrere Symbole geändert (Papierflugzeug für die Senden-Schaltfläche)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:524
msgid "Combined export / import chat buttons into a menu"
msgstr "Export-/Import-Chat-Schaltflächen in ein Menü zusammengefasst"

#: data/com.jeffser.Alpaca.metainfo.xml.in:525
msgid "Added 'model tweaks' (temperature, seed, keep_alive)"
msgstr "'Modellanpassungen' (Temperatur, Seed, keep_alive) hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:526
msgid "Fixed send / stop button"
msgstr "Senden-/Stopp-Schaltfläche behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:527
msgid "Fixed app not checking if remote connection works when starting"
msgstr ""
"Behoben, dass die App beim Start nicht prüft, ob die Remoteverbindung "
"funktioniert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:534
msgid "Daily Update"
msgstr "Tägliches Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:536
msgid "Added text ellipsis to chat name so it doesn't change the button width"
msgstr ""
"Auslassungspunkte zum Chatnamen hinzugefügt, damit sich die "
"Schaltflächenbreite nicht ändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:537
msgid "New shortcut for creating a chat (CTRL+N)"
msgstr "Neue Tastenkombination zum Erstellen eines Chats (STRG+N)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:538
msgid "New message entry design"
msgstr "Neues Design für die Nachrichteneingabe"

#: data/com.jeffser.Alpaca.metainfo.xml.in:539
msgid "Fixed: Can't rename the same chat multiple times"
msgstr "Behoben: Derselbe Chat kann nicht mehrmals umbenannt werden"

#: data/com.jeffser.Alpaca.metainfo.xml.in:546
msgid "The fix"
msgstr "Die Fehlerbehebung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:548
msgid ""
"Fixed: Ollama instance keeps running on the background even when it is "
"disabled"
msgstr ""
"Behoben: Ollama-Instanz läuft weiter im Hintergrund, auch wenn sie "
"deaktiviert ist"

#: data/com.jeffser.Alpaca.metainfo.xml.in:549
msgid "Fixed: Can't pull models on the integrated instance"
msgstr ""
"Behoben: Modelle können auf der integrierten Instanz nicht abgerufen werden"

#: data/com.jeffser.Alpaca.metainfo.xml.in:556
msgid "Quick tweaks"
msgstr "Schnelle Anpassungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:558
msgid "Added progress bar to models that are being pulled"
msgstr "Fortschrittsbalken für Modelle hinzugefügt, die abgerufen werden"

#: data/com.jeffser.Alpaca.metainfo.xml.in:559
msgid "Added size to tags when pulling a model"
msgstr "Größe zu Tags hinzugefügt, wenn ein Modell abgerufen wird"

#: data/com.jeffser.Alpaca.metainfo.xml.in:560
msgid "General optimizations on the background"
msgstr "Allgemeine Optimierungen im Hintergrund"

#: data/com.jeffser.Alpaca.metainfo.xml.in:567
msgid "Quick fixes"
msgstr "Schnelle Fehlerbehebungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:569
msgid "Fixed: Scroll when message is received"
msgstr "Behoben: Scrollen, wenn eine Nachricht empfangen wird"

#: data/com.jeffser.Alpaca.metainfo.xml.in:570
msgid "Fixed: Content doesn't change when creating a new chat"
msgstr "Behoben: Inhalt ändert sich nicht beim Erstellen eines neuen Chats"

#: data/com.jeffser.Alpaca.metainfo.xml.in:571
msgid "Added 'Featured Models' page on welcome dialog"
msgstr "Seite 'Empfohlene Modelle' im Begrüßungsdialog hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:578
msgid "Nice Update"
msgstr "Schönes Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:580
msgid "UI tweaks (Thanks Nokse22)"
msgstr "UI-Anpassungen (Danke Nokse22)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:582
msgid "Metadata fixes"
msgstr "Fehlerbehebungen bei den Metadaten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:589
msgid "Quick fix"
msgstr "Schnelle Fehlerbehebung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:591
msgid "Updated Spanish translation"
msgstr "Spanische Übersetzung aktualisiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:592
msgid "Added compatibility for PNG"
msgstr "Kompatibilität für PNG hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:599
msgid "New Update"
msgstr "Neues Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:601
msgid "Updated model list"
msgstr "Modellliste aktualisiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:602
msgid "Added image recognition to more models"
msgstr "Bilderkennung zu weiteren Modellen hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:603
msgid "Added Brazilian Portuguese translation (Thanks Daimaar Stein)"
msgstr ""
"Brasilianische portugiesische Übersetzung hinzugefügt (Danke Daimaar Stein)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:604
msgid "Refined the general UI (Thanks Nokse22)"
msgstr "Allgemeine Benutzeroberfläche verfeinert (Danke Nokse22)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:605
msgid "Added 'delete message' feature"
msgstr "Funktion 'Nachricht löschen' hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:606
msgid ""
"Added metadata so that software distributors know that the app is compatible "
"with mobile"
msgstr ""
"Metadaten hinzugefügt, damit Softwareanbieter wissen, dass die App mit "
"Mobilgeräten kompatibel ist"

#: data/com.jeffser.Alpaca.metainfo.xml.in:607
msgid ""
"Changed 'send' shortcut to just the return/enter key (to add a new line use "
"shift+return)"
msgstr ""
"'Senden'-Verknüpfung auf die Eingabetaste geändert (für einen Zeilenumbruch "
"Umschalttaste+Eingabetaste verwenden)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:614
msgid "Bug Fixes"
msgstr "Fehlerbehebungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:616
msgid "Fixed: Minor spelling mistake"
msgstr "Behoben: Kleiner Rechtschreibfehler"

#: data/com.jeffser.Alpaca.metainfo.xml.in:617
msgid "Added 'mobile' as a supported form factor"
msgstr "'Mobil' als unterstützten Formfaktor hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:618
msgid "Fixed: 'Connection Error' dialog not working properly"
msgstr "Behoben: Dialog 'Verbindungsfehler' funktioniert nicht richtig"

#: data/com.jeffser.Alpaca.metainfo.xml.in:619
msgid "Fixed: App might freeze randomly on startup"
msgstr "Behoben: App kann beim Start zufällig einfrieren"

#: data/com.jeffser.Alpaca.metainfo.xml.in:620
msgid "Changed 'chats' label on sidebar for 'Alpaca'"
msgstr "Bezeichnung 'Chats' in der Seitenleiste in 'Alpaca' geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:627
msgid "Cool Update"
msgstr "Cooles Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:629
msgid "Better design for chat window"
msgstr "Besseres Design für das Chatfenster"

#: data/com.jeffser.Alpaca.metainfo.xml.in:630
msgid "Better design for chat sidebar"
msgstr "Besseres Design für die Chat-Seitenleiste"

#: data/com.jeffser.Alpaca.metainfo.xml.in:631
msgid "Fixed remote connections"
msgstr "Remote-Verbindungen repariert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:632
msgid "Fixed Ollama restarting in loop"
msgstr "Ollama-Neustart in Schleife behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:633
msgid "Other cool backend stuff"
msgstr "Andere coole Backend-Sachen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:642
msgid "Added Ollama as part of Alpaca, Ollama will run in a sandbox"
msgstr ""
"Ollama als Teil von Alpaca hinzugefügt, Ollama wird in einer Sandbox "
"ausgeführt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:643
msgid "Added option to connect to remote instances (how it worked before)"
msgstr ""
"Option zum Verbinden mit Remote-Instanzen hinzugefügt (wie es vorher "
"funktionierte)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:644
msgid "Added option to import and export chats"
msgstr "Option zum Importieren und Exportieren von Chats hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:645
msgid "Added option to run Alpaca with Ollama in the background"
msgstr "Option hinzugefügt, Alpaca mit Ollama im Hintergrund auszuführen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:646
msgid "Added preferences dialog"
msgstr "Einstellungsdialog hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:647
msgid "Changed the welcome dialog"
msgstr "Begrüßungsdialog geändert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:649
#: data/com.jeffser.Alpaca.metainfo.xml.in:666
#: data/com.jeffser.Alpaca.metainfo.xml.in:678
#: data/com.jeffser.Alpaca.metainfo.xml.in:697
#: data/com.jeffser.Alpaca.metainfo.xml.in:718
#: data/com.jeffser.Alpaca.metainfo.xml.in:734
#: data/com.jeffser.Alpaca.metainfo.xml.in:750
#: data/com.jeffser.Alpaca.metainfo.xml.in:764
#: data/com.jeffser.Alpaca.metainfo.xml.in:774
#: data/com.jeffser.Alpaca.metainfo.xml.in:792
#: data/com.jeffser.Alpaca.metainfo.xml.in:814
msgid "Please report any errors to the issues page, thank you."
msgstr "Bitte melden Sie alle Fehler auf der Problemseite, danke."

#: data/com.jeffser.Alpaca.metainfo.xml.in:657
msgid "Yet Another Daily Update"
msgstr "Noch ein tägliches Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:659
msgid "Added better UI for 'Manage Models' dialog"
msgstr ""
"Bessere Benutzeroberfläche für den Dialog 'Modelle verwalten' hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:660
msgid "Added better UI for the chat sidebar"
msgstr "Bessere Benutzeroberfläche für die Chat-Seitenleiste hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:661
msgid ""
"Replaced model description with a button to open Ollama's website for the "
"model"
msgstr ""
"Modellbeschreibung durch eine Schaltfläche zum Öffnen der Ollama-Website für "
"das Modell ersetzt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:662
msgid "Added myself to the credits as the spanish translator"
msgstr "Mich selbst als spanischen Übersetzer zu den Credits hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:663
msgid "Using XDG properly to get config folder"
msgstr "XDG korrekt verwenden, um den Konfigurationsordner zu erhalten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:664
msgid "Update for translations"
msgstr "Update für Übersetzungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:676
msgid "The last update had some mistakes in the description of the update"
msgstr ""
"Das letzte Update enthielt einige Fehler in der Beschreibung des Updates"

#: data/com.jeffser.Alpaca.metainfo.xml.in:686
msgid "Another Daily Update"
msgstr "Ein weiteres tägliches Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:688
msgid "Added full Spanish translation"
msgstr "Vollständige spanische Übersetzung hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:689
msgid "Added support for background pulling of multiple models"
msgstr ""
"Unterstützung für das Abrufen mehrerer Modelle im Hintergrund hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:690
msgid "Added interrupt button"
msgstr "Schaltfläche zum Unterbrechen hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:691
msgid "Added basic shortcuts"
msgstr "Grundlegende Tastenkürzel hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:692
msgid "Better translation support"
msgstr "Bessere Unterstützung für Übersetzungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:693
msgid ""
"User can now leave chat name empty when creating a new one, it will add a "
"placeholder name"
msgstr ""
"Benutzer können jetzt den Chatnamen beim Erstellen eines neuen Chats leer "
"lassen, es wird ein Platzhalter-Name hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:694
msgid "Better scalling for different window sizes"
msgstr "Bessere Skalierung für verschiedene Fenstergrößen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:695
msgid "Fixed: Can't close app if first time setup fails"
msgstr ""
"Behoben: App kann nicht geschlossen werden, wenn die Ersteinrichtung "
"fehlschlägt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:705
msgid "Really Big Update"
msgstr "Wirklich großes Update"

#: data/com.jeffser.Alpaca.metainfo.xml.in:707
msgid "Added multiple chats support!"
msgstr "Unterstützung für mehrere Chats hinzugefügt!"

#: data/com.jeffser.Alpaca.metainfo.xml.in:708
msgid "Added Pango Markup support (bold, list, title, subtitle, monospace)"
msgstr ""
"Unterstützung für Pango Markup hinzugefügt (fett, Liste, Titel, Untertitel, "
"Monospace)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:709
msgid "Added autoscroll if the user is at the bottom of the chat"
msgstr ""
"Automatisches Scrollen hinzugefügt, wenn der Benutzer am unteren Rand des "
"Chats ist"

#: data/com.jeffser.Alpaca.metainfo.xml.in:710
msgid "Added support for multiple tags on a single model"
msgstr "Unterstützung für mehrere Tags bei einem einzelnen Modell hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:711
msgid "Added better model management dialog"
msgstr "Besseren Dialog zur Modellverwaltung hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:712
msgid "Added loading spinner when sending message"
msgstr "Ladespinner beim Senden einer Nachricht hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:713
msgid "Added notifications if app is not active and a model pull finishes"
msgstr ""
"Benachrichtigungen hinzugefügt, wenn die App nicht aktiv ist und ein Modell-"
"Pull abgeschlossen ist"

#: data/com.jeffser.Alpaca.metainfo.xml.in:714
msgid "Added new symbolic icon"
msgstr "Neues symbolisches Icon hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:715
msgid "Added frame to message textview widget"
msgstr "Rahmen zum Nachrichten-Textansichts-Widget hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:716
msgid "Fixed \"code blocks shouldn't be editable\""
msgstr "Behoben: \"Code-Blöcke sollten nicht editierbar sein\""

#: data/com.jeffser.Alpaca.metainfo.xml.in:728
msgid "Added code highlighting"
msgstr "Code-Highlighting hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:729
msgid "Added image recognition (llava model)"
msgstr "Bilderkennung hinzugefügt (LLaVA-Modell)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:730
msgid "Added multiline prompt"
msgstr "Mehrzeilige Eingabe hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:731
msgid "Fixed some small bugs"
msgstr "Einige kleine Fehler behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:732
msgid "General optimization"
msgstr "Allgemeine Optimierung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:742
msgid "Fixes and features"
msgstr "Fehlerbehebungen und Funktionen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:744
msgid "Russian translation (thanks github/alexkdeveloper)"
msgstr "Russische Übersetzung (Danke github/alexkdeveloper)"

#: data/com.jeffser.Alpaca.metainfo.xml.in:745
msgid "Fixed: Cannot close app on first setup"
msgstr "Behoben: App kann bei der Ersteinrichtung nicht geschlossen werden"

#: data/com.jeffser.Alpaca.metainfo.xml.in:746
msgid "Fixed: Brand colors for Flathub"
msgstr "Behoben: Markenfarben für Flathub"

#: data/com.jeffser.Alpaca.metainfo.xml.in:747
msgid "Fixed: App description"
msgstr "Behoben: App-Beschreibung"

#: data/com.jeffser.Alpaca.metainfo.xml.in:748
msgid "Fixed: Only show 'save changes dialog' when you actually change the url"
msgstr ""
"Behoben: Dialog 'Änderungen speichern' nur anzeigen, wenn die URL "
"tatsächlich geändert wurde"

#: data/com.jeffser.Alpaca.metainfo.xml.in:758
msgid "0.2.2 Bug fixes"
msgstr "0.2.2 Fehlerbehebungen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:760
msgid "Toast messages appearing behind dialogs"
msgstr "Toast-Nachrichten erscheinen hinter Dialogen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:761
msgid "Local model list not updating when changing servers"
msgstr "Lokale Modellliste wird beim Wechseln der Server nicht aktualisiert"

#: data/com.jeffser.Alpaca.metainfo.xml.in:762
msgid "Closing the setup dialog closes the whole app"
msgstr "Das Schließen des Einrichtungsdialogs schließt die gesamte App"

#: data/com.jeffser.Alpaca.metainfo.xml.in:772
msgid "0.2.1 Data saving fix"
msgstr "0.2.1 Korrektur beim Speichern von Daten"

#: data/com.jeffser.Alpaca.metainfo.xml.in:773
msgid ""
"The app didn't save the config files and chat history to the right "
"directory, this is now fixed"
msgstr ""
"Die App hat die Konfigurationsdateien und den Chatverlauf nicht im richtigen "
"Verzeichnis gespeichert, dies ist jetzt behoben"

#: data/com.jeffser.Alpaca.metainfo.xml.in:782
msgid "0.2.0"
msgstr "0.2.0"

#: data/com.jeffser.Alpaca.metainfo.xml.in:784
msgid "New Features"
msgstr "Neue Funktionen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:786
msgid "Restore chat after closing the app"
msgstr "Chat nach dem Schließen der App wiederherstellen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:787
msgid "A button to clear the chat"
msgstr "Eine Schaltfläche zum Löschen des Chats"

#: data/com.jeffser.Alpaca.metainfo.xml.in:788
msgid "Fixed multiple bugs involving how messages are shown"
msgstr "Mehrere Fehler behoben, die die Darstellung von Nachrichten betreffen"

#: data/com.jeffser.Alpaca.metainfo.xml.in:789
msgid "Added welcome dialog"
msgstr "Begrüßungsdialog hinzugefügt"

#: data/com.jeffser.Alpaca.metainfo.xml.in:790
msgid "More stability"
msgstr "Mehr Stabilität"

#: data/com.jeffser.Alpaca.metainfo.xml.in:800
|
||
msgid "0.1.2 Quick fixes"
|
||
msgstr "0.1.2 Schnelle Korrekturen"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:801
|
||
msgid ""
|
||
"This release fixes some metadata needed to have a proper Flatpak application"
|
||
msgstr ""
|
||
"Diese Version behebt einige Metadaten, die für eine ordnungsgemäße Flatpak-"
|
||
"Anwendung erforderlich sind"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:807
|
||
msgid "0.1.1 Stable Release"
|
||
msgstr "0.1.1 Stabiles Release"
|
||
|
||
#: data/com.jeffser.Alpaca.metainfo.xml.in:808
|
||
msgid "This is the first public version of Alpaca"
|
||
msgstr "Dies ist die erste öffentliche Version von Alpaca"
|
||
|
||
#: src/window.py:145
|
||
msgid "Please select a model before chatting"
|
||
msgstr "Bitte wählen Sie ein Modell aus, bevor Sie chatten"
|
||
|
||
#: src/window.py:199 src/window.py:200 src/window.ui:149
|
||
msgid "Close"
|
||
msgstr "Schließen"
|
||
|
||
#: src/window.py:202 src/window.py:203 src/window.ui:942
|
||
msgid "Next"
|
||
msgstr "Weiter"
|
||
|
||
#: src/window.py:303
|
||
msgid "image"
|
||
msgstr "Bild"
|
||
|
||
#: src/window.py:397
|
||
msgid "Missing file"
|
||
msgstr "Fehlende Datei"
|
||
|
||
#: src/window.py:530 src/window.py:587 src/window.py:607 src/window.py:609
|
||
#: src/window.ui:31 src/custom_widgets/chat_widget.py:320
|
||
msgid "New Chat"
|
||
msgstr "Neuer Chat"
|
||
|
||
#: src/window.py:633
|
||
msgid "Close Alpaca"
|
||
msgstr "Alpaca schließen"
|
||
|
||
#: src/window.py:634
msgid "Use Local Instance"
msgstr "Lokale Instanz verwenden"

#: src/window.py:635 src/window.py:840
msgid "Connect"
msgstr "Verbinden"

#: src/window.py:638 src/window.py:843
msgid "Server URL"
msgstr "Server-URL"

#: src/window.py:639 src/window.py:844
msgid "Bearer Token (Optional)"
msgstr "Bearer-Token (Optional)"

#: src/window.py:641
msgid "Connection Error"
msgstr "Verbindungsfehler"

#: src/window.py:641
msgid "The remote instance has disconnected"
msgstr "Die Remote-Instanz hat die Verbindung getrennt"

#: src/window.py:644
msgid "There was an error with the local Ollama instance, so it has been reset"
msgstr ""
"Es gab einen Fehler mit der lokalen Ollama-Instanz, daher wurde sie "
"zurückgesetzt"

#: src/window.py:666
msgid "Cannot open image"
msgstr "Bild kann nicht geöffnet werden"

#: src/window.py:723
msgid "Delete Chat?"
msgstr "Chat löschen?"

#: src/window.py:724 src/custom_widgets/model_widget.py:330
msgid "Are you sure you want to delete '{}'?"
msgstr "Sind Sie sicher, dass Sie '{}' löschen möchten?"

#: src/window.py:726 src/custom_widgets/model_widget.py:332
msgid "Delete"
msgstr "Löschen"

#: src/window.py:733
msgid "Rename Chat?"
msgstr "Chat umbenennen?"

#: src/window.py:734
msgid "Renaming '{}'"
msgstr "'{}' umbenennen"

#: src/window.py:736
msgid "Chat name"
msgstr "Chat-Name"

#: src/window.py:737
msgid "Rename"
msgstr "Umbenennen"

#: src/window.py:765
msgid "This video does not have any transcriptions"
msgstr "Dieses Video hat keine Transkriptionen"

#: src/window.py:769
msgid "Attach YouTube Video?"
msgstr "YouTube-Video anhängen?"

#: src/window.py:770
msgid ""
"{}\n"
"\n"
"Please select a transcript to include"
msgstr ""
"{}\n"
"\n"
"Bitte wählen Sie ein Transkript zum Einbinden aus"

#: src/window.py:776
msgid "This video is not available"
msgstr "Dieses Video ist nicht verfügbar"

#: src/window.py:779
msgid "Attach Website? (Experimental)"
msgstr "Website anhängen? (Experimentell)"

#: src/window.py:780
msgid ""
"Are you sure you want to attach\n"
"'{}'?"
msgstr ""
"Sind Sie sicher, dass Sie\n"
"'{}' anhängen möchten?"

#: src/window.py:798 src/generic_actions.py:70
msgid "Image recognition is only available on specific models"
msgstr "Bilderkennung ist nur bei bestimmten Modellen verfügbar"

#: src/window.py:839 src/custom_widgets/message_widget.py:44
#: src/custom_widgets/dialog_widget.py:136
#: src/custom_widgets/dialog_widget.py:148
#: src/custom_widgets/dialog_widget.py:160
msgid "Cancel"
msgstr "Abbrechen"

#: src/window.py:847
msgid "Connect Remote Instance"
msgstr "Remote-Instanz verbinden"

#: src/window.py:848
msgid "Enter instance information to continue"
msgstr "Geben Sie die Instanzinformationen ein, um fortzufahren"

#: src/window.py:923
msgid "Clear Chat?"
msgstr "Chat leeren?"

#: src/window.py:923
msgid "Are you sure you want to clear the chat?"
msgstr "Sind Sie sicher, dass Sie den Chat leeren möchten?"

#: src/window.py:923
msgid "Clear"
msgstr "Leeren"

#: src/window.py:925
msgid "Select Model"
msgstr "Modell auswählen"

#: src/window.py:925
msgid "This model will be used as the base for the new model"
msgstr "Dieses Modell wird als Basis für das neue Modell verwendet"

#: src/window.py:927
msgid "Pull Model"
msgstr "Modell abrufen"

#: src/window.py:927
msgid ""
"Input the name of the model in this format\n"
"name:tag"
msgstr ""
"Geben Sie den Namen des Modells in diesem Format ein\n"
"Name:Tag"

#: src/window.py:949
msgid "Remove Attachment?"
msgstr "Anhang entfernen?"

#: src/window.py:949
msgid "Are you sure you want to remove attachment?"
msgstr "Sind Sie sicher, dass Sie den Anhang entfernen möchten?"

#: src/window.py:949
msgid "Remove"
msgstr "Entfernen"

#: src/available_models_descriptions.py:2
msgid "Meta's Llama 3.2 goes small with 1B and 3B models."
msgstr "Metas Llama 3.2 wird klein mit 1B- und 3B-Modellen."

#: src/available_models_descriptions.py:3
msgid ""
"Llama 3.1 is a new state-of-the-art model from Meta available in 8B, 70B and "
"405B parameter sizes."
msgstr ""
"Llama 3.1 ist ein neues State-of-the-Art-Modell von Meta, das in den Größen "
"8B, 70B und 405B Parameter verfügbar ist."

#: src/available_models_descriptions.py:4
msgid ""
"Google Gemma 2 is a high-performing and efficient model available in three "
"sizes: 2B, 9B, and 27B."
msgstr ""
"Google Gemma 2 ist ein leistungsstarkes und effizientes Modell, das in drei "
"Größen verfügbar ist: 2B, 9B und 27B."

#: src/available_models_descriptions.py:5
msgid ""
"Qwen2.5 models are pretrained on Alibaba's latest large-scale dataset, "
"encompassing up to 18 trillion tokens. The model supports up to 128K tokens "
"and has multilingual support."
msgstr ""
"Qwen2.5-Modelle sind auf Alibabas neuestem großen Datensatz mit bis zu 18 "
"Billionen Token vortrainiert. Das Modell unterstützt bis zu 128K Token und "
"ist mehrsprachig."

#: src/available_models_descriptions.py:6
msgid ""
"A lightweight AI model with 3.8 billion parameters with performance "
"overtaking similarly and larger sized models."
msgstr ""
"Ein leichtgewichtiges KI-Modell mit 3,8 Milliarden Parametern, dessen "
"Leistung vergleichbare und größere Modelle übertrifft."

#: src/available_models_descriptions.py:7
msgid ""
"A commercial-friendly small language model by NVIDIA optimized for roleplay, "
"RAG QA, and function calling."
msgstr ""
"Ein kommerziell nutzbares kleines Sprachmodell von NVIDIA, optimiert für "
"Rollenspiele, RAG-QA und Funktionsaufrufe."

#: src/available_models_descriptions.py:8
msgid ""
"Mistral Small is a lightweight model designed for cost-effective use in "
"tasks like translation and summarization."
msgstr ""
"Mistral Small ist ein leichtgewichtiges Modell für den kosteneffizienten "
"Einsatz bei Aufgaben wie Übersetzung und Zusammenfassung."

#: src/available_models_descriptions.py:9
msgid ""
"A state-of-the-art 12B model with 128k context length, built by Mistral AI "
"in collaboration with NVIDIA."
msgstr ""
"Ein State-of-the-Art 12B-Modell mit 128k Kontextlänge, entwickelt von "
"Mistral AI in Zusammenarbeit mit NVIDIA."

#: src/available_models_descriptions.py:10
msgid ""
"An open-source Mixture-of-Experts code language model that achieves "
"performance comparable to GPT4-Turbo in code-specific tasks."
msgstr ""
"Ein Open-Source-Mixture-of-Experts-Codesprachmodell, das bei "
"codespezifischen Aufgaben eine Leistung vergleichbar mit GPT4-Turbo erreicht."

#: src/available_models_descriptions.py:11
msgid "The 7B model released by Mistral AI, updated to version 0.3."
msgstr ""
"Das von Mistral AI veröffentlichte 7B-Modell, aktualisiert auf Version 0.3."

#: src/available_models_descriptions.py:12
msgid ""
"A set of Mixture of Experts (MoE) model with open weights by Mistral AI in "
"8x7b and 8x22b parameter sizes."
msgstr ""
"Eine Reihe von Mixture-of-Experts (MoE)-Modellen mit offenen Gewichten von "
"Mistral AI in den Parametergrößen 8x7b und 8x22b."

#: src/available_models_descriptions.py:13
msgid ""
"CodeGemma is a collection of powerful, lightweight models that can perform a "
"variety of coding tasks like fill-in-the-middle code completion, code "
"generation, natural language understanding, mathematical reasoning, and "
"instruction following."
msgstr ""
"CodeGemma ist eine Sammlung leistungsstarker, leichtgewichtiger Modelle, die "
"eine Vielzahl von Codieraufgaben ausführen können, wie z. B. Fill-in-the-"
"Middle-Code-Vervollständigung, Code-Generierung, Verständnis natürlicher "
"Sprache, mathematisches Schlussfolgern und Anweisungsbefolgung."

#: src/available_models_descriptions.py:14
msgid ""
"Command R is a Large Language Model optimized for conversational interaction "
"and long context tasks."
msgstr ""
"Command R ist ein großes Sprachmodell, das für die Konversationsinteraktion "
"und Aufgaben mit langem Kontext optimiert ist."

#: src/available_models_descriptions.py:15
msgid ""
"Command R+ is a powerful, scalable large language model purpose-built to "
"excel at real-world enterprise use cases."
msgstr ""
"Command R+ ist ein leistungsstarkes, skalierbares großes Sprachmodell, das "
"speziell dafür entwickelt wurde, sich bei realen "
"Unternehmensanwendungsfällen zu bewähren."

#: src/available_models_descriptions.py:16
msgid ""
"🌋 LLaVA is a novel end-to-end trained large multimodal model that combines "
"a vision encoder and Vicuna for general-purpose visual and language "
"understanding. Updated to version 1.6."
msgstr ""
"🌋 LLaVA ist ein neuartiges End-to-End-trainiertes großes multimodales "
"Modell, das einen Vision-Encoder und Vicuna für das allgemeine Verständnis "
"von Bildern und Sprache kombiniert. Aktualisiert auf Version 1.6."

#: src/available_models_descriptions.py:17
msgid "Meta Llama 3: The most capable openly available LLM to date"
msgstr "Meta Llama 3: Das derzeit leistungsfähigste offen verfügbare LLM"

#: src/available_models_descriptions.py:18
msgid ""
"Gemma is a family of lightweight, state-of-the-art open models built by "
"Google DeepMind. Updated to version 1.1"
msgstr ""
"Gemma ist eine Familie leichtgewichtiger, hochmoderner offener Modelle, die "
"von Google DeepMind entwickelt wurden. Aktualisiert auf Version 1.1"

#: src/available_models_descriptions.py:19
msgid ""
"Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from "
"0.5B to 110B parameters"
msgstr ""
"Qwen 1.5 ist eine Reihe großer Sprachmodelle von Alibaba Cloud, die von 0,5 "
"Mrd. bis 110 Mrd. Parameter reichen"

#: src/available_models_descriptions.py:20
msgid "Qwen2 is a new series of large language models from Alibaba group"
msgstr "Qwen2 ist eine neue Serie großer Sprachmodelle der Alibaba Group"

#: src/available_models_descriptions.py:21
msgid ""
"Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art "
"open models by Microsoft."
msgstr ""
"Phi-3 ist eine Familie leichtgewichtiger 3B (Mini) und 14B (Medium) State-of-"
"the-Art Open-Source-Modelle von Microsoft."

#: src/available_models_descriptions.py:22
msgid ""
"Llama 2 is a collection of foundation language models ranging from 7B to 70B "
"parameters."
msgstr ""
"Llama 2 ist eine Sammlung von Grundlagensprachmodellen mit 7B bis 70B "
"Parametern."

#: src/available_models_descriptions.py:23
msgid ""
"A large language model that can use text prompts to generate and discuss "
"code."
msgstr ""
"Ein großes Sprachmodell, das Textaufforderungen verwenden kann, um Code zu "
"generieren und zu diskutieren."

#: src/available_models_descriptions.py:24
msgid ""
"A high-performing open embedding model with a large token context window."
msgstr ""
"Ein leistungsstarkes offenes Einbettungsmodell mit einem großen Token-"
"Kontextfenster."

#: src/available_models_descriptions.py:25
msgid "State-of-the-art large embedding model from mixedbread.ai"
msgstr "State-of-the-Art großes Einbettungsmodell von mixedbread.ai"

#: src/available_models_descriptions.py:26
msgid ""
"Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of "
"experts models that excels at coding tasks. Created by Eric Hartford."
msgstr ""
"Unzensierte, 8x7b und 8x22b feinabgestimmte Modelle basierend auf den "
"Mixtral-Mixture-of-Experts-Modellen, die sich bei Codierungsaufgaben "
"auszeichnen. Erstellt von Eric Hartford."

#: src/available_models_descriptions.py:27
msgid ""
"Phi-2: a 2.7B language model by Microsoft Research that demonstrates "
"outstanding reasoning and language understanding capabilities."
msgstr ""
"Phi-2: ein 2,7-Milliarden-Sprachmodell von Microsoft Research, das "
"herausragende Fähigkeiten beim Schlussfolgern und Sprachverständnis "
"demonstriert."

#: src/available_models_descriptions.py:28
msgid ""
"DeepSeek Coder is a capable coding model trained on two trillion code and "
"natural language tokens."
msgstr ""
"DeepSeek Coder ist ein leistungsfähiges Codiermodell, das mit zwei Billionen "
"Code- und natürlichen Sprach-Token trainiert wurde."

#: src/available_models_descriptions.py:29
msgid ""
"StarCoder2 is the next generation of transparently trained open code LLMs "
"that comes in three sizes: 3B, 7B and 15B parameters."
msgstr ""
"StarCoder2 ist die nächste Generation transparent trainierter offener Code-"
"LLMs, die in drei Größen erhältlich ist: 3B, 7B und 15B Parameter."

#: src/available_models_descriptions.py:30
msgid "Uncensored Llama 2 model by George Sung and Jarrad Hope."
msgstr "Unzensiertes Llama 2-Modell von George Sung und Jarrad Hope."

#: src/available_models_descriptions.py:31
msgid ""
"The uncensored Dolphin model based on Mistral that excels at coding tasks. "
"Updated to version 2.8."
msgstr ""
"Das unzensierte Dolphin-Modell basierend auf Mistral, das sich bei "
"Codierungsaufgaben auszeichnet. Aktualisiert auf Version 2.8."

#: src/available_models_descriptions.py:32
msgid ""
"Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models "
"that are trained to act as helpful assistants."
msgstr ""
"Zephyr ist eine Reihe von feinabgestimmten Versionen der Mistral- und "
"Mixtral-Modelle, die darauf trainiert sind, als hilfreiche Assistenten zu "
"fungieren."

#: src/available_models_descriptions.py:33
msgid "Yi 1.5 is a high-performing, bilingual language model."
msgstr "Yi 1.5 ist ein leistungsstarkes zweisprachiges Sprachmodell."

#: src/available_models_descriptions.py:34
msgid ""
"Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on "
"Llama 3 that has a variety of instruction, conversational, and coding skills."
msgstr ""
"Dolphin 2.9 ist ein neues Modell mit 8B und 70B Größen von Eric Hartford "
"basierend auf Llama 3, das eine Vielzahl von Anweisungs-, Konversations- und "
"Codierungsfähigkeiten besitzt."

#: src/available_models_descriptions.py:35
msgid ""
"A general-purpose model ranging from 3 billion parameters to 70 billion, "
"suitable for entry-level hardware."
msgstr ""
"Ein universelles Modell mit 3 Milliarden bis 70 Milliarden Parametern, "
"geeignet für Einstiegshardware."

#: src/available_models_descriptions.py:36
msgid ""
"A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several "
"benchmarks."
msgstr ""
"Ein LLaVA-Modell, feinabgestimmt auf Basis von Llama 3 Instruct, mit "
"besseren Ergebnissen in mehreren Benchmarks."

#: src/available_models_descriptions.py:37
msgid ""
"The latest series of Code-Specific Qwen models, with significant "
"improvements in code generation, code reasoning, and code fixing."
msgstr ""
"Die neueste Reihe codespezifischer Qwen-Modelle mit deutlichen "
"Verbesserungen bei Codegenerierung, Code-Reasoning und Codekorrektur."

#: src/available_models_descriptions.py:38
msgid ""
"Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the "
"Mistral 7B model using the OpenOrca dataset."
msgstr ""
"Mistral OpenOrca ist ein Modell mit 7 Milliarden Parametern, das auf dem "
"Mistral 7B-Modell unter Verwendung des OpenOrca-Datensatzes feinabgestimmt "
"wurde."

#: src/available_models_descriptions.py:39
msgid ""
"StarCoder is a code generation model trained on 80+ programming languages."
msgstr ""
"StarCoder ist ein Codegenerierungsmodell, das auf über 80 "
"Programmiersprachen trainiert wurde."

#: src/available_models_descriptions.py:40
msgid ""
"The TinyLlama project is an open endeavor to train a compact 1.1B Llama "
"model on 3 trillion tokens."
msgstr ""
"Das TinyLlama-Projekt ist ein offenes Unterfangen, um ein kompaktes 1,1-"
"Milliarden-Llama-Modell mit 3 Billionen Token zu trainieren."

#: src/available_models_descriptions.py:41
msgid ""
"Codestral is Mistral AI’s first-ever code model designed for code generation "
"tasks."
msgstr ""
"Codestral ist Mistral AIs erstes Code-Modell, das für Code-"
"Generierungsaufgaben entwickelt wurde."

#: src/available_models_descriptions.py:42
msgid ""
"General use chat model based on Llama and Llama 2 with 2K to 16K context "
"sizes."
msgstr ""
"Allgemeines Chat-Modell basierend auf Llama und Llama 2 mit Kontextgrößen "
"von 2K bis 16K."

#: src/available_models_descriptions.py:43
msgid "Llama 2 based model fine tuned to improve Chinese dialogue ability."
msgstr ""
"Auf Llama 2 basierendes Modell, das zur Verbesserung der chinesischen "
"Dialogfähigkeit feinabgestimmt wurde."

#: src/available_models_descriptions.py:44
msgid ""
"A suite of text embedding models by Snowflake, optimized for performance."
msgstr ""
"Eine Sammlung von Text-Embedding-Modellen von Snowflake, optimiert für die "
"Leistung."

#: src/available_models_descriptions.py:45
msgid ""
"Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on "
"Llama 2 uncensored by Eric Hartford."
msgstr ""
"Wizard Vicuna Uncensored ist ein 7B-, 13B- und 30B-Parametermodell, das auf "
"Llama 2 Uncensored von Eric Hartford basiert."

#: src/available_models_descriptions.py:46
msgid "A family of open foundation models by IBM for Code Intelligence"
msgstr "Eine Familie offener Grundlagenmodelle von IBM für Code Intelligence"

#: src/available_models_descriptions.py:47
msgid ""
"A versatile model for AI software development scenarios, including code "
"completion."
msgstr ""
"Ein vielseitiges Modell für KI-Softwareentwicklungsszenarien, einschließlich "
"Code-Vervollständigung."

#: src/available_models_descriptions.py:48
msgid ""
"The powerful family of models by Nous Research that excels at scientific "
"discussion and coding tasks."
msgstr ""
"Die leistungsstarke Modellfamilie von Nous Research, die sich bei "
"wissenschaftlichen Diskussionen und Codierungsaufgaben auszeichnet."

#: src/available_models_descriptions.py:49
msgid "Embedding models on very large sentence level datasets."
msgstr "Einbettungsmodelle auf sehr großen Datensätzen auf Satzebene."

#: src/available_models_descriptions.py:50
msgid ""
"A family of open-source models trained on a wide variety of data, surpassing "
"ChatGPT on various benchmarks. Updated to version 3.5-0106."
msgstr ""
"Eine Familie von Open-Source-Modellen, die auf einer Vielzahl von Daten "
"trainiert wurden und ChatGPT bei verschiedenen Benchmarks übertreffen. "
"Aktualisiert auf Version 3.5-0106."

#: src/available_models_descriptions.py:51
msgid ""
"Aya 23, released by Cohere, is a new family of state-of-the-art, "
"multilingual models that support 23 languages."
msgstr ""
"Aya 23, veröffentlicht von Cohere, ist eine neue Familie von State-of-the-"
"Art, mehrsprachigen Modellen, die 23 Sprachen unterstützen."

#: src/available_models_descriptions.py:52
msgid ""
"CodeQwen1.5 is a large language model pretrained on a large amount of code "
"data."
msgstr ""
"CodeQwen1.5 ist ein großes Sprachmodell, das auf einer großen Menge von "
"Codedaten vortrainiert wurde."

#: src/available_models_descriptions.py:53
msgid ""
"State of the art large language model from Microsoft AI with improved "
"performance on complex chat, multilingual, reasoning and agent use cases."
msgstr ""
"State-of-the-Art-Großsprachenmodell von Microsoft AI mit verbesserter "
"Leistung bei komplexen Chat-, mehrsprachigen, Reasoning- und Agenten-"
"Anwendungsfällen."

#: src/available_models_descriptions.py:54
msgid ""
"An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset "
"by Eric Hartford and based on TinyLlama."
msgstr ""
"Ein experimentelles 1,1-Milliarden-Parameter-Modell, das auf dem neuen "
"Dolphin-2.8-Datensatz von Eric Hartford trainiert wurde und auf TinyLlama "
"basiert."

#: src/available_models_descriptions.py:55
msgid "State-of-the-art code generation model"
msgstr "State-of-the-Art-Codegenerierungsmodell"

#: src/available_models_descriptions.py:56
msgid ""
"Stable Code 3B is a coding model with instruct and code completion variants "
"on par with models such as Code Llama 7B that are 2.5x larger."
msgstr ""
"Stable Code 3B ist ein Codierungsmodell mit Anweisungs- und "
"Codevervollständigungsvarianten, das Modellen wie Code Llama 7B, die 2,5-mal "
"größer sind, ebenbürtig ist."

#: src/available_models_descriptions.py:57
msgid ""
"OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully "
"open datasets."
msgstr ""
"OpenHermes 2.5 ist ein 7B-Modell, das von Teknium auf Mistral mit "
"vollständig offenen Datensätzen feinabgestimmt wurde."

#: src/available_models_descriptions.py:58
msgid ""
"Qwen2 Math is a series of specialized math language models built upon the "
"Qwen2 LLMs, which significantly outperforms the mathematical capabilities of "
"open-source models and even closed-source models (e.g., GPT4o)."
msgstr ""
"Qwen2 Math ist eine Reihe spezialisierter mathematischer Sprachmodelle, die "
"auf den Qwen2-LLMs aufbauen und die mathematischen Fähigkeiten von Open-"
"Source-Modellen und sogar Closed-Source-Modellen (z. B. GPT4o) deutlich "
"übertreffen."

#: src/available_models_descriptions.py:59
msgid ""
"BakLLaVA is a multimodal model consisting of the Mistral 7B base model "
"augmented with the LLaVA architecture."
msgstr ""
"BakLLaVA ist ein multimodales Modell, das aus dem Mistral-7B-Basismodell "
"besteht, das um die LLaVA-Architektur erweitert wurde."

#: src/available_models_descriptions.py:60
msgid ""
"Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model "
"trained on multilingual data in English, Spanish, German, Italian, French, "
"Portuguese, and Dutch."
msgstr ""
"Stable LM 2 ist ein State-of-the-Art-Sprachmodell mit 1,6 Mrd. und 12 Mrd. "
"Parametern, das auf mehrsprachigen Daten in Englisch, Spanisch, Deutsch, "
"Italienisch, Französisch, Portugiesisch und Niederländisch trainiert wurde."

#: src/available_models_descriptions.py:61
msgid ""
"This model extends LLama-3 8B's context length from 8k to over 1m tokens."
msgstr ""
"Dieses Modell erweitert die Kontextlänge von LLama-3 8B von 8k auf über 1 "
"Million Token."

#: src/available_models_descriptions.py:62
msgid "An advanced language model crafted with 2 trillion bilingual tokens."
msgstr ""
"Ein fortschrittliches Sprachmodell, das mit 2 Billionen zweisprachigen Token "
"erstellt wurde."

#: src/available_models_descriptions.py:63
msgid "Model focused on math and logic problems"
msgstr "Modell, das sich auf Mathematik- und Logikprobleme konzentriert"

#: src/available_models_descriptions.py:64
msgid ""
"A strong multi-lingual general language model with competitive performance "
"to Llama 3."
msgstr ""
"Ein starkes mehrsprachiges allgemeines Sprachmodell mit wettbewerbsfähiger "
"Leistung gegenüber Llama 3."

#: src/available_models_descriptions.py:65
msgid ""
"A fine-tuned model based on Mistral with good coverage of domain and "
"language."
msgstr ""
"Ein feinabgestimmtes Modell basierend auf Mistral mit guter Abdeckung von "
"Domäne und Sprache."

#: src/available_models_descriptions.py:66
msgid ""
"A high-performing model trained with a new technique called Reflection-"
"tuning that teaches a LLM to detect mistakes in its reasoning and correct "
"course."
msgstr ""
"Ein leistungsstarkes Modell, das mit einer neuen Technik namens Reflection-"
"Tuning trainiert wurde, die einem LLM beibringt, Fehler in seinem "
"Schlussfolgern zu erkennen und zu korrigieren."

#: src/available_models_descriptions.py:67
msgid ""
"A model from NVIDIA based on Llama 3 that excels at conversational question "
"answering (QA) and retrieval-augmented generation (RAG)."
msgstr ""
"Ein Modell von NVIDIA, das auf Llama 3 basiert und sich bei "
"konversationeller Fragebeantwortung (QA) und abrufgestützter Generierung "
"(RAG) auszeichnet."

#: src/available_models_descriptions.py:68
msgid ""
"Mistral Large 2 is Mistral's new flagship model that is significantly more "
"capable in code generation, mathematics, and reasoning with 128k context "
"window and support for dozens of languages."
msgstr ""
"Mistral Large 2 ist Mistrals neues Vorzeigemodell, das bei der "
"Codegenerierung, Mathematik und logischem Denken mit einem 128k-"
"Kontextfenster und Unterstützung für Dutzende von Sprachen deutlich "
"leistungsfähiger ist."

#: src/available_models_descriptions.py:69
msgid ""
"moondream2 is a small vision language model designed to run efficiently on "
"edge devices."
msgstr ""
"moondream2 ist ein kleines Vision-Language-Modell, das für den effizienten "
"Betrieb auf Edge-Geräten entwickelt wurde."

#: src/available_models_descriptions.py:70
msgid ""
"Conversational model based on Llama 2 that performs competitively on various "
"benchmarks."
msgstr ""
"Konversationsmodell basierend auf Llama 2, das bei verschiedenen Benchmarks "
"konkurrenzfähig abschneidet."

#: src/available_models_descriptions.py:71
msgid "Code generation model based on Code Llama."
msgstr "Codegenerierungsmodell basierend auf Code Llama."

#: src/available_models_descriptions.py:72
msgid "General use models based on Llama and Llama 2 from Nous Research."
msgstr ""
"Allgemeine Gebrauchsmodelle basierend auf Llama und Llama 2 von Nous "
"Research."

#: src/available_models_descriptions.py:73
msgid ""
"SQLCoder is a code completion model fined-tuned on StarCoder for SQL "
"generation tasks"
msgstr ""
"SQLCoder ist ein Codevervollständigungsmodell, das auf StarCoder für SQL-"
"Generierungsaufgaben feinabgestimmt wurde"

#: src/available_models_descriptions.py:74
msgid ""
"A 7B and 15B uncensored variant of the Dolphin model family that excels at "
"coding, based on StarCoder2."
msgstr ""
"Eine 7B- und 15B-unzensierte Variante der Dolphin-Modellfamilie, die sich "
"beim Codieren auszeichnet, basierend auf StarCoder2."

#: src/available_models_descriptions.py:75
msgid "An extension of Llama 2 that supports a context of up to 128k tokens."
msgstr ""
"Eine Erweiterung von Llama 2, die einen Kontext von bis zu 128k Token "
"unterstützt."

#: src/available_models_descriptions.py:76
msgid ""
"🪐 A family of small models with 135M, 360M, and 1.7B parameters, trained on "
"a new high-quality dataset."
msgstr ""
"🪐 Eine Familie von kleinen Modellen mit 135M, 360M und 1.7B Parametern, "
"trainiert auf einem neuen hochwertigen Datensatz."

#: src/available_models_descriptions.py:77
msgid "General use model based on Llama 2."
msgstr "Allgemeines Gebrauchsmodell basierend auf Llama 2."

#: src/available_models_descriptions.py:78
msgid "A strong, economical, and efficient Mixture-of-Experts language model."
msgstr ""
"Ein starkes, wirtschaftliches und effizientes Mixture-of-Experts-"
"Sprachmodell."

#: src/available_models_descriptions.py:79
msgid ""
"Starling is a large language model trained by reinforcement learning from AI "
"feedback focused on improving chatbot helpfulness."
msgstr ""
"Starling ist ein großes Sprachmodell, das durch Reinforcement Learning aus "
"KI-Feedback trainiert wurde, um die Nützlichkeit von Chatbots zu verbessern."

#: src/available_models_descriptions.py:80
msgid ""
"A companion assistant trained in philosophy, psychology, and personal "
"relationships. Based on Mistral."
msgstr ""
"Ein Begleitassistent, der in Philosophie, Psychologie und persönlichen "
"Beziehungen geschult ist. Basierend auf Mistral."

#: src/available_models_descriptions.py:81
msgid ""
"A compact, yet powerful 10.7B large language model designed for single-turn "
"conversation."
msgstr ""
"Ein kompaktes, aber leistungsstarkes 10,7-Milliarden-Large-Language-Modell, "
"das für einmalige Konversation entwickelt wurde."

#: src/available_models_descriptions.py:82
msgid ""
"Orca 2 is built by Microsoft research, and are a fine-tuned version of "
"Meta's Llama 2 models. The model is designed to excel particularly in "
"reasoning."
msgstr ""
"Orca 2 wurde von Microsoft Research entwickelt und ist eine feinabgestimmte "
"Version von Metas Llama-2-Modellen. Das Modell ist speziell darauf "
"ausgelegt, sich besonders beim logischen Schlussfolgern auszuzeichnen."

#: src/available_models_descriptions.py:83
msgid ""
"Llama 2 based model fine tuned on an Orca-style dataset. Originally called "
"Free Willy."
msgstr ""
"Auf Llama 2 basierendes Modell, das auf einem Orca-ähnlichen Datensatz "
"feinabgestimmt wurde. Ursprünglich Free Willy genannt."

#: src/available_models_descriptions.py:84
msgid ""
"2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language "
"model by Microsoft Research."
msgstr ""
"2,7-Milliarden-unzensiertes Dolphin-Modell von Eric Hartford, basierend auf "
"dem Phi-Sprachmodell von Microsoft Research."

#: src/available_models_descriptions.py:85
msgid "Uncensored version of Wizard LM model"
msgstr "Unzensierte Version des Wizard-LM-Modells"

#: src/available_models_descriptions.py:86
msgid ""
"Hermes 3 is the latest version of the flagship Hermes series of LLMs by Nous "
"Research"
msgstr ""
"Hermes 3 ist die neueste Version der Hermes-Flaggschiff-Reihe von LLMs von "
"Nous Research"

#: src/available_models_descriptions.py:87
msgid ""
"Yi-Coder is a series of open-source code language models that delivers state-"
"of-the-art coding performance with fewer than 10 billion parameters."
msgstr ""
"Yi-Coder ist eine Reihe von Open-Source-Codesprachmodellen, die State-of-"
"the-Art-Codierleistung mit weniger als 10 Milliarden Parametern liefern."

#: src/available_models_descriptions.py:88
msgid "A new small LLaVA model fine-tuned from Phi 3 Mini."
msgstr ""
"Ein neues kleines LLaVA-Modell, feinabgestimmt auf Basis von Phi 3 Mini."

#: src/available_models_descriptions.py:89
msgid ""
"InternLM2.5 is a 7B parameter model tailored for practical scenarios with "
"outstanding reasoning capability."
msgstr ""
"InternLM2.5 ist ein 7B-Parametermodell, das auf praktische Szenarien mit "
"herausragender Schlussfolgerungsfähigkeit zugeschnitten ist."

#: src/available_models_descriptions.py:90
msgid "An extension of Mistral to support context windows of 64K or 128K."
msgstr ""
"Eine Erweiterung von Mistral zur Unterstützung von Kontextfenstern von 64K "
"oder 128K."

#: src/available_models_descriptions.py:91
msgid ""
"An expansion of Llama 2 that specializes in integrating both general "
"language understanding and domain-specific knowledge, particularly in "
"programming and mathematics."
msgstr ""
"Eine Erweiterung von Llama 2, die sich darauf spezialisiert hat, sowohl "
"allgemeines Sprachverständnis als auch domänenspezifisches Wissen, "
"insbesondere im Bereich Programmierung und Mathematik, zu integrieren."

#: src/available_models_descriptions.py:92
msgid ""
"Fine-tuned Llama 2 model to answer medical questions based on an open source "
"medical dataset."
msgstr ""
"Feinabgestimmtes Llama-2-Modell zur Beantwortung medizinischer Fragen "
"basierend auf einem Open-Source-Medizindatensatz."

#: src/available_models_descriptions.py:93
msgid ""
"Open-source medical large language model adapted from Llama 2 to the medical "
"domain."
msgstr ""
"Open-Source-medizinisches Großsprachenmodell, das von Llama 2 auf den "
"medizinischen Bereich angepasst wurde."

#: src/available_models_descriptions.py:94
msgid ""
"Nexus Raven is a 13B instruction tuned model for function calling tasks."
msgstr ""
"Nexus Raven ist ein 13B-Anweisungsmodell, das für Funktionsaufrufaufgaben "
"optimiert wurde."

#: src/available_models_descriptions.py:95
msgid "The Nous Hermes 2 model from Nous Research, now trained over Mixtral."
msgstr ""
"Das Nous-Hermes-2-Modell von Nous Research, jetzt über Mixtral trainiert."

#: src/available_models_descriptions.py:96
msgid "Great code generation model based on Llama2."
msgstr "Großartiges Codegenerierungsmodell basierend auf Llama2."

#: src/available_models_descriptions.py:97
msgid ""
"A series of models from Groq that represent a significant advancement in "
"open-source AI capabilities for tool use/function calling."
msgstr ""
"Eine Reihe von Modellen von Groq, die einen bedeutenden Fortschritt in den "
"Open-Source-KI-Fähigkeiten für die Verwendung von Werkzeugen/"
"Funktionsaufrufen darstellen."

#: src/available_models_descriptions.py:98
msgid "Uncensored Llama2 based model with support for a 16K context window."
msgstr ""
"Unzensiertes Llama2-basiertes Modell mit Unterstützung für ein 16K-"
"Kontextfenster."

#: src/available_models_descriptions.py:99
msgid ""
"🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic "
"instruction data using OSS-Instruct, a novel approach to enlightening LLMs "
"with open-source code snippets."
msgstr ""
"🎩 Magicoder ist eine Familie von 7B-Parametermodellen, die mit 75K "
"synthetischen Anweisungsdaten unter Verwendung von OSS-Instruct trainiert "
"wurden, einem neuartigen Ansatz zur Aufklärung von LLMs mit Open-Source-"
"Codeschnipseln."

#: src/available_models_descriptions.py:100
msgid ""
"A lightweight chat model allowing accurate, and responsive output without "
"requiring high-end hardware."
msgstr ""
"Ein leichtgewichtiges Chatmodell, das eine genaue und reaktionsschnelle "
"Ausgabe ermöglicht, ohne High-End-Hardware zu benötigen."

#: src/available_models_descriptions.py:101
msgid ""
"A high-performing code instruct model created by merging two existing code "
"models."
msgstr ""
"Ein leistungsstarkes Code-Anweisungsmodell, das durch Zusammenführen von "
"zwei bestehenden Code-Modellen erstellt wurde."

#: src/available_models_descriptions.py:102
msgid ""
"Wizard Vicuna is a 13B parameter model based on Llama 2 trained by "
"MelodysDreamj."
msgstr ""
"Wizard Vicuna ist ein 13B-Parametermodell basierend auf Llama 2, das von "
"MelodysDreamj trainiert wurde."

#: src/available_models_descriptions.py:103
msgid ""
"MistralLite is a fine-tuned model based on Mistral with enhanced "
"capabilities of processing long contexts."
msgstr ""
"MistralLite ist ein feinabgestimmtes Modell basierend auf Mistral mit "
"verbesserten Fähigkeiten zur Verarbeitung langer Kontexte."

#: src/available_models_descriptions.py:104
msgid ""
"Falcon2 is an 11B parameters causal decoder-only model built by TII and "
"trained over 5T tokens."
msgstr ""
"Falcon2 ist ein kausales Decoder-only-Modell mit 11B Parametern, das von TII "
"entwickelt und über 5T Token trainiert wurde."

#: src/available_models_descriptions.py:105
msgid "7B parameter text-to-SQL model made by MotherDuck and Numbers Station."
msgstr "7B-Parameter-Text-zu-SQL-Modell von MotherDuck und Numbers Station."

#: src/available_models_descriptions.py:106
msgid ""
"A series of multimodal LLMs (MLLMs) designed for vision-language "
"understanding."
msgstr ""
"Eine Reihe multimodaler LLMs (MLLMs), die für das Verständnis von Bild und "
"Sprache entwickelt wurden."

#: src/available_models_descriptions.py:107
msgid ""
"MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by "
"interleaving the model with itself."
msgstr ""
"MegaDolphin-2.2-120b ist eine Transformation von Dolphin-2.2-70b, die durch "
"Verschachtelung des Modells mit sich selbst erstellt wurde."

#: src/available_models_descriptions.py:108
msgid ""
"A top-performing mixture of experts model, fine-tuned with high-quality data."
msgstr ""
"Ein leistungsstarkes Mixture-of-Experts-Modell, feinabgestimmt mit "
"hochwertigen Daten."

#: src/available_models_descriptions.py:109
msgid ""
"A language model created by combining two fine-tuned Llama 2 70B models into "
"one."
msgstr ""
"Ein Sprachmodell, das durch Kombination von zwei feinabgestimmten "
"Llama-2-70B-Modellen zu einem einzigen Modell erstellt wurde."

#: src/available_models_descriptions.py:110
msgid ""
"Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. "
"Designed for chat and code generation."
msgstr ""
"Zusammenführung des Open-Orca-OpenChat-Modells und des Garage-bAInd-"
"Platypus-2-Modells. Entwickelt für Chat und Codegenerierung."

#: src/available_models_descriptions.py:111
msgid "A 7B chat model fine-tuned with high-quality data and based on Zephyr."
msgstr ""
"Ein 7B-Chatmodell, feinabgestimmt mit hochwertigen Daten und basierend auf "
"Zephyr."

#: src/available_models_descriptions.py:112
msgid ""
"BGE-M3 is a new model from BAAI distinguished for its versatility in Multi-"
"Functionality, Multi-Linguality, and Multi-Granularity."
msgstr ""
"BGE-M3 ist ein neues Modell von BAAI, das sich durch seine Vielseitigkeit in "
"Multi-Funktionalität, Mehrsprachigkeit und Multigranularität auszeichnet."

#: src/available_models_descriptions.py:113
msgid ""
"MathΣtral: a 7B model designed for math reasoning and scientific discovery "
"by Mistral AI."
msgstr ""
"MathΣtral: ein 7B-Modell, das von Mistral AI für mathematisches "
"Schlussfolgern und wissenschaftliche Entdeckungen entwickelt wurde."

#: src/available_models_descriptions.py:114
msgid "DBRX is an open, general-purpose LLM created by Databricks."
msgstr ""
"DBRX ist ein offenes, allgemeines LLM, das von Databricks erstellt wurde."

#: src/available_models_descriptions.py:115
msgid ""
"Solar Pro Preview: an advanced large language model (LLM) with 22 billion "
"parameters designed to fit into a single GPU"
msgstr ""
"Solar Pro Preview: ein fortschrittliches großes Sprachmodell (LLM) mit 22 "
"Milliarden Parametern, das auf eine einzelne GPU passt"

#: src/available_models_descriptions.py:116
msgid ""
"A 3.8B model fine-tuned on a private high-quality synthetic dataset for "
"information extraction, based on Phi-3."
msgstr ""
"Ein 3,8B-Modell, das auf einem privaten synthetischen Datensatz von hoher "
"Qualität für die Informationsextraktion feinabgestimmt wurde, basierend auf "
"Phi-3."

#: src/available_models_descriptions.py:117
msgid ""
"A robust conversational model designed to be used for both chat and instruct "
"use cases."
msgstr ""
"Ein robustes Konversationsmodell, das sowohl für Chat- als auch für "
"Anweisungs-Anwendungsfälle entwickelt wurde."

#: src/available_models_descriptions.py:118
msgid ""
"An open weights function calling model based on Llama 3, competitive with "
"GPT-4o function calling capabilities."
msgstr ""
"Ein offenes Funktionsaufrufmodell basierend auf Llama 3, das mit den "
"Funktionsaufruffähigkeiten von GPT-4o konkurrenzfähig ist."

#: src/available_models_descriptions.py:119
msgid ""
"A series of models that convert HTML content to Markdown content, which is "
"useful for content conversion tasks."
msgstr ""
"Eine Reihe von Modellen, die HTML-Inhalte in Markdown-Inhalte umwandeln, was "
"für Konvertierungsaufgaben nützlich ist."

#: src/available_models_descriptions.py:120
msgid "Embedding model from BAAI mapping texts to vectors."
msgstr "Einbettungsmodell von BAAI, das Texte auf Vektoren abbildet."

#: src/available_models_descriptions.py:121
msgid ""
"An upgraded version of DeekSeek-V2 that integrates the general and coding "
"abilities of both DeepSeek-V2-Chat and DeepSeek-Coder-V2-Instruct."
msgstr ""
"Eine verbesserte Version von DeepSeek-V2, die die allgemeinen und "
"Codierfähigkeiten von DeepSeek-V2-Chat und DeepSeek-Coder-V2-Instruct "
"vereint."

#: src/available_models_descriptions.py:122
msgid "A state-of-the-art fact-checking model developed by Bespoke Labs."
msgstr ""
"Ein State-of-the-Art-Modell zur Faktenprüfung, entwickelt von Bespoke Labs."

#: src/available_models_descriptions.py:123
msgid ""
"Sentence-transformers model that can be used for tasks like clustering or "
"semantic search."
msgstr ""
"Sentence-Transformers-Modell, das für Aufgaben wie Clustering oder "
"semantische Suche verwendet werden kann."

#: src/connection_handler.py:14
msgid "Alpaca Support"
msgstr "Alpaca-Support"

#: src/connection_handler.py:25
msgid "Model request too large for system"
msgstr "Modellanfrage ist zu groß für das System"

#: src/connection_handler.py:28
msgid "AMD GPU detected but the extension is missing, Ollama will use CPU."
msgstr ""
"AMD-GPU erkannt, aber die Erweiterung fehlt, Ollama wird die CPU verwenden."

#: src/connection_handler.py:30
msgid "AMD GPU detected but ROCm is missing, Ollama will use CPU."
msgstr "AMD-GPU erkannt, aber ROCm fehlt, Ollama wird die CPU verwenden."

#: src/connection_handler.py:33
msgid "Using AMD GPU type '{}'"
msgstr "AMD-GPU-Typ '{}' wird verwendet"

#: src/connection_handler.py:94
msgid "Ollama instance was shut down due to inactivity"
msgstr "Ollama-Instanz wurde wegen Inaktivität abgeschaltet"

#: src/connection_handler.py:132
msgid "Integrated Ollama instance is running"
msgstr "Integrierte Ollama-Instanz läuft"

#: src/connection_handler.py:148 src/window.ui:479
msgid "Integrated Ollama instance is not running"
msgstr "Integrierte Ollama-Instanz läuft nicht"

#: src/window.ui:42
msgid "Menu"
msgstr "Menü"

#: src/window.ui:64
msgid "Toggle Sidebar"
msgstr "Seitenleiste ein-/ausblenden"

#: src/window.ui:71
msgid "Search Messages"
msgstr "Nachrichten durchsuchen"

#: src/window.ui:93
msgid "Loading Instance"
msgstr "Instanz wird geladen"

#: src/window.ui:105 src/window.ui:106 src/window.ui:528 src/window.ui:1007
#: src/custom_widgets/model_widget.py:24 src/custom_widgets/model_widget.py:25
msgid "Manage Models"
msgstr "Modelle verwalten"

#: src/window.ui:118
msgid "Chat Menu"
msgstr "Chat-Menü"

#: src/window.ui:127
msgid "Message search bar"
msgstr "Nachrichten-Suchleiste"

#: src/window.ui:134 src/window.ui:136
msgid "Search messages"
msgstr "Nachrichten durchsuchen"

#: src/window.ui:150
msgid ""
"Warning: Power saver mode is enabled, this will slow down message generation"
msgstr ""
"Warnung: Der Energiesparmodus ist aktiviert, dies verlangsamt die "
"Nachrichtenerstellung"

#: src/window.ui:197
msgid "Attach File"
msgstr "Datei anhängen"

#: src/window.ui:235
msgid "Message text box"
msgstr "Nachrichtentextfeld"

#: src/window.ui:248 src/window.ui:1208
msgid "Send Message"
msgstr "Nachricht senden"

#: src/window.ui:298 src/window.ui:1013 src/window.ui:1149
msgid "Preferences"
msgstr "Einstellungen"

#: src/window.ui:301 src/window.ui:1127
msgid "General"
msgstr "Allgemein"

#: src/window.ui:307
msgid "Use Remote Connection to Ollama"
msgstr "Remote-Verbindung zu Ollama verwenden"

#: src/window.ui:317
msgid "Run Alpaca In Background"
msgstr "Alpaca im Hintergrund ausführen"

#: src/window.ui:323
msgid "Show Power Saver Warning"
msgstr "Energiesparmodus-Warnung anzeigen"

#: src/window.ui:334
msgid "Temperature"
msgstr "Temperatur"

#: src/window.ui:335
msgid ""
"The temperature of the model. Increasing the temperature will make the model "
"answer more creatively. (Default: 0.8)"
msgstr ""
"Die Temperatur des Modells. Eine Erhöhung der Temperatur lässt das Modell "
"kreativer antworten. (Standard: 0,8)"

#: src/window.ui:350
msgid "Seed"
msgstr "Seed"

#: src/window.ui:351
msgid ""
"Sets the random number seed to use for generation. Setting this to a "
"specific number will make the model generate the same text for the same "
"prompt. (Default: 0 (random))"
msgstr ""
"Legt den Zufallszahlenseed für die Generierung fest. Wenn dies auf eine "
"bestimmte Zahl gesetzt wird, generiert das Modell für den gleichen Prompt "
"den gleichen Text. (Standard: 0 (zufällig))"

#: src/window.ui:365
msgid "Keep Alive Time"
msgstr "Keep-Alive-Zeit"

#: src/window.ui:366
msgid ""
"Controls how long the model will stay loaded into memory following the "
"request in minutes (Default: 5)"
msgstr ""
"Steuert, wie lange das Modell nach der Anfrage in Minuten im Speicher "
"geladen bleibt (Standard: 5)"

#: src/window.ui:382
msgid "Ollama Instance"
msgstr "Ollama-Instanz"

#: src/window.ui:386
msgid "Ollama Overrides"
msgstr "Ollama-Überschreibungen"

#: src/window.ui:387
msgid ""
"Manage the arguments used on Ollama, any changes on this page only applies "
"to the integrated instance, the instance will restart if you make changes."
msgstr ""
"Verwalten Sie die in Ollama verwendeten Argumente, Änderungen auf dieser "
"Seite gelten nur für die integrierte Instanz, die Instanz wird bei "
"Änderungen neu gestartet."

#: src/window.ui:459
msgid "Idle Timer"
msgstr "Leerlauf-Timer"

#: src/window.ui:460
msgid ""
"Number of minutes the instance should remain idle before it is shut down (0 "
"means it won't be shut down)"
msgstr ""
"Anzahl der Minuten, die die Instanz im Leerlauf bleiben soll, bevor sie "
"heruntergefahren wird (0 bedeutet, dass sie nicht abgeschaltet wird)"

#: src/window.ui:494 src/window.ui:517
msgid "Manage models dialog"
msgstr "Dialog 'Modelle verwalten'"

#: src/window.ui:496
msgid "Terminal"
msgstr "Terminal"

#: src/window.ui:538 src/window.ui:705
msgid "Create Model"
msgstr "Modell erstellen"

#: src/window.ui:545
msgid "Search Model"
msgstr "Modell suchen"

#: src/window.ui:554
msgid "Model search bar"
msgstr "Modell-Suchleiste"

#: src/window.ui:561 src/window.ui:563
msgid "Search models"
msgstr "Modelle suchen"

#: src/window.ui:586
msgid "No Models Found"
msgstr "Keine Modelle gefunden"

#: src/window.ui:587
msgid "Try a different search or pull an unlisted model from it's name"
msgstr ""
"Versuchen Sie eine andere Suche oder rufen Sie ein nicht gelistetes Modell "
"über seinen Namen ab"

#: src/window.ui:595
msgid "Pull Model From Name"
msgstr "Modell nach Namen abrufen"

#: src/window.ui:645
msgid ""
"By downloading this model you accept the license agreement available on the "
"model's website."
msgstr ""
"Durch das Herunterladen dieses Modells akzeptieren Sie die auf der Website "
"des Modells verfügbare Lizenzvereinbarung."

#: src/window.ui:670
msgid "Model Details"
msgstr "Modelldetails"

#: src/window.ui:733
msgid "Base"
msgstr "Basis"

#: src/window.ui:752
msgid "Name"
msgstr "Name"

#: src/window.ui:758
msgid "Context"
msgstr "Kontext"

#: src/window.ui:805
msgid ""
"Some models require a modelfile, Alpaca fills FROM and SYSTEM (context) "
"instructions automatically. Please visit the model's website or Ollama "
"documentation for more information if you're unsure."
msgstr ""
"Einige Modelle benötigen eine Modelldatei, Alpaca füllt FROM- und SYSTEM-"
"Anweisungen (Kontext) automatisch aus. Bitte besuchen Sie die Website des "
"Modells oder die Ollama-Dokumentation für weitere Informationen, wenn Sie "
"unsicher sind."

#: src/window.ui:821
msgid "Create"
msgstr "Erstellen"

#: src/window.ui:844
msgid "File preview dialog"
msgstr "Dateivorschau-Dialog"

#: src/window.ui:856
msgid "Open With Default App"
msgstr "Mit Standard-App öffnen"

#: src/window.ui:864
msgid "Remove Attachment"
msgstr "Anhang entfernen"

#: src/window.ui:926
msgid "Previous"
msgstr "Zurück"

#: src/window.ui:969
msgid "Welcome to Alpaca"
msgstr "Willkommen bei Alpaca"

#: src/window.ui:970
msgid "Powered by Ollama"
msgstr "Betrieben von Ollama"

#: src/window.ui:973
msgid "Ollama Website"
msgstr "Ollama-Website"

#: src/window.ui:990
msgid ""
"Alpaca and its developers are not liable for any damages to devices or "
"software resulting from the execution of code generated by an AI model. "
"Please exercise caution and review the code carefully before running it."
msgstr ""
"Alpaca und seine Entwickler haften nicht für Schäden an Geräten oder "
"Software, die durch die Ausführung von Code entstehen, der von einem KI-"
"Modell generiert wurde. Bitte seien Sie vorsichtig und überprüfen Sie den "
"Code sorgfältig, bevor Sie ihn ausführen."

#: src/window.ui:1003
msgid "Import Chat"
msgstr "Chat importieren"

#: src/window.ui:1017
msgid "Keyboard Shortcuts"
msgstr "Tastenkombinationen"

#: src/window.ui:1021
msgid "About Alpaca"
msgstr "Über Alpaca"

#: src/window.ui:1029 src/window.ui:1055
msgid "Rename Chat"
msgstr "Chat umbenennen"

#: src/window.ui:1033 src/window.ui:1059
msgid "Duplicate Chat"
msgstr "Chat duplizieren"

#: src/window.ui:1037 src/window.ui:1063
msgid "Export Chat"
msgstr "Chat exportieren"

#: src/window.ui:1041
msgid "Clear Chat"
msgstr "Chat leeren"

#: src/window.ui:1047 src/window.ui:1069
msgid "Delete Chat"
msgstr "Chat löschen"

#: src/window.ui:1077
msgid "From Existing Model"
msgstr "Aus bestehendem Modell"

#: src/window.ui:1081
msgid "From GGUF File"
msgstr "Aus GGUF-Datei"

#: src/window.ui:1085
msgid "From Name"
msgstr "Nach Namen"

#: src/window.ui:1131
msgid "Close application"
msgstr "Anwendung schließen"

#: src/window.ui:1137
msgid "Import chat"
msgstr "Chat importieren"

#: src/window.ui:1143
msgid "Clear chat"
msgstr "Chat leeren"

#: src/window.ui:1155
msgid "New chat"
msgstr "Neuer Chat"

#: src/window.ui:1161
msgid "Show shortcuts window"
msgstr "Tastenkombinationen-Fenster anzeigen"

#: src/window.ui:1167
msgid "Manage models"
msgstr "Modelle verwalten"

#: src/window.ui:1173
msgid "Toggle sidebar"
msgstr "Seitenleiste ein-/ausblenden"

#: src/window.ui:1179
msgid "Rename chat"
msgstr "Chat umbenennen"

#: src/window.ui:1186
msgid "Editor"
msgstr "Editor"

#: src/window.ui:1190
msgid "Copy"
msgstr "Kopieren"

#: src/window.ui:1196
msgid "Paste"
msgstr "Einfügen"

#: src/window.ui:1202
msgid "Insert new line"
msgstr "Neue Zeile einfügen"

#: src/generic_actions.py:57
msgid "An error occurred while extracting text from the website"
msgstr "Beim Extrahieren von Text von der Website ist ein Fehler aufgetreten"

#: src/custom_widgets/chat_widget.py:120
msgid "Send prompt: '{}'"
msgstr "Sende Prompt: '{}'"

#: src/custom_widgets/chat_widget.py:126 src/custom_widgets/chat_widget.py:127
msgid "Open Model Manager"
msgstr "Modell-Manager öffnen"

#: src/custom_widgets/chat_widget.py:136
msgid "Try one of these prompts"
msgstr "Probieren Sie einen dieser Prompts aus"

#: src/custom_widgets/chat_widget.py:136
msgid ""
"It looks like you don't have any models downloaded yet. Download models to "
"get started!"
msgstr ""
"Es sieht so aus, als hätten Sie noch keine Modelle heruntergeladen. Laden "
"Sie Modelle herunter, um loszulegen!"

#: src/custom_widgets/chat_widget.py:197
msgid "Regenerate Response"
msgstr "Antwort regenerieren"

#: src/custom_widgets/chat_widget.py:354
msgid "Copy of {}"
msgstr "Kopie von {}"

#: src/custom_widgets/chat_widget.py:366
msgid "Chat exported successfully"
msgstr "Chatverlauf erfolgreich exportiert"

#: src/custom_widgets/chat_widget.py:436
msgid "Chat imported successfully"
msgstr "Chatverlauf erfolgreich importiert"

#: src/custom_widgets/message_widget.py:53
msgid "Save Message"
msgstr "Nachricht speichern"

#: src/custom_widgets/message_widget.py:87
msgid "Message edited successfully"
msgstr "Nachricht erfolgreich editiert"

#: src/custom_widgets/message_widget.py:112
msgid "Response message"
msgstr "Antwortnachricht"

#: src/custom_widgets/message_widget.py:112
msgid "User message"
msgstr "Benutzernachricht"

#: src/custom_widgets/message_widget.py:154
msgid "{}Code Block"
msgstr "{}Code-Block"

#: src/custom_widgets/message_widget.py:157
msgid "Code Block"
msgstr "Code-Block"

#: src/custom_widgets/message_widget.py:158
#: src/custom_widgets/message_widget.py:376
msgid "Copy Message"
msgstr "Kopiere Nachricht"

#: src/custom_widgets/message_widget.py:162
#: src/custom_widgets/message_widget.py:184
msgid "Run Script"
msgstr "Skript ausführen"

#: src/custom_widgets/message_widget.py:177
msgid "Code copied to the clipboard"
msgstr "Code in die Zwischenablage kopiert"

#: src/custom_widgets/message_widget.py:185
msgid ""
"Make sure you understand what this script does before running it, Alpaca is "
"not responsible for any damages to your device or data"
msgstr ""
"Stellen Sie sicher, dass Sie verstehen, was dieses Skript tut, bevor Sie es "
"ausführen. Alpaca ist nicht verantwortlich für Schäden an Ihrem Gerät oder "
"Ihren Daten"

#: src/custom_widgets/message_widget.py:187
msgid "Execute"
msgstr "Ausführen"

#: src/custom_widgets/message_widget.py:270
#: src/custom_widgets/message_widget.py:272
msgid "Image"
msgstr "Bild"

#: src/custom_widgets/message_widget.py:280
#: src/custom_widgets/message_widget.py:296
msgid "Missing Image"
msgstr "Fehlendes Bild"

#: src/custom_widgets/message_widget.py:298
msgid "Missing image"
msgstr "Fehlendes Bild"

#: src/custom_widgets/message_widget.py:368
msgid "Remove Message"
msgstr "Nachricht entfernen"

#: src/custom_widgets/message_widget.py:384
msgid "Regenerate Message"
msgstr "Nachricht regenerieren"

#: src/custom_widgets/message_widget.py:391
msgid "Edit Message"
msgstr "Nachricht bearbeiten"

#: src/custom_widgets/message_widget.py:413
msgid "Message copied to the clipboard"
msgstr "Nachricht in die Zwischenablage kopiert"

#: src/custom_widgets/message_widget.py:437
msgid "Message cannot be regenerated while receiving a response"
msgstr ""
"Nachricht kann nicht regeneriert werden, während eine Antwort empfangen wird"

#: src/custom_widgets/model_widget.py:181
msgid "Stop Pulling '{}'"
msgstr "Abrufen von '{}' stoppen"

#: src/custom_widgets/model_widget.py:184
msgid "Stop Download?"
msgstr "Download stoppen?"

#: src/custom_widgets/model_widget.py:185
msgid "Are you sure you want to stop pulling '{}'?"
msgstr "Sind Sie sicher, dass Sie das Abrufen von '{}' stoppen möchten?"

#: src/custom_widgets/model_widget.py:187
msgid "Stop"
msgstr "Stoppen"

#: src/custom_widgets/model_widget.py:315
msgid "Details"
msgstr "Details"

#: src/custom_widgets/model_widget.py:325
msgid "Remove '{}'"
msgstr "'{}' entfernen"

#: src/custom_widgets/model_widget.py:329
msgid "Delete Model?"
msgstr "Modell löschen?"

#: src/custom_widgets/model_widget.py:362
msgid "Create Model Based on '{}'"
msgstr "Modell basierend auf '{}' erstellen"

#: src/custom_widgets/model_widget.py:378
msgid "Modified At"
msgstr "Geändert am"

#: src/custom_widgets/model_widget.py:379
msgid "Parent Model"
msgstr "Übergeordnetes Modell"

#: src/custom_widgets/model_widget.py:380
msgid "Format"
msgstr "Format"

#: src/custom_widgets/model_widget.py:381
msgid "Family"
msgstr "Familie"

#: src/custom_widgets/model_widget.py:382
msgid "Parameter Size"
msgstr "Parametergröße"

#: src/custom_widgets/model_widget.py:383
msgid "Quantization Level"
msgstr "Quantisierungsstufe"

#: src/custom_widgets/model_widget.py:449
msgid "Image Recognition"
msgstr "Bilderkennung"

#: src/custom_widgets/model_widget.py:475
msgid "Enter download menu for {}"
msgstr "Download-Menü für {} aufrufen"

#: src/custom_widgets/model_widget.py:517
msgid "Download {}:{}"
msgstr "{}:{} herunterladen"

#: src/custom_widgets/model_widget.py:584
msgid "Model deleted successfully"
msgstr "Modell erfolgreich gelöscht"

#: src/custom_widgets/model_widget.py:658
msgid "Task Complete"
msgstr "Aufgabe abgeschlossen"

#: src/custom_widgets/model_widget.py:658
#: src/custom_widgets/model_widget.py:659
msgid "Model '{}' pulled successfully."
msgstr "Modell '{}' erfolgreich abgerufen."

#: src/custom_widgets/model_widget.py:662
#: src/custom_widgets/model_widget.py:665
msgid "Pull Model Error"
msgstr "Fehler beim Abrufen des Modells"

#: src/custom_widgets/model_widget.py:662
msgid "Failed to pull model '{}': {}"
msgstr "Abrufen des Modells '{}' fehlgeschlagen: {}"

#: src/custom_widgets/model_widget.py:663
msgid "Error pulling '{}': {}"
msgstr "Fehler beim Abrufen von '{}': {}"

#: src/custom_widgets/model_widget.py:665
msgid "Failed to pull model '{}' due to network error."
msgstr ""
"Abrufen des Modells '{}' aufgrund eines Netzwerkfehlers fehlgeschlagen."

#: src/custom_widgets/model_widget.py:666
msgid "Error pulling '{}'"
msgstr "Fehler beim Abrufen von '{}'"

#: src/custom_widgets/dialog_widget.py:134
#: src/custom_widgets/dialog_widget.py:146
#: src/custom_widgets/dialog_widget.py:158
msgid "Accept"
msgstr "Akzeptieren"

#: src/custom_widgets/terminal_widget.py:64
msgid "Setting up Python environment..."
msgstr "Python-Umgebung wird eingerichtet..."

#: src/custom_widgets/terminal_widget.py:75
msgid "Script exited"
msgstr "Skript beendet"

#: src/custom_widgets/terminal_widget.py:86
msgid "The script is contained inside Flatpak"
msgstr "Das Skript ist innerhalb von Flatpak enthalten"

#~ msgid "Select a Model"
|
||
#~ msgstr "Wähle ein Modell"
|
||
|
||
#~ msgid "Chat cannot be cleared while receiving a message"
|
||
#~ msgstr ""
|
||
#~ "Chat kann während des Empfangs einer Nachricht nicht gelöscht werden"
|
||
|
||
#~ msgid "Create Chat?"
|
||
#~ msgstr "Chat erstellen?"
|
||
|
||
#~ msgid "Enter name for new chat"
|
||
#~ msgstr "Namen für neuen Chat eingeben"
|
||
|
||
#~ msgid "Use local instance"
|
||
#~ msgstr "Lokale Instanz verwenden"
|
||
|
||
#~ msgid "An error occurred while creating the model"
|
||
#~ msgstr "Beim Erstellen des Modells ist ein Fehler aufgetreten"
|
||
|
||
#~ msgid "URL of Remote Instance"
|
||
#~ msgstr "URL der Remote-Instanz"
|
||
|
||
#~ msgid ""
|
||
#~ "Google Gemma 2 is a high-performing and efficient model by now available "
|
||
#~ "in three sizes: 2B, 9B, and 27B."
|
||
#~ msgstr ""
|
||
#~ "Google Gemma ist ein leistungsfähiges und effizientes Modell, dass ab "
|
||
#~ "sofort in drei Größen verfügbar ist: 2B, 9B und 27B."
|
||
|
||
#~ msgid "Loading instance"
|
||
#~ msgstr "Lade Instanz"
|
||
|
||
#~ msgid "Applying user preferences"
|
||
#~ msgstr "User-Vorgaben anwenden"
|
||
|
||
#~ msgid "Updating list of local models"
|
||
#~ msgstr "Liste der lokalen Modelle aktualisieren"
|
||
|
||
#~ msgid "Updating list of available models"
|
||
#~ msgstr "Liste der verfügaren Modelle aktualisieren"
|
||
|
||
#~ msgid "Loading chats"
|
||
#~ msgstr "Chats laden"
|
||
|
||
#~ msgid "Loading Alpaca dialog"
|
||
#~ msgstr "Laden des Alpaca Dialogs"
|
||
|
||
#~ msgid "Loading Alpaca..."
|
||
#~ msgstr "Lade Alpaca..."
|
||
|
||
#~ msgid ""
|
||
#~ "A lightweight AI model with 3.8 billion parameters with performance "
|
||
#~ "overtaking similarly and larger sized models. "
|
||
#~ msgstr ""
|
||
#~ "Ein leichtgewichtiges KI-Modell mit 3,8 Milliarden Parametern, dessen "
|
||
#~ "Leistung , das ähnliche und größere Modelle übertrifft. "
|
||
|
||
#~ msgid "Fixed generated titles having '\"S' for some reason"
|
||
#~ msgstr "Generierte Titel haben aus irgendeinem Grund '\"S' - behoben"
|
||
|
||
#~ msgid "Fixed 'code blocks shouldnt be editable'"
|
||
#~ msgstr "Behoben: 'Codeblöcke sollten nicht editierbar sein'"
|
||
|
||
#~ msgid "Failed to connect to server"
|
||
#~ msgstr "Verbindung zum Server fehlgeschlagen"
|
||
|
||
#~ msgid "Stop Creating '{}'"
|
||
#~ msgstr "Erstellung von '{}' stoppen"
|
||
|
||
#~ msgid "Google Gemma 2 is now available in 2 sizes, 9B and 27B."
|
||
#~ msgstr "Google Gemma 2 ist jetzt in 2 Größen verfügbar, 9B und 27B."
|
||
|
||
#~ msgid ""
|
||
#~ "Codestral is Mistral AI's first-ever code model designed for code "
|
||
#~ "generation tasks."
|
||
#~ msgstr ""
|
||
#~ "Codestral ist das allererste Codemodell von Mistral AI, das für "
|
||
#~ "Codegenerierungsaufgaben entwickelt wurde."
|
||
|
||
#~ msgid "Are you sure you want to stop pulling '{} ({})'?"
|
||
#~ msgstr ""
|
||
#~ "Sind Sie sicher, dass Sie das Abrufen von '{} ({})' stoppen möchten?"
|