French updated (#353)

Louis Chauvet-Villaret 2024-10-17 22:55:24 +02:00 committed by GitHub
parent c3b105c30b
commit 91c54a4565
GPG Key ID: B5690EEEBB952194

po/fr.po (154 lines changed)

@@ -1,22 +1,22 @@
 # French translation for Alpaca package.
 # Copyright (C) 2024 Jeffry Samuel Eduarte Rojas
 # This file is distributed under the same license as the Alpaca package.
-# Louis Chauvet-Villaret <louischauvet0@gmail.com>, 2024.
+# Louis Chauvet-Villaret <louis@revuejazz.fr>, 2024.
 #
 msgid ""
 msgstr ""
 "Project-Id-Version: 2.0.4\n"
 "Report-Msgid-Bugs-To: \n"
 "POT-Creation-Date: 2024-10-15 21:46-0600\n"
-"PO-Revision-Date: 2024-09-21 11:43+0200\n"
-"Last-Translator: Louis Chauvet-Villaret <louischauvet0@gmail.com>\n"
+"PO-Revision-Date: 2024-10-16 23:14+0200\n"
+"Last-Translator: Louis Chauvet-Villaret <louis@revuejazz.fr>\n"
 "Language-Team: French\n"
 "Language: fr\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 "Plural-Forms: nplurals=2; plural=(n > 1);\n"
-"X-Generator: Gtranslator 46.1\n"
+"X-Generator: Gtranslator 47.0\n"

 #: data/com.jeffser.Alpaca.desktop.in:3
 #: data/com.jeffser.Alpaca.metainfo.xml.in:7

@@ -114,7 +114,7 @@ msgstr "Une discussion montrant de la coloration syntaxique de code"
 #: data/com.jeffser.Alpaca.metainfo.xml.in:66
 msgid "A Python script running inside integrated terminal"
-msgstr ""
+msgstr "Un script Python lancé dans le terminal intégré"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:70
 msgid "A conversation involving a YouTube video transcript"

@@ -143,33 +143,36 @@ msgstr "Nouveautés"
 #: data/com.jeffser.Alpaca.metainfo.xml.in:90
 msgid "Details page for models"
-msgstr ""
+msgstr "Ajouté : "

 #: data/com.jeffser.Alpaca.metainfo.xml.in:91
 msgid ""
 "Model selector gets replaced with 'manage models' button when there are no "
 "models downloaded"
 msgstr ""
+"Ajouté : sélecteur de modèle remplacé par le bouton 'gestionnaire de modèle' "
+"lorsqu'il y a aucun modèle de téléchargé"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:92
 msgid "Added warning when model is too big for the device"
 msgstr ""
+"Ajouté : avertissement lorsque le modèle est trop lourd pour l'appareil"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:93
 msgid "Added AMD GPU indicator in preferences"
-msgstr ""
+msgstr "Ajouté : indicateur de GPU AMD dans les paramètres"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:102
 msgid "Better system for handling dialogs"
-msgstr ""
+msgstr "Changé : meilleur système de prise en charge des boites de dialogues"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:103
 msgid "Better system for handling instance switching"
-msgstr ""
+msgstr "Changé : meilleur système de prise en charge du changement d'instance"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:104
 msgid "Remote connection dialog"
-msgstr ""
+msgstr "Ajouté : boite de dialogue de connexion distante"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:106
 #: data/com.jeffser.Alpaca.metainfo.xml.in:120

@@ -194,90 +197,102 @@ msgstr "Correctifs"
 #: data/com.jeffser.Alpaca.metainfo.xml.in:108
 msgid "Fixed: Models get duplicated when switching remote and local instance"
 msgstr ""
+"Résolu : les modèles se voyaient doublés lors du changement entre l'instance "
+"distante vers l'instance locale"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:109
 msgid "Better internal instance manager"
-msgstr ""
+msgstr "Changé : meilleur gestionnaire de l'instance interne"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:118
 msgid "Added 'Cancel' and 'Save' buttons when editing a message"
 msgstr ""
+"Ajouté : bouton 'Annuler' et 'Sauvegarder' lors de l'édition d'un message"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:122
 msgid "Better handling of image recognition"
-msgstr ""
+msgstr "Changé : meilleure prise en charge de la reconnaissance d'image"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:123
 msgid "Remove unused files when canceling a model download"
 msgstr ""
+"Résolu : suppression des fichiers inutilisés lors de l'arrêt du "
+"téléchargement d'un modèle"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:124
 msgid "Better message blocks rendering"
-msgstr ""
+msgstr "Changé : meilleur rendu de l'arrêt de génération de message"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:133
 msgid "Run bash and python scripts straight from chat"
 msgstr ""
+"Ajouté : exécutez des scripts Python et bash directement depuis la discussion"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:134
 msgid "Updated Ollama to 0.3.12"
-msgstr ""
+msgstr "Changé : Ollama mis-à-jour vers la version 0.3.12"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:135
 msgid "New models!"
-msgstr ""
+msgstr "Ajouté : nouveaux modèles"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:139
 msgid "Fixed and made faster the launch sequence"
-msgstr ""
+msgstr "Résolu : séquence de lancement plus rapide"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:140
 msgid "Better detection of code blocks in messages"
-msgstr ""
+msgstr "Résolu : meilleur détection des blocs de code dans les messages"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:141
 msgid "Fixed app not loading in certain setups with Nvidia GPUs"
 msgstr ""
+"Résolu : l'application ne se lançait pas dans certaines configuration avec "
+"des GPU Nvidia"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:150
 msgid ""
 "Fixed message notification sometimes crashing text rendering because of them "
 "running on different threads"
 msgstr ""
+"Résolu : la notification de message plantait sur le rendu du texte parce "
+"qu'elle fonctionnait sur un thread différent"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:159
 msgid "Fixed message generation sometimes failing"
-msgstr ""
+msgstr "Résolu : la génération de messages plantait parfois"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:168
 msgid "Sidebar resizes with the window"
-msgstr ""
+msgstr "Changé : la barre latérale se redimensionne avec la fenêtre"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:169
 msgid "New welcome dialog"
-msgstr ""
+msgstr "Ajouté : nouvelle boite de dialogue de bienvenue"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:170
 msgid "Message search"
-msgstr ""
+msgstr "Ajouté : recherche de message"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:171
 msgid "Updated Ollama to v0.3.11"
-msgstr ""
+msgstr "Changé : Ollama mis-à-jour vers la version 0.3.11"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:172
 msgid "A lot of new models provided by Ollama repository"
-msgstr ""
+msgstr "Ajouté : beaucoup de nouveau modèles du dépôt d'Ollama"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:176
 msgid ""
 "Fixed text inside model manager when the accessibility option 'large text' "
 "is on"
 msgstr ""
+"Résolu : texte à l'intérieur du gestionnaire de modèle quand l'option "
+"daccessibilité 'grand texte' est activée"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:177
 msgid "Fixed image recognition on unsupported models"
-msgstr ""
+msgstr "Résolu : reconnaissance d'image sur certains modèles"

 #: data/com.jeffser.Alpaca.metainfo.xml.in:186
 msgid "Fixed spinner not hiding if the back end fails"

@@ -1567,7 +1582,7 @@ msgstr "Fermer Alpaca"
 #: src/window.py:634
 msgid "Use Local Instance"
-msgstr ""
+msgstr "Utiliser l'instance locale"

 #: src/window.py:635 src/window.py:840
 msgid "Connect"

@@ -1575,7 +1590,7 @@ msgstr "Connecter"
 #: src/window.py:638 src/window.py:843
 msgid "Server URL"
-msgstr ""
+msgstr "URL du serveur"

 #: src/window.py:639 src/window.py:844
 msgid "Bearer Token (Optional)"

@@ -1621,7 +1636,7 @@ msgstr "Renommer '{}'"
 #: src/window.py:736
 msgid "Chat name"
-msgstr ""
+msgstr "Nom de la discussion"

 #: src/window.py:737
 msgid "Rename"

@@ -1675,11 +1690,11 @@ msgstr "Annuler"
 #: src/window.py:847
 msgid "Connect Remote Instance"
-msgstr ""
+msgstr "Connecter l'instance distante"

 #: src/window.py:848
 msgid "Enter instance information to continue"
-msgstr ""
+msgstr "Entrer les informations de l'instance distante pour continuer"

 #: src/window.py:923
 msgid "Clear Chat?"

@@ -1728,6 +1743,7 @@ msgstr "Suprimmer"
 #: src/available_models_descriptions.py:2
 msgid "Meta's Llama 3.2 goes small with 1B and 3B models."
 msgstr ""
+"Meta's Llama 3.2 se décline en petits modèles avec 1B et 3B paramètres."

 #: src/available_models_descriptions.py:3
 msgid ""

@@ -1742,6 +1758,8 @@ msgid ""
 "Google Gemma 2 is a high-performing and efficient model available in three "
 "sizes: 2B, 9B, and 27B."
 msgstr ""
+"Google Gemma 2 est un modèle performant et efficace disponible en trois "
+"tailles : 2B, 9B et 27B."

 #: src/available_models_descriptions.py:5
 msgid ""

@@ -1749,6 +1767,9 @@ msgid ""
 "encompassing up to 18 trillion tokens. The model supports up to 128K tokens "
 "and has multilingual support."
 msgstr ""
+"Les modèles Qwen2.5 sont préentraînés sur le dernier jeu de données à grande "
+"échelle d'Alibaba, couvrant jusqu'à 18 000 milliards de tokens. Le modèle "
+"supporte jusqu'à 128K tokens et propose un support multilingue."

 #: src/available_models_descriptions.py:6
 msgid ""

@@ -1763,12 +1784,16 @@ msgid ""
 "A commercial-friendly small language model by NVIDIA optimized for roleplay, "
 "RAG QA, and function calling."
 msgstr ""
+"Un petit modèle de langage à usage commercial proposé par NVIDIA, optimisé "
+"pour les jeux de rôle, la RAG QA et les appels de fonctions."

 #: src/available_models_descriptions.py:8
 msgid ""
 "Mistral Small is a lightweight model designed for cost-effective use in "
 "tasks like translation and summarization."
 msgstr ""
+"Mistral Small est un modèle léger conçu pour un usage rentable dans des "
+"tâches comme la traduction et la synthèse."

 #: src/available_models_descriptions.py:9
 msgid ""

@@ -1985,6 +2010,9 @@ msgid ""
 "The latest series of Code-Specific Qwen models, with significant "
 "improvements in code generation, code reasoning, and code fixing."
 msgstr ""
+"La dernière série de modèles Qwen spécifiques au code, avec des "
+"améliorations significatives dans la génération, le raisonnement et la "
+"correction de code."

 #: src/available_models_descriptions.py:38
 msgid ""

@@ -2203,6 +2231,9 @@ msgid ""
 "tuning that teaches a LLM to detect mistakes in its reasoning and correct "
 "course."
 msgstr ""
+"Un modèle performant entraîné avec une nouvelle technique appelée "
+"\"Reflection-tuning\" qui apprend à un LLM à détecter les erreurs dans son "
+"raisonnement et à corriger son cours."

 #: src/available_models_descriptions.py:67
 msgid ""

@@ -2351,6 +2382,9 @@ msgid ""
 "Yi-Coder is a series of open-source code language models that delivers state-"
 "of-the-art coding performance with fewer than 10 billion parameters."
 msgstr ""
+"Yi-Coder est une série de modèles de langage de code open-source offrant des "
+"performances de codage à la pointe de la technologie avec moins de 10 "
+"milliards de paramètres."

 #: src/available_models_descriptions.py:88
 msgid "A new small LLaVA model fine-tuned from Phi 3 Mini."

@@ -2487,6 +2521,8 @@ msgid ""
 "A series of multimodal LLMs (MLLMs) designed for vision-language "
 "understanding."
 msgstr ""
+"Une série de modèles multimodaux (MLLMs) conçus pour la compréhension vision-"
+"langage."

 #: src/available_models_descriptions.py:107
 msgid ""

@@ -2550,6 +2586,8 @@ msgid ""
 "Solar Pro Preview: an advanced large language model (LLM) with 22 billion "
 "parameters designed to fit into a single GPU"
 msgstr ""
+"Solar Pro Preview : un modèle de langage de grande taille (LLM) avancé avec "
+"22 milliards de paramètres conçu pour tenir sur un seul GPU."

 #: src/available_models_descriptions.py:116
 msgid ""

@@ -2580,6 +2618,8 @@ msgid ""
 "A series of models that convert HTML content to Markdown content, which is "
 "useful for content conversion tasks."
 msgstr ""
+"Une série de modèles qui convertissent le contenu HTML en contenu Markdown, "
+"utile pour les tâches de conversion de contenu."

 #: src/available_models_descriptions.py:120
 msgid "Embedding model from BAAI mapping texts to vectors."

@@ -2590,10 +2630,13 @@ msgid ""
 "An upgraded version of DeekSeek-V2 that integrates the general and coding "
 "abilities of both DeepSeek-V2-Chat and DeepSeek-Coder-V2-Instruct."
 msgstr ""
+"Une version améliorée de DeepSeek-V2 qui intègre les capacités générales et "
+"de codage de DeepSeek-V2-Chat et DeepSeek-Coder-V2-Instruct."

 #: src/available_models_descriptions.py:122
 msgid "A state-of-the-art fact-checking model developed by Bespoke Labs."
 msgstr ""
+"Un modèle de vérification des faits de pointe développé par Bespoke Labs."

 #: src/available_models_descriptions.py:123
 msgid ""

@@ -2605,23 +2648,24 @@ msgstr ""
 #: src/connection_handler.py:14
 msgid "Alpaca Support"
-msgstr ""
+msgstr "Support d'Alpaca"

 #: src/connection_handler.py:25
 msgid "Model request too large for system"
-msgstr ""
+msgstr "Modèle demandé trop lourd pour le système"

 #: src/connection_handler.py:28
 msgid "AMD GPU detected but the extension is missing, Ollama will use CPU."
 msgstr ""
+"GPU AMD détecté mais l'extension est manquante. Ollama utilisera le CPU."

 #: src/connection_handler.py:30
 msgid "AMD GPU detected but ROCm is missing, Ollama will use CPU."
-msgstr ""
+msgstr "GPU AMD détecté mais ROCm est manquant. Ollama utilisera le CPU."

 #: src/connection_handler.py:33
 msgid "Using AMD GPU type '{}'"
-msgstr ""
+msgstr "Utilisation de GPU AMD type '{}'"

 #: src/connection_handler.py:94
 msgid "Ollama instance was shut down due to inactivity"

@@ -2629,11 +2673,11 @@ msgstr "Ollama a été désactivée faute d'activitée"
 #: src/connection_handler.py:132
 msgid "Integrated Ollama instance is running"
-msgstr ""
+msgstr "L'instance intégrée d'Ollama est active"

 #: src/connection_handler.py:148 src/window.ui:479
 msgid "Integrated Ollama instance is not running"
-msgstr ""
+msgstr "L'instance intégrée d'Ollama est inactive"

 #: src/window.ui:42
 msgid "Menu"

@@ -2645,7 +2689,7 @@ msgstr "Basculer la barre latérale"
 #: src/window.ui:71
 msgid "Search Messages"
-msgstr ""
+msgstr "Chercher des messages"

 #: src/window.ui:93
 msgid "Loading Instance"

@@ -2662,11 +2706,11 @@ msgstr "Menu de la discussion"
 #: src/window.ui:127
 msgid "Message search bar"
-msgstr ""
+msgstr "Barre de recherche des messages"

 #: src/window.ui:134 src/window.ui:136
 msgid "Search messages"
-msgstr ""
+msgstr "Chercher un message"

 #: src/window.ui:150
 msgid ""

@@ -2777,7 +2821,7 @@ msgstr "Boite de dialogue "
 #: src/window.ui:496
 msgid "Terminal"
-msgstr ""
+msgstr "Terminal"

 #: src/window.ui:538 src/window.ui:705
 msgid "Create Model"

@@ -2819,7 +2863,7 @@ msgstr ""
 #: src/window.ui:670
 msgid "Model Details"
-msgstr ""
+msgstr "Détails du modèle"

 #: src/window.ui:733
 msgid "Base"

@@ -3021,7 +3065,7 @@ msgstr "Discussion importée avec succès"
 #: src/custom_widgets/message_widget.py:53
 msgid "Save Message"
-msgstr ""
+msgstr "Sauvegarder le message"

 #: src/custom_widgets/message_widget.py:87
 msgid "Message edited successfully"

@@ -3051,7 +3095,7 @@ msgstr "Copier le message"
 #: src/custom_widgets/message_widget.py:162
 #: src/custom_widgets/message_widget.py:184
 msgid "Run Script"
-msgstr ""
+msgstr "Lancer le script"

 #: src/custom_widgets/message_widget.py:177
 msgid "Code copied to the clipboard"

@@ -3062,10 +3106,12 @@ msgid ""
 "Make sure you understand what this script does before running it, Alpaca is "
 "not responsible for any damages to your device or data"
 msgstr ""
+"Assurez-vous de ce que le script effectue avant de le lancer. Alpaca n'est "
+"pas responsable des dommages causés à votre appareil ou à vos données.\t"

 #: src/custom_widgets/message_widget.py:187
 msgid "Execute"
-msgstr ""
+msgstr "Exécuter"

 #: src/custom_widgets/message_widget.py:270
 #: src/custom_widgets/message_widget.py:272

@@ -3120,7 +3166,7 @@ msgstr "Arrêter"
 #: src/custom_widgets/model_widget.py:315
 msgid "Details"
-msgstr ""
+msgstr "Détails"

 #: src/custom_widgets/model_widget.py:325
 msgid "Remove '{}'"

@@ -3132,31 +3178,31 @@ msgstr "Supprimer le modèle ?"
 #: src/custom_widgets/model_widget.py:362
 msgid "Create Model Based on '{}'"
-msgstr ""
+msgstr "Créer un modèle basé sur '{}'"

 #: src/custom_widgets/model_widget.py:378
 msgid "Modified At"
-msgstr ""
+msgstr "Modifié à"

 #: src/custom_widgets/model_widget.py:379
 msgid "Parent Model"
-msgstr ""
+msgstr "Modèle parent"

 #: src/custom_widgets/model_widget.py:380
 msgid "Format"
-msgstr ""
+msgstr "Format"

 #: src/custom_widgets/model_widget.py:381
 msgid "Family"
-msgstr ""
+msgstr "Famille"

 #: src/custom_widgets/model_widget.py:382
 msgid "Parameter Size"
-msgstr ""
+msgstr "Taille de paramètre"

 #: src/custom_widgets/model_widget.py:383
 msgid "Quantization Level"
-msgstr ""
+msgstr "Niveau de quantification"

 #: src/custom_widgets/model_widget.py:449
 msgid "Image Recognition"

@@ -3212,15 +3258,15 @@ msgstr "Accepter"
 #: src/custom_widgets/terminal_widget.py:64
 msgid "Setting up Python environment..."
-msgstr ""
+msgstr "Paramétrage de l'environnement Python..."

 #: src/custom_widgets/terminal_widget.py:75
 msgid "Script exited"
-msgstr ""
+msgstr "Script terminé"

 #: src/custom_widgets/terminal_widget.py:86
 msgid "The script is contained inside Flatpak"
-msgstr ""
+msgstr "Le script est conteneurisé dans Flatpak"

 #~ msgid "Select a Model"
 #~ msgstr "Sélectionnez un modèle"