Compare commits

30 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 55a636f4d1 | |
| | 0fc8730272 | |
| | 61a2bc466e | |
| | 62b1923bf4 | |
| | 8e25376a12 | |
| | a9ab5d45a4 | |
| | ce2a2f0b93 | |
| | 9cb6b0b665 | |
| | dfc21fc0e9 | |
| | 19b089e6c6 | |
| | 02aa2734e0 | |
| | 66f9fd7231 | |
| | 1b125cb704 | |
| | 29f5d85c7b | |
| | c192a1f31c | |
| | 3b20daf807 | |
| | 760c00e8ae | |
| | 6d8d3788a6 | |
| | 98e23e0033 | |
| | 4d7aff3458 | |
| | 33e47696dc | |
| | c0f8825f83 | |
| | 7c26956cd4 | |
| | 52f02cd5d0 | |
| | 0df6b20147 | |
| | 253a2dda7d | |
| | d762a85130 | |
| | a765e8cf2e | |
| | 3e7fd1140c | |
| | a56631510d | |
@@ -22,7 +22,7 @@ Alpaca is an [Ollama](https://github.com/ollama/ollama) client where you can man
 - Import / Export chats
 - Delete / Edit messages
 - YouTube recognition (Ask questions about a YouTube video using the transcript)
-- Website recognition (Ask questions about a certain question by parsing the url)
+- Website recognition (Ask questions about a certain website by parsing the url)

 ## Screenies

 Chatting with a model | Image recognition | Code highlighting
@@ -122,16 +122,16 @@
             "sources": [
                 {
                     "type": "file",
-                    "url": "https://github.com/ollama/ollama/releases/download/v0.2.8/ollama-linux-amd64",
-                    "sha256": "7641b21e9d0822ba44e494f5ed3d3796d9e9fcdf4dbb66064f8c34c865bbec0b",
+                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.0/ollama-linux-amd64",
+                    "sha256": "b8817c34882c7ac138565836ac1995a2c61261a79315a13a0aebbfe5435da855",
                     "only-arches": [
                         "x86_64"
                     ]
                 },
                 {
                     "type": "file",
-                    "url": "https://github.com/ollama/ollama/releases/download/v0.2.8/ollama-linux-arm64",
-                    "sha256": "8ccaea237c3ef2a34d0cc00d8a89ffb1179d5c49211b6cbdf80d8d88e3f0add6",
+                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.0/ollama-linux-arm64",
+                    "sha256": "64be908749212052146f1008dd3867359c776ac1766e8d86291886f53d294d4d",
                     "only-arches": [
                         "aarch64"
                     ]
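The Flatpak manifest pins each Ollama release binary to a sha256 checksum, so every version bump has to change the URL and the hash together. A small helper along these lines (not part of the repo, just a sketch) regenerates the checksums for a new release:

```python
import hashlib
import urllib.request

def sha256_of(url: str) -> str:
    # Stream the release binary and hash it chunk by chunk,
    # so the file never has to sit fully in memory.
    digest = hashlib.sha256()
    with urllib.request.urlopen(url) as response:
        for chunk in iter(lambda: response.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

for arch in ("amd64", "arm64"):
    url = f"https://github.com/ollama/ollama/releases/download/v0.3.0/ollama-linux-{arch}"
    print(arch, sha256_of(url))
```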
@@ -70,7 +70,9 @@
       <caption>Multiple models being downloaded</caption>
     </screenshot>
   </screenshots>
-  <content_rating type="oars-1.1" />
+  <content_rating type="oars-1.1">
+    <content_attribute id="money-purchasing">mild</content_attribute>
+  </content_rating>
   <url type="bugtracker">https://github.com/Jeffser/Alpaca/issues</url>
   <url type="homepage">https://jeffser.com/alpaca/</url>
   <url type="donation">https://github.com/sponsors/Jeffser</url>
@@ -78,6 +80,51 @@
   <url type="contribute">https://github.com/Jeffser/Alpaca/discussions/154</url>
   <url type="vcs-browser">https://github.com/Jeffser/Alpaca</url>
   <releases>
+    <release version="1.0.3" date="2024-08-01">
+      <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/1.0.3</url>
+      <description>
+        <p>New</p>
+        <ul>
+          <li>Bearer Token entry on connection error dialog</li>
+          <li>Small appearance changes</li>
+          <li>Compatibility with code blocks without explicit language</li>
+          <li>Rare, optional and dismissible support dialog</li>
+        </ul>
+        <p>Fixes</p>
+        <ul>
+          <li>Date format for Simplified Chinese translation</li>
+          <li>Bug with unsupported localizations</li>
+          <li>Min height being too large to be used on mobile</li>
+          <li>Remote connection checker bug</li>
+        </ul>
+      </description>
+    </release>
+    <release version="1.0.2" date="2024-07-29">
+      <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/1.0.2</url>
+      <description>
+        <p>Fixes</p>
+        <ul>
+          <li>Models with capital letters on their tag don't work</li>
+          <li>Ollama fails to launch on some systems</li>
+          <li>YouTube transcripts are not being saved in the right TMP directory</li>
+        </ul>
+        <p>New</p>
+        <ul>
+          <li>Debug messages are now shown on the 'About Alpaca' dialog</li>
+          <li>Updated Ollama to v0.3.0 (new models)</li>
+        </ul>
+      </description>
+    </release>
+    <release version="1.0.1" date="2024-07-23">
+      <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/1.0.1</url>
+      <description>
+        <p>Fixes</p>
+        <ul>
+          <li>Models with '-' in their names didn't work properly, this is now fixed</li>
+          <li>Better connection check for Ollama</li>
+        </ul>
+      </description>
+    </release>
     <release version="1.0.0" date="2024-07-22">
       <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/1.0.0</url>
       <description>
@@ -1,5 +1,5 @@
 project('Alpaca', 'c',
-        version: '1.0.0',
+        version: '1.0.3',
         meson_version: '>= 0.62.0',
         default_options: [ 'warning_level=2', 'werror=false', ],
 )
File diff suppressed because it is too large
@@ -1,16 +1,18 @@
 descriptions = {
+    'llama3.1': _("Llama 3.1 is a new state-of-the-art model from Meta available in 8B, 70B and 405B parameter sizes."),
     'gemma2': _("Google Gemma 2 is now available in 2 sizes, 9B and 27B."),
-    'llama3': _("Meta Llama 3: The most capable openly available LLM to date"),
+    'mistral-nemo': _("A state-of-the-art 12B model with 128k context length, built by Mistral AI in collaboration with NVIDIA."),
+    'mistral-large': _("Mistral Large 2 is Mistral's new flagship model that is significantly more capable in code generation, mathematics, and reasoning with 128k context window and support for dozens of languages."),
     'qwen2': _("Qwen2 is a new series of large language models from Alibaba group"),
     'deepseek-coder-v2': _("An open-source Mixture-of-Experts code language model that achieves performance comparable to GPT4-Turbo in code-specific tasks."),
     'phi3': _("Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft."),
-    'aya': _("Aya 23, released by Cohere, is a new family of state-of-the-art, multilingual models that support 23 languages."),
     'mistral': _("The 7B model released by Mistral AI, updated to version 0.3."),
     'mixtral': _("A set of Mixture of Experts (MoE) model with open weights by Mistral AI in 8x7b and 8x22b parameter sizes."),
     'codegemma': _("CodeGemma is a collection of powerful, lightweight models that can perform a variety of coding tasks like fill-in-the-middle code completion, code generation, natural language understanding, mathematical reasoning, and instruction following."),
     'command-r': _("Command R is a Large Language Model optimized for conversational interaction and long context tasks."),
     'command-r-plus': _("Command R+ is a powerful, scalable large language model purpose-built to excel at real-world enterprise use cases."),
     'llava': _("🌋 LLaVA is a novel end-to-end trained large multimodal model that combines a vision encoder and Vicuna for general-purpose visual and language understanding. Updated to version 1.6."),
+    'llama3': _("Meta Llama 3: The most capable openly available LLM to date"),
     'gemma': _("Gemma is a family of lightweight, state-of-the-art open models built by Google DeepMind. Updated to version 1.1"),
     'qwen': _("Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from 0.5B to 110B parameters"),
     'llama2': _("Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters."),
@@ -18,49 +20,50 @@ descriptions = {
     'dolphin-mixtral': _("Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of experts models that excels at coding tasks. Created by Eric Hartford."),
     'nomic-embed-text': _("A high-performing open embedding model with a large token context window."),
     'llama2-uncensored': _("Uncensored Llama 2 model by George Sung and Jarrad Hope."),
-    'deepseek-coder': _("DeepSeek Coder is a capable coding model trained on two trillion code and natural language tokens."),
     'phi': _("Phi-2: a 2.7B language model by Microsoft Research that demonstrates outstanding reasoning and language understanding capabilities."),
+    'deepseek-coder': _("DeepSeek Coder is a capable coding model trained on two trillion code and natural language tokens."),
     'dolphin-mistral': _("The uncensored Dolphin model based on Mistral that excels at coding tasks. Updated to version 2.8."),
     'orca-mini': _("A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware."),
     'dolphin-llama3': _("Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on Llama 3 that has a variety of instruction, conversational, and coding skills."),
     'mxbai-embed-large': _("State-of-the-art large embedding model from mixedbread.ai"),
-    'mistral-openorca': _("Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset."),
     'starcoder2': _("StarCoder2 is the next generation of transparently trained open code LLMs that comes in three sizes: 3B, 7B and 15B parameters."),
-    'zephyr': _("Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants."),
+    'mistral-openorca': _("Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset."),
     'yi': _("Yi 1.5 is a high-performing, bilingual language model."),
+    'zephyr': _("Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants."),
     'llama2-chinese': _("Llama 2 based model fine tuned to improve Chinese dialogue ability."),
     'llava-llama3': _("A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several benchmarks."),
     'vicuna': _("General use chat model based on Llama and Llama 2 with 2K to 16K context sizes."),
     'nous-hermes2': _("The powerful family of models by Nous Research that excels at scientific discussion and coding tasks."),
-    'wizard-vicuna-uncensored': _("Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford."),
     'tinyllama': _("The TinyLlama project is an open endeavor to train a compact 1.1B Llama model on 3 trillion tokens."),
+    'wizard-vicuna-uncensored': _("Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford."),
     'codestral': _("Codestral is Mistral AI’s first-ever code model designed for code generation tasks."),
     'starcoder': _("StarCoder is a code generation model trained on 80+ programming languages."),
     'wizardlm2': _("State of the art large language model from Microsoft AI with improved performance on complex chat, multilingual, reasoning and agent use cases."),
     'openchat': _("A family of open-source models trained on a wide variety of data, surpassing ChatGPT on various benchmarks. Updated to version 3.5-0106."),
+    'aya': _("Aya 23, released by Cohere, is a new family of state-of-the-art, multilingual models that support 23 languages."),
     'tinydolphin': _("An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset by Eric Hartford and based on TinyLlama."),
     'openhermes': _("OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully open datasets."),
     'wizardcoder': _("State-of-the-art code generation model"),
     'stable-code': _("Stable Code 3B is a coding model with instruct and code completion variants on par with models such as Code Llama 7B that are 2.5x larger."),
     'codeqwen': _("CodeQwen1.5 is a large language model pretrained on a large amount of code data."),
-    'neural-chat': _("A fine-tuned model based on Mistral with good coverage of domain and language."),
     'wizard-math': _("Model focused on math and logic problems"),
+    'neural-chat': _("A fine-tuned model based on Mistral with good coverage of domain and language."),
     'stablelm2': _("Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model trained on multilingual data in English, Spanish, German, Italian, French, Portuguese, and Dutch."),
-    'all-minilm': _("Embedding models on very large sentence level datasets."),
     'granite-code': _("A family of open foundation models by IBM for Code Intelligence"),
+    'all-minilm': _("Embedding models on very large sentence level datasets."),
     'phind-codellama': _("Code generation model based on Code Llama."),
     'dolphincoder': _("A 7B and 15B uncensored variant of the Dolphin model family that excels at coding, based on StarCoder2."),
     'nous-hermes': _("General use models based on Llama and Llama 2 from Nous Research."),
     'sqlcoder': _("SQLCoder is a code completion model fined-tuned on StarCoder for SQL generation tasks"),
     'llama3-gradient': _("This model extends LLama-3 8B's context length from 8k to over 1m tokens."),
     'starling-lm': _("Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness."),
-    'deepseek-llm': _("An advanced language model crafted with 2 trillion bilingual tokens."),
     'yarn-llama2': _("An extension of Llama 2 that supports a context of up to 128k tokens."),
     'xwinlm': _("Conversational model based on Llama 2 that performs competitively on various benchmarks."),
+    'deepseek-llm': _("An advanced language model crafted with 2 trillion bilingual tokens."),
     'llama3-chatqa': _("A model from NVIDIA based on Llama 3 that excels at conversational question answering (QA) and retrieval-augmented generation (RAG)."),
     'orca2': _("Orca 2 is built by Microsoft research, and are a fine-tuned version of Meta's Llama 2 models. The model is designed to excel particularly in reasoning."),
-    'solar': _("A compact, yet powerful 10.7B large language model designed for single-turn conversation."),
     'wizardlm': _("General use model based on Llama 2."),
+    'solar': _("A compact, yet powerful 10.7B large language model designed for single-turn conversation."),
     'samantha-mistral': _("A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral."),
     'dolphin-phi': _("2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language model by Microsoft Research."),
     'stable-beluga': _("Llama 2 based model fine tuned on an Orca-style dataset. Originally called Free Willy."),
@@ -68,22 +71,23 @@ descriptions = {
     'bakllava': _("BakLLaVA is a multimodal model consisting of the Mistral 7B base model augmented with the LLaVA architecture."),
     'wizardlm-uncensored': _("Uncensored version of Wizard LM model"),
     'snowflake-arctic-embed': _("A suite of text embedding models by Snowflake, optimized for performance."),
-    'deepseek-v2': _("A strong, economical, and efficient Mixture-of-Experts language model."),
     'medllama2': _("Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset."),
     'yarn-mistral': _("An extension of Mistral to support context windows of 64K or 128K."),
-    'nous-hermes2-mixtral': _("The Nous Hermes 2 model from Nous Research, now trained over Mixtral."),
     'llama-pro': _("An expansion of Llama 2 that specializes in integrating both general language understanding and domain-specific knowledge, particularly in programming and mathematics."),
+    'deepseek-v2': _("A strong, economical, and efficient Mixture-of-Experts language model."),
+    'nous-hermes2-mixtral': _("The Nous Hermes 2 model from Nous Research, now trained over Mixtral."),
     'meditron': _("Open-source medical large language model adapted from Llama 2 to the medical domain."),
     'codeup': _("Great code generation model based on Llama2."),
     'nexusraven': _("Nexus Raven is a 13B instruction tuned model for function calling tasks."),
     'everythinglm': _("Uncensored Llama2 based model with support for a 16K context window."),
     'llava-phi3': _("A new small LLaVA model fine-tuned from Phi 3 Mini."),
+    'codegeex4': _("A versatile model for AI software development scenarios, including code completion."),
     'glm4': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
     'magicoder': _("🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic instruction data using OSS-Instruct, a novel approach to enlightening LLMs with open-source code snippets."),
     'stablelm-zephyr': _("A lightweight chat model allowing accurate, and responsive output without requiring high-end hardware."),
     'codebooga': _("A high-performing code instruct model created by merging two existing code models."),
     'mistrallite': _("MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts."),
-    'glm4': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
-    'wizard-vicuna': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
+    'wizard-vicuna': _("Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj."),
     'duckdb-nsql': _("7B parameter text-to-SQL model made by MotherDuck and Numbers Station."),
     'megadolphin': _("MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by interleaving the model with itself."),
     'goliath': _("A language model created by combining two fine-tuned Llama 2 70B models into one."),
@@ -92,12 +96,10 @@ descriptions = {
     'falcon2': _("Falcon2 is an 11B parameters causal decoder-only model built by TII and trained over 5T tokens."),
     'notus': _("A 7B chat model fine-tuned with high-quality data and based on Zephyr."),
     'dbrx': _("DBRX is an open, general-purpose LLM created by Databricks."),
-    'codegeex4': _("A versatile model for AI software development scenarios, including code completion."),
-    'alfred': _("A robust conversational model designed to be used for both chat and instruct use cases."),
     'internlm2': _("InternLM2.5 is a 7B parameter model tailored for practical scenarios with outstanding reasoning capability."),
+    'alfred': _("A robust conversational model designed to be used for both chat and instruct use cases."),
     'llama3-groq-tool-use': _("A series of models from Groq that represent a significant advancement in open-source AI capabilities for tool use/function calling."),
     'mathstral': _("MathΣtral: a 7B model designed for math reasoning and scientific discovery by Mistral AI."),
-    'mistral-nemo': _("A state-of-the-art 12B model with 128k context length, built by Mistral AI in collaboration with NVIDIA."),
     'firefunction-v2': _("An open weights function calling model based on Llama 3, competitive with GPT-4o function calling capabilities."),
     'nuextract': _("A 3.8B model fine-tuned on a private high-quality synthetic dataset for information extraction, based on Phi-3."),
 }
@@ -172,26 +172,38 @@ def remove_attached_file(self, name):

 # RECONNECT REMOTE | WORKS

-def reconnect_remote_response(self, dialog, task, entry):
+def reconnect_remote_response(self, dialog, task, url_entry, bearer_entry):
     response = dialog.choose_finish(task)
     if not task or response == "remote":
-        self.connect_remote(entry.get_text())
+        self.connect_remote(url_entry.get_text(), bearer_entry.get_text())
     elif response == "local":
         self.connect_local()
     elif response == "close":
         self.destroy()

-def reconnect_remote(self, current_url):
-    entry = Gtk.Entry(
+def reconnect_remote(self, current_url, current_bearer_token):
+    entry_url = Gtk.Entry(
         css_classes = ["error"],
-        text = current_url
+        text = current_url,
+        placeholder_text = "URL"
     )
+    entry_bearer_token = Gtk.Entry(
+        css_classes = ["error"] if current_bearer_token else None,
+        text = current_bearer_token,
+        placeholder_text = "Bearer Token (Optional)"
+    )
+    container = Gtk.Box(
+        orientation = 1,
+        spacing = 10
+    )
+    container.append(entry_url)
+    container.append(entry_bearer_token)
     dialog = Adw.AlertDialog(
         heading=_("Connection Error"),
         body=_("The remote instance has disconnected"),
-        extra_child=entry
+        extra_child=container
     )
-    entry.connect("activate", lambda entry, dialog: reconnect_remote_response(self, dialog, None, entry))
+    #entry.connect("activate", lambda entry, dialog: reconnect_remote_response(self, dialog, None, entry))
     dialog.add_response("close", _("Close Alpaca"))
     dialog.add_response("local", _("Use local instance"))
     dialog.add_response("remote", _("Connect"))
@@ -199,7 +211,7 @@ def reconnect_remote(self, current_url):
     dialog.choose(
         parent = self,
         cancellable = None,
-        callback = lambda dialog, task, entry=entry: reconnect_remote_response(self, dialog, task, entry)
+        callback = lambda dialog, task, url_entry=entry_url, bearer_entry=entry_bearer_token: reconnect_remote_response(self, dialog, task, url_entry, bearer_entry)
     )

 # CREATE MODEL | WORKS
@@ -285,9 +297,9 @@ def youtube_caption_response(self, dialog, task, video_url, caption_drop_down):
     selected_caption = caption_drop_down.get_selected_item().get_string()
     for event in yt.captions[selected_caption.split(' | ')[1]].json_captions['events']:
         text += "{}\n".format(event['segs'][0]['utf8'].replace('\n', '\\n'))
-    if not os.path.exists('/tmp/alpaca/youtube'):
-        os.makedirs('/tmp/alpaca/youtube')
-    file_path = os.path.join('/tmp/alpaca/youtube', f'{yt.title} ({selected_caption.split(" | ")[0]})')
+    if not os.path.exists(os.path.join(self.cache_dir, 'tmp/youtube')):
+        os.makedirs(os.path.join(self.cache_dir, 'tmp/youtube'))
+    file_path = os.path.join(os.path.join(self.cache_dir, 'tmp/youtube'), f'{yt.title} ({selected_caption.split(" | ")[0]})')
     with open(file_path, 'w+') as f:
         f.write(text)
     self.attach_file(file_path, 'youtube')
@@ -357,3 +369,31 @@ def attach_website(self, url):
         cancellable = None,
         callback = lambda dialog, task, url=url: attach_website_response(self, dialog, task, url)
     )
+
+# Begging for money :3
+
+def support_response(self, dialog, task):
+    res = dialog.choose_finish(task)
+    if res == 'later': return
+    elif res == 'support':
+        self.show_toast(_("Thank you!"), self.main_overlay)
+        os.system('xdg-open https://github.com/sponsors/Jeffser')
+    self.show_support = False
+    self.save_server_config()
+
+def support(self):
+    dialog = Adw.AlertDialog(
+        heading=_("Support"),
+        body=_("Are you enjoying Alpaca? Consider sponsoring the project!"),
+        close_response="nope"
+    )
+    dialog.add_response("nope", _("Don't show again"))
+    dialog.set_response_appearance("nope", Adw.ResponseAppearance.DESTRUCTIVE)
+    dialog.add_response("later", _("Later"))
+    dialog.add_response("support", _("Support"))
+    dialog.set_response_appearance("support", Adw.ResponseAppearance.SUGGESTED)
+    dialog.choose(
+        parent = self,
+        cancellable = None,
+        callback = lambda dialog, task: support_response(self, dialog, task)
+    )
@@ -12,10 +12,13 @@ data_dir = os.getenv("XDG_DATA_HOME")
 overrides = {}

 def start():
+    if not os.path.isdir(os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp/ollama')):
+        os.mkdir(os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp/ollama'))
     global instance, overrides
     params = overrides.copy()
     params["OLLAMA_HOST"] = f"127.0.0.1:{port}" # You can't change this directly sorry :3
     params["HOME"] = data_dir
+    params["TMPDIR"] = os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp/ollama')
     instance = subprocess.Popen(["/app/bin/ollama", "serve"], env={**os.environ, **params}, stderr=subprocess.PIPE, text=True)
     logger.info("Starting Alpaca's Ollama instance...")
     logger.debug(params)
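The two added lines point the Ollama child process's TMPDIR into the app's XDG cache directory instead of the sandbox /tmp, which is what the 1.0.2 release note about transcripts landing in the wrong TMP directory refers to. A standalone sketch of the same env-override pattern (the child command here is a stand-in, not Alpaca's):

```python
import os
import subprocess

# Inherit the full environment, then override TMPDIR so the child's
# temporary files land inside the XDG cache directory.
cache_home = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
tmp_dir = os.path.join(cache_home, "tmp/ollama")
os.makedirs(tmp_dir, exist_ok=True)

child = subprocess.run(
    ["python3", "-c", "import tempfile; print(tempfile.gettempdir())"],
    env={**os.environ, "TMPDIR": tmp_dir},
    capture_output=True, text=True,
)
print(child.stdout.strip())  # prints the redirected tmp_dir
```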
src/main.py (19 changes)
@@ -20,6 +20,7 @@
 import sys
 import logging
 import gi
+import os

 gi.require_version('Gtk', '4.0')
 gi.require_version('Adw', '1')
@@ -34,13 +35,13 @@ logger = logging.getLogger(__name__)
 class AlpacaApplication(Adw.Application):
     """The main application singleton class."""

-    def __init__(self):
+    def __init__(self, version):
         super().__init__(application_id='com.jeffser.Alpaca',
                          flags=Gio.ApplicationFlags.DEFAULT_FLAGS)
         self.create_action('quit', lambda *_: self.quit(), ['<primary>q'])
         self.create_action('preferences', lambda *_: AlpacaWindow.show_preferences_dialog(self.props.active_window), ['<primary>p'])
         self.create_action('about', self.on_about_action)
-        self.version = '1.0.0'
+        self.version = version

     def do_activate(self):
         win = self.props.active_window
@@ -61,7 +62,8 @@ class AlpacaApplication(Adw.Application):
             copyright='© 2024 Jeffser\n© 2024 Ollama',
             issue_url='https://github.com/Jeffser/Alpaca/issues',
             license_type=3,
-            website="https://jeffser.com/alpaca")
+            website="https://jeffser.com/alpaca",
+            debug_info=open(os.path.join(os.getenv("XDG_DATA_HOME"), 'tmp.log'), 'r').read())
         about.present(parent=self.props.active_window)

     def create_action(self, name, callback, shortcuts=None):
@@ -73,10 +75,17 @@ class AlpacaApplication(Adw.Application):


 def main(version):
+    if os.path.isfile(os.path.join(os.getenv("XDG_DATA_HOME"), 'tmp.log')):
+        os.remove(os.path.join(os.getenv("XDG_DATA_HOME"), 'tmp.log'))
+    if os.path.isdir(os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp')):
+        os.system('rm -rf ' + os.path.join(os.getenv("XDG_CACHE_HOME"), "tmp/*"))
+    else:
+        os.mkdir(os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp'))
     logging.basicConfig(
         format="%(levelname)s\t[%(filename)s | %(funcName)s] %(message)s",
-        level=logging.INFO
+        level=logging.INFO,
+        handlers=[logging.FileHandler(filename=os.path.join(os.getenv("XDG_DATA_HOME"), 'tmp.log')), logging.StreamHandler(stream=sys.stdout)]
     )
-    app = AlpacaApplication()
+    app = AlpacaApplication(version)
     logger.info(f"Alpaca version: {app.version}")
     return app.run(sys.argv)
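The reworked logging setup is what feeds the new debug_info field in the About dialog: every record goes to stdout and to a tmp.log file, and the dialog simply reads the file back. A minimal standalone version of the pattern (the log path here is a placeholder, not Alpaca's XDG location):

```python
import logging
import os
import sys
import tempfile

log_path = os.path.join(tempfile.gettempdir(), "tmp.log")

# One logger, two sinks: the console for live output, and a file
# that the UI can read back later as a session log.
logging.basicConfig(
    format="%(levelname)s\t[%(filename)s | %(funcName)s] %(message)s",
    level=logging.INFO,
    handlers=[
        logging.FileHandler(filename=log_path),
        logging.StreamHandler(stream=sys.stdout),
    ],
)
logging.getLogger(__name__).info("starting up")

with open(log_path) as f:
    print(f.read())  # what an About dialog could show as debug info
```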
@@ -12,3 +12,7 @@
     border-radius: 5px;
     padding: 5px;
 }
+.chat_row:selected {
+    background: mix(@theme_bg_color, @theme_selected_bg_color, 0.3);
+    color: mix(@window_fg_color, @theme_selected_bg_color, 0.5);
+}
@@ -21,7 +21,7 @@ import gi
 gi.require_version('GtkSource', '5')
 gi.require_version('GdkPixbuf', '2.0')
 from gi.repository import Adw, Gtk, Gdk, GLib, GtkSource, Gio, GdkPixbuf
-import json, requests, threading, os, re, base64, sys, gettext, locale, subprocess, uuid, shutil, tarfile, tempfile, logging
+import json, requests, threading, os, re, base64, sys, gettext, locale, subprocess, uuid, shutil, tarfile, tempfile, logging, random
 from time import sleep
 from io import BytesIO
 from PIL import Image
@@ -44,7 +44,6 @@ class AlpacaWindow(Adw.ApplicationWindow):

     localedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locale')

-    locale.setlocale(locale.LC_ALL, '')
     gettext.bindtextdomain('com.jeffser.Alpaca', localedir)
     gettext.textdomain('com.jeffser.Alpaca')
     _ = gettext.gettext
@@ -61,6 +60,7 @@ class AlpacaWindow(Adw.ApplicationWindow):
     pulling_models = {}
     chats = {"chats": {_("New Chat"): {"messages": {}}}, "selected_chat": "New Chat", "order": []}
     attachments = {}
+    show_support = True

     #Override elements
     override_HSA_OVERRIDE_GFX_VERSION = Gtk.Template.Child()
@@ -167,7 +167,7 @@ class AlpacaWindow(Adw.ApplicationWindow):
         buffer = self.editing_message["text_view"].get_buffer()
         text = buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter(), False).rstrip('\n')
         footer = "<small>" + self.editing_message["footer"] + "</small>"
-        buffer.insert_markup(buffer.get_end_iter(), footer, len(footer))
+        buffer.insert_markup(buffer.get_end_iter(), footer, len(footer.encode('utf-8')))
         self.chats["chats"][self.chats["selected_chat"]]["messages"][self.editing_message["id"]]["content"] = text
         self.editing_message = None
         self.save_history()
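This same one-character class of fix recurs in several hunks below: Gtk.TextBuffer.insert_markup() expects the markup length in bytes, while Python's len() on a str counts characters. The two only agree for pure-ASCII text, so localized footers (accented month names, CJK dates) were being cut short. A quick illustration:

```python
# len() counts characters; the UTF-8 encoding can be longer in bytes.
footer_ascii = "\n\n<small>2024/07/30 19:00</small>"
footer_cjk = "\n\n<small>2024年7月30日 19:00</small>"

print(len(footer_ascii), len(footer_ascii.encode("utf-8")))  # equal
print(len(footer_cjk), len(footer_cjk.encode("utf-8")))      # bytes > characters
```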
@@ -183,8 +183,8 @@ class AlpacaWindow(Adw.ApplicationWindow):
         self.chats['order'].remove(self.chats['selected_chat'])
         self.chats['order'].insert(0, self.chats['selected_chat'])
         self.save_history()
-        current_model = self.model_drop_down.get_selected_item().get_string()
-        current_model = current_model.replace(' (', ':')[:-1].lower()
+        current_model = self.model_drop_down.get_selected_item().get_string().split(' (')
+        current_model = '{}:{}'.format(current_model[0].replace(' ', '-').lower(), current_model[1][:-1])
         if current_model is None:
             self.show_toast(_("Please select a model before chatting"), self.main_overlay)
             return
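The old parsing lowercased the entire display string, tag included, which is the "models with capital letters on their tag don't work" bug from the 1.0.2 notes, and it never restored the hyphens in multi-word model names. The replacement splits the name from the tag and rebuilds a valid Ollama identifier. Roughly (the drop-down string below is a made-up example):

```python
selected = "Dolphin Llama3 (8b)"  # hypothetical drop-down display string

# Old approach: spaces survive and the whole string is lowercased.
old = selected.replace(' (', ':')[:-1].lower()

# New approach: rebuild "name:tag", re-hyphenating and lowercasing only the name.
name, tag = selected.split(' (')
new = '{}:{}'.format(name.replace(' ', '-').lower(), tag[:-1])

print(old)  # "dolphin llama3:8b" -- not a valid tag
print(new)  # "dolphin-llama3:8b" -- valid
```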
@@ -229,7 +229,7 @@ class AlpacaWindow(Adw.ApplicationWindow):

         #self.attachments[name] = {"path": file_path, "type": file_type, "content": content}
         raw_message = self.message_text_view.get_buffer().get_text(self.message_text_view.get_buffer().get_start_iter(), self.message_text_view.get_buffer().get_end_iter(), False)
-        formated_date = self.generate_datetime_format(current_datetime)
+        formated_date = GLib.markup_escape_text(self.generate_datetime_format(current_datetime))
         self.show_message(raw_message, False, f"\n\n<small>{formated_date}</small>", attached_images, attached_files, id=id)
         self.message_text_view.get_buffer().set_text("", 0)
         self.loading_spinner = Gtk.Spinner(spinning=True, margin_top=12, margin_bottom=12, hexpand=True)
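Since the formatted date is interpolated directly into Pango markup, any '&', '<' or '>' produced by a locale's date format would break the markup string. GLib.markup_escape_text() neutralizes those characters first. For example (requires PyGObject; the date string is contrived to show the effect):

```python
from gi.repository import GLib

date = "Tue <Jul> 30 & 19:00"  # contrived date with markup-significant characters
print(GLib.markup_escape_text(date))
# Tue &lt;Jul&gt; 30 &amp; 19:00 -- now safe inside <small>...</small>
```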
@@ -290,6 +290,9 @@ class AlpacaWindow(Adw.ApplicationWindow):

     @Gtk.Template.Callback()
     def change_remote_url(self, entry):
+        if not entry.get_text().startswith("http"):
+            entry.set_text("http://{}".format(entry.get_text()))
+            return
         self.remote_url = entry.get_text()
         logger.debug(f"Changing remote url: {self.remote_url}")
         if self.run_remote:
@@ -571,11 +574,12 @@ Generate a title following these rules:
 ```PROMPT
 {message['content']}
 ```"""
-        current_model = self.model_drop_down.get_selected_item().get_string()
-        current_model = current_model.replace(' (', ':')[:-1].lower()
+        current_model = self.model_drop_down.get_selected_item().get_string().split(' (')
+        current_model = '{}:{}'.format(current_model[0].replace(' ', '-').lower(), current_model[1][:-1])
         data = {"model": current_model, "prompt": prompt, "stream": False}
         if 'images' in message: data["images"] = message['images']
         response = connection_handler.simple_post(f"{connection_handler.url}/api/generate", data=json.dumps(data))

         new_chat_name = json.loads(response.text)["response"].strip().removeprefix("Title: ").removeprefix("title: ").strip('\'"').title()
+        new_chat_name = new_chat_name[:50] + (new_chat_name[50:] and '...')
         self.rename_chat(label_element.get_name(), new_chat_name, label_element)
@@ -595,7 +599,7 @@ Generate a title following these rules:
         )
         message_buffer = message_text.get_buffer()
         message_buffer.insert(message_buffer.get_end_iter(), msg)
-        if footer is not None: message_buffer.insert_markup(message_buffer.get_end_iter(), footer, len(footer))
+        if footer is not None: message_buffer.insert_markup(message_buffer.get_end_iter(), footer, len(footer.encode('utf-8')))

         delete_button = Gtk.Button(
             icon_name = "user-trash-symbolic",
@@ -777,16 +781,18 @@ Generate a title following these rules:

     def save_server_config(self):
         with open(os.path.join(self.config_dir, "server.json"), "w+") as f:
-            json.dump({'remote_url': self.remote_url, 'remote_bearer_token': self.remote_bearer_token, 'run_remote': self.run_remote, 'local_port': local_instance.port, 'run_on_background': self.run_on_background, 'model_tweaks': self.model_tweaks, 'ollama_overrides': local_instance.overrides}, f, indent=6)
+            json.dump({'remote_url': self.remote_url, 'remote_bearer_token': self.remote_bearer_token, 'run_remote': self.run_remote, 'local_port': local_instance.port, 'run_on_background': self.run_on_background, 'model_tweaks': self.model_tweaks, 'ollama_overrides': local_instance.overrides, 'show_support': self.show_support}, f, indent=6)

     def verify_connection(self):
         response = connection_handler.simple_get(connection_handler.url)
-        if response.status_code == 200:
-            if "Ollama is running" in response.text:
+        try:
+            response = connection_handler.simple_get(f"{connection_handler.url}/api/tags")
+            if response.status_code == 200:
                 self.save_server_config()
                 self.update_list_local_models()
-                return True
-        return False
+            return response.status_code == 200
+        except Exception as e:
+            logger.error(e)
+            return False

     def add_code_blocks(self):
         text = self.bot_message.get_text(self.bot_message.get_start_iter(), self.bot_message.get_end_iter(), True)
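The reconstructed checker above treats a reply from /api/tags as the real health signal rather than just the landing-page banner, and wraps the probe so network errors degrade to a clean False. A standalone sketch of that idea, using requests directly (connection_handler is Alpaca's own wrapper and is not part of this compare):

```python
import requests

def ollama_is_healthy(url: str) -> bool:
    # The banner on "/" only proves something is listening;
    # /api/tags proves the Ollama API itself is answering.
    try:
        return requests.get(f"{url}/api/tags", timeout=5).status_code == 200
    except requests.RequestException:
        return False

print(ollama_is_healthy("http://127.0.0.1:11434"))
```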
@@ -804,13 +810,23 @@ Generate a title following these rules:
             code_text = match.group(2)
             parts.append({"type": "code", "text": code_text, "language": language})
             pos = end
+        # Match code blocks without language
+        no_lang_code_block_pattern = re.compile(r'`\n(.*?)\n`', re.DOTALL)
+        for match in no_lang_code_block_pattern.finditer(text):
+            start, end = match.span()
+            if pos < start:
+                normal_text = text[pos:start]
+                parts.append({"type": "normal", "text": normal_text.strip()})
+            code_text = match.group(1)
+            parts.append({"type": "code", "text": code_text, "language": None})
+            pos = end
         # Extract any remaining normal text after the last code block
         if pos < len(text):
             normal_text = text[pos:]
             if normal_text.strip():
                 parts.append({"type": "normal", "text": normal_text.strip()})
         bold_pattern = re.compile(r'\*\*(.*?)\*\*') #"**text**"
-        code_pattern = re.compile(r'`(.*?)`') #"`text`"
+        code_pattern = re.compile(r'`([^`\n]*?)`') #"`text`"
         h1_pattern = re.compile(r'^#\s(.*)$') #"# text"
         h2_pattern = re.compile(r'^##\s(.*)$') #"## text"
         markup_pattern = re.compile(r'<(b|u|tt|span.*)>(.*?)<\/(b|u|tt|span)>') #heh butt span, I'm so funny
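The added loop gives fenced code blocks without a language tag the same scan-and-append treatment as tagged ones. A standalone sketch of the approach (as extracted, the pattern above shows single backticks around the newline; standard Markdown fences are triple backticks, which is what this sketch assumes):

```python
import re

# Split a message into normal text and unlabeled fenced code blocks,
# mirroring the pos/parts bookkeeping in add_code_blocks().
no_lang = re.compile(r'```\n(.*?)\n```', re.DOTALL)

def split_blocks(text):
    parts, pos = [], 0
    for match in no_lang.finditer(text):
        start, end = match.span()
        if pos < start and text[pos:start].strip():
            parts.append({"type": "normal", "text": text[pos:start].strip()})
        parts.append({"type": "code", "text": match.group(1), "language": None})
        pos = end
    if pos < len(text) and text[pos:].strip():
        parts.append({"type": "normal", "text": text[pos:].strip()})
    return parts

print(split_blocks("Hi\n```\nprint('hello')\n```\nBye"))
```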
@@ -844,17 +860,19 @@ Generate a title following these rules:
                     start, end = match.span()
                     if position < start:
                         message_buffer.insert(message_buffer.get_end_iter(), part['text'][position:start])
-                    message_buffer.insert_markup(message_buffer.get_end_iter(), match.group(0), len(match.group(0)))
+                    message_buffer.insert_markup(message_buffer.get_end_iter(), match.group(0), len(match.group(0).encode('utf-8')))
                     position = end

                 if position < len(part['text']):
                     message_buffer.insert(message_buffer.get_end_iter(), part['text'][position:])

-                if footer: message_buffer.insert_markup(message_buffer.get_end_iter(), footer, len(footer))
+                if footer: message_buffer.insert_markup(message_buffer.get_end_iter(), footer, len(footer.encode('utf-8')))

                 self.bot_message_box.append(message_text)
             else:
-                language = GtkSource.LanguageManager.get_default().get_language(part['language'])
+                language = None
+                if part['language']:
+                    language = GtkSource.LanguageManager.get_default().get_language(part['language'])
                 if language:
                     buffer = GtkSource.Buffer.new_with_language(language)
                 else:
@@ -872,7 +890,7 @@ Generate a title following these rules:
             source_view.set_editable(False)
             code_block_box = Gtk.Box(css_classes=["card"], orientation=1, overflow=1)
             title_box = Gtk.Box(margin_start=12, margin_top=3, margin_bottom=3, margin_end=3)
-            title_box.append(Gtk.Label(label=language.get_name() if language else part['language'], hexpand=True, xalign=0))
+            title_box.append(Gtk.Label(label=language.get_name() if language else _("Code Block"), hexpand=True, xalign=0))
             copy_button = Gtk.Button(icon_name="edit-copy-symbolic", css_classes=["flat", "circular"], tooltip_text=_("Copy Message"))
             copy_button.connect("clicked", self.on_copy_code_clicked, buffer)
             title_box.append(copy_button)
@@ -918,9 +936,9 @@ Generate a title following these rules:
             if id not in self.chats["chats"][self.chats["selected_chat"]]["messages"] or vadjustment.get_value() + 50 >= vadjustment.get_upper() - vadjustment.get_page_size():
                 GLib.idle_add(vadjustment.set_value, vadjustment.get_upper())
             if data['done']:
-                formated_date = self.generate_datetime_format(datetime.strptime(self.chats["chats"][self.chats["selected_chat"]]["messages"][id]["date"], '%Y/%m/%d %H:%M:%S'))
-                text = f"\n\n<small>{data['model'].split(':')[0].replace('-', ' ').title()} ({data['model'].split(':')[1]})\n{formated_date}</small>"
-                GLib.idle_add(self.bot_message.insert_markup, self.bot_message.get_end_iter(), text, len(text))
+                formated_date = GLib.markup_escape_text(self.generate_datetime_format(datetime.strptime(self.chats["chats"][self.chats["selected_chat"]]["messages"][id]["date"], '%Y/%m/%d %H:%M:%S')))
+                text = f"\n\n{data['model'].split(':')[0].replace('-', ' ').title()} ({data['model'].split(':')[1]})\n<small>{formated_date}</small>"
+                GLib.idle_add(self.bot_message.insert_markup, self.bot_message.get_end_iter(), text, len(text.encode('utf-8')))
                 self.save_history()
                 GLib.idle_add(self.bot_message_button_container.set_visible, True)
                 #Notification
@@ -1083,11 +1101,11 @@ Generate a title following these rules:
         for widget in list(self.chat_container): self.chat_container.remove(widget)
         for key, message in self.chats['chats'][self.chats["selected_chat"]]['messages'].items():
             if message:
-                formated_date = self.generate_datetime_format(datetime.strptime(message['date'] + (":00" if message['date'].count(":") == 1 else ""), '%Y/%m/%d %H:%M:%S'))
+                formated_date = GLib.markup_escape_text(self.generate_datetime_format(datetime.strptime(message['date'] + (":00" if message['date'].count(":") == 1 else ""), '%Y/%m/%d %H:%M:%S')))
                 if message['role'] == 'user':
                     self.show_message(message['content'], False, f"\n\n<small>{formated_date}</small>", message['images'] if 'images' in message else None, message['files'] if 'files' in message else None, id=key)
                 else:
-                    self.show_message(message['content'], True, f"\n\n<small>{message['model'].split(':')[0].replace('-', ' ').title()} ({message['model'].split(':')[1]})\n{formated_date}</small>", id=key)
+                    self.show_message(message['content'], True, f"\n\n{message['model'].split(':')[0].replace('-', ' ').title()} ({message['model'].split(':')[1]})\n<small>{formated_date}</small>", id=key)
                 self.add_code_blocks()
                 self.bot_message = None
@@ -1235,9 +1253,10 @@ Generate a title following these rules:
         logger.debug("Showing preferences dialog")
         self.preferences_dialog.present(self)

-    def connect_remote(self, url):
+    def connect_remote(self, url, bearer_token):
         logger.debug(f"Connecting to remote: {url}")
         connection_handler.url = url
+        connection_handler.bearer_token = bearer_token
         self.remote_url = connection_handler.url
         self.remote_connection_entry.set_text(self.remote_url)
         if self.verify_connection() == False: self.connection_error()
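The diff stores the token on connection_handler, but the handler's request code is outside this compare. A typical (hypothetical) implementation would attach it as an Authorization header, along these lines:

```python
import requests

def simple_get(url, bearer_token=None):
    # Hypothetical sketch: send "Authorization: Bearer <token>" when a
    # token is set, so a reverse proxy in front of Ollama can enforce auth.
    headers = {"Authorization": f"Bearer {bearer_token}"} if bearer_token else {}
    return requests.get(url, headers=headers)
```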
@@ -1254,7 +1273,7 @@ Generate a title following these rules:
     def connection_error(self):
         logger.error("Connection error")
         if self.run_remote:
-            dialogs.reconnect_remote(self, connection_handler.url)
+            dialogs.reconnect_remote(self, connection_handler.url, connection_handler.bearer_token)
         else:
             local_instance.reset()
             self.show_toast(_("There was an error with the local Ollama instance, so it has been reset"), self.main_overlay)
@@ -1559,7 +1578,11 @@ Generate a title following these rules:
             if override in local_instance.overrides:
                 element.set_text(local_instance.overrides[override])

+        #Support dialog
+        if 'show_support' not in data or data['show_support']:
+            if random.randint(0, 99) == 0:
+                dialogs.support(self)
+        if 'show_support' in data: self.show_support = data['show_support']
         self.background_switch.set_active(self.run_on_background)
         self.set_hide_on_close(self.run_on_background)
         self.remote_connection_entry.set_text(self.remote_url)
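The "rare, optional and dismissible" wording in the 1.0.3 release notes maps directly to this check: random.randint(0, 99) is uniform over 100 values, so the dialog fires on roughly 1 in 100 launches, and only while show_support is still true. A quick sanity check of that rate:

```python
import random

# Estimate the trigger probability of random.randint(0, 99) == 0.
trials = 100_000
hits = sum(random.randint(0, 99) == 0 for _ in range(trials))
print(hits / trials)  # ~0.01
```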
@@ -6,7 +6,7 @@
   <signal name="close-request" handler="closing_app"/>
   <property name="resizable">True</property>
   <property name="width-request">360</property>
-  <property name="height-request">700</property>
+  <property name="height-request">400</property>
   <property name="default-width">1300</property>
   <property name="default-height">800</property>
   <property name="title">Alpaca</property>
@@ -244,7 +244,7 @@
             <property name="valign">3</property>
             <property name="tooltip-text" translatable="yes">Send Message</property>
             <style>
-              <class name="suggested-action"/>
+              <class name="accent"/>
               <class name="circular"/>
             </style>
             <child>