First version upload

jeffser
2024-05-12 18:18:25 -06:00
parent bd4a3c4b89
commit 5b77c2b73a
25 changed files with 1815 additions and 0 deletions

0 src/__init__.py Normal file

7 src/alpaca.gresource.xml Normal file

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<gresources>
<gresource prefix="/com/jeffser/Alpaca">
<file preprocess="xml-stripblanks">window.ui</file>
<file preprocess="xml-stripblanks">gtk/help-overlay.ui</file>
</gresource>
</gresources>

46 src/alpaca.in Executable file

@@ -0,0 +1,46 @@
#!@PYTHON@
# alpaca.in
#
# Copyright 2024 Unknown
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-3.0-or-later
import os
import sys
import signal
import locale
import gettext
VERSION = '@VERSION@'
pkgdatadir = '@pkgdatadir@'
localedir = '@localedir@'
sys.path.insert(1, pkgdatadir)
signal.signal(signal.SIGINT, signal.SIG_DFL)
locale.bindtextdomain('alpaca', localedir)
locale.textdomain('alpaca')
gettext.install('alpaca', localedir)
if __name__ == '__main__':
import gi
from gi.repository import Gio
resource = Gio.Resource.load(os.path.join(pkgdatadir, 'alpaca.gresource'))
resource._register()
from alpaca import main
sys.exit(main.main(VERSION))
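
For reference, once Meson's configure_file step (see src/meson.build below) substitutes the @PYTHON@, @VERSION@, @pkgdatadir@ and @localedir@ placeholders, the head of the installed launcher comes out roughly like this (a sketch; the exact paths depend on the configured prefix):

#!/usr/bin/python3
VERSION = '0.1.0'
pkgdatadir = '/usr/share/alpaca'
localedir = '/usr/share/locale'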

91 src/available_models.py Normal file

@@ -0,0 +1,91 @@
# available_models.py
# Ollama's API has no endpoint that lists the remote model library, so this catalog is maintained by hand, sorry
available_models = {
"llama3":"Meta Llama 3: The most capable openly available LLM to date",
"phi3":"Phi-3 Mini is a 3.8B parameters, lightweight, state-of-the-art open model by Microsoft.",
"wizardlm2":"State of the art large language model from Microsoft AI with improved performance on complex chat, multilingual, reasoning and agent use cases.",
"mistral":"The 7B model released by Mistral AI, updated to version 0.2.",
"gemma":"Gemma is a family of lightweight, state-of-the-art open models built by Google DeepMind. Updated to version 1.1",
"mixtral":"A set of Mixture of Experts (MoE) model with open weights by Mistral AI in 8x7b and 8x22b parameter sizes.",
"llama2":"Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters.",
"codegemma":"CodeGemma is a collection of powerful, lightweight models that can perform a variety of coding tasks like fill-in-the-middle code completion, code generation, natural language understanding, mathematical reasoning, and instruction following.",
"command-r":"Command R is a Large Language Model optimized for conversational interaction and long context tasks.",
"command-r-plus":"Command R+ is a powerful, scalable large language model purpose-built to excel at real-world enterprise use cases.",
"llava":"🌋 LLaVA is a novel end-to-end trained large multimodal model that combines a vision encoder and Vicuna for general-purpose visual and language understanding. Updated to version 1.6.",
"dbrx":"DBRX is an open, general-purpose LLM created by Databricks.",
"codellama":"A large language model that can use text prompts to generate and discuss code.",
"qwen":"Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from 0.5B to 110B parameters",
"dolphin-mixtral":"Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of experts models that excels at coding tasks. Created by Eric Hartford.",
"llama2-uncensored":"Uncensored Llama 2 model by George Sung and Jarrad Hope.",
"deepseek-coder":"DeepSeek Coder is a capable coding model trained on two trillion code and natural language tokens.",
"mistral-openorca":"Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset.",
"nomic-embed-text":"A high-performing open embedding model with a large token context window.",
"phi":"Phi-2: a 2.7B language model by Microsoft Research that demonstrates outstanding reasoning and language understanding capabilities.",
"dolphin-mistral":"The uncensored Dolphin model based on Mistral that excels at coding tasks. Updated to version 2.8.",
"orca-mini":"A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware.",
"nous-hermes2":"The powerful family of models by Nous Research that excels at scientific discussion and coding tasks.",
"zephyr":"Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants.",
"llama2-chinese":"Llama 2 based model fine tuned to improve Chinese dialogue ability.",
"wizard-vicuna-uncensored":"Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford.",
"vicuna":"General use chat model based on Llama and Llama 2 with 2K to 16K context sizes.",
"starcoder2":"StarCoder2 is the next generation of transparently trained open code LLMs that comes in three sizes: 3B, 7B and 15B parameters.",
"openhermes":"OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully open datasets.",
"tinyllama":"The TinyLlama project is an open endeavor to train a compact 1.1B Llama model on 3 trillion tokens.",
"openchat":"A family of open-source models trained on a wide variety of data, surpassing ChatGPT on various benchmarks. Updated to version 3.5-0106.",
"tinydolphin":"An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset by Eric Hartford and based on TinyLlama.",
"starcoder":"StarCoder is a code generation model trained on 80+ programming languages.",
"wizardcoder":"State-of-the-art code generation model",
"stable-code":"Stable Code 3B is a coding model with instruct and code completion variants on par with models such as Code Llama 7B that are 2.5x larger.",
"dolphin-llama3":"Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on Llama 3 that has a variety of instruction, conversational, and coding skills.",
"yi":"A high-performing, bilingual language model.",
"mxbai-embed-large":"State-of-the-art large embedding model from mixedbread.ai",
"neural-chat":"A fine-tuned model based on Mistral with good coverage of domain and language.",
"phind-codellama":"Code generation model based on Code Llama.",
"wizard-math":"Model focused on math and logic problems",
"starling-lm":"Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness.",
"falcon":"A large language model built by the Technology Innovation Institute (TII) for use in summarization, text generation, and chat bots.",
"orca2":"Orca 2 is built by Microsoft research, and are a fine-tuned version of Meta's Llama 2 models. The model is designed to excel particularly in reasoning.",
"dolphincoder":"A 7B and 15B uncensored variant of the Dolphin model family that excels at coding, based on StarCoder2.",
"dolphin-phi":"2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language model by Microsoft Research.",
"nous-hermes":"General use models based on Llama and Llama 2 from Nous Research.",
"sqlcoder":"SQLCoder is a code completion model fined-tuned on StarCoder for SQL generation tasks",
"solar":"A compact, yet powerful 10.7B large language model designed for single-turn conversation.",
"stablelm2":"Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model trained on multilingual data in English, Spanish, German, Italian, French, Portuguese, and Dutch.",
"bakllava":"BakLLaVA is a multimodal model consisting of the Mistral 7B base model augmented with the LLaVA architecture.",
"medllama2":"Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset.",
"yarn-llama2":"An extension of Llama 2 that supports a context of up to 128k tokens.",
"deepseek-llm":"An advanced language model crafted with 2 trillion bilingual tokens.",
"nous-hermes2-mixtral":"The Nous Hermes 2 model from Nous Research, now trained over Mixtral.",
"wizardlm-uncensored":"Uncensored version of Wizard LM model",
"codeqwen":"CodeQwen1.5 is a large language model pretrained on a large amount of code data.",
"all-minilm":"Embedding models on very large sentence level datasets.",
"samantha-mistral":"A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral.",
"codeup":"Great code generation model based on Llama2.",
"stable-beluga":"Llama 2 based model fine tuned on an Orca-style dataset. Originally called Free Willy.",
"llama3-gradient":"This model extends LLama-3 8B's context length from 8k to over 1m tokens.",
"everythinglm":"Uncensored Llama2 based model with support for a 16K context window.",
"xwinlm":"Conversational model based on Llama 2 that performs competitively on various benchmarks.",
"yarn-mistral":"An extension of Mistral to support context windows of 64K or 128K.",
"meditron":"Open-source medical large language model adapted from Llama 2 to the medical domain.",
"wizardlm":"General use model based on Llama 2.",
"llama-pro":"An expansion of Llama 2 that specializes in integrating both general language understanding and domain-specific knowledge, particularly in programming and mathematics.",
"magicoder":"🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic instruction data using OSS-Instruct, a novel approach to enlightening LLMs with open-source code snippets.",
"stablelm-zephyr":"A lightweight chat model allowing accurate, and responsive output without requiring high-end hardware.",
"codebooga":"A high-performing code instruct model created by merging two existing code models.",
"nexusraven":"Nexus Raven is a 13B instruction tuned model for function calling tasks.",
"mistrallite":"MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts.",
"wizard-vicuna":"Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj.",
"goliath":"A language model created by combining two fine-tuned Llama 2 70B models into one.",
"open-orca-platypus2":"Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. Designed for chat and code generation.",
"notux":"A top-performing mixture of experts model, fine-tuned with high-quality data.",
"megadolphin":"MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by interleaving the model with itself.",
"snowflake-arctic-embed":"A suite of text embedding models by Snowflake, optimized for performance.",
"duckdb-nsql":"7B parameter text-to-SQL model made by MotherDuck and Numbers Station.",
"moondream":"moondream is a small vision language model designed to run efficiently on edge devices.",
"notus":"A 7B chat model fine-tuned with high-quality data and based on Zephyr.",
"alfred":"A robust conversational model designed to be used for both chat and instruct use cases.",
"llava-llama3":"A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several benchmarks.",
"llama3-chatqa":"A model from NVIDIA based on Llama 3 that excels at conversational question answering (QA) and retrieval-augmented generation (RAG).",
"llava-phi3":"A new small LLaVA model fine-tuned from Phi 3 Mini."
}
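
By contrast, the models already installed locally can be listed through Ollama's REST API; a minimal sketch, assuming a default instance at localhost:11434:

import requests

# GET /api/tags reports only locally installed models; there is no
# endpoint for the remote library, hence the hand-written catalog above.
response = requests.get("http://localhost:11434/api/tags")
for model in response.json()["models"]:
    print(model["name"])  # e.g. "llama3:latest"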

61 src/connection_handler.py Normal file

@@ -0,0 +1,61 @@
# connection_handler.py
import json, requests
from time import sleep
def simple_get(connection_url:str) -> dict:
try:
response = requests.get(connection_url)
if response.status_code == 200:
return {"status": "ok", "text": response.text, "status_code": response.status_code}
else:
return {"status": "error", "text": f"Failed to connect to {connection_url}. Status code: {response.status_code}", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "text": f"An error occurred while trying to connect to {connection_url}", "status_code": 0}
def simple_delete(connection_url:str, data) -> dict:
try:
response = requests.delete(connection_url, json=data)
if response.status_code == 200:
return {"status": "ok", "status_code": response.status_code}
else:
return {"status": "error", "text": "Failed to delete", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "text": f"An error occurred while trying to connect to {connection_url}", "status_code": 0}
def stream_post(connection_url:str, data, callback:callable) -> dict:
try:
headers = {
"Content-Type": "application/json"
}
response = requests.post(connection_url, headers=headers, data=data, stream=True)
if response.status_code == 200:
for line in response.iter_lines():
if line:
callback(json.loads(line.decode("utf-8")))
return {"status": "ok", "text": "All good", "status_code": response.status_code}
else:
return {"status": "error", "text": "Error posting data", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "text": f"An error occurred while trying to connect to {connection_url}", "status_code": 0}
def stream_post_fake(connection_url:str, data, callback:callable) -> dict:
data = {
"status": "pulling manifest"
}
callback(data)
for i in range(2):
for a in range(11):
sleep(.1)
data = {
"status": f"downloading digestname {i}",
"digest": f"digestname {i}",
"total": 500,
"completed": a * 50
}
callback(data)
for msg in ["verifying sha256 digest", "writting manifest", "removing any unused layers", "success"]:
sleep(.1)
data = {"status": msg}
callback(data)
return {"status": "ok", "text": "All good", "status_code": 200}

29 src/gtk/help-overlay.ui Normal file

@@ -0,0 +1,29 @@
<?xml version="1.0" encoding="UTF-8"?>
<interface>
<object class="GtkShortcutsWindow" id="help_overlay">
<property name="modal">True</property>
<child>
<object class="GtkShortcutsSection">
<property name="section-name">shortcuts</property>
<property name="max-height">10</property>
<child>
<object class="GtkShortcutsGroup">
<property name="title" translatable="yes" context="shortcut window">General</property>
<child>
<object class="GtkShortcutsShortcut">
<property name="title" translatable="yes" context="shortcut window">Show Shortcuts</property>
<property name="action-name">win.show-help-overlay</property>
</object>
</child>
<child>
<object class="GtkShortcutsShortcut">
<property name="title" translatable="yes" context="shortcut window">Quit</property>
<property name="action-name">app.quit</property>
</object>
</child>
</object>
</child>
</object>
</child>
</object>
</interface>

68 src/main.py Normal file

@@ -0,0 +1,68 @@
# main.py
#
# Copyright 2024 Unknown
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-3.0-or-later
import sys
import gi
gi.require_version('Gtk', '4.0')
gi.require_version('Adw', '1')
from gi.repository import Gtk, Gio, Adw
from .window import AlpacaWindow
class AlpacaApplication(Adw.Application):
"""The main application singleton class."""
def __init__(self):
super().__init__(application_id='com.jeffser.Alpaca',
flags=Gio.ApplicationFlags.DEFAULT_FLAGS)
self.create_action('quit', lambda *_: self.quit(), ['<primary>q'])
self.create_action('about', self.on_about_action)
self.create_action('preferences', self.on_preferences_action)
def do_activate(self):
win = self.props.active_window
if not win:
win = AlpacaWindow(application=self)
win.present()
def on_about_action(self, widget, _):
about = Adw.AboutWindow(transient_for=self.props.active_window,
application_name='Alpaca',
application_icon='com.jeffser.Alpaca',
developer_name='Jeffry Samuel Eduarte Rojas',
version='0.1.0',
developers=['Jeffser'],
copyright='© 2024 Jeffser')
about.present()
def on_preferences_action(self, widget, _):
print('app.preferences action activated')
def create_action(self, name, callback, shortcuts=None):
action = Gio.SimpleAction.new(name, None)
action.connect("activate", callback)
self.add_action(action)
if shortcuts:
self.set_accels_for_action(f"app.{name}", shortcuts)
def main(version):
app = AlpacaApplication()
return app.run(sys.argv)

37 src/meson.build Normal file

@@ -0,0 +1,37 @@
pkgdatadir = get_option('prefix') / get_option('datadir') / meson.project_name()
moduledir = pkgdatadir / 'alpaca'
gnome = import('gnome')
gnome.compile_resources('alpaca',
'alpaca.gresource.xml',
gresource_bundle: true,
install: true,
install_dir: pkgdatadir,
)
python = import('python')
conf = configuration_data()
conf.set('PYTHON', python.find_installation('python3').full_path())
conf.set('VERSION', meson.project_version())
conf.set('localedir', get_option('prefix') / get_option('localedir'))
conf.set('pkgdatadir', pkgdatadir)
configure_file(
input: 'alpaca.in',
output: 'alpaca',
configuration: conf,
install: true,
install_dir: get_option('bindir'),
install_mode: 'r-xr-xr-x'
)
alpaca_sources = [
'__init__.py',
'main.py',
'window.py',
'connection_handler.py',
'available_models.py',
]
install_data(alpaca_sources, install_dir: moduledir)

280 src/window.py Normal file

@@ -0,0 +1,280 @@
# window.py
#
# Copyright 2024 Unknown
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-3.0-or-later
import gi
gi.require_version("Soup", "3.0")
from gi.repository import Adw, Gtk, GLib
import json, requests, threading
from datetime import datetime
from .connection_handler import simple_get, simple_delete, stream_post, stream_post_fake
from .available_models import available_models
@Gtk.Template(resource_path='/com/jeffser/Alpaca/window.ui')
class AlpacaWindow(Adw.ApplicationWindow):
__gtype_name__ = 'AlpacaWindow'
#Variables
ollama_url = None
local_models = []
messages_history = []
#Elements
bot_message : Gtk.TextBuffer = None
overlay = Gtk.Template.Child()
chat_container = Gtk.Template.Child()
message_entry = Gtk.Template.Child()
send_button = Gtk.Template.Child()
model_drop_down = Gtk.Template.Child()
model_string_list = Gtk.Template.Child()
manage_models_button = Gtk.Template.Child()
manage_models_dialog = Gtk.Template.Child()
model_list_box = Gtk.Template.Child()
pull_model_dialog = Gtk.Template.Child()
pull_model_status_page = Gtk.Template.Child()
pull_model_progress_bar = Gtk.Template.Child()
def show_toast(self, msg:str):
toast = Adw.Toast(
title=msg,
timeout=2
)
self.overlay.add_toast(toast)
def show_message(self, msg:str, bot:bool):
message_text = Gtk.TextView(
editable=False,
focusable=False,
wrap_mode= Gtk.WrapMode.WORD,
margin_top=12,
margin_bottom=12,
margin_start=12,
margin_end=12,
)
message_buffer = message_text.get_buffer()
message_buffer.insert(message_buffer.get_end_iter(), msg)
message_box = Adw.Bin(
child=message_text,
css_classes=["card" if bot else None]
)
message_text.set_valign(Gtk.Align.CENTER)
self.chat_container.append(message_box)
if bot: self.bot_message = message_buffer
def update_list_local_models(self):
self.local_models = []
response = simple_get(self.ollama_url + "/api/tags")
if response['status'] == 'ok':
for model in json.loads(response['text'])['models']:
self.model_string_list.append(model["name"])
self.local_models.append(model["name"])
self.model_drop_down.set_selected(0)
return
# If execution reaches this point, there was an error
self.show_toast(response['text'])
self.show_connection_dialog()
def dialog_response(self, dialog, task):
self.ollama_url = dialog.get_extra_child().get_text()
if dialog.choose_finish(task) == "login":
response = simple_get(self.ollama_url)
if response['status'] == 'ok':
if "Ollama is running" in response['text']:
self.message_entry.grab_focus_without_selecting()
self.update_list_local_models()
return
else:
response = {"status": "error", "text": f"Unexpected response from {self.ollama_url} : {response['text']}"}
# If execution reaches this point, there was an error
self.show_toast(response['text'])
self.show_connection_dialog()
else:
self.destroy()
def show_connection_dialog(self):
dialog = Adw.AlertDialog(
heading="Login",
body="Please enter the Ollama instance URL",
close_response="cancel"
)
dialog.add_response("cancel", "Cancel")
dialog.add_response("login", "Login")
dialog.set_response_appearance("login", Adw.ResponseAppearance.SUGGESTED)
entry = Gtk.Entry(text="http://localhost:11434") #FOR TESTING PURPOSES
dialog.set_extra_child(entry)
dialog.choose(parent = self, cancellable = None, callback = self.dialog_response)
def update_bot_message(self, data):
if data['done']:
try:
api_datetime = data['created_at']
# Trim Ollama's nanosecond timestamp to microseconds so strptime's %f can parse it
api_datetime = api_datetime[:-4] + api_datetime[-1]
formatted_datetime = datetime.strptime(api_datetime, "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y/%m/%d %H:%M")
text = f"\n\n<small>{data['model']}\t|\t{formatted_datetime}</small>"
GLib.idle_add(self.bot_message.insert_markup, self.bot_message.get_end_iter(), text, len(text))
except Exception as e: print(e)
self.bot_message = None
else:
if self.bot_message is None:
GLib.idle_add(self.show_message, data['message']['content'], True)
self.messages_history.append({
"role": "assistant",
"content": data['message']['content']
})
else:
GLib.idle_add(self.bot_message.insert_at_cursor, data['message']['content'], len(data['message']['content']))
self.messages_history[-1]['content'] += data['message']['content']
#else: GLib.idle_add(self.bot_message.insert, self.bot_message.get_end_iter(), data['message']['content'])
def send_message(self):
current_model = self.model_drop_down.get_selected_item()
if current_model is None:
GLib.idle_add(self.show_toast, "Please pull a model")
return
self.messages_history.append({
"role": "user",
"content": self.message_entry.get_text()
})
data = {
"model": current_model.get_string(),
"messages": self.messages_history
}
GLib.idle_add(self.message_entry.set_sensitive, False)
GLib.idle_add(self.send_button.set_sensitive, False)
GLib.idle_add(self.show_message, self.message_entry.get_text(), False)
GLib.idle_add(self.message_entry.get_buffer().set_text, "", 0)
response = stream_post(f"{self.ollama_url}/api/chat", data=json.dumps(data), callback=self.update_bot_message)
GLib.idle_add(self.send_button.set_sensitive, True)
GLib.idle_add(self.message_entry.set_sensitive, True)
if response['status'] == 'error':
self.show_toast(f"{response['text']}")
self.show_connection_dialog()
def send_button_activate(self, button):
if not self.message_entry.get_text(): return
thread = threading.Thread(target=self.send_message)
thread.start()
def delete_model(self, dialog, task, model_name, button):
if dialog.choose_finish(task) == "delete":
response = simple_delete(self.ollama_url + "/api/delete", data={"name": model_name})
print(response)
if response['status'] == 'ok':
button.set_icon_name("folder-download-symbolic")
button.set_css_classes(["accent", "pull"])
self.show_toast(f"Model '{model_name}' deleted successfully")
for i in range(self.model_string_list.get_n_items()):
if self.model_string_list.get_string(i) == model_name:
self.model_string_list.remove(i)
self.model_drop_down.set_selected(0)
break
elif response['status_code'] == 404:
self.show_toast("Delete request failed: Model was not found")
else:
self.show_toast(response['text'])
self.manage_models_dialog.close()
self.show_connection_dialog()
def pull_model_update(self, data):
try:
GLib.idle_add(self.pull_model_progress_bar.set_text, data['status'])
if 'completed' in data:
if 'total' in data: GLib.idle_add(self.pull_model_progress_bar.set_fraction, data['completed'] / data['total'])
else: GLib.idle_add(self.pull_model_progress_bar.set_fraction, 1.0)
else:
GLib.idle_add(self.pull_model_progress_bar.set_fraction, 0.0)
except Exception as e: print(e)
def pull_model(self, dialog, task, model_name, button):
if dialog.choose_finish(task) == "pull":
data = {"name":model_name}
GLib.idle_add(self.pull_model_dialog.present, self.manage_models_dialog)
response = stream_post(f"{self.ollama_url}/api/pull", data=json.dumps(data), callback=self.pull_model_update)
GLib.idle_add(self.pull_model_dialog.force_close)
if response['status'] == 'ok':
GLib.idle_add(button.set_icon_name, "user-trash-symbolic")
GLib.idle_add(button.set_css_classes, ["error", "delete"])
GLib.idle_add(self.model_string_list.append, model_name)
GLib.idle_add(self.show_toast, f"Model '{model_name}' pulled successfully")
else:
GLib.idle_add(self.show_toast, response['text'])
GLib.idle_add(self.manage_models_dialog.close)
GLib.idle_add(self.show_connection_dialog)
def pull_model_start(self, dialog, task, model_name, button):
self.pull_model_status_page.set_description(model_name)
thread = threading.Thread(target=self.pull_model, args=(dialog, task, model_name, button))
thread.start()
def model_action_button_activate(self, button, model_name):
action = list(set(button.get_css_classes()) & set(["delete", "pull"]))[0]
print(f"action: {action}")
dialog = Adw.AlertDialog(
heading=f"{action.capitalize()} Model",
body=f"Are you sure you want to {action} '{model_name}'?",
close_response="cancel"
)
dialog.add_response("cancel", "Cancel")
dialog.add_response(action, action.capitalize())
dialog.set_response_appearance(action, Adw.ResponseAppearance.DESTRUCTIVE if action == "delete" else Adw.ResponseAppearance.SUGGESTED)
dialog.choose(
parent = self.manage_models_dialog,
cancellable = None,
callback = lambda dialog, task, model_name = model_name, button = button:
self.delete_model(dialog, task, model_name, button) if action == "delete" else self.pull_model_start(dialog, task, model_name,button)
)
def update_list_available_models(self):
self.model_list_box.remove_all()
for model_name, model_description in available_models.items():
model = Adw.ActionRow(
title = model_name,
subtitle = model_description,
)
model_name += ":latest"
button = Gtk.Button(
icon_name = "folder-download-symbolic" if model_name not in self.local_models else "user-trash-symbolic",
vexpand = False,
valign = Gtk.Align.CENTER,
css_classes = ["accent", "pull"] if model_name not in self.local_models else ["error", "delete"])
button.connect("clicked", lambda button=button, model_name=model_name: self.model_action_button_activate(button, model_name))
model.add_suffix(button)
self.model_list_box.append(model)
def manage_models_button_activate(self, button):
self.manage_models_dialog.present(self)
self.update_list_available_models()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.manage_models_button.connect("clicked", self.manage_models_button_activate)
self.send_button.connect("clicked", self.send_button_activate)
self.set_default_widget(self.send_button)
self.message_entry.set_activates_default(True)
self.message_entry.set_text("Hi") #FOR TESTING PURPOSES
self.show_connection_dialog()
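
One subtlety in update_bot_message: Ollama's created_at field carries nanosecond precision, which strptime's %f directive (microseconds) cannot parse, hence the slicing before parsing. A standalone sketch of that trim, using a made-up timestamp:

from datetime import datetime

# Drop the last three fractional digits (keeping the trailing 'Z') so the
# nanosecond timestamp becomes microseconds, which %f can parse.
api_datetime = "2024-05-12T18:18:25.123456789Z"
api_datetime = api_datetime[:-4] + api_datetime[-1]
parsed = datetime.strptime(api_datetime, "%Y-%m-%dT%H:%M:%S.%fZ")
print(parsed.strftime("%Y/%m/%d %H:%M"))  # -> 2024/05/12 18:18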

205 src/window.ui Normal file

@@ -0,0 +1,205 @@
<?xml version="1.0" encoding="UTF-8"?>
<interface>
<requires lib="gtk" version="4.0"/>
<requires lib="Adw" version="1.0"/>
<template class="AlpacaWindow" parent="AdwApplicationWindow">
<property name="resizable">True</property>
<property name="content">
<object class="AdwToastOverlay" id="overlay">
<child>
<object class="AdwToolbarView">
<child type="top">
<object class="AdwHeaderBar" id="header_bar">
<property name="title-widget">
<object class="GtkBox">
<property name="orientation">0</property>
<property name="spacing">12</property>
<child>
<object class="GtkDropDown" id="model_drop_down">
<property name="enable-search">true</property>
<property name="model">
<object class="GtkStringList" id="model_string_list">
<items>
</items>
</object>
</property>
</object>
</child>
<child>
<object class="GtkButton" id="manage_models_button">
<property name="tooltip-text" translatable="yes">Manage models</property>
<child>
<object class="AdwButtonContent">
<property name="icon-name">package-x-generic-symbolic</property>
</object>
</child>
</object>
</child>
</object>
</property>
<child type="end">
<object class="GtkMenuButton">
<property name="primary">True</property>
<property name="icon-name">open-menu-symbolic</property>
<property name="tooltip-text" translatable="yes">Menu</property>
<property name="menu-model">primary_menu</property>
</object>
</child>
</object>
</child>
<property name="content">
<object class="GtkBox"><!--ACTUAL CONTENT-->
<property name="orientation">1</property>
<property name="margin-start">24</property>
<property name="margin-end">24</property>
<property name="margin-bottom">24</property>
<property name="vexpand">true</property>
<property name="hexpand">true</property>
<child>
<object class="GtkScrolledWindow" id="chat_window">
<property name="margin-bottom">12</property>
<property name="has-frame">true</property>
<property name="propagate-natural-height">true</property>
<property name="min-content-width">500</property>
<property name="min-content-height">600</property>
<property name="kinetic-scrolling">1</property>
<property name="vexpand">true</property>
<style>
<class name="undershoot-top"/>
<class name="undershoot-bottom"/>
<class name="card"/>
</style>
<child>
<object class="GtkBox" id="chat_container">
<property name="orientation">1</property>
<property name="homogeneous">false</property>
<property name="hexpand">false</property>
<property name="vexpand">true</property>
<property name="spacing">12</property>
<property name="margin-top">12</property>
<property name="margin-bottom">12</property>
<property name="margin-start">12</property>
<property name="margin-end">12</property>
</object>
</child>
</object>
</child>
<child>
<object class="GtkBox">
<property name="orientation">0</property>
<property name="spacing">12</property>
<child>
<object class="GtkEntry" id="message_entry">
<property name="hexpand">true</property>
</object>
</child>
<child>
<object class="GtkButton" id="send_button">
<style>
<class name="suggested-action"/>
</style>
<child>
<object class="AdwButtonContent">
<property name="label" translatable="true">Send</property>
<property name="icon-name">send-to-symbolic</property>
</object>
</child>
</object>
</child>
</object>
</child>
</object><!--END OF CONTENT-->
</property>
</object>
</child>
</object>
</property>
<object class="AdwDialog" id="pull_model_dialog">
<property name="can-close">false</property>
<property name="width-request">400</property>
<child>
<object class="AdwToolbarView">
<child>
<object class="AdwStatusPage" id="pull_model_status_page">
<property name="hexpand">true</property>
<property name="vexpand">true</property>
<property name="margin-top">24</property>
<property name="margin-bottom">24</property>
<property name="margin-start">24</property>
<property name="margin-end">24</property>
<property name="title" translatable="yes">Pulling Model</property>
<child>
<object class="GtkProgressBar" id="pull_model_progress_bar">
<property name="show-text">true</property>
</object>
</child>
</object>
</child>
</object>
</child>
</object>
<object class="AdwDialog" id="manage_models_dialog">
<property name="can-close">true</property>
<property name="width-request">400</property>
<property name="height-request">600</property>
<child>
<object class="AdwToolbarView">
<child type="top">
<object class="AdwHeaderBar">
<property name="title-widget">
<object class="AdwWindowTitle">
<property name="title">Manage models</property>
</object>
</property>
</object>
</child>
<child>
<object class="GtkBox">
<property name="hexpand">true</property>
<property name="vexpand">true</property>
<property name="margin-top">0</property>
<property name="margin-bottom">24</property>
<property name="margin-start">24</property>
<property name="margin-end">24</property>
<child>
<object class="GtkScrolledWindow">
<property name="hexpand">true</property>
<property name="vexpand">true</property>
<child>
<object class="GtkListBox" id="model_list_box">
<property name="selection-mode">none</property>
<style>
<class name="boxed-list"/>
</style>
</object>
</child>
</object>
</child>
</object>
</child>
</object>
</child>
</object>
</template>
<menu id="primary_menu">
<section>
<item>
<attribute name="label" translatable="yes">_Preferences</attribute>
<attribute name="action">app.preferences</attribute>
</item>
<item>
<attribute name="label" translatable="yes">_Keyboard Shortcuts</attribute>
<attribute name="action">win.show-help-overlay</attribute>
</item>
<item>
<attribute name="label" translatable="yes">_About Alpaca</attribute>
<attribute name="action">app.about</attribute>
</item>
</section>
</menu>
</interface>