Compare commits
92 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 7e81200f80 | |
| | b952fa07b5 | |
| | 15bd4335e8 | |
| | d79a1236a0 | |
| | ce11a308bf | |
| | aa1fbcebe7 | |
| | efbfb1e82a | |
| | f497f1c5dc | |
| | 9ecf231307 | |
| | 66a9627b29 | |
| | f03c01b6a6 | |
| | 29a5251d63 | |
| | fcb956ff23 | |
| | 363fb882f3 | |
| | e24cbb65b1 | |
| | cf4d37a1c0 | |
| | 6394569b3b | |
| | e6c855fcf9 | |
| | c00061f46b | |
| | 67d572bd64 | |
| | 06769aba90 | |
| | f5845e95e6 | |
| | 4d529619d6 | |
| | 95561f205c | |
| | e855466280 | |
| | c1c30c993c | |
| | 7da70097f2 | |
| | 01b6ae6bee | |
| | 9c1e0ea263 | |
| | 22116b0d1e | |
| | 03d92de88b | |
| | 0be0942da3 | |
| | b488b64473 | |
| | 56eac5ccd6 | |
| | ad5d6dfa41 | |
| | c4fb424514 | |
| | 28e09d5c2e | |
| | 633507fecd | |
| | 5f3c01d231 | |
| | 4851b7858b | |
| | d4f359bba7 | |
| | 40afce9fb0 | |
| | 61d2e7c7a0 | |
| | 0cb1891d9e | |
| | 5bd55843db | |
| | 5150fd769a | |
| | d5eea3397c | |
| | 11b0b6a8d7 | |
| | ecc93cda78 | |
| | 3653af7b81 | |
| | bed097c760 | |
| | f8d18afd13 | |
| | 991c01cba0 | |
| | 3e6a2b040f | |
| | 22138933f7 | |
| | 3d1a3a9ece | |
| | 0d5350b24d | |
| | 4fb83ed441 | |
| | e8cfc9a9ee | |
| | 13a076bd9f | |
| | 63296219cf | |
| | 1ee36b113a | |
| | dce91739e7 | |
| | dd29077499 | |
| | e398d55211 | |
| | cbdfe43896 | |
| | 25eb1526d3 | |
| | 318f15925f | |
| | 95912e0211 | |
| | f96b652605 | |
| | 24b1ff2e1b | |
| | 7e79f715b1 | |
| | a061feeb71 | |
| | 12790b5ae1 | |
| | 2e2626fa99 | |
| | 3962315a6e | |
| | 08c0074ae5 | |
| | 295429acdf | |
| | a842258e9e | |
| | 053efabfc8 | |
| | a12083bfe9 | |
| | 672b8098bd | |
| | db03cce49f | |
| | e8b0733c32 | |
| | 68d970716f | |
| | a0338bcccb | |
| | eb92126e4b | |
| | d26caea5f0 | |
| | 6d339aad5e | |
| | e7b6da4f62 | |
| | 37e36add45 | |
| | ed2501adf4 | |
README.md (25 changes)
@@ -8,9 +8,6 @@ Alpaca is an [Ollama](https://github.com/ollama/ollama) client where you can man

---

> [!NOTE]
> Please check out [this discussion](https://github.com/Jeffser/Alpaca/discussions/292); I want to start developing a new app alongside Alpaca but I need some suggestions, thanks!

> [!WARNING]
> This project is not affiliated with Ollama at all; I'm not responsible for any damage to your device or software caused by running code given by AI models.

@@ -48,6 +45,14 @@ You can find the latest stable version of the app on [Flathub](https://flathub.o

Every time a new version is published, it becomes available on the [releases page](https://github.com/Jeffser/Alpaca/releases) of the repository.

### Snap Package

You can also find the Snap package on the [releases page](https://github.com/Jeffser/Alpaca/releases). To install it, run this command:

```BASH
sudo snap install ./{package name} --dangerous
```

The `--dangerous` flag is needed because the package is installed without any involvement of the Snap Store; I'm working on getting the app there, but for now you can test the app this way.

### Building Git Version

Note: This is not recommended, since prerelease versions of the app often present errors and general instability.

@@ -71,6 +76,8 @@ Language | Contributors
🇹🇷 Turkish | [YusaBecerikli](https://github.com/YusaBecerikli)
🇺🇦 Ukrainian | [Simon](https://github.com/OriginalSimon)
🇩🇪 German | [Marcel Margenberg](https://github.com/MehrzweckMandala)
🇮🇱 Hebrew | [Yosef Or Boczko](https://github.com/yoseforb)
🇮🇳 Telugu | [Aryan Karamtoth](https://github.com/SpaciousCoder78)

Want to add a language? Visit [this discussion](https://github.com/Jeffser/Alpaca/discussions/153) to get started!

@@ -86,3 +93,15 @@ Want to add a language? Visit [this discussion](https://github.com/Jeffser/Alpac
- [Aleksana](https://github.com/Aleksanaa) for her help with better handling of directories
- Sponsors for giving me enough money to be able to take a ride to my campus every time I need to <3
- Everyone that has shared kind words of encouragement!

---

## Dependencies

- [Requests](https://github.com/psf/requests)
- [Pillow](https://github.com/python-pillow/Pillow)
- [Pypdf](https://github.com/py-pdf/pypdf)
- [Pytube](https://github.com/pytube/pytube)
- [Html2Text](https://github.com/aaronsw/html2text)
- [Ollama](https://github.com/ollama/ollama)
- [Numactl](https://github.com/numactl/numactl)
com.jeffser.Alpaca.json

@@ -1,7 +1,7 @@
{
    "id" : "com.jeffser.Alpaca",
    "runtime" : "org.gnome.Platform",
    "runtime-version" : "46",
    "runtime-version" : "47",
    "sdk" : "org.gnome.Sdk",
    "command" : "alpaca",
    "finish-args" : [

@@ -11,7 +11,8 @@
        "--device=all",
        "--socket=wayland",
        "--filesystem=/sys/module/amdgpu:ro",
        "--env=LD_LIBRARY_PATH=/app/lib:/usr/lib/x86_64-linux-gnu/GL/default/lib:/usr/lib/x86_64-linux-gnu/openh264/extra:/usr/lib/x86_64-linux-gnu/openh264/extra:/usr/lib/sdk/llvm15/lib:/usr/lib/x86_64-linux-gnu/GL/default/lib:/usr/lib/ollama:/app/plugins/AMD/lib/ollama"
        "--env=LD_LIBRARY_PATH=/app/lib:/usr/lib/x86_64-linux-gnu/GL/default/lib:/usr/lib/x86_64-linux-gnu/openh264/extra:/usr/lib/x86_64-linux-gnu/openh264/extra:/usr/lib/sdk/llvm15/lib:/usr/lib/x86_64-linux-gnu/GL/default/lib:/usr/lib/ollama:/app/plugins/AMD/lib/ollama",
        "--env=GSK_RENDERER=ngl"
    ],
    "add-extensions": {
        "com.jeffser.Alpaca.Plugins": {

@@ -134,16 +135,16 @@
            "sources": [
                {
                    "type": "archive",
                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.9/ollama-linux-amd64.tgz",
                    "sha256": "b0062fbccd46134818d9d59cfa3867ad6849163653cb1171bc852c5f379b0851",
                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.12/ollama-linux-amd64.tgz",
                    "sha256": "f0efa42f7ad77cd156bd48c40cd22109473801e5113173b0ad04f094a4ef522b",
                    "only-arches": [
                        "x86_64"
                    ]
                },
                {
                    "type": "archive",
                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.9/ollama-linux-arm64.tgz",
                    "sha256": "8979484bcb1448ab9b45107fbcb3b9f43c2af46f961487449b9ebf3518cd70eb",
                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.12/ollama-linux-arm64.tgz",
                    "sha256": "da631cbe4dd2c168dae58d6868b1ff60e881e050f2d07578f2f736e689fec04c",
                    "only-arches": [
                        "aarch64"
                    ]

@@ -166,6 +167,18 @@
                }
            ]
        },
        {
            "name": "vte",
            "buildsystem": "meson",
            "config-opts": ["-Dvapi=false"],
            "sources": [
                {
                    "type": "archive",
                    "url": "https://gitlab.gnome.org/GNOME/vte/-/archive/0.78.0/vte-0.78.0.tar.gz",
                    "sha256": "82e19d11780fed4b66400f000829ce5ca113efbbfb7975815f26ed93e4c05f2d"
                }
            ]
        },
        {
            "name" : "alpaca",
            "builddir" : true,
com.jeffser.Alpaca.metainfo.xml

@@ -78,6 +78,86 @@
    <url type="contribute">https://github.com/Jeffser/Alpaca/discussions/154</url>
    <url type="vcs-browser">https://github.com/Jeffser/Alpaca</url>
    <releases>
        <release version="2.5.0" date="2024-10-06">
            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.5.0</url>
            <description>
                <p>New</p>
                <ul>
                    <li>Run bash and python scripts straight from chat</li>
                    <li>Updated Ollama to 0.3.12</li>
                    <li>New models!</li>
                </ul>
                <p>Fixes</p>
                <ul>
                    <li>Fixed the launch sequence and made it faster</li>
                    <li>Better detection of code blocks in messages</li>
                    <li>Fixed app not loading in certain setups with Nvidia GPUs</li>
                </ul>
            </description>
        </release>
        <release version="2.0.6" date="2024-09-29">
            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.6</url>
            <description>
                <p>Fixes</p>
                <ul>
                    <li>Fixed message notifications sometimes crashing text rendering because they ran on different threads</li>
                </ul>
            </description>
        </release>
        <release version="2.0.5" date="2024-09-25">
            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.5</url>
            <description>
                <p>Fixes</p>
                <ul>
                    <li>Fixed message generation sometimes failing</li>
                </ul>
            </description>
        </release>
        <release version="2.0.4" date="2024-09-22">
            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.4</url>
            <description>
                <p>New</p>
                <ul>
                    <li>Sidebar resizes with the window</li>
                    <li>New welcome dialog</li>
                    <li>Message search</li>
                    <li>Updated Ollama to v0.3.11</li>
                    <li>A lot of new models provided by the Ollama repository</li>
                </ul>
                <p>Fixes</p>
                <ul>
                    <li>Fixed text inside the model manager when the accessibility option 'large text' is on</li>
                    <li>Fixed image recognition on unsupported models</li>
                </ul>
            </description>
        </release>
        <release version="2.0.3" date="2024-09-18">
            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.3</url>
            <description>
                <p>Fixes</p>
                <ul>
                    <li>Fixed spinner not hiding if the back end fails</li>
                    <li>Fixed image recognition with local images</li>
                    <li>Changed appearance of delete / stop model buttons</li>
                    <li>Fixed stop button crashing the app</li>
                </ul>
                <p>New</p>
                <ul>
                    <li>Made sidebar resize a little when the window is smaller</li>
                    <li>Instant launch</li>
                </ul>
            </description>
        </release>
        <release version="2.0.2" date="2024-09-11">
            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.2</url>
            <description>
                <p>Fixes</p>
                <ul>
                    <li>Fixed error on first run (welcome dialog)</li>
                    <li>Fixed checker for Ollama instance (used on system packages)</li>
                </ul>
            </description>
        </release>
        <release version="2.0.1" date="2024-09-11">
            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.1</url>
            <description>
meson.build

@@ -1,5 +1,5 @@
project('Alpaca', 'c',
    version: '2.0.1',
    version: '2.5.0',
    meson_version: '>= 0.62.0',
    default_options: [ 'warning_level=2', 'werror=false', ],
)
po/LINGUAS

@@ -9,3 +9,5 @@ hi
tr
uk
de
he
te
po/alpaca.pot (1944 changes): file diff suppressed because it is too large
po/nb_NO.po (1997 changes): file diff suppressed because it is too large
po/pt_BR.po (1944 changes): file diff suppressed because it is too large
po/zh_Hans.po (2099 changes): file diff suppressed because it is too large
snap/snapcraft.yaml (new file, 92 lines)
@@ -0,0 +1,92 @@
name: alpaca
base: core24
adopt-info: alpaca

platforms:
  amd64:
  arm64:

confinement: strict
grade: stable
compression: lzo

slots:
  dbus-alpaca:
    interface: dbus
    bus: session
    name: com.jeffser.Alpaca

apps:
  alpaca:
    command: usr/bin/alpaca
    common-id: com.jeffser.Alpaca
    extensions:
      - gnome
    plugs:
      - network
      - network-bind
      - home
      - removable-media

  ollama:
    command: bin/ollama
    plugs:
      - home
      - removable-media
      - network
      - network-bind

  ollama-daemon:
    command: bin/ollama serve
    daemon: simple
    install-mode: enable
    restart-condition: on-failure
    plugs:
      - home
      - removable-media
      - network
      - network-bind

parts:
  # Python dependencies
  python-deps:
    plugin: python
    source: .
    python-packages:
      - requests==2.31.0
      - pillow==10.3.0
      - pypdf==4.2.0
      - pytube==15.0.0
      - html2text==2024.2.26

  # Ollama plugin
  ollama:
    plugin: dump
    source:
      - on amd64: https://github.com/ollama/ollama/releases/download/v0.3.10/ollama-linux-amd64.tgz
      - on arm64: https://github.com/ollama/ollama/releases/download/v0.3.10/ollama-linux-arm64.tgz

  # Alpaca app
  alpaca:
    plugin: meson
    source-type: git
    source: https://github.com/Jeffser/Alpaca.git
    source-depth: 1
    meson-parameters:
      - --prefix=/snap/alpaca/current/usr
    override-build: |
      craftctl default
      sed -i '1c#!/usr/bin/env python3' $CRAFT_PART_INSTALL/snap/alpaca/current/usr/bin/alpaca
    parse-info:
      - usr/share/metainfo/com.jeffser.Alpaca.metainfo.xml
    organize:
      snap/alpaca/current: .
    after: [python-deps]

  deps:
    plugin: nil
    after: [alpaca]
    stage-packages:
      - libnuma1
    prime:
      - usr/lib/*/libnuma.so.1*
@@ -31,6 +31,7 @@
    <file alias="icons/scalable/status/update-symbolic.svg">icons/update-symbolic.svg</file>
    <file alias="icons/scalable/status/down-symbolic.svg">icons/down-symbolic.svg</file>
    <file alias="icons/scalable/status/chat-bubble-text-symbolic.svg">icons/chat-bubble-text-symbolic.svg</file>
    <file alias="icons/scalable/status/execute-from-symbolic.svg">icons/execute-from-symbolic.svg</file>
    <file preprocess="xml-stripblanks">window.ui</file>
    <file preprocess="xml-stripblanks">gtk/help-overlay.ui</file>
  </gresource>
File diff suppressed because it is too large
@@ -1,11 +1,13 @@
descriptions = {
    'llama3.2': _("Meta's Llama 3.2 goes small with 1B and 3B models."),
    'llama3.1': _("Llama 3.1 is a new state-of-the-art model from Meta available in 8B, 70B and 405B parameter sizes."),
    'gemma2': _("Google Gemma 2 is a high-performing and efficient model by now available in three sizes: 2B, 9B, and 27B."),
    'gemma2': _("Google Gemma 2 is a high-performing and efficient model available in three sizes: 2B, 9B, and 27B."),
    'qwen2.5': _("Qwen2.5 models are pretrained on Alibaba's latest large-scale dataset, encompassing up to 18 trillion tokens. The model supports up to 128K tokens and has multilingual support."),
    'phi3.5': _("A lightweight AI model with 3.8 billion parameters with performance overtaking similarly and larger sized models."),
    'nemotron-mini': _("A commercial-friendly small language model by NVIDIA optimized for roleplay, RAG QA, and function calling."),
    'mistral-small': _("Mistral Small is a lightweight model designed for cost-effective use in tasks like translation and summarization."),
    'mistral-nemo': _("A state-of-the-art 12B model with 128k context length, built by Mistral AI in collaboration with NVIDIA."),
    'mistral-large': _("Mistral Large 2 is Mistral's new flagship model that is significantly more capable in code generation, mathematics, and reasoning with 128k context window and support for dozens of languages."),
    'qwen2': _("Qwen2 is a new series of large language models from Alibaba group"),
    'deepseek-coder-v2': _("An open-source Mixture-of-Experts code language model that achieves performance comparable to GPT4-Turbo in code-specific tasks."),
    'phi3': _("Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft."),
    'mistral': _("The 7B model released by Mistral AI, updated to version 0.3."),
    'mixtral': _("A set of Mixture of Experts (MoE) model with open weights by Mistral AI in 8x7b and 8x22b parameter sizes."),
    'codegemma': _("CodeGemma is a collection of powerful, lightweight models that can perform a variety of coding tasks like fill-in-the-middle code completion, code generation, natural language understanding, mathematical reasoning, and instruction following."),
@@ -15,98 +17,108 @@ descriptions = {
    'llama3': _("Meta Llama 3: The most capable openly available LLM to date"),
    'gemma': _("Gemma is a family of lightweight, state-of-the-art open models built by Google DeepMind. Updated to version 1.1"),
    'qwen': _("Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from 0.5B to 110B parameters"),
    'qwen2': _("Qwen2 is a new series of large language models from Alibaba group"),
    'phi3': _("Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft."),
    'llama2': _("Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters."),
    'codellama': _("A large language model that can use text prompts to generate and discuss code."),
    'nomic-embed-text': _("A high-performing open embedding model with a large token context window."),
    'mxbai-embed-large': _("State-of-the-art large embedding model from mixedbread.ai"),
    'dolphin-mixtral': _("Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of experts models that excels at coding tasks. Created by Eric Hartford."),
    'phi': _("Phi-2: a 2.7B language model by Microsoft Research that demonstrates outstanding reasoning and language understanding capabilities."),
    'llama2-uncensored': _("Uncensored Llama 2 model by George Sung and Jarrad Hope."),
    'deepseek-coder': _("DeepSeek Coder is a capable coding model trained on two trillion code and natural language tokens."),
    'mxbai-embed-large': _("State-of-the-art large embedding model from mixedbread.ai"),
    'zephyr': _("Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants."),
    'dolphin-mistral': _("The uncensored Dolphin model based on Mistral that excels at coding tasks. Updated to version 2.8."),
    'starcoder2': _("StarCoder2 is the next generation of transparently trained open code LLMs that comes in three sizes: 3B, 7B and 15B parameters."),
    'orca-mini': _("A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware."),
    'dolphin-llama3': _("Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on Llama 3 that has a variety of instruction, conversational, and coding skills."),
    'llama2-uncensored': _("Uncensored Llama 2 model by George Sung and Jarrad Hope."),
    'dolphin-mistral': _("The uncensored Dolphin model based on Mistral that excels at coding tasks. Updated to version 2.8."),
    'zephyr': _("Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants."),
    'yi': _("Yi 1.5 is a high-performing, bilingual language model."),
    'mistral-openorca': _("Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset."),
    'dolphin-llama3': _("Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on Llama 3 that has a variety of instruction, conversational, and coding skills."),
    'orca-mini': _("A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware."),
    'llava-llama3': _("A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several benchmarks."),
    'qwen2.5-coder': _("The latest series of Code-Specific Qwen models, with significant improvements in code generation, code reasoning, and code fixing."),
    'mistral-openorca': _("Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset."),
    'starcoder': _("StarCoder is a code generation model trained on 80+ programming languages."),
    'llama2-chinese': _("Llama 2 based model fine tuned to improve Chinese dialogue ability."),
    'vicuna': _("General use chat model based on Llama and Llama 2 with 2K to 16K context sizes."),
    'tinyllama': _("The TinyLlama project is an open endeavor to train a compact 1.1B Llama model on 3 trillion tokens."),
    'codestral': _("Codestral is Mistral AI’s first-ever code model designed for code generation tasks."),
    'vicuna': _("General use chat model based on Llama and Llama 2 with 2K to 16K context sizes."),
    'llama2-chinese': _("Llama 2 based model fine tuned to improve Chinese dialogue ability."),
    'snowflake-arctic-embed': _("A suite of text embedding models by Snowflake, optimized for performance."),
    'wizard-vicuna-uncensored': _("Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford."),
    'granite-code': _("A family of open foundation models by IBM for Code Intelligence"),
    'codegeex4': _("A versatile model for AI software development scenarios, including code completion."),
    'nous-hermes2': _("The powerful family of models by Nous Research that excels at scientific discussion and coding tasks."),
    'all-minilm': _("Embedding models on very large sentence level datasets."),
    'openchat': _("A family of open-source models trained on a wide variety of data, surpassing ChatGPT on various benchmarks. Updated to version 3.5-0106."),
    'aya': _("Aya 23, released by Cohere, is a new family of state-of-the-art, multilingual models that support 23 languages."),
    'codeqwen': _("CodeQwen1.5 is a large language model pretrained on a large amount of code data."),
    'wizardlm2': _("State of the art large language model from Microsoft AI with improved performance on complex chat, multilingual, reasoning and agent use cases."),
    'tinydolphin': _("An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset by Eric Hartford and based on TinyLlama."),
    'granite-code': _("A family of open foundation models by IBM for Code Intelligence"),
    'wizardcoder': _("State-of-the-art code generation model"),
    'stable-code': _("Stable Code 3B is a coding model with instruct and code completion variants on par with models such as Code Llama 7B that are 2.5x larger."),
    'openhermes': _("OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully open datasets."),
    'all-minilm': _("Embedding models on very large sentence level datasets."),
    'codeqwen': _("CodeQwen1.5 is a large language model pretrained on a large amount of code data."),
    'qwen2-math': _("Qwen2 Math is a series of specialized math language models built upon the Qwen2 LLMs, which significantly outperforms the mathematical capabilities of open-source models and even closed-source models (e.g., GPT4o)."),
    'bakllava': _("BakLLaVA is a multimodal model consisting of the Mistral 7B base model augmented with the LLaVA architecture."),
    'stablelm2': _("Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model trained on multilingual data in English, Spanish, German, Italian, French, Portuguese, and Dutch."),
    'wizard-math': _("Model focused on math and logic problems"),
    'neural-chat': _("A fine-tuned model based on Mistral with good coverage of domain and language."),
    'llama3-gradient': _("This model extends LLama-3 8B's context length from 8k to over 1m tokens."),
    'deepseek-llm': _("An advanced language model crafted with 2 trillion bilingual tokens."),
    'wizard-math': _("Model focused on math and logic problems"),
    'glm4': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
    'neural-chat': _("A fine-tuned model based on Mistral with good coverage of domain and language."),
    'reflection': _("A high-performing model trained with a new technique called Reflection-tuning that teaches a LLM to detect mistakes in its reasoning and correct course."),
    'llama3-chatqa': _("A model from NVIDIA based on Llama 3 that excels at conversational question answering (QA) and retrieval-augmented generation (RAG)."),
    'mistral-large': _("Mistral Large 2 is Mistral's new flagship model that is significantly more capable in code generation, mathematics, and reasoning with 128k context window and support for dozens of languages."),
    'moondream': _("moondream2 is a small vision language model designed to run efficiently on edge devices."),
    'xwinlm': _("Conversational model based on Llama 2 that performs competitively on various benchmarks."),
    'phind-codellama': _("Code generation model based on Code Llama."),
    'nous-hermes': _("General use models based on Llama and Llama 2 from Nous Research."),
    'dolphincoder': _("A 7B and 15B uncensored variant of the Dolphin model family that excels at coding, based on StarCoder2."),
    'sqlcoder': _("SQLCoder is a code completion model fined-tuned on StarCoder for SQL generation tasks"),
    'xwinlm': _("Conversational model based on Llama 2 that performs competitively on various benchmarks."),
    'deepseek-llm': _("An advanced language model crafted with 2 trillion bilingual tokens."),
    'dolphincoder': _("A 7B and 15B uncensored variant of the Dolphin model family that excels at coding, based on StarCoder2."),
    'yarn-llama2': _("An extension of Llama 2 that supports a context of up to 128k tokens."),
    'llama3-chatqa': _("A model from NVIDIA based on Llama 3 that excels at conversational question answering (QA) and retrieval-augmented generation (RAG)."),
    'wizardlm': _("General use model based on Llama 2."),
    'starling-lm': _("Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness."),
    'codegeex4': _("A versatile model for AI software development scenarios, including code completion."),
    'snowflake-arctic-embed': _("A suite of text embedding models by Snowflake, optimized for performance."),
    'orca2': _("Orca 2 is built by Microsoft research, and are a fine-tuned version of Meta's Llama 2 models. The model is designed to excel particularly in reasoning."),
    'solar': _("A compact, yet powerful 10.7B large language model designed for single-turn conversation."),
    'samantha-mistral': _("A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral."),
    'moondream': _("moondream2 is a small vision language model designed to run efficiently on edge devices."),
    'smollm': _("🪐 A family of small models with 135M, 360M, and 1.7B parameters, trained on a new high-quality dataset."),
    'stable-beluga': _("🪐 A family of small models with 135M, 360M, and 1.7B parameters, trained on a new high-quality dataset."),
    'qwen2-math': _("Qwen2 Math is a series of specialized math language models built upon the Qwen2 LLMs, which significantly outperforms the mathematical capabilities of open-source models and even closed-source models (e.g., GPT4o)."),
    'dolphin-phi': _("2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language model by Microsoft Research."),
    'wizardlm': _("General use model based on Llama 2."),
    'deepseek-v2': _("A strong, economical, and efficient Mixture-of-Experts language model."),
    'bakllava': _("BakLLaVA is a multimodal model consisting of the Mistral 7B base model augmented with the LLaVA architecture."),
    'glm4': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
    'starling-lm': _("Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness."),
    'samantha-mistral': _("A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral."),
    'solar': _("A compact, yet powerful 10.7B large language model designed for single-turn conversation."),
    'orca2': _("Orca 2 is built by Microsoft research, and are a fine-tuned version of Meta's Llama 2 models. The model is designed to excel particularly in reasoning."),
    'stable-beluga': _("Llama 2 based model fine tuned on an Orca-style dataset. Originally called Free Willy."),
    'dolphin-phi': _("2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language model by Microsoft Research."),
    'wizardlm-uncensored': _("Uncensored version of Wizard LM model"),
    'yarn-mistral': _("An extension of Mistral to support context windows of 64K or 128K."),
    'phi3.5': _("A lightweight AI model with 3.8 billion parameters with performance overtaking similarly and larger sized models."),
    'medllama2': _("Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset."),
    'llama-pro': _("An expansion of Llama 2 that specializes in integrating both general language understanding and domain-specific knowledge, particularly in programming and mathematics."),
    'llava-phi3': _("A new small LLaVA model fine-tuned from Phi 3 Mini."),
    'meditron': _("Open-source medical large language model adapted from Llama 2 to the medical domain."),
    'nous-hermes2-mixtral': _("The Nous Hermes 2 model from Nous Research, now trained over Mixtral."),
    'nexusraven': _("Nexus Raven is a 13B instruction tuned model for function calling tasks."),
    'codeup': _("Great code generation model based on Llama2."),
    'everythinglm': _("Uncensored Llama2 based model with support for a 16K context window."),
    'hermes3': _("Hermes 3 is the latest version of the flagship Hermes series of LLMs by Nous Research"),
    'yi-coder': _("Yi-Coder is a series of open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters."),
    'llava-phi3': _("A new small LLaVA model fine-tuned from Phi 3 Mini."),
    'internlm2': _("InternLM2.5 is a 7B parameter model tailored for practical scenarios with outstanding reasoning capability."),
    'yarn-mistral': _("An extension of Mistral to support context windows of 64K or 128K."),
    'llama-pro': _("An expansion of Llama 2 that specializes in integrating both general language understanding and domain-specific knowledge, particularly in programming and mathematics."),
    'medllama2': _("Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset."),
    'meditron': _("Open-source medical large language model adapted from Llama 2 to the medical domain."),
    'nexusraven': _("Nexus Raven is a 13B instruction tuned model for function calling tasks."),
    'nous-hermes2-mixtral': _("The Nous Hermes 2 model from Nous Research, now trained over Mixtral."),
    'codeup': _("Great code generation model based on Llama2."),
    'llama3-groq-tool-use': _("A series of models from Groq that represent a significant advancement in open-source AI capabilities for tool use/function calling."),
    'everythinglm': _("Uncensored Llama2 based model with support for a 16K context window."),
    'magicoder': _("🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic instruction data using OSS-Instruct, a novel approach to enlightening LLMs with open-source code snippets."),
    'stablelm-zephyr': _("A lightweight chat model allowing accurate, and responsive output without requiring high-end hardware."),
    'codebooga': _("A high-performing code instruct model created by merging two existing code models."),
    'mistrallite': _("MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts."),
    'llama3-groq-tool-use': _("A series of models from Groq that represent a significant advancement in open-source AI capabilities for tool use/function calling."),
    'falcon2': _("Falcon2 is an 11B parameters causal decoder-only model built by TII and trained over 5T tokens."),
    'wizard-vicuna': _("Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj."),
    'mistrallite': _("MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts."),
    'falcon2': _("Falcon2 is an 11B parameters causal decoder-only model built by TII and trained over 5T tokens."),
    'duckdb-nsql': _("7B parameter text-to-SQL model made by MotherDuck and Numbers Station."),
    'minicpm-v': _("A series of multimodal LLMs (MLLMs) designed for vision-language understanding."),
    'megadolphin': _("MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by interleaving the model with itself."),
    'notux': _("A top-performing mixture of experts model, fine-tuned with high-quality data."),
    'goliath': _("A language model created by combining two fine-tuned Llama 2 70B models into one."),
    'open-orca-platypus2': _("Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. Designed for chat and code generation."),
    'notus': _("A 7B chat model fine-tuned with high-quality data and based on Zephyr."),
    'dbrx': _("DBRX is an open, general-purpose LLM created by Databricks."),
    'mathstral': _("MathΣtral: a 7B model designed for math reasoning and scientific discovery by Mistral AI."),
    'bge-m3': _("BGE-M3 is a new model from BAAI distinguished for its versatility in Multi-Functionality, Multi-Linguality, and Multi-Granularity."),
    'mathstral': _("MathΣtral: a 7B model designed for math reasoning and scientific discovery by Mistral AI."),
    'dbrx': _("DBRX is an open, general-purpose LLM created by Databricks."),
    'solar-pro': _("Solar Pro Preview: an advanced large language model (LLM) with 22 billion parameters designed to fit into a single GPU"),
    'nuextract': _("A 3.8B model fine-tuned on a private high-quality synthetic dataset for information extraction, based on Phi-3."),
    'alfred': _("A robust conversational model designed to be used for both chat and instruct use cases."),
    'firefunction-v2': _("An open weights function calling model based on Llama 3, competitive with GPT-4o function calling capabilities."),
    'nuextract': _("A 3.8B model fine-tuned on a private high-quality synthetic dataset for information extraction, based on Phi-3."),
    'reader-lm': _("A series of models that convert HTML content to Markdown content, which is useful for content conversion tasks."),
    'bge-large': _("Embedding model from BAAI mapping texts to vectors."),
    'deepseek-v2.5': _("An upgraded version of DeekSeek-V2 that integrates the general and coding abilities of both DeepSeek-V2-Chat and DeepSeek-Coder-V2-Instruct."),
    'bespoke-minicheck': _("A state-of-the-art fact-checking model developed by Bespoke Labs."),
    'paraphrase-multilingual': _("Sentence-transformers model that can be used for tasks like clustering or semantic search."),
}
src/connection_handler.py

@@ -92,6 +92,7 @@ class instance():
        self.idle_timer.start()

    def start(self):
        self.stop()
        if shutil.which('ollama'):
            if not os.path.isdir(os.path.join(cache_dir, 'tmp/ollama')):
                os.mkdir(os.path.join(cache_dir, 'tmp/ollama'))
src/custom_widgets/chat_widget.py

@@ -6,7 +6,7 @@ Handles the chat widget (testing)
import gi
gi.require_version('Gtk', '4.0')
gi.require_version('GtkSource', '5')
from gi.repository import Gtk, Gio, Adw, Gdk
from gi.repository import Gtk, Gio, Adw, Gdk, GLib
import logging, os, datetime, shutil, random, tempfile, tarfile, json
from ..internal import data_dir
from .message_widget import message

@@ -72,7 +72,8 @@ class chat(Gtk.ScrolledWindow):
        self.welcome_screen = None
        self.regenerate_button = None
        self.busy = False
        self.get_vadjustment().connect('notify::page-size', lambda va, *_: va.set_value(va.get_upper() - va.get_page_size()) if va.get_value() == 0 else None)
        #self.get_vadjustment().connect('notify::page-size', lambda va, *_: va.set_value(va.get_upper() - va.get_page_size()) if va.get_value() == 0 else None)
        ##TODO Figure out how to do this with the search thing

    def stop_message(self):
        self.busy = False

@@ -121,7 +122,7 @@ class chat(Gtk.ScrolledWindow):
                tooltip_text=_("Open Model Manager"),
                css_classes=["suggested-action", "pill"]
            )
            button.connect('clicked', lambda *_ : window.manage_models_dialog.present(window))
            button.set_action_name('app.manage_models')
            button_container.append(button)

            self.welcome_screen = Adw.StatusPage(

@@ -153,8 +154,8 @@ class chat(Gtk.ScrolledWindow):
                    for file_name, file_type in message_data['files'].items():
                        files[os.path.join(data_dir, "chats", self.get_name(), message_id, file_name)] = file_type
                    message_element.add_attachments(files)
                message_element.set_text(message_data['content'])
                message_element.add_footer(datetime.datetime.strptime(message_data['date'] + (":00" if message_data['date'].count(":") == 1 else ""), '%Y/%m/%d %H:%M:%S'))
                GLib.idle_add(message_element.set_text, message_data['content'])
                GLib.idle_add(message_element.add_footer, datetime.datetime.strptime(message_data['date'] + (":00" if message_data['date'].count(":") == 1 else ""), '%Y/%m/%d %H:%M:%S'))
        else:
            self.show_welcome_screen(len(window.model_manager.get_model_list()) > 0)

@@ -436,6 +437,10 @@ class chat_list(Gtk.ListBox):
        if row:
            current_tab_i = next((i for i, t in enumerate(self.tab_list) if t.chat_window == window.chat_stack.get_visible_child()), -1)
            if self.tab_list.index(row) != current_tab_i:
                if window.searchentry_messages.get_text() != '':
                    window.searchentry_messages.set_text('')
                    window.message_search_changed(window.searchentry_messages, window.chat_stack.get_visible_child())
                window.message_searchbar.set_search_mode(False)
                window.chat_stack.set_transition_type(4 if self.tab_list.index(row) > current_tab_i else 5)
                window.chat_stack.set_visible_child(row.chat_window)
                window.switch_send_stop_button(not row.chat_window.busy)
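The chat_widget.py hunks above (and the message_widget.py hunks below) swap direct widget calls such as message_element.set_text(...) for GLib.idle_add(...): history loading and message generation run on worker threads, while GTK widgets may only be touched from the main loop. A minimal sketch of that pattern, assuming GTK 4 with a display available (the label and worker below are illustrative, not Alpaca code):

```python
import threading, time
import gi
gi.require_version('Gtk', '4.0')
from gi.repository import GLib, Gtk

Gtk.init()
loop = GLib.MainLoop()
label = Gtk.Label()

def worker():
    time.sleep(0.1)  # stand-in for slow work (loading history, streaming tokens)
    # Marshal widget updates onto the main loop instead of touching the
    # label directly from this thread; an idle callback that returns
    # None/False runs exactly once.
    GLib.idle_add(label.set_text, "done")
    GLib.idle_add(loop.quit)

threading.Thread(target=worker, daemon=True).start()
loop.run()
print(label.get_text())  # -> done
```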
src/custom_widgets/message_widget.py

@@ -10,6 +10,7 @@ from gi.repository import Gtk, GObject, Gio, Adw, GtkSource, GLib, Gdk
import logging, os, datetime, re, shutil, threading, sys
from ..internal import config_dir, data_dir, cache_dir, source_dir
from .table_widget import TableWidget
from .. import dialogs

logger = logging.getLogger(__name__)

@@ -59,7 +60,7 @@ class text_block(Gtk.Label):
            selectable=True
        )
        self.update_property([4, 7], [_("Response message") if bot else _("User message"), False])
        self.connect('notify::has-focus', lambda *_: None if self.has_focus() else self.remove_selection() )
        self.connect('notify::has-focus', lambda *_: GLib.idle_add(self.remove_selection) if self.has_focus() else None)

    def remove_selection(self):
        self.set_selectable(False)

@@ -103,10 +104,14 @@ class code_block(Gtk.Box):
        self.source_view.update_property([4], [_("{}Code Block").format('{} '.format(self.language.get_name()) if self.language else "")])

        title_box = Gtk.Box(margin_start=12, margin_top=3, margin_bottom=3, margin_end=3)
        title_box.append(Gtk.Label(label=self.language.get_name() if self.language else _("Code Block"), hexpand=True, xalign=0))
        title_box.append(Gtk.Label(label=self.language.get_name() if self.language else (language_name.title() if language_name else _("Code Block")), hexpand=True, xalign=0))
        copy_button = Gtk.Button(icon_name="edit-copy-symbolic", css_classes=["flat", "circular"], tooltip_text=_("Copy Message"))
        copy_button.connect("clicked", lambda *_: self.on_copy())
        title_box.append(copy_button)
        if language_name and language_name.lower() in ['bash', 'python3']:
            run_button = Gtk.Button(icon_name="execute-from-symbolic", css_classes=["flat", "circular"], tooltip_text=_("Run Script"))
            run_button.connect("clicked", lambda *_: self.run_script(language_name))
            title_box.append(run_button)
        self.append(title_box)
        self.append(Gtk.Separator())
        self.append(self.source_view)

@@ -121,6 +126,12 @@
        clipboard.set(text)
        window.show_toast(_("Code copied to the clipboard"), window.main_overlay)

    def run_script(self, language_name):
        logger.debug("Running script")
        start = self.buffer.get_start_iter()
        end = self.buffer.get_end_iter()
        dialogs.run_script(window, self.buffer.get_text(start, end, False), language_name)

class attachment(Gtk.Button):
    __gtype_name__ = 'AlpacaAttachment'

@@ -345,6 +356,9 @@ class action_buttons(Gtk.Box):
    def regenerate_message(self):
        chat = self.get_parent().get_parent().get_parent().get_parent().get_parent()
        message_element = self.get_parent()
        if message_element.spinner:
            message_element.container.remove(message_element.spinner)
            message_element.spinner = None
        if not chat.busy:
            message_element.set_text()
            if message_element.footer:

@@ -425,6 +439,8 @@ class message(Gtk.Overlay):
        if not self.action_buttons:
            self.action_buttons = action_buttons(self.bot)
            self.add_overlay(self.action_buttons)
        if not self.text:
            self.action_buttons.set_visible(False)

    def update_message(self, data:dict):
        chat = self.get_parent().get_parent().get_parent().get_parent()

@@ -437,7 +453,7 @@
                GLib.idle_add(vadjustment.set_value, vadjustment.get_upper())
            elif vadjustment.get_value() + 50 >= vadjustment.get_upper() - vadjustment.get_page_size():
                GLib.idle_add(vadjustment.set_value, vadjustment.get_upper() - vadjustment.get_page_size())
            self.content_children[-1].insert_at_end(data['message']['content'], False)
            GLib.idle_add(self.content_children[-1].insert_at_end, data['message']['content'], False)
            if 'done' in data and data['done']:
                window.chat_list_box.get_tab_by_name(chat.get_name()).spinner.set_visible(False)
                if window.chat_list_box.get_current_chat().get_name() != chat.get_name():

@@ -446,12 +462,19 @@
                    chat.container.remove(chat.welcome_screen)
                    chat.welcome_screen = None
                chat.stop_message()
                self.set_text(self.content_children[-1].get_label())
                self.text = self.content_children[-1].get_label()
                GLib.idle_add(self.set_text, self.content_children[-1].get_label())
                self.dt = datetime.datetime.now()
                self.add_footer(self.dt)
                GLib.idle_add(self.add_footer, self.dt)
                window.show_notification(chat.get_name(), self.text[:200] + (self.text[200:] and '...'), Gio.ThemedIcon.new("chat-message-new-symbolic"))
                window.save_history(chat)
                GLib.idle_add(window.save_history, chat)
            else:
                if self.spinner:
                    GLib.idle_add(self.container.remove, self.spinner)
                    self.spinner = None
                chat_tab = window.chat_list_box.get_tab_by_name(chat.get_name())
                if chat_tab.spinner:
                    GLib.idle_add(chat_tab.spinner.set_visible, False)
                sys.exit()

    def set_text(self, text:str=None):

@@ -461,8 +484,7 @@
        self.content_children = []
        if text:
            self.content_children = []
            code_block_pattern = re.compile(r'```(\w+)\n(.*?)\n```', re.DOTALL)
            no_lang_code_block_pattern = re.compile(r'`\n(.*?)\n`', re.DOTALL)
            code_block_pattern = re.compile(r'[```|`](\w*)\n(.*?)\n\s*[```|`]', re.DOTALL)
            table_pattern = re.compile(r'((\r?\n){2}|^)([^\r\n]*\|[^\r\n]*(\r?\n)?)+(?=(\r?\n){2}|$)', re.MULTILINE)
            bold_pattern = re.compile(r'\*\*(.*?)\*\*') #"**text**"
            code_pattern = re.compile(r'`([^`\n]*?)`') #"`text`"

@@ -481,15 +503,6 @@
                code_text = match.group(2)
                parts.append({"type": "code", "text": code_text, "language": 'python3' if language == 'python' else language})
                pos = end
            # Code blocks (No language)
            for match in no_lang_code_block_pattern.finditer(self.text):
                start, end = match.span()
                if pos < start:
                    normal_text = self.text[pos:start]
                    parts.append({"type": "normal", "text": normal_text.strip()})
                code_text = match.group(1)
                parts.append({"type": "code", "text": code_text, "language": None})
                pos = end
            # Tables
            for match in table_pattern.finditer(self.text):
                start, end = match.span()

@@ -538,8 +551,12 @@
            text_b = text_block(self.bot)
            text_b.set_visible(False)
            self.content_children.append(text_b)
            if self.spinner:
                self.container.remove(self.spinner)
                self.spinner = None
            self.spinner = Gtk.Spinner(spinning=True, margin_top=12, margin_bottom=12, hexpand=True)
            self.container.append(self.spinner)
            self.container.append(text_b)
            self.container.queue_draw()
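The set_text hunk above folds the two code-block regexes into one expression that treats the language tag as optional. A quick check of the merged pattern on made-up messages (note that the character class [```|`] matches a single backtick or pipe, which is why the fence match is deliberately loose):

```python
import re

# Same pattern as the new code_block_pattern above.
code_block_pattern = re.compile(r'[```|`](\w*)\n(.*?)\n\s*[```|`]', re.DOTALL)

samples = ["intro\n```python\nprint('hi')\n```\nmore text", "`\nls -l\n`"]
for sample in samples:
    for match in code_block_pattern.finditer(sample):
        language, body = match.group(1), match.group(2)
        print(language or "<no language>", "->", body)
# python -> print('hi')
# <no language> -> ls -l
```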
src/custom_widgets/model_widget.py

@@ -69,7 +69,8 @@ class model_selector_button(Gtk.MenuButton):
        super().__init__(
            tooltip_text=_('Select a Model'),
            child=container,
            popover=self.popover
            popover=self.popover,
            halign=3
        )

    def change_model(self, model_name:str):

@@ -158,7 +159,7 @@ class pulling_model(Gtk.ListBoxRow):
            icon_name = "media-playback-stop-symbolic",
            vexpand = False,
            valign = 3,
            css_classes = ["destructive-action", "circular"],
            css_classes = ["error", "circular"],
            tooltip_text = _("Stop Pulling '{}'").format(window.convert_model_name(model_name, 0))
        )
        stop_button.connect('clicked', lambda *_: dialogs.stop_pull_model(window, self))

@@ -237,7 +238,7 @@ class local_model(Gtk.ListBoxRow):
            icon_name = "user-trash-symbolic",
            vexpand = False,
            valign = 3,
            css_classes = ["destructive-action", "circular"],
            css_classes = ["error", "circular"],
            tooltip_text = _("Remove '{}'").format(window.convert_model_name(model_name, 0))
        )
        delete_button.connect('clicked', lambda *_, model_name=model_name: dialogs.delete_model(window, model_name))

@@ -414,6 +415,7 @@ class model_manager_container(Gtk.Box):
            spacing=12,
            orientation=1
        )

        self.pulling_list = pulling_model_list()
        self.append(self.pulling_list)
        self.local_list = local_model_list()

@@ -421,7 +423,7 @@
        self.available_list = available_model_list()
        self.append(self.available_list)
        self.model_selector = model_selector_button()
        window.header_bar.set_title_widget(self.model_selector)
        window.title_stack.add_named(self.model_selector, 'model_selector')

    def add_local_model(self, model_name:str):
        self.local_list.add_model(model_name)

@@ -470,6 +472,8 @@
        except Exception as e:
            logger.error(e)
            window.connection_error()
        window.title_stack.set_visible_child_name('model_selector')
        window.chat_list_box.update_welcome_screens(len(self.get_model_list()) > 0)

    #Should only be called when the app starts
    def update_available_list(self):

@@ -480,11 +484,37 @@
    def change_model(self, model_name:str):
        self.model_selector.change_model(model_name)

    def has_vision(self, model_name) -> bool:
        response = (
            window.ollama_instance.request(
                "POST", "api/show", json.dumps({"name": model_name})
            )
        )

        if response.status_code != 200:
            logger.error(f"Status code was {response.status_code}")
            return False

        try:
            model_info = json.loads(response.text)
            logger.debug(f"Vision for {model_name}: {'projector_info' in model_info}")
            return 'projector_info' in model_info
        except Exception as e:
            logger.error(f"Error fetching vision info: {str(e)}")
            return False

    def verify_if_image_can_be_used(self):
        logger.debug("Verifying if image can be used")
        selected = self.get_selected_model()
        if selected == None:
            return False

        # first try ollama show API.
        if self.has_vision(selected):
            return True

        # then fall back to the old method.
        selected = selected.split(":")[0]
        with open(os.path.join(source_dir, 'available_models.json'), 'r', encoding="utf-8") as f:
            if selected in [key for key, value in json.load(f).items() if value["image"]]:

@@ -529,4 +559,3 @@
        GLib.idle_add(window.chat_list_box.update_welcome_screens, len(self.get_model_list()) > 0)
        if len(list(self.pulling_list)) == 0:
            GLib.idle_add(self.pulling_list.set_visible, False)
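The new has_vision method above routes its request through window.ollama_instance; the same check can be reproduced against a local Ollama server directly. A hedged standalone sketch (the default endpoint and the example model name are assumptions, not part of this diff):

```python
import requests

def has_vision(model_name: str, base_url: str = "http://127.0.0.1:11434") -> bool:
    # /api/show returns the model's metadata; multimodal models carry a
    # projector, which shows up as a "projector_info" key.
    response = requests.post(f"{base_url}/api/show", json={"name": model_name}, timeout=10)
    if response.status_code != 200:
        return False
    return "projector_info" in response.json()

print(has_vision("llava"))  # True for vision models, False otherwise
```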
src/custom_widgets/terminal_widget.py (new file, 49 lines)
@@ -0,0 +1,49 @@
#terminal_widget.py
"""
Handles the terminal widget
"""

import gi
gi.require_version('Gtk', '4.0')
gi.require_version('Vte', '3.91')
from gi.repository import Gtk, Vte, GLib, Pango, Gdk

class terminal(Vte.Terminal):
    __gtype_name__ = 'AlpacaTerminal'

    def __init__(self, script:list):
        super().__init__(css_classes=["terminal"])
        self.set_font(Pango.FontDescription.from_string("Monospace 12"))
        self.set_clear_background(False)
        pty = Vte.Pty.new_sync(Vte.PtyFlags.DEFAULT, None)

        self.set_pty(pty)

        env = {
            'TERM': "xterm-256color",
            'SUDO_ASKPASS': "sh -c 'pkexec echo'"
        }

        pty.spawn_async(
            GLib.get_current_dir(),
            script,
            [],
            GLib.SpawnFlags.DEFAULT,
            None,
            None,
            -1,
            None,
            None
        )

        key_controller = Gtk.EventControllerKey()
        key_controller.connect("key-pressed", self.on_key_press)
        self.add_controller(key_controller)

    def on_key_press(self, controller, keyval, keycode, state):
        ctrl = state & Gdk.ModifierType.CONTROL_MASK
        shift = state & Gdk.ModifierType.SHIFT_MASK
        if ctrl and keyval == Gdk.KEY_c:
            self.copy_clipboard()
            return True
        return False
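A short usage sketch for the widget above, assuming an already-initialized GTK 4 application (the echo command is only an example; in Alpaca the wiring happens through run_terminal and the terminal_scroller/terminal_dialog template children added to window.py below):

```python
import gi
gi.require_version('Gtk', '4.0')
from gi.repository import Gtk

# Assuming `terminal` is the class defined in terminal_widget.py above,
# build the widget with an argv-style command, exactly what run_terminal passes.
term = terminal(['bash', '-c', 'echo "🦙 hello"; sleep 2'])

# Embed it in a scrolled container before presenting it in a dialog.
scroller = Gtk.ScrolledWindow(child=term, propagate_natural_height=True)
```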
src/dialogs.py

@@ -3,11 +3,11 @@
Handles UI dialogs
"""
import os
import logging, requests, threading, shutil
import logging, requests, threading, shutil, subprocess, re
from pytube import YouTube
from html2text import html2text
from gi.repository import Adw, Gtk
from .internal import cache_dir
from .internal import cache_dir, data_dir

logger = logging.getLogger(__name__)
# CLEAR CHAT | WORKS

@@ -318,7 +318,7 @@ def attach_file_response(self, file_dialog, result):
    file_type = next(key for key, value in file_types.items() if extension in value)
    if not file_type:
        return
    if file_type == 'image' and not self.verify_if_image_can_be_used():
    if file_type == 'image' and not self.model_manager.verify_if_image_can_be_used():
        self.show_toast(_("Image recognition is only available on specific models"), self.main_overlay)
        return
    self.attach_file(file.get_path(), file_type)

@@ -416,3 +416,59 @@ def attach_website(self, url):
        cancellable = None,
        callback = lambda dialog, task, url=url: attach_website_response(self, dialog, task, url)
    )

# Run Script

def run_script_response(self, dialog, task, script, language_name):
    if dialog.choose_finish(task) == "accept":
        logger.info('Running: \n{}'.format(script))
        if language_name == 'python3':
            if not os.path.isdir(os.path.join(data_dir, 'pyenv')):
                os.mkdir(os.path.join(data_dir, 'pyenv'))
            with open(os.path.join(data_dir, 'pyenv', 'main.py'), 'w') as f:
                f.write(script)
            script = [
                'echo "🐍 {}\n"'.format(_('Setting up Python environment...')),
                'python3 -m venv "{}"'.format(os.path.join(data_dir, 'pyenv')),
                '{} {}'.format(os.path.join(data_dir, 'pyenv', 'bin', 'python3').replace(' ', '\\ '), os.path.join(data_dir, 'pyenv', 'main.py').replace(' ', '\\ '))
            ]
            if os.path.isfile(os.path.join(data_dir, 'pyenv', 'requirements.txt')):
                script.insert(1, '{} install -r {} | grep -v "already satisfied"; clear'.format(os.path.join(data_dir, 'pyenv', 'bin', 'pip3'), os.path.join(data_dir, 'pyenv', 'requirements.txt')))
            else:
                with open(os.path.join(data_dir, 'pyenv', 'requirements.txt'), 'w') as f:
                    f.write('')
            script = ';\n'.join(script)

        script += '; echo "\n🦙 {}"'.format(_('Script exited'))
        if language_name == 'bash':
            script = re.sub(r'(?m)^\s*sudo', 'pkexec', script)
        if shutil.which('flatpak-spawn') and language_name == 'bash':
            sandbox = True
            try:
                process = subprocess.run(['flatpak-spawn', '--host', 'bash', '-c', 'echo "test"'], check=True)
                sandbox = False
            except Exception as e:
                pass
            if sandbox:
                script = 'echo "🦙 {}\n";'.format(_('The script is contained inside Flatpak')) + script
                self.run_terminal(['bash', '-c', script])
            else:
                self.run_terminal(['flatpak-spawn', '--host', 'bash', '-c', script])
        else:
            self.run_terminal(['bash', '-c', script])

def run_script(self, script:str, language_name:str):
    dialog = Adw.AlertDialog(
        heading=_("Run Script"),
        body=_("Make sure you understand what this script does before running it; Alpaca is not responsible for any damage to your device or data"),
        close_response="cancel"
    )
    dialog.add_response("cancel", _("Cancel"))
    dialog.add_response("accept", _("Accept"))
    dialog.set_response_appearance("accept", Adw.ResponseAppearance.SUGGESTED)
    dialog.set_default_response("accept")
    dialog.choose(
        parent = self,
        cancellable = None,
        callback = lambda dialog, task, script=script, language_name=language_name: run_script_response(self, dialog, task, script, language_name)
    )
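The bash branch of run_script_response above rewrites sudo invocations to pkexec, which can raise a graphical authentication prompt from inside the sandbox. A quick check of that substitution (the sample script is made up):

```python
import re

sample = 'sudo apt update\necho done\n  sudo systemctl restart foo\nrun_with_sudo'
# (?m)^\s*sudo anchors at line starts (consuming leading whitespace),
# so 'sudo' appearing mid-line or inside another word is left alone.
print(re.sub(r'(?m)^\s*sudo', 'pkexec', sample))
# pkexec apt update
# echo done
# pkexec systemctl restart foo
# run_with_sudo
```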
src/icons/execute-from-symbolic.svg (new file, 2 lines)
@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" height="16px" viewBox="0 0 16 16" width="16px"><path d="m 4.992188 2.996094 v 10 h 1 c 0.175781 0 0.347656 -0.039063 0.5 -0.125 l 7 -4 c 0.308593 -0.171875 0.46875 -0.523438 0.46875 -0.875 c 0 -0.351563 -0.160157 -0.703125 -0.46875 -0.875 l -7 -4 c -0.152344 -0.085938 -0.324219 -0.125 -0.5 -0.125 z m 0 0" fill="#222222"/></svg>
src/meson.build

@@ -50,7 +50,8 @@ custom_widgets = [
    'custom_widgets/table_widget.py',
    'custom_widgets/message_widget.py',
    'custom_widgets/chat_widget.py',
    'custom_widgets/model_widget.py'
    'custom_widgets/model_widget.py',
    'custom_widgets/terminal_widget.py'
]

install_data(alpaca_sources, install_dir: moduledir)
src/style.css

@@ -36,4 +36,7 @@ stacksidebar {
}
.code_block {
    font-family: monospace;
}
.terminal {
    padding: 10px;
}
src/window.py (133 changes)
@@ -32,7 +32,7 @@ gi.require_version('GdkPixbuf', '2.0')
from gi.repository import Adw, Gtk, Gdk, GLib, GtkSource, Gio, GdkPixbuf

from . import dialogs, connection_handler
from .custom_widgets import message_widget, chat_widget, model_widget
from .custom_widgets import message_widget, chat_widget, model_widget, terminal_widget
from .internal import config_dir, data_dir, cache_dir, source_dir

logger = logging.getLogger(__name__)

@@ -50,9 +50,7 @@ class AlpacaWindow(Adw.ApplicationWindow):
    _ = gettext.gettext

    #Variables
    ready = False #Used with welcome dialog
    attachments = {}
    header_bar = Gtk.Template.Child()

    #Override elements
    overrides_group = Gtk.Template.Child()

@@ -94,11 +92,12 @@ class AlpacaWindow(Adw.ApplicationWindow):
    file_preview_remove_button = Gtk.Template.Child()
    secondary_menu_button = Gtk.Template.Child()
    model_searchbar = Gtk.Template.Child()
    message_searchbar = Gtk.Template.Child()
    message_search_button = Gtk.Template.Child()
    searchentry_messages = Gtk.Template.Child()
    no_results_page = Gtk.Template.Child()
    model_link_button = Gtk.Template.Child()
    launch_dialog = Gtk.Template.Child()
    launch_status = Gtk.Template.Child()
    launch_level_bar = Gtk.Template.Child()
    title_stack = Gtk.Template.Child()
    manage_models_dialog = Gtk.Template.Child()
    model_scroller = Gtk.Template.Child()

@@ -119,6 +118,9 @@ class AlpacaWindow(Adw.ApplicationWindow):

    style_manager = Adw.StyleManager()

    terminal_scroller = Gtk.Template.Child()
    terminal_dialog = Gtk.Template.Child()

    @Gtk.Template.Callback()
    def stop_message(self, button=None):
        self.chat_list_box.get_current_chat().stop_message()

@@ -207,8 +209,7 @@ class AlpacaWindow(Adw.ApplicationWindow):
            self.welcome_carousel.scroll_to(self.welcome_carousel.get_nth_page(self.welcome_carousel.get_position()+1), True)
        else:
            self.welcome_dialog.force_close()
            if not self.ready:
                self.launch_dialog.present(self)
                self.powersaver_warning_switch.set_active(True)

    @Gtk.Template.Callback()
    def change_remote_connection(self, switcher, *_):

@@ -326,19 +327,42 @@ class AlpacaWindow(Adw.ApplicationWindow):
        self.model_manager.pulling_list.set_visible(not button.get_active() and len(list(self.model_manager.pulling_list)) > 0)
        self.model_manager.local_list.set_visible(not button.get_active() and len(list(self.model_manager.local_list)) > 0)

    @Gtk.Template.Callback()
    def message_search_toggle(self, button):
        self.message_searchbar.set_search_mode(button.get_active())

    @Gtk.Template.Callback()
    def model_search_changed(self, entry):
        results = 0
        for model in list(self.model_manager.available_list):
            model.set_visible(re.search(entry.get_text(), '{} {} {} {} {}'.format(model.get_name(), model.model_title, model.model_author, model.model_description, (_('image') if model.image_recognition else '')), re.IGNORECASE))
            if model.get_visible():
                results += 1
        if entry.get_text() and results == 0:
            self.no_results_page.set_visible(True)
            self.model_scroller.set_visible(False)
        else:
            self.model_scroller.set_visible(True)
            self.no_results_page.set_visible(False)
        if self.model_manager:
            for model in list(self.model_manager.available_list):
                model.set_visible(re.search(entry.get_text(), '{} {} {} {} {}'.format(model.get_name(), model.model_title, model.model_author, model.model_description, (_('image') if model.image_recognition else '')), re.IGNORECASE))
                if model.get_visible():
                    results += 1
            if entry.get_text() and results == 0:
                self.no_results_page.set_visible(True)
                self.model_scroller.set_visible(False)
            else:
                self.model_scroller.set_visible(True)
                self.no_results_page.set_visible(False)

    @Gtk.Template.Callback()
    def message_search_changed(self, entry, current_chat=None):
        search_term=entry.get_text()
        results = 0
        if not current_chat:
            current_chat = self.chat_list_box.get_current_chat()
        if current_chat:
            for key, message in current_chat.messages.items():
                if message and message.text:
                    message.set_visible(re.search(search_term, message.text, re.IGNORECASE))
                    for block in message.content_children:
                        if isinstance(block, message_widget.text_block):
                            if search_term:
                                highlighted_text = re.sub(f"({re.escape(search_term)})", r"<span background='yellow' bgalpha='30%'>\1</span>", block.get_text(),flags=re.IGNORECASE)
                                block.set_markup(highlighted_text)
                            else:
                                block.set_markup(block.get_text())
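The highlighter above wraps every case-insensitive match in a translucent Pango `<span>` before handing the string to `set_markup`. A minimal standalone sketch of the same idea (my illustration, assuming plain text with no pre-existing markup):

```python
import re

def highlight(text: str, term: str) -> str:
    # Wrap each case-insensitive occurrence of `term` in a Pango span
    # with a 30%-alpha yellow background, preserving the original casing.
    return re.sub(f"({re.escape(term)})",
                  r"<span background='yellow' bgalpha='30%'>\1</span>",
                  text, flags=re.IGNORECASE)

print(highlight('Alpaca says Hello, hello!', 'hello'))
```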
    @Gtk.Template.Callback()
    def on_clipboard_paste(self, textview):

@@ -347,6 +371,10 @@
        clipboard.read_text_async(None, self.cb_text_received)
        clipboard.read_texture_async(None, self.cb_image_received)

    def run_terminal(self, script:list):
        self.terminal_scroller.set_child(terminal_widget.terminal(script))
        self.terminal_dialog.present(self)

    def convert_model_name(self, name:str, mode:int) -> str: # mode=0 name:tag -> Name (tag) | mode=1 Name (tag) -> name:tag
        try:
            if mode == 0:

@@ -551,12 +579,17 @@ Generate a title following these rules:
        if self.regenerate_button:
            GLib.idle_add(self.chat_list_box.get_current_chat().remove, self.regenerate_button)
        try:
            response = self.ollama_instance.request("POST", "api/chat", json.dumps(data), lambda data, message_element=message_element: GLib.idle_add(message_element.update_message, data))
            response = self.ollama_instance.request("POST", "api/chat", json.dumps(data), lambda data, message_element=message_element: message_element.update_message(data))
            if response.status_code != 200:
                raise Exception('Network Error')
        except Exception as e:
            logger.error(e)
            self.chat_list_box.get_tab_by_name(chat.get_name()).spinner.set_visible(False)
            chat.busy = False
            GLib.idle_add(message_element.add_action_buttons)
            if message_element.spinner:
                GLib.idle_add(message_element.container.remove, message_element.spinner)
                message_element.spinner = None
            GLib.idle_add(chat.show_regenerate_button, message_element)
            GLib.idle_add(self.connection_error)
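This error path runs on a worker thread, so each widget mutation is funneled through `GLib.idle_add`, which schedules the call on the GTK main loop instead of executing it in place. A minimal sketch of the pattern (my illustration, not from this diff):

```python
from gi.repository import GLib

def report_failure(label):
    # Safe to call from any thread: idle_add queues label.set_text
    # (with its argument) to run on the GTK main loop.
    GLib.idle_add(label.set_text, 'request failed')
```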
@@ -787,19 +820,22 @@ Generate a title following these rules:
    def power_saver_toggled(self, monitor):
        self.banner.set_revealed(monitor.get_power_saver_enabled() and self.powersaver_warning_switch.get_active())

    def prepare_alpaca(self, local_port:int, remote_url:str, remote:bool, tweaks:dict, overrides:dict, bearer_token:str, idle_timer_delay:int, save:bool, show_launch_dialog:bool):
        #Show launch dialog
        if show_launch_dialog:
            GLib.idle_add(self.launch_dialog.present, self)
    def prepare_alpaca(self, local_port:int, remote_url:str, remote:bool, tweaks:dict, overrides:dict, bearer_token:str, idle_timer_delay:int, save:bool):
        #Model Manager
        self.model_manager = model_widget.model_manager_container()
        self.model_scroller.set_child(self.model_manager)

        #Chat History
        self.load_history()

        #Instance
        self.launch_level_bar.set_value(0)
        self.launch_status.set_description(_('Loading instance'))
        self.ollama_instance = connection_handler.instance(local_port, remote_url, remote, tweaks, overrides, bearer_token, idle_timer_delay)

        #Model Manager P.2
        self.model_manager.update_available_list()
        self.model_manager.update_local_list()

        #User Preferences
        self.launch_level_bar.set_value(1)
        self.launch_status.set_description(_('Applying user preferences'))
        for element in list(list(list(list(self.tweaks_group)[0])[1])[0]):
            if element.get_name() in self.ollama_instance.tweaks:
                element.set_value(self.ollama_instance.tweaks[element.get_name()])

@@ -815,34 +851,17 @@ Generate a title following these rules:
        self.remote_connection_switch.set_active(self.ollama_instance.remote)
        self.instance_idle_timer.set_value(self.ollama_instance.idle_timer_delay)

        #Model Manager
        self.model_manager = model_widget.model_manager_container()
        self.model_scroller.set_child(self.model_manager)
        self.launch_level_bar.set_value(2)
        self.launch_status.set_description(_('Updating list of local models'))
        self.model_manager.update_local_list()
        self.launch_level_bar.set_value(3)
        self.launch_status.set_description(_('Updating list of available models'))
        self.model_manager.update_available_list()

        #Chat History
        self.launch_level_bar.set_value(4)
        self.launch_status.set_description(_('Loading chats'))
        GLib.idle_add(self.load_history)
        self.launch_level_bar.set_value(5)

        #Save preferences
        if save:
            self.save_server_config()

        time.sleep(.5) #This is to prevent errors with gtk creating the launch dialog and closing it too quickly
        self.ready = True
        #Close launch dialog
        GLib.idle_add(self.launch_dialog.force_close)
        self.send_button.set_sensitive(True)
        self.attachment_button.set_sensitive(True)
        self.get_application().lookup_action('manage_models').set_enabled(True)
        self.get_application().lookup_action('preferences').set_enabled(True)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.message_searchbar.connect('notify::search-mode-enabled', lambda *_: self.message_search_button.set_active(self.message_searchbar.get_search_mode()))
        message_widget.window = self
        chat_widget.window = self
        model_widget.window = self

@@ -879,12 +898,16 @@ Generate a title following these rules:
            'export_chat': [self.chat_actions],
            'export_current_chat': [self.current_chat_actions],
            'toggle_sidebar': [lambda *_: self.split_view_overlay.set_show_sidebar(not self.split_view_overlay.get_show_sidebar()), ['F9']],
            'manage_models': [lambda *_: self.manage_models_dialog.present(self), ['<primary>m']]
            'manage_models': [lambda *_: self.manage_models_dialog.present(self), ['<primary>m']],
            'search_messages': [lambda *_: self.message_searchbar.set_search_mode(not self.message_searchbar.get_search_mode()), ['<primary>f']]
        }

        for action_name, data in universal_actions.items():
            self.get_application().create_action(action_name, data[0], data[1] if len(data) > 1 else None)

        self.get_application().lookup_action('manage_models').set_enabled(False)
        self.get_application().lookup_action('preferences').set_enabled(False)

        self.file_preview_remove_button.connect('clicked', lambda button : dialogs.remove_attached_file(self, button.get_name()))
        self.attachment_button.connect("clicked", lambda button, file_filter=self.file_filter_attachments: dialogs.attach_file(self, file_filter))
        self.create_model_name.get_delegate().connect("insert-text", lambda *_: self.check_alphanumeric(*_, ['-', '.', '_']))
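The `universal_actions` table above pairs each action name with a callback and optional accelerators, and `create_action` registers them on the application; that helper is defined outside this diff. A hedged sketch of what such a GNOME-template helper typically looks like:

```python
from gi.repository import Gio

def create_action(app, name, callback, shortcuts=None):
    # Register a stateless Gio.SimpleAction on the application and,
    # when accelerators are given, bind them to 'app.<name>'.
    action = Gio.SimpleAction.new(name, None)
    action.connect('activate', callback)
    app.add_action(action)
    if shortcuts:
        app.set_accels_for_action(f'app.{name}', shortcuts)
```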
@@ -900,14 +923,16 @@ Generate a title following these rules:
                if 'powersaver_warning' not in data:
                    data['powersaver_warning'] = True
                self.powersaver_warning_switch.set_active(data['powersaver_warning'])
                threading.Thread(target=self.prepare_alpaca, args=(data['local_port'], data['remote_url'], data['run_remote'], data['model_tweaks'], data['ollama_overrides'], data['remote_bearer_token'], round(data['idle_timer']), False, True)).start()
                threading.Thread(target=self.prepare_alpaca, args=(data['local_port'], data['remote_url'], data['run_remote'], data['model_tweaks'], data['ollama_overrides'], data['remote_bearer_token'], round(data['idle_timer']), False)).start()
            except Exception as e:
                logger.error(e)
                threading.Thread(target=self.prepare_alpaca, args=(11435, '', False, {'temperature': 0.7, 'seed': 0, 'keep_alive': 5}, {}, '', 0, True, True)).start()
                threading.Thread(target=self.prepare_alpaca, args=(11435, '', False, {'temperature': 0.7, 'seed': 0, 'keep_alive': 5}, {}, '', 0, True)).start()
                self.powersaver_warning_switch.set_active(True)
        else:
            threading.Thread(target=self.prepare_alpaca, args=(11435, '', False, {'temperature': 0.7, 'seed': 0, 'keep_alive': 5}, {}, '', 0, True, False)).start()
            self.powersaver_warning_switch.set_active(True)
            if shutil.which('ollama'):
                threading.Thread(target=self.prepare_alpaca, args=(11435, '', False, {'temperature': 0.7, 'seed': 0, 'keep_alive': 5}, {}, '', 0, True)).start()
            else:
                threading.Thread(target=self.prepare_alpaca, args=(11435, 'http://0.0.0.0:11434', True, {'temperature': 0.7, 'seed': 0, 'keep_alive': 5}, {}, '', 0, True)).start()
                self.welcome_dialog.present(self)

        if self.powersaver_warning_switch.get_active():
@@ -14,12 +14,13 @@
    <object class="AdwBreakpoint">
      <condition>max-width: 690sp</condition>
      <setter object="split_view_overlay" property="collapsed">true</setter>
      <setter object="terminal_dialog" property="width-request">400</setter>
    </object>
  </child>
  <property name="content">
    <object class="AdwOverlaySplitView" id="split_view_overlay">
      <property name="show-sidebar" bind-source="show_sidebar_button" bind-property="active" bind-flags="sync-create"/>
      <property name="sidebar-width-fraction">0.4</property>
      <property name="sidebar-width-fraction">0.3</property>
      <property name="sidebar">
        <object class="AdwToolbarView">
          <child type="top">

@@ -54,8 +55,9 @@
      </property>
      <child>
        <object class="AdwToolbarView">
          <property name="height-request">140</property>
          <child type="top">
            <object class="AdwHeaderBar" id="header_bar">
            <object class="AdwHeaderBar">
              <child type="start">
                <object class="GtkToggleButton" id="show_sidebar_button">
                  <property name="icon-name">sidebar-show-symbolic</property>

@@ -63,6 +65,40 @@
                  <property name="active" bind-source="split_view_overlay" bind-property="show-sidebar" bind-flags="sync-create"/>
                </object>
              </child>
              <child type="start">
                <object class="GtkToggleButton" id="message_search_button">
                  <property name="icon-name">edit-find-symbolic</property>
                  <property name="tooltip-text" translatable="yes">Search Messages</property>
                  <signal name="clicked" handler="message_search_toggle"/>
                </object>
              </child>
              <child type="title">
                <object class="GtkStack" id="title_stack">
                  <property name="transition_duration">100</property>
                  <property name="transition_type">1</property>
                  <child>
                    <object class="GtkStackPage">
                      <property name="name">loading</property>
                      <property name="child">
                        <object class="GtkBox">
                          <property name="orientation">0</property>
                          <property name="spacing">10</property>
                          <child>
                            <object class="GtkSpinner">
                              <property name="spinning">true</property>
                            </object>
                          </child>
                          <child>
                            <object class="GtkLabel">
                              <property name="label" translatable="yes">Loading Instance</property>
                            </object>
                          </child>
                        </object>
                      </property>
                    </object>
                  </child>
                </object>
              </child>
              <child type="end">
                <object class="GtkMenuButton" id="secondary_menu_button">
                  <property name="primary">False</property>

@@ -73,6 +109,24 @@
                </child>
              </object>
            </child>
            <child type="top">
              <object class="GtkSearchBar" id="message_searchbar">
                <accessibility>
                  <property name="label" translatable="yes">Message search bar</property>
                </accessibility>
                <property name="key-capture-widget">AlpacaWindow</property>
                <child>
                  <object class="GtkSearchEntry" id="searchentry_messages">
                    <signal name="search-changed" handler="message_search_changed"/>
                    <property name="search-delay">200</property>
                    <property name="placeholder-text" translatable="yes">Search messages</property>
                    <accessibility>
                      <property name="label" translatable="yes">Search messages</property>
                    </accessibility>
                  </object>
                </child>
              </object>
            </child>
            <property name="content">
              <object class="GtkBox"><!--ACTUAL CONTENT-->
                <property name="orientation">1</property>

@@ -127,6 +181,7 @@
    <object class="GtkButton" id="attachment_button">
      <property name="vexpand">false</property>
      <property name="valign">3</property>
      <property name="sensitive">false</property>
      <property name="tooltip-text" translatable="yes">Attach File</property>
      <style>
        <class name="circular"/>

@@ -178,6 +233,7 @@
      <property name="vexpand">false</property>
      <property name="valign">3</property>
      <property name="tooltip-text" translatable="yes">Send Message</property>
      <property name="sensitive">false</property>
      <style>
        <class name="accent"/>
        <class name="circular"/>

@@ -420,22 +476,24 @@
    </child>
  </object>

  <object class="AdwDialog" id="launch_dialog">
  <object class="AdwDialog" id="terminal_dialog">
    <accessibility>
      <property name="label" translatable="yes">Loading Alpaca dialog</property>
      <property name="label" translatable="yes">Manage models dialog</property>
    </accessibility>
    <property name="width-request">400</property>
    <property name="can-close">false</property>
    <property name="title" translatable="yes">Terminal</property>
    <property name="can-close">true</property>
    <property name="width-request">600</property>
    <property name="height-request">600</property>
    <child>
      <object class="AdwStatusPage" id="launch_status">
        <property name="icon_name">com.jeffser.Alpaca</property>
        <property name="title" translatable="yes">Loading Alpaca...</property>
        <property name="child">
          <object class="GtkLevelBar" id="launch_level_bar">
            <property name="mode">1</property>
            <property name="min-value">0</property>
            <property name="max-value">5</property>
          </object>
      <object class="AdwToolbarView">
        <style>
          <class name="osd"/>
        </style>
        <child type="top">
          <object class="AdwHeaderBar"/>
        </child>
        <property name="content">
          <object class="GtkScrolledWindow" id="terminal_scroller"/>
        </property>
      </object>
    </child>

@@ -501,6 +559,7 @@
    <property name="vexpand">true</property>
    <child>
      <object class="GtkScrolledWindow" id="model_scroller">
        <property name="hscrollbar-policy">2</property>
        <property name="hexpand">true</property>
        <property name="vexpand">true</property>
      </object>

@@ -24,3 +24,7 @@ echo "Updating Ukrainian"
msgmerge --no-fuzzy-matching -U po/uk.po po/alpaca.pot
echo "Updating German"
msgmerge --no-fuzzy-matching -U po/de.po po/alpaca.pot
echo "Updating Hebrew"
msgmerge --no-fuzzy-matching -U po/he.po po/alpaca.pot
echo "Updating Telugu"
msgmerge --no-fuzzy-matching -U po/te.po po/alpaca.pot
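The update script grows by the same two lines per locale; a further language would follow the pattern below (a sketch, where `xx` and `Xxxx` are placeholders for a locale that is not part of this diff):

```BASH
echo "Updating Xxxx"
msgmerge --no-fuzzy-matching -U po/xx.po po/alpaca.pot
```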