Compare commits
38 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 7e81200f80 |  |
|  | b952fa07b5 |  |
|  | 15bd4335e8 |  |
|  | d79a1236a0 |  |
|  | ce11a308bf |  |
|  | aa1fbcebe7 |  |
|  | efbfb1e82a |  |
|  | f497f1c5dc |  |
|  | 9ecf231307 |  |
|  | 66a9627b29 |  |
|  | f03c01b6a6 |  |
|  | 29a5251d63 |  |
|  | fcb956ff23 |  |
|  | 363fb882f3 |  |
|  | e24cbb65b1 |  |
|  | cf4d37a1c0 |  |
|  | 6394569b3b |  |
|  | e6c855fcf9 |  |
|  | c00061f46b |  |
|  | 67d572bd64 |  |
|  | 06769aba90 |  |
|  | f5845e95e6 |  |
|  | 4d529619d6 |  |
|  | 95561f205c |  |
|  | e855466280 |  |
|  | c1c30c993c |  |
|  | 7da70097f2 |  |
|  | 01b6ae6bee |  |
|  | 9c1e0ea263 |  |
|  | 22116b0d1e |  |
|  | 03d92de88b |  |
|  | 0be0942da3 |  |
|  | b488b64473 |  |
|  | 56eac5ccd6 |  |
|  | ad5d6dfa41 |  |
|  | c4fb424514 |  |
|  | 28e09d5c2e |  |
|  | 633507fecd |  |
@@ -45,6 +45,14 @@ You can find the latest stable version of the app on [Flathub](https://flathub.o
 
 Everytime a new version is published they become available on the [releases page](https://github.com/Jeffser/Alpaca/releases) of the repository
 
+### Snap Package
+
+You can also find the Snap package on the [releases page](https://github.com/Jeffser/Alpaca/releases), to install it run this command:
+
+```BASH
+sudo snap install ./{package name} --dangerous
+```
+
+The `--dangerous` comes from the package being installed without any involvement of the SnapStore, I'm working on getting the app there, but for now you can test the app this way.
+
 ### Building Git Version
 
 Note: This is not recommended since the prerelease versions of the app often present errors and general instability.
@@ -11,7 +11,8 @@
         "--device=all",
         "--socket=wayland",
         "--filesystem=/sys/module/amdgpu:ro",
-        "--env=LD_LIBRARY_PATH=/app/lib:/usr/lib/x86_64-linux-gnu/GL/default/lib:/usr/lib/x86_64-linux-gnu/openh264/extra:/usr/lib/x86_64-linux-gnu/openh264/extra:/usr/lib/sdk/llvm15/lib:/usr/lib/x86_64-linux-gnu/GL/default/lib:/usr/lib/ollama:/app/plugins/AMD/lib/ollama"
+        "--env=LD_LIBRARY_PATH=/app/lib:/usr/lib/x86_64-linux-gnu/GL/default/lib:/usr/lib/x86_64-linux-gnu/openh264/extra:/usr/lib/x86_64-linux-gnu/openh264/extra:/usr/lib/sdk/llvm15/lib:/usr/lib/x86_64-linux-gnu/GL/default/lib:/usr/lib/ollama:/app/plugins/AMD/lib/ollama",
+        "--env=GSK_RENDERER=ngl"
     ],
     "add-extensions": {
         "com.jeffser.Alpaca.Plugins": {
@@ -134,16 +135,16 @@
             "sources": [
                 {
                     "type": "archive",
-                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.11/ollama-linux-amd64.tgz",
-                    "sha256": "aa4d26889a6a413f676a7f80116983731f06287534bb72adec37dd39d168d40a",
+                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.12/ollama-linux-amd64.tgz",
+                    "sha256": "f0efa42f7ad77cd156bd48c40cd22109473801e5113173b0ad04f094a4ef522b",
                     "only-arches": [
                         "x86_64"
                     ]
                 },
                 {
                     "type": "archive",
-                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.11/ollama-linux-arm64.tgz",
-                    "sha256": "61e3a21bec7f706b404424b80602240281d9b651ca4e00e8edee4527a533a15b",
+                    "url": "https://github.com/ollama/ollama/releases/download/v0.3.12/ollama-linux-arm64.tgz",
+                    "sha256": "da631cbe4dd2c168dae58d6868b1ff60e881e050f2d07578f2f736e689fec04c",
                     "only-arches": [
                         "aarch64"
                     ]
@@ -166,6 +167,18 @@
                 }
             ]
         },
+        {
+            "name": "vte",
+            "buildsystem": "meson",
+            "config-opts": ["-Dvapi=false"],
+            "sources": [
+                {
+                    "type": "archive",
+                    "url": "https://gitlab.gnome.org/GNOME/vte/-/archive/0.78.0/vte-0.78.0.tar.gz",
+                    "sha256": "82e19d11780fed4b66400f000829ce5ca113efbbfb7975815f26ed93e4c05f2d"
+                }
+            ]
+        },
         {
             "name" : "alpaca",
             "builddir" : true,
@@ -78,6 +78,41 @@
     <url type="contribute">https://github.com/Jeffser/Alpaca/discussions/154</url>
     <url type="vcs-browser">https://github.com/Jeffser/Alpaca</url>
     <releases>
+        <release version="2.5.0" date="2024-10-06">
+            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.5.0</url>
+            <description>
+                <p>New</p>
+                <ul>
+                    <li>Run bash and python scripts straight from chat</li>
+                    <li>Updated Ollama to 0.3.12</li>
+                    <li>New models!</li>
+                </ul>
+                <p>Fixes</p>
+                <ul>
+                    <li>Fixed and made faster the launch sequence</li>
+                    <li>Better detection of code blocks in messages</li>
+                    <li>Fixed app not loading in certain setups with Nvidia GPUs</li>
+                </ul>
+            </description>
+        </release>
+        <release version="2.0.6" date="2024-09-29">
+            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.6</url>
+            <description>
+                <p>Fixes</p>
+                <ul>
+                    <li>Fixed message notification sometimes crashing text rendering because of them running on different threads</li>
+                </ul>
+            </description>
+        </release>
+        <release version="2.0.5" date="2024-09-25">
+            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.5</url>
+            <description>
+                <p>Fixes</p>
+                <ul>
+                    <li>Fixed message generation sometimes failing</li>
+                </ul>
+            </description>
+        </release>
         <release version="2.0.4" date="2024-09-22">
             <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/2.0.4</url>
             <description>
@@ -1,5 +1,5 @@
 project('Alpaca', 'c',
-        version: '2.0.4',
+        version: '2.5.0',
         meson_version: '>= 0.62.0',
         default_options: [ 'warning_level=2', 'werror=false', ],
 )
1828 po/alpaca.pot (file diff suppressed because it is too large)
1881 po/nb_NO.po (file diff suppressed because it is too large)
1828 po/pt_BR.po (file diff suppressed because it is too large)
1962 po/zh_Hans.po (file diff suppressed because it is too large)
@@ -1,9 +0,0 @@
-[Desktop Entry]
-Name=alpaca
-Exec=alpaca
-Icon=${SNAP}/meta/gui/com.jeffser.Alpaca.svg
-Terminal=false
-Type=Application
-Categories=Utility;Development;Chat;
-Keywords=ai;ollama;llm
-X-Purism-FormFactor=Workstation;Mobile;
@@ -1,150 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
-   height="128px"
-   viewBox="0 0 128 128"
-   width="128px"
-   version="1.1"
-   id="svg26"
-   sodipodi:docname="com.jeffser.Alpaca.svg"
-   inkscape:version="1.3.2 (091e20ef0f, 2023-11-25)"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:svg="http://www.w3.org/2000/svg">
-  <defs
-     id="defs26" />
-  <sodipodi:namedview
-     id="namedview26"
-     pagecolor="#ffffff"
-     bordercolor="#000000"
-     borderopacity="0.25"
-     inkscape:showpageshadow="2"
-     inkscape:pageopacity="0.0"
-     inkscape:pagecheckerboard="0"
-     inkscape:deskcolor="#d1d1d1"
-     inkscape:zoom="6.65625"
-     inkscape:cx="64"
-     inkscape:cy="64"
-     inkscape:window-width="1920"
-     inkscape:window-height="1011"
-     inkscape:window-x="0"
-     inkscape:window-y="0"
-     inkscape:window-maximized="1"
-     inkscape:current-layer="svg26" />
-  <linearGradient
-     id="a"
-     gradientUnits="userSpaceOnUse"
-     x1="48"
-     x2="48"
-     y1="88"
-     y2="48">
-    <stop
-       offset="0"
-       stop-color="#b6d1f2"
-       id="stop1" />
-    <stop
-       offset="1"
-       stop-color="#e9eef4"
-       id="stop2" />
-  </linearGradient>
-  <path
-     d="m 2 66 h 24 v 12 h -24 z m 0 0"
-     fill="#99c1f1"
-     id="path2" />
-  <path
-     d="m 12.324219 56 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 1.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -1.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#99c1f1"
-     id="path3" />
-  <g
-     fill="#5e5c64"
-     id="g7">
-    <path
-       d="m 82 101 h 8 v 23 h -8 z m 0 0"
-       id="path4" />
-    <path
-       d="m 100 101 h 8 v 23 h -8 z m 0 0"
-       id="path5" />
-    <path
-       d="m 20 101 h 8 v 23 h -8 z m 0 0"
-       id="path6" />
-    <path
-       d="m 38 101 h 8 v 23 h -8 z m 0 0"
-       id="path7" />
-  </g>
-  <path
-     d="m 40.324219 80 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 17.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -17.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#6b9bd2"
-     id="path8" />
-  <path
-     d="m 102.324219 80 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 17.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -17.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#6b9bd2"
-     id="path9" />
-  <path
-     d="m 40.324219 76 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 17.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -17.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#82adde"
-     id="path10" />
-  <path
-     d="m 102.324219 76 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 17.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -17.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#82adde"
-     id="path11" />
-  <path
-     d="m 22.324219 80 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 17.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -17.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#99c1f1"
-     id="path12" />
-  <path
-     d="m 84.324219 80 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 17.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -17.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#99c1f1"
-     id="path13" />
-  <path
-     d="m 28 58 h 72 c 8.835938 0 16 7.164062 16 16 v 18 c 0 8.835938 -7.164062 16 -16 16 h -72 c -8.835938 0 -16 -7.164062 -16 -16 v -18 c 0 -8.835938 7.164062 -16 16 -16 z m 0 0"
-     fill="#99c1f1"
-     id="path14" />
-  <path
-     d="m 22.324219 76 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 17.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -17.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#b6d1f2"
-     id="path15" />
-  <path
-     d="m 84.324219 76 h 3.351562 c 5.703125 0 10.324219 4.621094 10.324219 10.324219 v 17.351562 c 0 5.703125 -4.621094 10.324219 -10.324219 10.324219 h -3.351562 c -5.703125 0 -10.324219 -4.621094 -10.324219 -10.324219 v -17.351562 c 0 -5.703125 4.621094 -10.324219 10.324219 -10.324219 z m 0 0"
-     fill="#b6d1f2"
-     id="path16" />
-  <path
-     d="m 111 16 c -2.859375 0 -5.5 -1.523438 -6.929688 -4 c -1.425781 -2.476562 -1.425781 -5.523438 0 -8 c 1.429688 -2.476562 4.070313 -4 6.929688 -4"
-     fill="#bbd6f6"
-     id="path17" />
-  <path
-     d="m 103 8 h 8 v 16 h -8 z m 0 0"
-     fill="#99c1f1"
-     id="path18" />
-  <path
-     d="m 96 8 h 18 c 2.210938 0 4 1.789062 4 4 v 6 c 0 2.210938 -1.789062 4 -4 4 h -18 c -2.207031 0 -4 -1.789062 -4 -4 v -6 c 0 -2.210938 1.792969 -4 4 -4 z m 0 0"
-     fill="#e9eef4"
-     id="path19" />
-  <path
-     d="m 100 16 c -2.859375 0 -5.5 -1.523438 -6.929688 -4 c -1.425781 -2.476562 -1.425781 -5.523438 0 -8 c 1.429688 -2.476562 4.070313 -4 6.929688 -4"
-     fill="#e9eef4"
-     id="path20" />
-  <path
-     d="m 92 16 v 21.675781 c 0 5.71875 -4.605469 10.324219 -10.324219 10.324219 h -53.675781 c -8.863281 0 -16 7.136719 -16 16 v 24 c 0 8.863281 7.136719 16 16 16 h 72 c 8.863281 0 16 -7.136719 16 -16 v -72 z m 0 0"
-     fill="url(#a)"
-     id="path21" />
-  <path
-     d="m 92 8 h 13 v 16 h -13 z m 0 0"
-     fill="#e9eef4"
-     id="path22" />
-  <path
-     d="m 104 14 h 22 v 6 c -11.335938 0.6875 -11.078125 8.476562 -17 8 h -5 z m 0 0"
-     fill="#5e5c64"
-     id="path23" />
-  <path
-     d="m 118 14 c 0 1.378906 -1.117188 2.5 -2.5 2.5 c -1.378906 0 -2.5 -1.121094 -2.5 -2.5 s 1.121094 -2.5 2.5 -2.5 c 1.382812 0 2.5 1.121094 2.5 2.5 z m 0 0"
-     fill="#e9eef4"
-     id="path24" />
-  <path
-     d="m 113 14 c 0 1.378906 -1.121094 2.5 -2.5 2.5 c -1.382812 0 -2.5 -1.121094 -2.5 -2.5 s 1.117188 -2.5 2.5 -2.5 c 1.378906 0 2.5 1.121094 2.5 2.5 z m 0 0"
-     fill="#e9eef4"
-     id="path25" />
-  <path
-     d="m 108 14 c 0 1.378906 -1.117188 2.5 -2.5 2.5 c -1.378906 0 -2.5 -1.121094 -2.5 -2.5 s 1.121094 -2.5 2.5 -2.5 c 1.382812 0 2.5 1.121094 2.5 2.5 z m 0 0"
-     fill="#e9eef4"
-     id="path26" />
-</svg>
Before: Size 6.7 KiB
@@ -1,14 +1,10 @@
 name: alpaca
 base: core24
 adopt-info: alpaca
-summary: An Ollama client made with GTK4 and Adwaita
-description: |
-  Alpaca is an Ollama client where you can manage and chat with multiple models,
-  Alpaca provides an easy and begginer friendly way of interacting with local AI,
-  everything is open source and powered by Ollama.
-
 platforms:
   amd64:
+  arm64:
 
 confinement: strict
 grade: stable
@@ -23,21 +19,14 @@ slots:
 apps:
   alpaca:
     command: usr/bin/alpaca
-    desktop: meta/gui/alpaca.desktop
     common-id: com.jeffser.Alpaca
     extensions:
      - gnome
     plugs:
      - network
      - network-bind
-     - desktop
-     - desktop-legacy
-     - wayland
-     - opengl
      - home
      - removable-media
-    slots:
-      - dbus-alpaca
-
   ollama:
     command: bin/ollama
@@ -46,7 +35,6 @@ apps:
      - removable-media
      - network
      - network-bind
-     - opengl
 
   ollama-daemon:
     command: bin/ollama serve
@@ -58,7 +46,6 @@ apps:
      - removable-media
      - network
      - network-bind
-     - opengl
 
 parts:
   # Python dependencies
@@ -75,37 +62,31 @@ parts:
   # Ollama plugin
   ollama:
     plugin: dump
-    source: https://github.com/ollama/ollama/releases/download/v0.3.10/ollama-linux-amd64.tgz
+    source:
+     - on amd64: https://github.com/ollama/ollama/releases/download/v0.3.10/ollama-linux-amd64.tgz
+     - on arm64: https://github.com/ollama/ollama/releases/download/v0.3.10/ollama-linux-arm64.tgz
 
   # Alpaca app
   alpaca:
     plugin: meson
     source-type: git
     source: https://github.com/Jeffser/Alpaca.git
-    source-tag: '2.0.2'
     source-depth: 1
     meson-parameters:
      - --prefix=/snap/alpaca/current/usr
-    build-packages:
-     - meson
-     - ninja-build
-     - pkg-config
-     - libcairo2-dev
-     - libgtk-3-dev
-     - libglib2.0-dev
-     - gettext
-     - desktop-file-utils
-     - appstream
-    override-pull: |
-      craftctl default
-      craftctl set version=$(git describe --tags --abbrev=0)
     override-build: |
      craftctl default
      sed -i '1c#!/usr/bin/env python3' $CRAFT_PART_INSTALL/snap/alpaca/current/usr/bin/alpaca
-    stage-packages:
-     - libnuma1
    parse-info:
     - usr/share/metainfo/com.jeffser.Alpaca.metainfo.xml
    organize:
-     snap/alpaca/current/usr: usr
+     snap/alpaca/current: .
    after: [python-deps]
+
+  deps:
+    plugin: nil
+    after: [alpaca]
+    stage-packages:
+     - libnuma1
+    prime:
+     - usr/lib/*/libnuma.so.1*
@@ -31,6 +31,7 @@
     <file alias="icons/scalable/status/update-symbolic.svg">icons/update-symbolic.svg</file>
     <file alias="icons/scalable/status/down-symbolic.svg">icons/down-symbolic.svg</file>
     <file alias="icons/scalable/status/chat-bubble-text-symbolic.svg">icons/chat-bubble-text-symbolic.svg</file>
+    <file alias="icons/scalable/status/execute-from-symbolic.svg">icons/execute-from-symbolic.svg</file>
     <file preprocess="xml-stripblanks">window.ui</file>
     <file preprocess="xml-stripblanks">gtk/help-overlay.ui</file>
   </gresource>
File diff suppressed because it is too large
@@ -1,4 +1,5 @@
 descriptions = {
+    'llama3.2': _("Meta's Llama 3.2 goes small with 1B and 3B models."),
     'llama3.1': _("Llama 3.1 is a new state-of-the-art model from Meta available in 8B, 70B and 405B parameter sizes."),
     'gemma2': _("Google Gemma 2 is a high-performing and efficient model available in three sizes: 2B, 9B, and 27B."),
     'qwen2.5': _("Qwen2.5 models are pretrained on Alibaba's latest large-scale dataset, encompassing up to 18 trillion tokens. The model supports up to 128K tokens and has multilingual support."),
@@ -21,87 +22,88 @@ descriptions = {
     'llama2': _("Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters."),
     'codellama': _("A large language model that can use text prompts to generate and discuss code."),
     'nomic-embed-text': _("A high-performing open embedding model with a large token context window."),
+    'mxbai-embed-large': _("State-of-the-art large embedding model from mixedbread.ai"),
     'dolphin-mixtral': _("Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of experts models that excels at coding tasks. Created by Eric Hartford."),
     'phi': _("Phi-2: a 2.7B language model by Microsoft Research that demonstrates outstanding reasoning and language understanding capabilities."),
-    'llama2-uncensored': _("Uncensored Llama 2 model by George Sung and Jarrad Hope."),
     'deepseek-coder': _("DeepSeek Coder is a capable coding model trained on two trillion code and natural language tokens."),
-    'mxbai-embed-large': _("State-of-the-art large embedding model from mixedbread.ai"),
     'starcoder2': _("StarCoder2 is the next generation of transparently trained open code LLMs that comes in three sizes: 3B, 7B and 15B parameters."),
+    'llama2-uncensored': _("Uncensored Llama 2 model by George Sung and Jarrad Hope."),
     'dolphin-mistral': _("The uncensored Dolphin model based on Mistral that excels at coding tasks. Updated to version 2.8."),
     'zephyr': _("Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants."),
+    'yi': _("Yi 1.5 is a high-performing, bilingual language model."),
     'dolphin-llama3': _("Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on Llama 3 that has a variety of instruction, conversational, and coding skills."),
     'orca-mini': _("A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware."),
-    'yi': _("Yi 1.5 is a high-performing, bilingual language model."),
     'llava-llama3': _("A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several benchmarks."),
+    'qwen2.5-coder': _("The latest series of Code-Specific Qwen models, with significant improvements in code generation, code reasoning, and code fixing."),
     'mistral-openorca': _("Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset."),
     'starcoder': _("StarCoder is a code generation model trained on 80+ programming languages."),
     'tinyllama': _("The TinyLlama project is an open endeavor to train a compact 1.1B Llama model on 3 trillion tokens."),
-    'vicuna': _("General use chat model based on Llama and Llama 2 with 2K to 16K context sizes."),
     'codestral': _("Codestral is Mistral AI’s first-ever code model designed for code generation tasks."),
+    'vicuna': _("General use chat model based on Llama and Llama 2 with 2K to 16K context sizes."),
     'llama2-chinese': _("Llama 2 based model fine tuned to improve Chinese dialogue ability."),
+    'snowflake-arctic-embed': _("A suite of text embedding models by Snowflake, optimized for performance."),
     'wizard-vicuna-uncensored': _("Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford."),
+    'granite-code': _("A family of open foundation models by IBM for Code Intelligence"),
     'codegeex4': _("A versatile model for AI software development scenarios, including code completion."),
     'nous-hermes2': _("The powerful family of models by Nous Research that excels at scientific discussion and coding tasks."),
-    'granite-code': _("A family of open foundation models by IBM for Code Intelligence"),
+    'all-minilm': _("Embedding models on very large sentence level datasets."),
     'openchat': _("A family of open-source models trained on a wide variety of data, surpassing ChatGPT on various benchmarks. Updated to version 3.5-0106."),
     'aya': _("Aya 23, released by Cohere, is a new family of state-of-the-art, multilingual models that support 23 languages."),
-    'wizardlm2': _("State of the art large language model from Microsoft AI with improved performance on complex chat, multilingual, reasoning and agent use cases."),
     'codeqwen': _("CodeQwen1.5 is a large language model pretrained on a large amount of code data."),
+    'wizardlm2': _("State of the art large language model from Microsoft AI with improved performance on complex chat, multilingual, reasoning and agent use cases."),
     'tinydolphin': _("An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset by Eric Hartford and based on TinyLlama."),
-    'all-minilm': _("Embedding models on very large sentence level datasets."),
     'wizardcoder': _("State-of-the-art code generation model"),
     'stable-code': _("Stable Code 3B is a coding model with instruct and code completion variants on par with models such as Code Llama 7B that are 2.5x larger."),
     'openhermes': _("OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully open datasets."),
+    'qwen2-math': _("Qwen2 Math is a series of specialized math language models built upon the Qwen2 LLMs, which significantly outperforms the mathematical capabilities of open-source models and even closed-source models (e.g., GPT4o)."),
     'bakllava': _("BakLLaVA is a multimodal model consisting of the Mistral 7B base model augmented with the LLaVA architecture."),
     'stablelm2': _("Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model trained on multilingual data in English, Spanish, German, Italian, French, Portuguese, and Dutch."),
-    'qwen2-math': _("Qwen2 Math is a series of specialized math language models built upon the Qwen2 LLMs, which significantly outperforms the mathematical capabilities of open-source models and even closed-source models (e.g., GPT4o)."),
-    'wizard-math': _("Model focused on math and logic problems"),
     'llama3-gradient': _("This model extends LLama-3 8B's context length from 8k to over 1m tokens."),
-    'neural-chat': _("A fine-tuned model based on Mistral with good coverage of domain and language."),
     'deepseek-llm': _("An advanced language model crafted with 2 trillion bilingual tokens."),
+    'wizard-math': _("Model focused on math and logic problems"),
+    'glm4': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
+    'neural-chat': _("A fine-tuned model based on Mistral with good coverage of domain and language."),
+    'reflection': _("A high-performing model trained with a new technique called Reflection-tuning that teaches a LLM to detect mistakes in its reasoning and correct course."),
+    'llama3-chatqa': _("A model from NVIDIA based on Llama 3 that excels at conversational question answering (QA) and retrieval-augmented generation (RAG)."),
+    'mistral-large': _("Mistral Large 2 is Mistral's new flagship model that is significantly more capable in code generation, mathematics, and reasoning with 128k context window and support for dozens of languages."),
+    'moondream': _("moondream2 is a small vision language model designed to run efficiently on edge devices."),
+    'xwinlm': _("Conversational model based on Llama 2 that performs competitively on various benchmarks."),
     'phind-codellama': _("Code generation model based on Code Llama."),
     'nous-hermes': _("General use models based on Llama and Llama 2 from Nous Research."),
-    'xwinlm': _("Conversational model based on Llama 2 that performs competitively on various benchmarks."),
     'sqlcoder': _("SQLCoder is a code completion model fined-tuned on StarCoder for SQL generation tasks"),
     'dolphincoder': _("A 7B and 15B uncensored variant of the Dolphin model family that excels at coding, based on StarCoder2."),
-    'llama3-chatqa': _("A model from NVIDIA based on Llama 3 that excels at conversational question answering (QA) and retrieval-augmented generation (RAG)."),
     'yarn-llama2': _("An extension of Llama 2 that supports a context of up to 128k tokens."),
-    'mistral-large': _("Mistral Large 2 is Mistral's new flagship model that is significantly more capable in code generation, mathematics, and reasoning with 128k context window and support for dozens of languages."),
-    'wizardlm': _("General use model based on Llama 2."),
     'smollm': _("🪐 A family of small models with 135M, 360M, and 1.7B parameters, trained on a new high-quality dataset."),
+    'wizardlm': _("General use model based on Llama 2."),
+    'deepseek-v2': _("A strong, economical, and efficient Mixture-of-Experts language model."),
     'starling-lm': _("Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness."),
-    'reflection': _("A high-performing model trained with a new technique called Reflection-tuning that teaches a LLM to detect mistakes in its reasoning and correct course."),
-    'moondream': _("moondream2 is a small vision language model designed to run efficiently on edge devices."),
-    'snowflake-arctic-embed': _("A suite of text embedding models by Snowflake, optimized for performance."),
     'samantha-mistral': _("A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral."),
     'solar': _("A compact, yet powerful 10.7B large language model designed for single-turn conversation."),
     'orca2': _("Orca 2 is built by Microsoft research, and are a fine-tuned version of Meta's Llama 2 models. The model is designed to excel particularly in reasoning."),
-    'deepseek-v2': _("A strong, economical, and efficient Mixture-of-Experts language model."),
     'stable-beluga': _("Llama 2 based model fine tuned on an Orca-style dataset. Originally called Free Willy."),
-    'glm4': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
     'dolphin-phi': _("2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language model by Microsoft Research."),
     'wizardlm-uncensored': _("Uncensored version of Wizard LM model"),
-    'llava-phi3': _("A new small LLaVA model fine-tuned from Phi 3 Mini."),
     'hermes3': _("Hermes 3 is the latest version of the flagship Hermes series of LLMs by Nous Research"),
+    'yi-coder': _("Yi-Coder is a series of open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters."),
+    'llava-phi3': _("A new small LLaVA model fine-tuned from Phi 3 Mini."),
+    'internlm2': _("InternLM2.5 is a 7B parameter model tailored for practical scenarios with outstanding reasoning capability."),
     'yarn-mistral': _("An extension of Mistral to support context windows of 64K or 128K."),
     'llama-pro': _("An expansion of Llama 2 that specializes in integrating both general language understanding and domain-specific knowledge, particularly in programming and mathematics."),
     'medllama2': _("Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset."),
-    'yi-coder': _("Yi-Coder is a series of open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters."),
-    'internlm2': _("InternLM2.5 is a 7B parameter model tailored for practical scenarios with outstanding reasoning capability."),
     'meditron': _("Open-source medical large language model adapted from Llama 2 to the medical domain."),
     'nexusraven': _("Nexus Raven is a 13B instruction tuned model for function calling tasks."),
     'nous-hermes2-mixtral': _("The Nous Hermes 2 model from Nous Research, now trained over Mixtral."),
     'codeup': _("Great code generation model based on Llama2."),
-    'everythinglm': _("Uncensored Llama2 based model with support for a 16K context window."),
     'llama3-groq-tool-use': _("A series of models from Groq that represent a significant advancement in open-source AI capabilities for tool use/function calling."),
+    'everythinglm': _("Uncensored Llama2 based model with support for a 16K context window."),
     'magicoder': _("🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic instruction data using OSS-Instruct, a novel approach to enlightening LLMs with open-source code snippets."),
     'stablelm-zephyr': _("A lightweight chat model allowing accurate, and responsive output without requiring high-end hardware."),
     'codebooga': _("A high-performing code instruct model created by merging two existing code models."),
+    'wizard-vicuna': _("Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj."),
     'mistrallite': _("MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts."),
     'falcon2': _("Falcon2 is an 11B parameters causal decoder-only model built by TII and trained over 5T tokens."),
-    'wizard-vicuna': _("Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj."),
     'duckdb-nsql': _("7B parameter text-to-SQL model made by MotherDuck and Numbers Station."),
-    'qwen2.5-coder': _("The latest series of Code-Specific Qwen models, with significant improvements in code generation, code reasoning, and code fixing."),
+    'minicpm-v': _("A series of multimodal LLMs (MLLMs) designed for vision-language understanding."),
     'megadolphin': _("MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by interleaving the model with itself."),
     'notux': _("A top-performing mixture of experts model, fine-tuned with high-quality data."),
     'goliath': _("A language model created by combining two fine-tuned Llama 2 70B models into one."),
@@ -110,14 +112,13 @@ descriptions = {
     'bge-m3': _("BGE-M3 is a new model from BAAI distinguished for its versatility in Multi-Functionality, Multi-Linguality, and Multi-Granularity."),
     'mathstral': _("MathΣtral: a 7B model designed for math reasoning and scientific discovery by Mistral AI."),
     'dbrx': _("DBRX is an open, general-purpose LLM created by Databricks."),
-    'minicpm-v': _("A series of multimodal LLMs (MLLMs) designed for vision-language understanding."),
+    'solar-pro': _("Solar Pro Preview: an advanced large language model (LLM) with 22 billion parameters designed to fit into a single GPU"),
     'nuextract': _("A 3.8B model fine-tuned on a private high-quality synthetic dataset for information extraction, based on Phi-3."),
     'alfred': _("A robust conversational model designed to be used for both chat and instruct use cases."),
     'firefunction-v2': _("An open weights function calling model based on Llama 3, competitive with GPT-4o function calling capabilities."),
-    'solar-pro': _("Solar Pro Preview: an advanced large language model (LLM) with 22 billion parameters designed to fit into a single GPU"),
-    'bge-large': _("Embedding model from BAAI mapping texts to vectors."),
     'reader-lm': _("A series of models that convert HTML content to Markdown content, which is useful for content conversion tasks."),
+    'bge-large': _("Embedding model from BAAI mapping texts to vectors."),
     'deepseek-v2.5': _("An upgraded version of DeekSeek-V2 that integrates the general and coding abilities of both DeepSeek-V2-Chat and DeepSeek-Coder-V2-Instruct."),
-    'paraphrase-multilingual': _("Sentence-transformers model that can be used for tasks like clustering or semantic search."),
     'bespoke-minicheck': _("A state-of-the-art fact-checking model developed by Bespoke Labs."),
+    'paraphrase-multilingual': _("Sentence-transformers model that can be used for tasks like clustering or semantic search."),
 }
@@ -6,7 +6,7 @@ Handles the chat widget (testing)
 import gi
 gi.require_version('Gtk', '4.0')
 gi.require_version('GtkSource', '5')
-from gi.repository import Gtk, Gio, Adw, Gdk
+from gi.repository import Gtk, Gio, Adw, Gdk, GLib
 import logging, os, datetime, shutil, random, tempfile, tarfile, json
 from ..internal import data_dir
 from .message_widget import message
@@ -154,8 +154,8 @@ class chat(Gtk.ScrolledWindow):
                 for file_name, file_type in message_data['files'].items():
                     files[os.path.join(data_dir, "chats", self.get_name(), message_id, file_name)] = file_type
                 message_element.add_attachments(files)
-            message_element.set_text(message_data['content'])
-            message_element.add_footer(datetime.datetime.strptime(message_data['date'] + (":00" if message_data['date'].count(":") == 1 else ""), '%Y/%m/%d %H:%M:%S'))
+            GLib.idle_add(message_element.set_text, message_data['content'])
+            GLib.idle_add(message_element.add_footer, datetime.datetime.strptime(message_data['date'] + (":00" if message_data['date'].count(":") == 1 else ""), '%Y/%m/%d %H:%M:%S'))
         else:
             self.show_welcome_screen(len(window.model_manager.get_model_list()) > 0)
 
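The two `GLib.idle_add` calls above are part of the "fixed and made faster the launch sequence" work from the 2.5.0 notes: chat history is restored on a worker thread, and GTK widgets may only be touched from the main loop. A minimal sketch of the pattern, assuming PyGObject is installed (the `FakeMessage` stand-in is illustrative, not from the commit, and the queued callback fires once a GLib main loop is running):

```python
import threading
from gi.repository import GLib

class FakeMessage:
    def set_text(self, text):
        print("applied on the main loop:", text)

def load_history(message_element, content):
    # Background thread: prepare data freely, but never mutate GTK
    # widgets directly; schedule every widget change on the main loop.
    GLib.idle_add(message_element.set_text, content)

threading.Thread(target=load_history,
                 args=(FakeMessage(), "restored message"),
                 daemon=True).start()
```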
@@ -10,6 +10,7 @@ from gi.repository import Gtk, GObject, Gio, Adw, GtkSource, GLib, Gdk
 import logging, os, datetime, re, shutil, threading, sys
 from ..internal import config_dir, data_dir, cache_dir, source_dir
 from .table_widget import TableWidget
+from .. import dialogs
 
 logger = logging.getLogger(__name__)
 
@@ -103,10 +104,14 @@ class code_block(Gtk.Box):
         self.source_view.update_property([4], [_("{}Code Block").format('{} '.format(self.language.get_name()) if self.language else "")])
 
         title_box = Gtk.Box(margin_start=12, margin_top=3, margin_bottom=3, margin_end=3)
-        title_box.append(Gtk.Label(label=self.language.get_name() if self.language else _("Code Block"), hexpand=True, xalign=0))
+        title_box.append(Gtk.Label(label=self.language.get_name() if self.language else (language_name.title() if language_name else _("Code Block")), hexpand=True, xalign=0))
         copy_button = Gtk.Button(icon_name="edit-copy-symbolic", css_classes=["flat", "circular"], tooltip_text=_("Copy Message"))
         copy_button.connect("clicked", lambda *_: self.on_copy())
         title_box.append(copy_button)
+        if language_name and language_name.lower() in ['bash', 'python3']:
+            run_button = Gtk.Button(icon_name="execute-from-symbolic", css_classes=["flat", "circular"], tooltip_text=_("Run Script"))
+            run_button.connect("clicked", lambda *_: self.run_script(language_name))
+            title_box.append(run_button)
         self.append(title_box)
         self.append(Gtk.Separator())
         self.append(self.source_view)
@@ -121,6 +126,12 @@ class code_block(Gtk.Box):
         clipboard.set(text)
         window.show_toast(_("Code copied to the clipboard"), window.main_overlay)
 
+    def run_script(self, language_name):
+        logger.debug("Running script")
+        start = self.buffer.get_start_iter()
+        end = self.buffer.get_end_iter()
+        dialogs.run_script(window, self.buffer.get_text(start, end, False), language_name)
+
 class attachment(Gtk.Button):
     __gtype_name__ = 'AlpacaAttachment'
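Together, these two hunks wire the new run button: it appears only for `bash` and `python3` blocks and hands the full buffer text to `dialogs.run_script`. A reduced, self-contained sketch of the same wiring, assuming GTK 4 and GtkSource 5 (the `make_run_button` helper and `on_run` callback are illustrative names, not part of the commit):

```python
import gi
gi.require_version('Gtk', '4.0')
gi.require_version('GtkSource', '5')
from gi.repository import Gtk, GtkSource

def make_run_button(buffer: GtkSource.Buffer, language_name: str, on_run):
    # Mirrors the diff: only bash and python3 blocks get a run button.
    if not (language_name and language_name.lower() in ['bash', 'python3']):
        return None
    button = Gtk.Button(icon_name="execute-from-symbolic",
                        css_classes=["flat", "circular"],
                        tooltip_text="Run Script")
    def on_clicked(_button):
        # Read the whole buffer, exactly like code_block.run_script().
        start, end = buffer.get_start_iter(), buffer.get_end_iter()
        on_run(buffer.get_text(start, end, False), language_name)
    button.connect("clicked", on_clicked)
    return button
```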
@@ -442,7 +453,7 @@ class message(Gtk.Overlay):
                     GLib.idle_add(vadjustment.set_value, vadjustment.get_upper())
                 elif vadjustment.get_value() + 50 >= vadjustment.get_upper() - vadjustment.get_page_size():
                     GLib.idle_add(vadjustment.set_value, vadjustment.get_upper() - vadjustment.get_page_size())
-                self.content_children[-1].insert_at_end(data['message']['content'], False)
+                GLib.idle_add(self.content_children[-1].insert_at_end, data['message']['content'], False)
                 if 'done' in data and data['done']:
                     window.chat_list_box.get_tab_by_name(chat.get_name()).spinner.set_visible(False)
                     if window.chat_list_box.get_current_chat().get_name() != chat.get_name():
@@ -451,11 +462,12 @@ class message(Gtk.Overlay):
                         chat.container.remove(chat.welcome_screen)
                         chat.welcome_screen = None
                     chat.stop_message()
-                    self.set_text(self.content_children[-1].get_label())
+                    self.text = self.content_children[-1].get_label()
+                    GLib.idle_add(self.set_text, self.content_children[-1].get_label())
                     self.dt = datetime.datetime.now()
-                    self.add_footer(self.dt)
+                    GLib.idle_add(self.add_footer, self.dt)
                     window.show_notification(chat.get_name(), self.text[:200] + (self.text[200:] and '...'), Gio.ThemedIcon.new("chat-message-new-symbolic"))
-                    window.save_history(chat)
+                    GLib.idle_add(window.save_history, chat)
                 else:
                     if self.spinner:
                         GLib.idle_add(self.container.remove, self.spinner)
@@ -472,8 +484,7 @@ class message(Gtk.Overlay):
         self.content_children = []
         if text:
             self.content_children = []
-            code_block_pattern = re.compile(r'```(\w+)\n(.*?)\n```', re.DOTALL)
-            no_lang_code_block_pattern = re.compile(r'`\n(.*?)\n`', re.DOTALL)
+            code_block_pattern = re.compile(r'[```|`](\w*)\n(.*?)\n\s*[```|`]', re.DOTALL)
             table_pattern = re.compile(r'((\r?\n){2}|^)([^\r\n]*\|[^\r\n]*(\r?\n)?)+(?=(\r?\n){2}|$)', re.MULTILINE)
             bold_pattern = re.compile(r'\*\*(.*?)\*\*') #"**text**"
             code_pattern = re.compile(r'`([^`\n]*?)`') #"`text`"
@@ -492,15 +503,6 @@ class message(Gtk.Overlay):
                 code_text = match.group(2)
                 parts.append({"type": "code", "text": code_text, "language": 'python3' if language == 'python' else language})
                 pos = end
-            # Code blocks (No language)
-            for match in no_lang_code_block_pattern.finditer(self.text):
-                start, end = match.span()
-                if pos < start:
-                    normal_text = self.text[pos:start]
-                    parts.append({"type": "normal", "text": normal_text.strip()})
-                code_text = match.group(1)
-                parts.append({"type": "code", "text": code_text, "language": None})
-                pos = end
             # Tables
             for match in table_pattern.finditer(self.text):
                 start, end = match.span()
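With the separate no-language pass removed, a single pattern now drives the "better detection of code blocks" fix. As a rough illustration of the split loop above, using a simplified triple-backtick pattern rather than the commit's merged one (which additionally tolerates single-backtick fences):

```python
import re

# Simplified fence pattern for illustration only.
code_block_pattern = re.compile(r'```(\w*)\n(.*?)\n```', re.DOTALL)

def split_message(text):
    parts, pos = [], 0
    for match in code_block_pattern.finditer(text):
        start, end = match.span()
        if pos < start:
            # Everything between code fences is normal prose.
            parts.append({"type": "normal", "text": text[pos:start].strip()})
        language = match.group(1) or None
        parts.append({"type": "code", "text": match.group(2),
                      "language": 'python3' if language == 'python' else language})
        pos = end
    if pos < len(text):
        parts.append({"type": "normal", "text": text[pos:].strip()})
    return parts

print(split_message("Hello\n```python\nprint(1)\n```\nBye"))
```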
49
src/custom_widgets/terminal_widget.py
Normal file
49
src/custom_widgets/terminal_widget.py
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
#chat_widget.py
|
||||||
|
"""
|
||||||
|
Handles the terminal widget
|
||||||
|
"""
|
||||||
|
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '4.0')
|
||||||
|
gi.require_version('Vte', '3.91')
|
||||||
|
from gi.repository import Gtk, Vte, GLib, Pango, GLib, Gdk
|
||||||
|
|
||||||
|
class terminal(Vte.Terminal):
|
||||||
|
__gtype_name__ = 'AlpacaTerminal'
|
||||||
|
|
||||||
|
def __init__(self, script:list):
|
||||||
|
super().__init__(css_classes=["terminal"])
|
||||||
|
self.set_font(Pango.FontDescription.from_string("Monospace 12"))
|
||||||
|
self.set_clear_background(False)
|
||||||
|
pty = Vte.Pty.new_sync(Vte.PtyFlags.DEFAULT, None)
|
||||||
|
|
||||||
|
self.set_pty(pty)
|
||||||
|
|
||||||
|
env = {
|
||||||
|
'TERM': "xterm-256color",
|
||||||
|
'SUDO_ASKPASS': "sh -c 'pkexec echo'"
|
||||||
|
}
|
||||||
|
|
||||||
|
pty.spawn_async(
|
||||||
|
GLib.get_current_dir(),
|
||||||
|
script,
|
||||||
|
[],
|
||||||
|
GLib.SpawnFlags.DEFAULT,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
-1,
|
||||||
|
None,
|
||||||
|
None
|
||||||
|
)
|
||||||
|
|
||||||
|
key_controller = Gtk.EventControllerKey()
|
||||||
|
key_controller.connect("key-pressed", self.on_key_press)
|
||||||
|
self.add_controller(key_controller)
|
||||||
|
|
||||||
|
def on_key_press(self, controller, keyval, keycode, state):
|
||||||
|
ctrl = state & Gdk.ModifierType.CONTROL_MASK
|
||||||
|
shift = state & Gdk.ModifierType.SHIFT_MASK
|
||||||
|
if ctrl and keyval == Gdk.KEY_c:
|
||||||
|
self.copy_clipboard()
|
||||||
|
return True
|
||||||
|
return False
|
||||||
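A hedged sketch of driving the new widget on its own; the class is the one from the hunk above, while the application scaffolding and import path are illustrative. Two things stand out in the diff itself: the `env` dict (`TERM`, `SUDO_ASKPASS`) is built but never passed to `Vte.Pty.spawn_async` (the `envv` argument stays `[]`), so those variables never reach the child as written, and the key controller remaps Ctrl+C to copy the selection instead of forwarding it to the child.

```python
# Assumes the terminal_widget module above is importable; run inside a
# graphical session.
import gi
gi.require_version('Gtk', '4.0')
gi.require_version('Vte', '3.91')
from gi.repository import Gtk
from terminal_widget import terminal  # hypothetical import path

def on_activate(app):
    win = Gtk.ApplicationWindow(application=app, default_width=600, default_height=400)
    # spawn_async(workdir, argv, envv, flags, child_setup, data, timeout, cancellable, callback)
    win.set_child(terminal(['bash', '-c', 'echo hello from Alpaca; exec bash']))
    win.present()

app = Gtk.Application(application_id='com.example.TerminalDemo')
app.connect('activate', on_activate)
app.run(None)
```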
src/dialogs.py
@@ -3,11 +3,11 @@
 Handles UI dialogs
 """
 import os
-import logging, requests, threading, shutil
+import logging, requests, threading, shutil, subprocess, re
 from pytube import YouTube
 from html2text import html2text
 from gi.repository import Adw, Gtk
-from .internal import cache_dir
+from .internal import cache_dir, data_dir
 
 logger = logging.getLogger(__name__)
 # CLEAR CHAT | WORKS
@@ -416,3 +416,59 @@ def attach_website(self, url):
         cancellable = None,
         callback = lambda dialog, task, url=url: attach_website_response(self, dialog, task, url)
     )
+
+# Run Script
+
+def run_script_response(self, dialog, task, script, language_name):
+    if dialog.choose_finish(task) == "accept":
+        logger.info('Running: \n{}'.format(script))
+        if language_name == 'python3':
+            if not os.path.isdir(os.path.join(data_dir, 'pyenv')):
+                os.mkdir(os.path.join(data_dir, 'pyenv'))
+            with open(os.path.join(data_dir, 'pyenv', 'main.py'), 'w') as f:
+                f.write(script)
+            script = [
+                'echo "🐍 {}\n"'.format(_('Setting up Python environment...')),
+                'python3 -m venv "{}"'.format(os.path.join(data_dir, 'pyenv')),
+                '{} {}'.format(os.path.join(data_dir, 'pyenv', 'bin', 'python3').replace(' ', '\\ '), os.path.join(data_dir, 'pyenv', 'main.py').replace(' ', '\\ '))
+            ]
+            if os.path.isfile(os.path.join(data_dir, 'pyenv', 'requirements.txt')):
+                script.insert(1, '{} install -r {} | grep -v "already satisfied"; clear'.format(os.path.join(data_dir, 'pyenv', 'bin', 'pip3'), os.path.join(data_dir, 'pyenv', 'requirements.txt')))
+            else:
+                with open(os.path.join(data_dir, 'pyenv', 'requirements.txt'), 'w') as f:
+                    f.write('')
+            script = ';\n'.join(script)
+
+        script += '; echo "\n🦙 {}"'.format(_('Script exited'))
+        if language_name == 'bash':
+            script = re.sub(r'(?m)^\s*sudo', 'pkexec', script)
+        if shutil.which('flatpak-spawn') and language_name == 'bash':
+            sandbox = True
+            try:
+                process = subprocess.run(['flatpak-spawn', '--host', 'bash', '-c', 'echo "test"'], check=True)
+                sandbox = False
+            except Exception as e:
+                pass
+            if sandbox:
+                script = 'echo "🦙 {}\n";'.format(_('The script is contained inside Flatpak')) + script
+                self.run_terminal(['bash', '-c', script])
+            else:
+                self.run_terminal(['flatpak-spawn', '--host', 'bash', '-c', script])
+        else:
+            self.run_terminal(['bash', '-c', script])
+
+def run_script(self, script:str, language_name:str):
+    dialog = Adw.AlertDialog(
+        heading=_("Run Script"),
+        body=_("Make sure you understand what this script does before running it, Alpaca is not responsible for any damages to your device or data"),
+        close_response="cancel"
+    )
+    dialog.add_response("cancel", _("Cancel"))
+    dialog.add_response("accept", _("Accept"))
+    dialog.set_response_appearance("accept", Adw.ResponseAppearance.SUGGESTED)
+    dialog.set_default_response("accept")
+    dialog.choose(
+        parent = self,
+        cancellable = None,
+        callback = lambda dialog, task, script=script, language_name=language_name: run_script_response(self, dialog, task, script, language_name)
+    )
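Two details in `run_script_response` are easy to miss. For bash, every line-leading `sudo` is rewritten to `pkexec`, so privilege escalation goes through a polkit dialog the embedded terminal can actually show; and `flatpak-spawn --host` is probed with a harmless `echo "test"` to decide whether the script can escape the sandbox at all before falling back to an in-sandbox run (the result is then handed to `run_terminal`, added in window.py below). A standalone check of the rewrite (the sample script is made up):

```python
import re

script = 'sudo apt update\n    sudo make install\necho "sudo mid-line is untouched"'
print(re.sub(r'(?m)^\s*sudo', 'pkexec', script))
# pkexec apt update
# pkexec make install
# echo "sudo mid-line is untouched"
#
# Note: \s* is part of the match, so any indentation before 'sudo' is
# consumed by the substitution as well.
```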
src/icons/execute-from-symbolic.svg (new file, +2)
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg xmlns="http://www.w3.org/2000/svg" height="16px" viewBox="0 0 16 16" width="16px"><path d="m 4.992188 2.996094 v 10 h 1 c 0.175781 0 0.347656 -0.039063 0.5 -0.125 l 7 -4 c 0.308593 -0.171875 0.46875 -0.523438 0.46875 -0.875 c 0 -0.351563 -0.160157 -0.703125 -0.46875 -0.875 l -7 -4 c -0.152344 -0.085938 -0.324219 -0.125 -0.5 -0.125 z m 0 0" fill="#222222"/></svg>
src/meson.build
@@ -50,7 +50,8 @@ custom_widgets = [
     'custom_widgets/table_widget.py',
     'custom_widgets/message_widget.py',
     'custom_widgets/chat_widget.py',
-    'custom_widgets/model_widget.py'
+    'custom_widgets/model_widget.py',
+    'custom_widgets/terminal_widget.py'
 ]
 
 install_data(alpaca_sources, install_dir: moduledir)
src/style.css
@@ -37,3 +37,6 @@ stacksidebar {
 .code_block {
     font-family: monospace;
 }
+.terminal {
+    padding: 10px;
+}
src/window.py
@@ -32,7 +32,7 @@ gi.require_version('GdkPixbuf', '2.0')
 from gi.repository import Adw, Gtk, Gdk, GLib, GtkSource, Gio, GdkPixbuf
 
 from . import dialogs, connection_handler
-from .custom_widgets import message_widget, chat_widget, model_widget
+from .custom_widgets import message_widget, chat_widget, model_widget, terminal_widget
 from .internal import config_dir, data_dir, cache_dir, source_dir
 
 logger = logging.getLogger(__name__)
@@ -118,6 +118,9 @@ class AlpacaWindow(Adw.ApplicationWindow):
 
     style_manager = Adw.StyleManager()
 
+    terminal_scroller = Gtk.Template.Child()
+    terminal_dialog = Gtk.Template.Child()
+
     @Gtk.Template.Callback()
     def stop_message(self, button=None):
         self.chat_list_box.get_current_chat().stop_message()
@@ -351,14 +354,15 @@ class AlpacaWindow(Adw.ApplicationWindow):
         current_chat = self.chat_list_box.get_current_chat()
         if current_chat:
             for key, message in current_chat.messages.items():
-                message.set_visible(re.search(search_term, message.text, re.IGNORECASE))
-                for block in message.content_children:
-                    if isinstance(block, message_widget.text_block):
-                        if search_term:
-                            highlighted_text = re.sub(f"({re.escape(search_term)})", r"<span background='yellow' bgalpha='30%'>\1</span>", block.get_text(),flags=re.IGNORECASE)
-                            block.set_markup(highlighted_text)
-                        else:
-                            block.set_markup(block.get_text())
+                if message and message.text:
+                    message.set_visible(re.search(search_term, message.text, re.IGNORECASE))
+                    for block in message.content_children:
+                        if isinstance(block, message_widget.text_block):
+                            if search_term:
+                                highlighted_text = re.sub(f"({re.escape(search_term)})", r"<span background='yellow' bgalpha='30%'>\1</span>", block.get_text(),flags=re.IGNORECASE)
+                                block.set_markup(highlighted_text)
+                            else:
+                                block.set_markup(block.get_text())
 
     @Gtk.Template.Callback()
     def on_clipboard_paste(self, textview):
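The new `if message and message.text:` guard skips messages that are still streaming and have no text yet, which could previously make the search callback raise. The highlighting itself is a plain `re.sub` into Pango markup; a standalone sketch, plus one caveat the hunk does not address: message text containing markup-special characters (`<`, `&`) would need `GLib.markup_escape_text` before `set_markup`.

```python
import re

text = "GTK tips: gtk4 ships GTK's new renderer"
term = "gtk"
highlighted = re.sub(
    f"({re.escape(term)})",
    r"<span background='yellow' bgalpha='30%'>\1</span>",
    text,
    flags=re.IGNORECASE,
)
print(highlighted)  # every case-insensitive 'gtk' is wrapped in a yellow span
```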
@@ -367,6 +371,10 @@ class AlpacaWindow(Adw.ApplicationWindow):
         clipboard.read_text_async(None, self.cb_text_received)
         clipboard.read_texture_async(None, self.cb_image_received)
 
+    def run_terminal(self, script:list):
+        self.terminal_scroller.set_child(terminal_widget.terminal(script))
+        self.terminal_dialog.present(self)
+
     def convert_model_name(self, name:str, mode:int) -> str: # mode=0 name:tag -> Name (tag) | mode=1 Name (tag) -> name:tag
         try:
             if mode == 0:
@@ -575,6 +583,7 @@ Generate a title following these rules:
             if response.status_code != 200:
                 raise Exception('Network Error')
         except Exception as e:
+            logger.error(e)
             self.chat_list_box.get_tab_by_name(chat.get_name()).spinner.set_visible(False)
             chat.busy = False
             GLib.idle_add(message_element.add_action_buttons)
@@ -812,9 +821,20 @@ Generate a title following these rules:
         self.banner.set_revealed(monitor.get_power_saver_enabled() and self.powersaver_warning_switch.get_active())
 
     def prepare_alpaca(self, local_port:int, remote_url:str, remote:bool, tweaks:dict, overrides:dict, bearer_token:str, idle_timer_delay:int, save:bool):
+        #Model Manager
+        self.model_manager = model_widget.model_manager_container()
+        self.model_scroller.set_child(self.model_manager)
+
+        #Chat History
+        self.load_history()
+
         #Instance
         self.ollama_instance = connection_handler.instance(local_port, remote_url, remote, tweaks, overrides, bearer_token, idle_timer_delay)
+
+        #Model Manager P.2
+        self.model_manager.update_available_list()
+        self.model_manager.update_local_list()
 
         #User Preferences
         for element in list(list(list(list(self.tweaks_group)[0])[1])[0]):
             if element.get_name() in self.ollama_instance.tweaks:
@@ -831,23 +851,13 @@ Generate a title following these rules:
         self.remote_connection_switch.set_active(self.ollama_instance.remote)
         self.instance_idle_timer.set_value(self.ollama_instance.idle_timer_delay)
 
-        #Model Manager
-        self.model_manager = model_widget.model_manager_container()
-        self.model_scroller.set_child(self.model_manager)
-
-        #Chat History
-        self.load_history()
-
-        #Model Manager P.2
-        self.model_manager.update_available_list()
-        self.model_manager.update_local_list()
-        self.get_application().lookup_action("manage_models").set_enabled(True)
-
         #Save preferences
         if save:
             self.save_server_config()
 
         self.send_button.set_sensitive(True)
+        self.attachment_button.set_sensitive(True)
+        self.get_application().lookup_action('manage_models').set_enabled(True)
+        self.get_application().lookup_action('preferences').set_enabled(True)
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -894,7 +904,9 @@ Generate a title following these rules:
 
         for action_name, data in universal_actions.items():
             self.get_application().create_action(action_name, data[0], data[1] if len(data) > 1 else None)
-        self.get_application().lookup_action("manage_models").set_enabled(False)
+        self.get_application().lookup_action('manage_models').set_enabled(False)
+        self.get_application().lookup_action('preferences').set_enabled(False)
 
         self.file_preview_remove_button.connect('clicked', lambda button : dialogs.remove_attached_file(self, button.get_name()))
         self.attachment_button.connect("clicked", lambda button, file_filter=self.file_filter_attachments: dialogs.attach_file(self, file_filter))
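Taken together, the last few hunks gate the UI during startup: `manage_models` and `preferences` are disabled in `__init__` and re-enabled (along with the send and attachment buttons) only at the end of `prepare_alpaca`, once the Ollama instance exists. A minimal standalone illustration of the Gio side (demo application id and action name):

```python
from gi.repository import Gio

app = Gio.Application(application_id='com.example.Demo')
app.add_action(Gio.SimpleAction.new('manage_models', None))

app.lookup_action('manage_models').set_enabled(False)    # during __init__
# ... backend becomes ready ...
app.lookup_action('manage_models').set_enabled(True)     # end of prepare_alpaca
print(app.lookup_action('manage_models').get_enabled())  # True
```

Disabling the `Gio.SimpleAction` also greys out any menu item bound to it, so no extra per-widget bookkeeping is needed for the action-driven entries.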
src/window.ui
@@ -14,6 +14,7 @@
     <object class="AdwBreakpoint">
       <condition>max-width: 690sp</condition>
       <setter object="split_view_overlay" property="collapsed">true</setter>
+      <setter object="terminal_dialog" property="width-request">400</setter>
     </object>
   </child>
   <property name="content">
@@ -180,6 +181,7 @@
         <object class="GtkButton" id="attachment_button">
           <property name="vexpand">false</property>
           <property name="valign">3</property>
+          <property name="sensitive">false</property>
           <property name="tooltip-text" translatable="yes">Attach File</property>
           <style>
             <class name="circular"/>
@@ -474,6 +476,29 @@
       </child>
     </object>
 
+    <object class="AdwDialog" id="terminal_dialog">
+      <accessibility>
+        <property name="label" translatable="yes">Terminal dialog</property>
+      </accessibility>
+      <property name="title" translatable="yes">Terminal</property>
+      <property name="can-close">true</property>
+      <property name="width-request">600</property>
+      <property name="height-request">600</property>
+      <child>
+        <object class="AdwToolbarView">
+          <style>
+            <class name="osd"/>
+          </style>
+          <child type="top">
+            <object class="AdwHeaderBar"/>
+          </child>
+          <property name="content">
+            <object class="GtkScrolledWindow" id="terminal_scroller"/>
+          </property>
+        </object>
+      </child>
+    </object>
+
     <object class="AdwDialog" id="manage_models_dialog">
       <accessibility>
         <property name="label" translatable="yes">Manage models dialog</property>
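For context on how these ids reach the Python side: `Gtk.Template.Child()` attributes in `AlpacaWindow` bind by name to the `id` attributes added here (`terminal_dialog`, `terminal_scroller`; see the `@@ -118,6 +118,9` hunk above). A runnable stand-in using a trimmed, hypothetical version of the XML:

```python
import gi
gi.require_version('Gtk', '4.0')
gi.require_version('Adw', '1')
from gi.repository import Adw, Gtk

Adw.init()  # registers the Adw types so GtkBuilder can resolve AdwDialog

ui = """
<interface>
  <object class="AdwDialog" id="terminal_dialog">
    <property name="title">Terminal</property>
    <child>
      <object class="GtkScrolledWindow" id="terminal_scroller"/>
    </child>
  </object>
</interface>
"""
builder = Gtk.Builder.new_from_string(ui, -1)
print(builder.get_object('terminal_dialog').get_title())  # Terminal
```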