Compare commits
13 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 6d8d3788a6 | |
| | 98e23e0033 | |
| | 4d7aff3458 | |
| | 33e47696dc | |
| | c0f8825f83 | |
| | 7c26956cd4 | |
| | 52f02cd5d0 | |
| | 0df6b20147 | |
| | 253a2dda7d | |
| | d762a85130 | |
| | a765e8cf2e | |
| | 3e7fd1140c | |
| | a56631510d | |
@@ -122,16 +122,16 @@
     "sources": [
         {
             "type": "file",
-            "url": "https://github.com/ollama/ollama/releases/download/v0.2.8/ollama-linux-amd64",
-            "sha256": "7641b21e9d0822ba44e494f5ed3d3796d9e9fcdf4dbb66064f8c34c865bbec0b",
+            "url": "https://github.com/ollama/ollama/releases/download/v0.3.0/ollama-linux-amd64",
+            "sha256": "b8817c34882c7ac138565836ac1995a2c61261a79315a13a0aebbfe5435da855",
             "only-arches": [
                 "x86_64"
             ]
         },
         {
             "type": "file",
-            "url": "https://github.com/ollama/ollama/releases/download/v0.2.8/ollama-linux-arm64",
-            "sha256": "8ccaea237c3ef2a34d0cc00d8a89ffb1179d5c49211b6cbdf80d8d88e3f0add6",
+            "url": "https://github.com/ollama/ollama/releases/download/v0.3.0/ollama-linux-arm64",
+            "sha256": "64be908749212052146f1008dd3867359c776ac1766e8d86291886f53d294d4d",
             "only-arches": [
                 "aarch64"
             ]
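For anyone refreshing these pins later, a minimal Python sketch for re-checking a downloaded release binary against the sha256 values above (the file names and command-line usage here are assumptions for illustration, not part of the manifest):

```python
import hashlib
import os
import sys

# sha256 values pinned in the manifest hunk above (Ollama v0.3.0 binaries).
EXPECTED = {
    "ollama-linux-amd64": "b8817c34882c7ac138565836ac1995a2c61261a79315a13a0aebbfe5435da855",
    "ollama-linux-arm64": "64be908749212052146f1008dd3867359c776ac1766e8d86291886f53d294d4d",
}

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in 1 MiB chunks so the binary never sits fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    path = sys.argv[1]  # path to a locally downloaded release binary
    expected = EXPECTED[os.path.basename(path)]
    print("OK" if sha256_of(path) == expected else "HASH MISMATCH")
```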
@@ -78,6 +78,32 @@
     <url type="contribute">https://github.com/Jeffser/Alpaca/discussions/154</url>
     <url type="vcs-browser">https://github.com/Jeffser/Alpaca</url>
     <releases>
+        <release version="1.0.2" date="2024-07-29">
+            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/1.0.2</url>
+            <description>
+                <p>Fixes</p>
+                <ul>
+                    <li>Models with capital letters on their tag don't work</li>
+                    <li>Ollama fails to launch on some systems</li>
+                    <li>YouTube transcripts are not being saved in the right TMP directory</li>
+                </ul>
+                <p>New</p>
+                <ul>
+                    <li>Debug messages are now shown on the 'About Alpaca' dialog</li>
+                    <li>Updated Ollama to v0.3.0 (new models)</li>
+                </ul>
+            </description>
+        </release>
+        <release version="1.0.1" date="2024-07-23">
+            <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/1.0.1</url>
+            <description>
+                <p>Fixes</p>
+                <ul>
+                    <li>Models with '-' in their names didn't work properly, this is now fixed</li>
+                    <li>Better connection check for Ollama</li>
+                </ul>
+            </description>
+        </release>
         <release version="1.0.0" date="2024-07-22">
             <url type="details">https://github.com/Jeffser/Alpaca/releases/tag/1.0.0</url>
             <description>
@@ -1,5 +1,5 @@
 project('Alpaca', 'c',
-          version: '1.0.0',
+          version: '1.0.2',
           meson_version: '>= 0.62.0',
   default_options: [ 'warning_level=2', 'werror=false', ],
 )
File diff suppressed because it is too large
@@ -1,16 +1,18 @@
descriptions = {
    'llama3.1': _("Llama 3.1 is a new state-of-the-art model from Meta available in 8B, 70B and 405B parameter sizes."),
    'gemma2': _("Google Gemma 2 is now available in 2 sizes, 9B and 27B."),
    'llama3': _("Meta Llama 3: The most capable openly available LLM to date"),
    'mistral-nemo': _("A state-of-the-art 12B model with 128k context length, built by Mistral AI in collaboration with NVIDIA."),
    'mistral-large': _("Mistral Large 2 is Mistral's new flagship model that is significantly more capable in code generation, mathematics, and reasoning with 128k context window and support for dozens of languages."),
    'qwen2': _("Qwen2 is a new series of large language models from Alibaba group"),
    'deepseek-coder-v2': _("An open-source Mixture-of-Experts code language model that achieves performance comparable to GPT4-Turbo in code-specific tasks."),
    'phi3': _("Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft."),
    'aya': _("Aya 23, released by Cohere, is a new family of state-of-the-art, multilingual models that support 23 languages."),
    'mistral': _("The 7B model released by Mistral AI, updated to version 0.3."),
    'mixtral': _("A set of Mixture of Experts (MoE) model with open weights by Mistral AI in 8x7b and 8x22b parameter sizes."),
    'codegemma': _("CodeGemma is a collection of powerful, lightweight models that can perform a variety of coding tasks like fill-in-the-middle code completion, code generation, natural language understanding, mathematical reasoning, and instruction following."),
    'command-r': _("Command R is a Large Language Model optimized for conversational interaction and long context tasks."),
    'command-r-plus': _("Command R+ is a powerful, scalable large language model purpose-built to excel at real-world enterprise use cases."),
    'llava': _("🌋 LLaVA is a novel end-to-end trained large multimodal model that combines a vision encoder and Vicuna for general-purpose visual and language understanding. Updated to version 1.6."),
    'llama3': _("Meta Llama 3: The most capable openly available LLM to date"),
    'gemma': _("Gemma is a family of lightweight, state-of-the-art open models built by Google DeepMind. Updated to version 1.1"),
    'qwen': _("Qwen 1.5 is a series of large language models by Alibaba Cloud spanning from 0.5B to 110B parameters"),
    'llama2': _("Llama 2 is a collection of foundation language models ranging from 7B to 70B parameters."),
@@ -18,49 +20,50 @@ descriptions = {
    'dolphin-mixtral': _("Uncensored, 8x7b and 8x22b fine-tuned models based on the Mixtral mixture of experts models that excels at coding tasks. Created by Eric Hartford."),
    'nomic-embed-text': _("A high-performing open embedding model with a large token context window."),
    'llama2-uncensored': _("Uncensored Llama 2 model by George Sung and Jarrad Hope."),
    'deepseek-coder': _("DeepSeek Coder is a capable coding model trained on two trillion code and natural language tokens."),
    'phi': _("Phi-2: a 2.7B language model by Microsoft Research that demonstrates outstanding reasoning and language understanding capabilities."),
    'deepseek-coder': _("DeepSeek Coder is a capable coding model trained on two trillion code and natural language tokens."),
    'dolphin-mistral': _("The uncensored Dolphin model based on Mistral that excels at coding tasks. Updated to version 2.8."),
    'orca-mini': _("A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware."),
    'dolphin-llama3': _("Dolphin 2.9 is a new model with 8B and 70B sizes by Eric Hartford based on Llama 3 that has a variety of instruction, conversational, and coding skills."),
    'mxbai-embed-large': _("State-of-the-art large embedding model from mixedbread.ai"),
    'mistral-openorca': _("Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset."),
    'starcoder2': _("StarCoder2 is the next generation of transparently trained open code LLMs that comes in three sizes: 3B, 7B and 15B parameters."),
    'zephyr': _("Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants."),
    'mistral-openorca': _("Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset."),
    'yi': _("Yi 1.5 is a high-performing, bilingual language model."),
    'zephyr': _("Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants."),
    'llama2-chinese': _("Llama 2 based model fine tuned to improve Chinese dialogue ability."),
    'llava-llama3': _("A LLaVA model fine-tuned from Llama 3 Instruct with better scores in several benchmarks."),
    'vicuna': _("General use chat model based on Llama and Llama 2 with 2K to 16K context sizes."),
    'nous-hermes2': _("The powerful family of models by Nous Research that excels at scientific discussion and coding tasks."),
    'wizard-vicuna-uncensored': _("Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford."),
    'tinyllama': _("The TinyLlama project is an open endeavor to train a compact 1.1B Llama model on 3 trillion tokens."),
    'wizard-vicuna-uncensored': _("Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford."),
    'codestral': _("Codestral is Mistral AI’s first-ever code model designed for code generation tasks."),
    'starcoder': _("StarCoder is a code generation model trained on 80+ programming languages."),
    'wizardlm2': _("State of the art large language model from Microsoft AI with improved performance on complex chat, multilingual, reasoning and agent use cases."),
    'openchat': _("A family of open-source models trained on a wide variety of data, surpassing ChatGPT on various benchmarks. Updated to version 3.5-0106."),
    'aya': _("Aya 23, released by Cohere, is a new family of state-of-the-art, multilingual models that support 23 languages."),
    'tinydolphin': _("An experimental 1.1B parameter model trained on the new Dolphin 2.8 dataset by Eric Hartford and based on TinyLlama."),
    'openhermes': _("OpenHermes 2.5 is a 7B model fine-tuned by Teknium on Mistral with fully open datasets."),
    'wizardcoder': _("State-of-the-art code generation model"),
    'stable-code': _("Stable Code 3B is a coding model with instruct and code completion variants on par with models such as Code Llama 7B that are 2.5x larger."),
    'codeqwen': _("CodeQwen1.5 is a large language model pretrained on a large amount of code data."),
    'neural-chat': _("A fine-tuned model based on Mistral with good coverage of domain and language."),
    'wizard-math': _("Model focused on math and logic problems"),
    'neural-chat': _("A fine-tuned model based on Mistral with good coverage of domain and language."),
    'stablelm2': _("Stable LM 2 is a state-of-the-art 1.6B and 12B parameter language model trained on multilingual data in English, Spanish, German, Italian, French, Portuguese, and Dutch."),
    'all-minilm': _("Embedding models on very large sentence level datasets."),
    'granite-code': _("A family of open foundation models by IBM for Code Intelligence"),
    'all-minilm': _("Embedding models on very large sentence level datasets."),
    'phind-codellama': _("Code generation model based on Code Llama."),
    'dolphincoder': _("A 7B and 15B uncensored variant of the Dolphin model family that excels at coding, based on StarCoder2."),
    'nous-hermes': _("General use models based on Llama and Llama 2 from Nous Research."),
    'sqlcoder': _("SQLCoder is a code completion model fined-tuned on StarCoder for SQL generation tasks"),
    'llama3-gradient': _("This model extends LLama-3 8B's context length from 8k to over 1m tokens."),
    'starling-lm': _("Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness."),
    'deepseek-llm': _("An advanced language model crafted with 2 trillion bilingual tokens."),
    'yarn-llama2': _("An extension of Llama 2 that supports a context of up to 128k tokens."),
    'xwinlm': _("Conversational model based on Llama 2 that performs competitively on various benchmarks."),
    'deepseek-llm': _("An advanced language model crafted with 2 trillion bilingual tokens."),
    'llama3-chatqa': _("A model from NVIDIA based on Llama 3 that excels at conversational question answering (QA) and retrieval-augmented generation (RAG)."),
    'orca2': _("Orca 2 is built by Microsoft research, and are a fine-tuned version of Meta's Llama 2 models. The model is designed to excel particularly in reasoning."),
    'solar': _("A compact, yet powerful 10.7B large language model designed for single-turn conversation."),
    'wizardlm': _("General use model based on Llama 2."),
    'solar': _("A compact, yet powerful 10.7B large language model designed for single-turn conversation."),
    'samantha-mistral': _("A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral."),
    'dolphin-phi': _("2.7B uncensored Dolphin model by Eric Hartford, based on the Phi language model by Microsoft Research."),
    'stable-beluga': _("Llama 2 based model fine tuned on an Orca-style dataset. Originally called Free Willy."),
@@ -68,22 +71,23 @@ descriptions = {
    'bakllava': _("BakLLaVA is a multimodal model consisting of the Mistral 7B base model augmented with the LLaVA architecture."),
    'wizardlm-uncensored': _("Uncensored version of Wizard LM model"),
    'snowflake-arctic-embed': _("A suite of text embedding models by Snowflake, optimized for performance."),
    'deepseek-v2': _("A strong, economical, and efficient Mixture-of-Experts language model."),
    'medllama2': _("Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset."),
    'yarn-mistral': _("An extension of Mistral to support context windows of 64K or 128K."),
    'nous-hermes2-mixtral': _("The Nous Hermes 2 model from Nous Research, now trained over Mixtral."),
    'llama-pro': _("An expansion of Llama 2 that specializes in integrating both general language understanding and domain-specific knowledge, particularly in programming and mathematics."),
    'deepseek-v2': _("A strong, economical, and efficient Mixture-of-Experts language model."),
    'nous-hermes2-mixtral': _("The Nous Hermes 2 model from Nous Research, now trained over Mixtral."),
    'meditron': _("Open-source medical large language model adapted from Llama 2 to the medical domain."),
    'codeup': _("Great code generation model based on Llama2."),
    'nexusraven': _("Nexus Raven is a 13B instruction tuned model for function calling tasks."),
    'everythinglm': _("Uncensored Llama2 based model with support for a 16K context window."),
    'llava-phi3': _("A new small LLaVA model fine-tuned from Phi 3 Mini."),
    'codegeex4': _("A versatile model for AI software development scenarios, including code completion."),
    'glm4': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
    'magicoder': _("🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic instruction data using OSS-Instruct, a novel approach to enlightening LLMs with open-source code snippets."),
    'stablelm-zephyr': _("A lightweight chat model allowing accurate, and responsive output without requiring high-end hardware."),
    'codebooga': _("A high-performing code instruct model created by merging two existing code models."),
    'mistrallite': _("MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts."),
    'glm4': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
    'wizard-vicuna': _("A strong multi-lingual general language model with competitive performance to Llama 3."),
    'wizard-vicuna': _("Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj."),
    'duckdb-nsql': _("7B parameter text-to-SQL model made by MotherDuck and Numbers Station."),
    'megadolphin': _("MegaDolphin-2.2-120b is a transformation of Dolphin-2.2-70b created by interleaving the model with itself."),
    'goliath': _("A language model created by combining two fine-tuned Llama 2 70B models into one."),
@@ -92,12 +96,10 @@ descriptions = {
    'falcon2': _("Falcon2 is an 11B parameters causal decoder-only model built by TII and trained over 5T tokens."),
    'notus': _("A 7B chat model fine-tuned with high-quality data and based on Zephyr."),
    'dbrx': _("DBRX is an open, general-purpose LLM created by Databricks."),
    'codegeex4': _("A versatile model for AI software development scenarios, including code completion."),
    'alfred': _("A robust conversational model designed to be used for both chat and instruct use cases."),
    'internlm2': _("InternLM2.5 is a 7B parameter model tailored for practical scenarios with outstanding reasoning capability."),
    'alfred': _("A robust conversational model designed to be used for both chat and instruct use cases."),
    'llama3-groq-tool-use': _("A series of models from Groq that represent a significant advancement in open-source AI capabilities for tool use/function calling."),
    'mathstral': _("MathΣtral: a 7B model designed for math reasoning and scientific discovery by Mistral AI."),
    'mistral-nemo': _("A state-of-the-art 12B model with 128k context length, built by Mistral AI in collaboration with NVIDIA."),
    'firefunction-v2': _("An open weights function calling model based on Llama 3, competitive with GPT-4o function calling capabilities."),
    'nuextract': _("A 3.8B model fine-tuned on a private high-quality synthetic dataset for information extraction, based on Phi-3."),
}
@@ -285,9 +285,9 @@ def youtube_caption_response(self, dialog, task, video_url, caption_drop_down):
         selected_caption = caption_drop_down.get_selected_item().get_string()
         for event in yt.captions[selected_caption.split(' | ')[1]].json_captions['events']:
             text += "{}\n".format(event['segs'][0]['utf8'].replace('\n', '\\n'))
-        if not os.path.exists('/tmp/alpaca/youtube'):
-            os.makedirs('/tmp/alpaca/youtube')
-        file_path = os.path.join('/tmp/alpaca/youtube', f'{yt.title} ({selected_caption.split(" | ")[0]})')
+        if not os.path.exists(os.path.join(self.cache_dir, 'tmp/youtube')):
+            os.makedirs(os.path.join(self.cache_dir, 'tmp/youtube'))
+        file_path = os.path.join(os.path.join(self.cache_dir, 'tmp/youtube'), f'{yt.title} ({selected_caption.split(" | ")[0]})')
         with open(file_path, 'w+') as f:
             f.write(text)
         self.attach_file(file_path, 'youtube')
@@ -12,10 +12,13 @@ data_dir = os.getenv("XDG_DATA_HOME")
 overrides = {}

 def start():
+    if not os.path.isdir(os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp/ollama')):
+        os.mkdir(os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp/ollama'))
     global instance, overrides
     params = overrides.copy()
     params["OLLAMA_HOST"] = f"127.0.0.1:{port}" # You can't change this directly sorry :3
     params["HOME"] = data_dir
+    params["TMPDIR"] = os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp/ollama')
     instance = subprocess.Popen(["/app/bin/ollama", "serve"], env={**os.environ, **params}, stderr=subprocess.PIPE, text=True)
     logger.info("Starting Alpaca's Ollama instance...")
     logger.debug(params)
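As an aside on the start() change above, a toy sketch of the env-merge pattern it relies on (the variable values below are made up for illustration):

```python
import os
import subprocess

# Entries in `params` come last in the merge, so they shadow anything
# inherited from the parent environment, which is how TMPDIR and
# OLLAMA_HOST get forced onto the spawned `ollama serve` process.
params = {"TMPDIR": "/tmp/example-override", "OLLAMA_HOST": "127.0.0.1:11435"}
merged = {**os.environ, **params}
assert merged["TMPDIR"] == "/tmp/example-override"

# The child process only sees the merged mapping passed via `env=`.
subprocess.run(["env"], env=merged)
```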
src/main.py
@@ -20,6 +20,7 @@
 import sys
 import logging
 import gi
+import os

 gi.require_version('Gtk', '4.0')
 gi.require_version('Adw', '1')
@@ -34,13 +35,13 @@ logger = logging.getLogger(__name__)
 class AlpacaApplication(Adw.Application):
     """The main application singleton class."""

-    def __init__(self):
+    def __init__(self, version):
         super().__init__(application_id='com.jeffser.Alpaca',
                          flags=Gio.ApplicationFlags.DEFAULT_FLAGS)
         self.create_action('quit', lambda *_: self.quit(), ['<primary>q'])
         self.create_action('preferences', lambda *_: AlpacaWindow.show_preferences_dialog(self.props.active_window), ['<primary>p'])
         self.create_action('about', self.on_about_action)
-        self.version = '1.0.0'
+        self.version = version

     def do_activate(self):
         win = self.props.active_window
@@ -61,7 +62,8 @@ class AlpacaApplication(Adw.Application):
             copyright='© 2024 Jeffser\n© 2024 Ollama',
             issue_url='https://github.com/Jeffser/Alpaca/issues',
             license_type=3,
-            website="https://jeffser.com/alpaca")
+            website="https://jeffser.com/alpaca",
+            debug_info=open(os.path.join(os.getenv("XDG_DATA_HOME"), 'tmp.log'), 'r').read())
         about.present(parent=self.props.active_window)

     def create_action(self, name, callback, shortcuts=None):
@@ -73,10 +75,17 @@ class AlpacaApplication(Adw.Application):

 def main(version):
+    if os.path.isfile(os.path.join(os.getenv("XDG_DATA_HOME"), 'tmp.log')):
+        os.remove(os.path.join(os.getenv("XDG_DATA_HOME"), 'tmp.log'))
+    if os.path.isdir(os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp')):
+        os.system('rm -rf ' + os.path.join(os.getenv("XDG_CACHE_HOME"), "tmp/*"))
+    else:
+        os.mkdir(os.path.join(os.getenv("XDG_CACHE_HOME"), 'tmp'))
     logging.basicConfig(
         format="%(levelname)s\t[%(filename)s | %(funcName)s] %(message)s",
-        level=logging.INFO
+        level=logging.INFO,
+        handlers=[logging.FileHandler(filename=os.path.join(os.getenv("XDG_DATA_HOME"), 'tmp.log')), logging.StreamHandler(stream=sys.stdout)]
     )
-    app = AlpacaApplication()
+    app = AlpacaApplication(version)
     logger.info(f"Alpaca version: {app.version}")
     return app.run(sys.argv)
@@ -183,8 +183,8 @@ class AlpacaWindow(Adw.ApplicationWindow):
         self.chats['order'].remove(self.chats['selected_chat'])
         self.chats['order'].insert(0, self.chats['selected_chat'])
         self.save_history()
-        current_model = self.model_drop_down.get_selected_item().get_string()
-        current_model = current_model.replace(' (', ':')[:-1].lower()
+        current_model = self.model_drop_down.get_selected_item().get_string().split(' (')
+        current_model = '{}:{}'.format(current_model[0].replace(' ', '-').lower(), current_model[1][:-1])
         if current_model is None:
             self.show_toast(_("Please select a model before chatting"), self.main_overlay)
             return
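To make the parsing change above concrete, a small standalone sketch; the drop-down string below is a made-up example, real entries come from Alpaca's model drop-down:

```python
def parse_selected_model(selection: str) -> str:
    """Mirrors the new parsing above: 'Name (tag)' -> 'name:tag'.

    The model name is slug-cased (spaces become '-', then lowercased) while
    the tag keeps its capitalisation, which the old one-liner lowercased away.
    """
    name, tag = selection.split(' (')
    return '{}:{}'.format(name.replace(' ', '-').lower(), tag[:-1])

# Hypothetical drop-down entry, for illustration only.
print(parse_selected_model('Mistral Nemo (12B-instruct-2407-Q4_K_M)'))
# -> mistral-nemo:12B-instruct-2407-Q4_K_M
# The replaced one-liner would have produced 'mistral nemo:12b-instruct-2407-q4_k_m',
# keeping the space and lowercasing the tag.
```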
@@ -571,11 +571,12 @@ Generate a title following these rules:
 ```PROMPT
 {message['content']}
 ```"""
-        current_model = self.model_drop_down.get_selected_item().get_string()
-        current_model = current_model.replace(' (', ':')[:-1].lower()
+        current_model = self.model_drop_down.get_selected_item().get_string().split(' (')
+        current_model = '{}:{}'.format(current_model[0].replace(' ', '-').lower(), current_model[1][:-1])
         data = {"model": current_model, "prompt": prompt, "stream": False}
         if 'images' in message: data["images"] = message['images']
         response = connection_handler.simple_post(f"{connection_handler.url}/api/generate", data=json.dumps(data))

         new_chat_name = json.loads(response.text)["response"].strip().removeprefix("Title: ").removeprefix("title: ").strip('\'"').title()
         new_chat_name = new_chat_name[:50] + (new_chat_name[50:] and '...')
         self.rename_chat(label_element.get_name(), new_chat_name, label_element)
@@ -780,13 +781,11 @@ Generate a title following these rules:
             json.dump({'remote_url': self.remote_url, 'remote_bearer_token': self.remote_bearer_token, 'run_remote': self.run_remote, 'local_port': local_instance.port, 'run_on_background': self.run_on_background, 'model_tweaks': self.model_tweaks, 'ollama_overrides': local_instance.overrides}, f, indent=6)

     def verify_connection(self):
-        response = connection_handler.simple_get(connection_handler.url)
+        response = connection_handler.simple_get(f"{connection_handler.url}/api/tags")
         if response.status_code == 200:
-            if "Ollama is running" in response.text:
-                self.save_server_config()
-                self.update_list_local_models()
-                return True
-        return False
+            self.save_server_config()
+            self.update_list_local_models()
+        return response.status_code == 200

     def add_code_blocks(self):
         text = self.bot_message.get_text(self.bot_message.get_start_iter(), self.bot_message.get_end_iter(), True)
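The same connection check, sketched outside the app for clarity; this uses the plain requests library as a stand-in for Alpaca's connection_handler, while /api/tags itself is the standard Ollama endpoint for listing local models:

```python
import requests  # stand-in for Alpaca's connection_handler wrapper

def ollama_reachable(base_url: str, token: str | None = None) -> bool:
    """A 200 from /api/tags means the server is up and actually speaks the
    Ollama API, unlike the old check against the root URL's plain-text
    'Ollama is running' banner, which a remote proxy may not pass through."""
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    try:
        return requests.get(f"{base_url}/api/tags", headers=headers, timeout=5).status_code == 200
    except requests.RequestException:
        return False

print(ollama_reachable("http://127.0.0.1:11434"))
```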