diff --git a/src/connection_handler.py b/src/connection_handler.py
index db2b071..07321cb 100644
--- a/src/connection_handler.py
+++ b/src/connection_handler.py
@@ -23,9 +23,9 @@ def log_output(pipe):
window.show_toast(_("Model request too large for system"), window.main_overlay)
elif 'msg="amdgpu detected, but no compatible rocm library found.' in line:
if bool(os.getenv("FLATPAK_ID")):
- window.ollama_information_label.set_label(_("AMD GPU detected but the extension is missing, Ollama will use CPU"))
+ window.ollama_information_label.set_label(_("AMD GPU detected but the extension is missing, Ollama will use CPU\n{}Alpaca support{}").format("", ""))
else:
- window.ollama_information_label.set_label(_("AMD GPU detected but ROCm is missing, Ollama will use CPU"))
+ window.ollama_information_label.set_label(_("AMD GPU detected but ROCm is missing, Ollama will use CPU\n{}Alpaca support{}").format("", ""))
window.ollama_information_label.set_css_classes(['dim-label', 'error'])
elif 'msg="amdgpu is supported"' in line:
window.ollama_information_label.set_label(_("Using AMD GPU type '{}'").format(line.split('=')[-1]))
diff --git a/src/window.ui b/src/window.ui
index 11fdeca..d8660a4 100644
--- a/src/window.ui
+++ b/src/window.ui
@@ -477,6 +477,7 @@
         <property name="wrap">true</property>
         <property name="use-markup">true</property>
         <property name="label" translatable="yes">Integrated Ollama instance is not running</property>
+        <property name="lines">2</property>