diff --git a/namespaces/ai/ollama/config.toml b/namespaces/ai/ollama/config.toml
deleted file mode 100644
index f640ec1..0000000
--- a/namespaces/ai/ollama/config.toml
+++ /dev/null
@@ -1,17 +0,0 @@
-[model.completion.http]
-kind = "ollama/completion"
-model_name = "deepseek-r1:8b"
-api_endpoint = "http://ollama:11434"
-# prompt_template = "<PRE> {prefix} <SUF>{suffix} <MID>"  # Example prompt template for the CodeLlama model series.
-
-# Chat model
-[model.chat.http]
-kind = "openai/chat"
-model_name = "deepseek-r1:8b"
-api_endpoint = "http://ollama:11434/v1"
-
-# Embedding model
-[model.embedding.http]
-kind = "ollama/embedding"
-model_name = "ordis/jina-embeddings-v2-base-code"
-api_endpoint = "http://ollama:11434"
diff --git a/namespaces/ai/tabby/config.toml b/namespaces/ai/tabby/config.toml
index f640ec1..6aa4585 100644
--- a/namespaces/ai/tabby/config.toml
+++ b/namespaces/ai/tabby/config.toml
@@ -1,17 +1,17 @@
 [model.completion.http]
 kind = "ollama/completion"
 model_name = "deepseek-r1:8b"
-api_endpoint = "http://ollama:11434"
+api_endpoint = "http://ollama-svc:11434"
 # prompt_template = "<PRE> {prefix} <SUF>{suffix} <MID>"  # Example prompt template for the CodeLlama model series.
 
 # Chat model
 [model.chat.http]
 kind = "openai/chat"
 model_name = "deepseek-r1:8b"
-api_endpoint = "http://ollama:11434/v1"
+api_endpoint = "http://ollama-svc:11434/v1"
 
 # Embedding model
 [model.embedding.http]
 kind = "ollama/embedding"
 model_name = "ordis/jina-embeddings-v2-base-code"
-api_endpoint = "http://ollama:11434"
+api_endpoint = "http://ollama-svc:11434"