mirror of
https://github.com/XNM1/linux-nixos-hyprland-config-dotfiles.git
synced 2025-09-15 09:45:58 +03:00
feat(ai): 🤖 update LLM models and configuration
- Changed default model from `ollama:llama3.2:3b` to `ollama:gemma3:4b`
- Updated model token limits and added new models:
  - Added `gemma3:4b` with vision support
  - Removed `phi4:14b` and added `phi4-reasoning:14b`
  - Updated token limits for existing models
- Added new AI-related roles:
  - `commit-message.md`
  - `email-answer.md`
  - `emoji-commit-message.md`
  - `git-branch.md`
  - `improve-prompt.md`
  - `improve-writing.md`
  - `linkedin-answer.md`
- Enhanced `to-emoji.md` and added `to-emojies.md`
- Added `ai` alias for `aichat` in fish config
- Updated NixOS configuration to load new models
This commit is contained in:
@@ -4,7 +4,7 @@
|
||||
|
||||
services.ollama = {
|
||||
enable = true;
|
||||
loadModels = [ "llama3.2:3b" "llama3.2-vision:11b" "phi4-reasoning:14b" "deepseek-r1:7b" "dolphin3:8b" "smallthinker:3b" "nomic-embed-text" "gemma3:12b" "gemma3:27b" "deepcoder:14b" "qwen3:14b" ];
|
||||
loadModels = [ "llama3.2:3b" "phi4-reasoning:14b" "dolphin3:8b" "smallthinker:3b" "gemma3:4b" "gemma3:12b" "gemma3:27b" "deepcoder:14b" "qwen3:14b" "nomic-embed-text" ];
|
||||
acceleration = "cuda";
|
||||
};
|
||||
|
||||
@@ -33,6 +33,7 @@
|
||||
oterm
|
||||
alpaca
|
||||
aichat
|
||||
fabric-ai
|
||||
aider-chat
|
||||
|
||||
# tgpt
|
||||
|
Reference in New Issue
Block a user