default_backend: ollama

backends:
  ollama:
    host: "localhost:11434"
    model: "codellama"
    temperature: 0.1
    max_tokens: 500
  llama_cpp:
    model_path: "~/.cache/llama-cpp/models/"
    n_ctx: 2048
    n_threads: 4
    temperature: 0.1
    max_tokens: 500

shell:
  default: bash
  supported:
    - bash
    - zsh

safety:
  auto_execute_safe: false
  require_confirmation: true
  dangerous_patterns:
    - "rm -rf"
    - "rm -r /"
    - "dd if="
    - "mkfs"
    - "format"
    - "shred"
    - "fork bomb"
  safe_patterns:
    - "ls"
    - "cat"
    - "grep"
    - "find"
    - "echo"
    - "pwd"
    - "cd"
    - "git"
    - "npm install"
    - "pip install"

output:
  theme: default
  syntax_highlighting: true
  show_explanation: true

history:
  enabled: true
  path: "~/.shellgen/history.db"
  max_entries: 1000
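
A minimal sketch of how the safety section above might be consumed, assuming the config is loaded with PyYAML from a path like ~/.shellgen/config.yaml (the path, the classify_command function, and the substring/prefix matching logic are illustrative assumptions, not the tool's actual implementation):

import os
import yaml  # PyYAML

def classify_command(command: str, config: dict) -> str:
    """Return 'dangerous', 'safe', or 'confirm' for a generated command.

    Hypothetical helper: names and matching rules are assumptions for
    illustration, not shellgen's real API.
    """
    safety = config["safety"]
    # Dangerous patterns always win, regardless of other settings.
    if any(pattern in command for pattern in safety["dangerous_patterns"]):
        return "dangerous"
    # Auto-execute only if enabled and the command starts with a known-safe pattern.
    if safety["auto_execute_safe"] and any(
        command.startswith(pattern) for pattern in safety["safe_patterns"]
    ):
        return "safe"
    # Everything else falls back to explicit confirmation if required.
    return "confirm" if safety["require_confirmation"] else "safe"

# Assumed config location; adjust to wherever this file actually lives.
with open(os.path.expanduser("~/.shellgen/config.yaml")) as f:
    config = yaml.safe_load(f)

print(classify_command("rm -rf /tmp/cache", config))  # dangerous
print(classify_command("ls -la", config))             # confirm (auto_execute_safe is false)

Note that with these defaults (auto_execute_safe: false, require_confirmation: true), every non-dangerous command still requires confirmation; the safe_patterns list only takes effect once auto-execution is enabled.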