# Local Code Assistant Configuration
# Copy this file to .env and modify the values as needed

# Ollama API endpoint URL
# Default: http://localhost:11434
OLLAMA_BASE_URL=http://localhost:11434

# Default model to use for code assistance
# Available models: codellama, llama3, mistral, deepseek-coder, etc.
# Run 'ollama list' to see available models
OLLAMA_MODEL=codellama

# Request timeout in seconds
OLLAMA_TIMEOUT=8000

# Path to user configuration file
CONFIG_PATH=~/.config/local-code-assistant/config.yaml

# Enable verbose logging
VERBOSE=false

# Enable streaming responses
STREAMING=true
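
# Quick verification (a sketch, assuming a locally running Ollama daemon; the commands
# below are standard Ollama CLI/API calls, not part of this project):
#   curl http://localhost:11434/api/tags   # lists the models served at OLLAMA_BASE_URL
#   ollama pull codellama                  # download the model named in OLLAMA_MODEL
#   ollama list                            # confirm the model is installed locally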