llm:
  endpoint: "http://localhost:11434"
  model: "llama3"
  temperature: 0.7
  timeout: 30

audit:
  max_depth: 3
  severity_levels:
    - critical
    - warning
    - info

fix:
  create_backup: true
  backup_dir: ".config_auditor_backup"
  dry_run_default: false
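A minimal sketch of how this config might be loaded, assuming it lives in a `config.yaml` file next to the tool and that PyYAML is installed; the `LLMConfig` dataclass and `load_config` helper are illustrative names, not part of the tool itself:

```python
# Illustrative loader for the config above; config.yaml path and
# LLMConfig/load_config names are assumptions, not the tool's API.
from dataclasses import dataclass

import yaml  # PyYAML


@dataclass
class LLMConfig:
    # Defaults mirror the example values in the config above.
    endpoint: str = "http://localhost:11434"
    model: str = "llama3"
    temperature: float = 0.7
    timeout: int = 30


def load_config(path: str = "config.yaml") -> dict:
    """Parse the YAML file; safe_load refuses arbitrary Python tags."""
    with open(path) as f:
        return yaml.safe_load(f)


if __name__ == "__main__":
    cfg = load_config()
    # Fall back to the dataclass defaults if the llm section is absent.
    llm = LLMConfig(**cfg.get("llm", {}))
    print(llm.endpoint, llm.model)
```

Using `yaml.safe_load` rather than `yaml.load` is a deliberate choice here: config files are untrusted input, and `safe_load` only constructs plain Python types.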