{
  "buffer_font_family": "JetBrains Mono",
  "ui_font_family": "JetBrains Mono",
  "agent_servers": {
    "OpenCode": {
      "type": "custom",
      "command": "opencode",
      "args": ["acp"]
    }
  },
  "disable_ai": false,
  "edit_predictions": {
    "provider": "none"
  },
  "git_panel": {
    "status_style": "icon",
    "default_width": 360
  },
  "tabs": {
    "git_status": false
  },
  "vim_mode": true,
  "title_bar": {
    "show_sign_in": false
  },
  "outline_panel": {
    "button": false
  },
  "collaboration_panel": {
    "button": false
  },
  "notification_panel": {
    "button": false
  },
  "agent": {
    "favorite_models": [
      {
        "provider": "LM_Studio",
        "model": "qwen/qwen3.5-9b",
        "enable_thinking": false
      }
    ],
    "single_file_review": true,
    "enable_feedback": false,
    "tool_permissions": {
      "default": "allow"
    },
    "default_profile": "write",
    "default_model": {
      "provider": "LM_Studio",
      "model": "qwen/qwen3.6-35b-a3b"
    },
    "commit_message_model": {
      "provider": "openai_compatible.LM_Studio",
      "model": "nvidia/nemotron-3-nano-4b"
    },
    "model_parameters": []
  },
  "language_models": {
    "openai_compatible": {
      "LiteLLM": {
        "api_url": "http://192.168.10.10:4000/v1",
        "available_models": [
          {
            "name": "claude-haiku-4-5",
            "max_tokens": 200000,
            "max_output_tokens": 32000,
            "max_completion_tokens": 200000,
            "capabilities": {
              "tools": true,
              "images": true,
              "parallel_tool_calls": false,
              "prompt_cache_key": false
            }
          },
          {
            "name": "claude-sonnet-4-5",
            "max_tokens": 200000,
            "max_output_tokens": 32000,
            "max_completion_tokens": 200000,
            "capabilities": {
              "tools": true,
              "images": true,
              "parallel_tool_calls": false,
              "prompt_cache_key": false
            }
          },
          {
            "name": "gpt-5-nano",
            "max_tokens": 200000,
            "max_output_tokens": 32000,
            "max_completion_tokens": 200000,
            "capabilities": {
              "tools": true,
              "images": false,
              "parallel_tool_calls": false,
              "prompt_cache_key": false
            }
          },
          {
            "name": "gpt-4.1-nano",
            "max_tokens": 200000,
            "max_output_tokens": 32000,
            "max_completion_tokens": 200000,
            "capabilities": {
              "tools": true,
              "images": false,
              "parallel_tool_calls": false,
              "prompt_cache_key": true
            }
          }
        ]
      },
      "LM_Studio": {
        "api_url": "http://192.168.10.64:1234/v1",
        "available_models": [
          {
            "name": "qwen/qwen3.6-35b-a3b",
            "max_tokens": 200000,
            "max_output_tokens": 32000,
            "max_completion_tokens": 200000,
            "capabilities": {
              "tools": true,
              "images": false,
              "parallel_tool_calls": true,
              "prompt_cache_key": false
            }
          },
          {
            "name": "google/gemma-4-26b-a4b",
            "max_tokens": 200000,
            "max_output_tokens": 32000,
            "max_completion_tokens": 200000,
            "capabilities": {
              "tools": true,
              "images": true,
              "parallel_tool_calls": true,
              "prompt_cache_key": false
            }
          },
          {
            "name": "nvidia/nemotron-3-nano-4b",
            "max_tokens": 200000,
            "max_output_tokens": 32000,
            "max_completion_tokens": 200000,
            "capabilities": {
              "tools": true,
              "images": true,
              "parallel_tool_calls": true,
              "prompt_cache_key": false
            }
          }
        ]
      }
    },
    "openai": {
      "available_models": [
        {
          "display_name": "gpt-4.1-nano",
          "name": "gpt-4.1-nano",
          "max_tokens": 128000
        }
      ]
    }
  },
  "base_keymap": "VSCode",
  "auto_indent_on_paste": true,
  "tab_size": 2,
  "icon_theme": "Zed (Default)",
  "ui_font_size": 16.0,
  "buffer_font_size": 16.0,
  "theme": {
    "mode": "dark",
    "light": "One Light",
    "dark": "One Dark"
  },
  "context_servers": {
    "MCP_DOCKER": {
      "env": {},
      "enabled": false,
      "command": "docker",
      "args": ["mcp", "gateway", "run"]
    }
  }
}