Configure GitHub Copilot for LM Studio models
# /// script
# requires-python = ">=3.13"
# dependencies = [
#     "requests",
#     "click",
#     "json5",
# ]
# ///
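# The block above is PEP 723 inline script metadata: `uv run` reads it and
# creates an ephemeral environment with the listed dependencies, so no manual
# `pip install` is needed.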
| """ | |
| LM Studio to GitHub Copilot Custom Models Generator | |
| This script automatically discovers LLM models from your LM Studio instance | |
| and generates the configuration needed for GitHub Copilot's custom OpenAI models feature. | |
| Features: | |
| - Auto-discovery of all available LLM models | |
| - Proper capability detection (tool calling, context length) | |
| - Direct VS Code settings.json update support | |
| - Cross-platform compatibility (macOS, Windows, Linux) | |
| - JSONC format support (handles comments and trailing commas) | |
| Usage: | |
| # Install dependencies and run with uv (recommended) | |
| uv run generate-custom-oai-models.py --help | |
| # Or install manually and run with python | |
| pip install requests click json5 | |
| python generate-custom-oai-models.py --help | |
| gist: https://github.com/your-username/lm-studio-copilot-config | |
| Author: Alessandro Bologna | |
| License: MIT | |
| """ | |
import json
import sys
from pathlib import Path

import click
import json5
import requests


def fetch_models(api_base):
    """Fetch the list of available models from the LM Studio REST API."""
    # Modest timeout so the script fails fast if LM Studio isn't running
    resp = requests.get(api_base, timeout=10)
    resp.raise_for_status()
    return resp.json()["data"]
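
# Example of one item in the "data" list returned by LM Studio's
# /api/v0/models endpoint (illustrative: the field names are the ones this
# script reads below, the values are hypothetical):
#
#   {
#       "id": "qwen2.5-7b-instruct",
#       "type": "llm",
#       "capabilities": ["tool_use"],
#       "max_context_length": 32768
#   }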


def generate_copilot_config(api_base, openai_url):
    """Build the customOAIModels dictionary from the discovered models."""
    models = fetch_models(api_base)
    config = {}
    for model in models:
        # Only include LLM and VLM models; skip embeddings and other types
        if model.get("type") not in ["llm", "vlm"]:
            continue
        model_id = model["id"]
        capabilities = model.get("capabilities", [])
        max_context = model.get("max_context_length", 8192)
        config[model_id] = {
            "name": model_id,
            "url": openai_url,
            "toolCalling": "tool_use" in capabilities,
            "vision": "vision" in capabilities,  # detected from model capabilities
            "thinking": True,  # default to True, can be customized per model
            "maxInputTokens": max_context,
            "maxOutputTokens": max_context,
            "requiresAPIKey": False
        }
    return config
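
# Example of one generated entry, assuming the default base URL and a
# hypothetical model id (shape mirrors the dict built above):
#
#   "qwen2.5-7b-instruct": {
#       "name": "qwen2.5-7b-instruct",
#       "url": "http://localhost:1234/v1/chat/completions",
#       "toolCalling": true,
#       "vision": false,
#       "thinking": true,
#       "maxInputTokens": 32768,
#       "maxOutputTokens": 32768,
#       "requiresAPIKey": false
#   }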


def update_settings_file(settings_path, config):
    """Update the settings.json file with the new model configuration."""
    # expanduser() handles quoted "~/..." paths that the shell didn't expand
    settings_file = Path(settings_path).expanduser()
    if settings_file.exists():
        try:
            # Use json5 to parse JSONC files (handles comments and trailing commas)
            with open(settings_file, 'r', encoding='utf-8') as f:
                content = f.read()
            settings = json5.loads(content)
        except Exception as e:
            # If parsing fails, create a minimal settings structure
            print(f"⚠️ Could not parse existing settings ({e}), creating new structure...")
            settings = {}
    else:
        settings = {}
    # Update the customOAIModels section
    settings["github.copilot.chat.customOAIModels"] = config
    # Write back to file (as regular JSON with proper formatting)
    with open(settings_file, 'w', encoding='utf-8') as f:
        json.dump(settings, f, indent=4)
    print(f"✅ Updated {settings_file} with {len(config)} models")


@click.command()
@click.option(
    '--base-url',
    default='http://localhost:1234',
    help='Base URL for the LM Studio API (default: http://localhost:1234)'
)
@click.option(
    '--settings-path',
    type=click.Path(),
    help='Path to VS Code settings.json file. If provided, updates the file directly.'
)
def main(base_url, settings_path):
    """
    Generate GitHub Copilot custom OpenAI models configuration from LM Studio API.

    This script automatically discovers all LLM models available in your LM Studio
    instance and generates the proper configuration for GitHub Copilot's custom
    OpenAI models feature. It reads model capabilities (tool calling, vision,
    context length) directly from the API.

    \b
    EXAMPLES:
        # Generate config and print to stdout (copy/paste into VS Code settings)
        uv run generate-custom-oai-models.py

        # Use custom LM Studio URL
        uv run generate-custom-oai-models.py --base-url http://studio.local:1234

        # Update VS Code settings file directly (macOS)
        uv run generate-custom-oai-models.py --settings-path "~/Library/Application Support/Code/User/settings.json"

        # Update VS Code Insiders settings (macOS)
        uv run generate-custom-oai-models.py --settings-path "~/Library/Application Support/Code - Insiders/User/settings.json"

        # Windows VS Code settings
        uv run generate-custom-oai-models.py --settings-path "%APPDATA%/Code/User/settings.json"

        # Linux VS Code settings
        uv run generate-custom-oai-models.py --settings-path "~/.config/Code/User/settings.json"

    \b
    SETUP:
        1. Start LM Studio with your desired models loaded
        2. Run this script to generate or update your configuration
        3. Restart VS Code to pick up the new models
        4. Access your local models via GitHub Copilot's chat model selector

    The script filters out non-LLM models (like embeddings) and detects tool
    calling, vision, and context length per model from the API. All models are
    configured with thinking=true by default (adjust manually if needed).
    """
    # Construct API URLs
    api_base = f"{base_url}/api/v0/models"
    openai_url = f"{base_url}/v1/chat/completions"
    try:
        config = generate_copilot_config(api_base, openai_url)
        if settings_path:
            # Update the settings file directly
            update_settings_file(settings_path, config)
        else:
            # Print the configuration to stdout
            output = {"github.copilot.chat.customOAIModels": config}
            print(json.dumps(output, indent=2))
    except requests.exceptions.RequestException as e:
        click.echo(f"❌ Error connecting to LM Studio API at {base_url}: {e}", err=True)
        sys.exit(1)
    except Exception as e:
        click.echo(f"❌ Error: {e}", err=True)
        sys.exit(1)


if __name__ == "__main__":
    main()
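
A quick way to sanity-check that the LM Studio REST API is reachable before running the full script (a minimal sketch; it assumes the same default port and /api/v0/models endpoint used above):

    import requests

    resp = requests.get("http://localhost:1234/api/v0/models", timeout=10)
    resp.raise_for_status()
    for model in resp.json()["data"]:
        # Prints each model's id, type, and context length as reported by LM Studio
        print(model["id"], model.get("type"), model.get("max_context_length"))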