Prashant Choudhary 2025-07-06 23:49:52 -04:00 committed by GitHub
commit 45e8678adf
6 changed files with 616 additions and 37 deletions

config.schema.json (new file, 448 lines)

@@ -0,0 +1,448 @@
{
"type": "object",
"properties": {
"$schema": {
"type": "string",
"description": "JSON schema reference for configuration validation"
},
"theme": {
"type": "string",
"description": "Theme name to use for the interface"
},
"keybinds": {
"type": "object",
"properties": {
"leader": {
"type": "string",
"description": "Leader key for keybind combinations"
},
"help": {
"type": "string",
"description": "Show help dialog"
},
"editor_open": {
"type": "string",
"description": "Open external editor"
},
"session_new": {
"type": "string",
"description": "Create a new session"
},
"session_list": {
"type": "string",
"description": "List all sessions"
},
"session_share": {
"type": "string",
"description": "Share current session"
},
"session_interrupt": {
"type": "string",
"description": "Interrupt current session"
},
"session_compact": {
"type": "string",
"description": "Toggle compact mode for session"
},
"tool_details": {
"type": "string",
"description": "Show tool details"
},
"model_list": {
"type": "string",
"description": "List available models"
},
"theme_list": {
"type": "string",
"description": "List available themes"
},
"project_init": {
"type": "string",
"description": "Initialize project configuration"
},
"input_clear": {
"type": "string",
"description": "Clear input field"
},
"input_paste": {
"type": "string",
"description": "Paste from clipboard"
},
"input_submit": {
"type": "string",
"description": "Submit input"
},
"input_newline": {
"type": "string",
"description": "Insert newline in input"
},
"history_previous": {
"type": "string",
"description": "Navigate to previous history item"
},
"history_next": {
"type": "string",
"description": "Navigate to next history item"
},
"messages_page_up": {
"type": "string",
"description": "Scroll messages up by one page"
},
"messages_page_down": {
"type": "string",
"description": "Scroll messages down by one page"
},
"messages_half_page_up": {
"type": "string",
"description": "Scroll messages up by half page"
},
"messages_half_page_down": {
"type": "string",
"description": "Scroll messages down by half page"
},
"messages_previous": {
"type": "string",
"description": "Navigate to previous message"
},
"messages_next": {
"type": "string",
"description": "Navigate to next message"
},
"messages_first": {
"type": "string",
"description": "Navigate to first message"
},
"messages_last": {
"type": "string",
"description": "Navigate to last message"
},
"app_exit": {
"type": "string",
"description": "Exit the application"
}
},
"additionalProperties": false,
"description": "Custom keybind configurations"
},
"autoshare": {
"type": "boolean",
"description": "Share newly created sessions automatically"
},
"autoupdate": {
"type": "boolean",
"description": "Automatically update to the latest version"
},
"disabled_providers": {
"type": "array",
"items": {
"type": "string"
},
"description": "Disable providers that are loaded automatically"
},
"model": {
"type": "string",
"description": "Model to use in the format of provider/model, eg anthropic/claude-2"
},
"use_standard_pricing_only": {
"type": "boolean",
"description": "Force the use of standard pricing tier for Gemini models, even for large contexts."
},
"provider": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"api": {
"type": "string"
},
"name": {
"type": "string"
},
"env": {
"type": "array",
"items": {
"type": "string"
}
},
"id": {
"type": "string"
},
"npm": {
"type": "string"
},
"models": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"name": {
"type": "string"
},
"release_date": {
"type": "string"
},
"attachment": {
"type": "boolean"
},
"reasoning": {
"type": "boolean"
},
"temperature": {
"type": "boolean"
},
"tool_call": {
"type": "boolean"
},
"cost": {
"anyOf": [
{
"type": "object",
"properties": {
"input": {
"type": "number"
},
"output": {
"type": "number"
},
"cache_read": {
"type": "number"
},
"cache_write": {
"type": "number"
}
},
"required": [
"input",
"output"
],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"standard": {
"type": "object",
"properties": {
"input": {
"type": "number"
},
"output": {
"type": "number"
},
"cache_read": {
"type": "number"
},
"cache_write": {
"type": "number"
}
},
"required": [
"input",
"output"
],
"additionalProperties": false
},
"large_context": {
"type": "object",
"properties": {
"input": {
"type": "number"
},
"output": {
"type": "number"
},
"cache_read": {
"type": "number"
},
"cache_write": {
"type": "number"
}
},
"required": [
"input",
"output"
],
"additionalProperties": false
}
},
"required": [
"standard",
"large_context"
],
"additionalProperties": false
}
]
},
"limit": {
"type": "object",
"properties": {
"context": {
"type": "number"
},
"output": {
"type": "number"
},
"standard_context_threshold": {
"type": "number"
}
},
"required": [
"context",
"output"
],
"additionalProperties": false
},
"options": {
"type": "object",
"additionalProperties": {}
}
},
"additionalProperties": false
}
},
"options": {
"type": "object",
"additionalProperties": {}
}
},
"required": [
"models"
],
"additionalProperties": false
},
"description": "Custom provider configurations and model overrides"
},
"mcp": {
"type": "object",
"additionalProperties": {
"anyOf": [
{
"type": "object",
"properties": {
"type": {
"type": "string",
"const": "local",
"description": "Type of MCP server connection"
},
"command": {
"type": "array",
"items": {
"type": "string"
},
"description": "Command and arguments to run the MCP server"
},
"environment": {
"type": "object",
"additionalProperties": {
"type": "string"
},
"description": "Environment variables to set when running the MCP server"
},
"enabled": {
"type": "boolean",
"description": "Enable or disable the MCP server on startup"
}
},
"required": [
"type",
"command"
],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"type": {
"type": "string",
"const": "remote",
"description": "Type of MCP server connection"
},
"url": {
"type": "string",
"description": "URL of the remote MCP server"
},
"enabled": {
"type": "boolean",
"description": "Enable or disable the MCP server on startup"
}
},
"required": [
"type",
"url"
],
"additionalProperties": false
}
]
},
"description": "MCP (Model Context Protocol) server configurations"
},
"experimental": {
"type": "object",
"properties": {
"hook": {
"type": "object",
"properties": {
"file_edited": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"properties": {
"command": {
"type": "array",
"items": {
"type": "string"
}
},
"environment": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"required": [
"command"
],
"additionalProperties": false
}
}
},
"session_completed": {
"type": "array",
"items": {
"type": "object",
"properties": {
"command": {
"type": "array",
"items": {
"type": "string"
}
},
"environment": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"required": [
"command"
],
"additionalProperties": false
}
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"additionalProperties": false,
"$schema": "http://json-schema.org/draft-07/schema#"
}
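As a quick orientation, a minimal config this schema should accept might look like the following sketch; the theme and model values mirror the docs example further down, while the keybind and MCP entries are placeholders chosen only to exercise those sections of the schema.

```json
{
  "$schema": "https://opencode.ai/config.json",
  "theme": "opencode",
  "model": "anthropic/claude-sonnet-4-20250514",
  "use_standard_pricing_only": false,
  "keybinds": {
    "leader": "ctrl+x"
  },
  "mcp": {
    "example": {
      "type": "local",
      "command": ["bun", "run", "./mcp-server.ts"],
      "enabled": true
    }
  }
}
```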


@@ -163,6 +163,12 @@ export namespace Config {
"Model to use in the format of provider/model, eg anthropic/claude-2",
)
.optional(),
use_standard_pricing_only: z
.boolean()
.optional()
.describe(
"Force the use of standard pricing tier for Gemini models, even for large contexts.",
),
provider: z
.record(
ModelsDev.Provider.partial().extend({
@@ -176,10 +182,6 @@ export namespace Config {
.record(z.string(), Mcp)
.optional()
.describe("MCP (Model Context Protocol) server configurations"),
instructions: z
.array(z.string())
.optional()
.describe("Additional instruction files or patterns to include"),
experimental: z
.object({
hook: z


@@ -17,15 +17,32 @@ export namespace ModelsDev {
reasoning: z.boolean(),
temperature: z.boolean(),
tool_call: z.boolean(),
cost: z.object({
input: z.number(),
output: z.number(),
cache_read: z.number().optional(),
cache_write: z.number().optional(),
}),
cost: z.union([
z.object({
input: z.number(),
output: z.number(),
cache_read: z.number().optional(),
cache_write: z.number().optional(),
}),
z.object({
standard: z.object({
input: z.number(),
output: z.number(),
cache_read: z.number().optional(),
cache_write: z.number().optional(),
}),
large_context: z.object({
input: z.number(),
output: z.number(),
cache_read: z.number().optional(),
cache_write: z.number().optional(),
}),
}),
]),
limit: z.object({
context: z.number(),
output: z.number(),
standard_context_threshold: z.number().optional(),
}),
options: z.record(z.any()),
})
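The `cost` union above means a model entry can carry either the original flat price object or the new tiered one. Sketched as data (hypothetical prices, trimmed to the relevant fields), a tiered entry pairs the two price objects with a `standard_context_threshold` in its limits:

```json
{
  "cost": {
    "standard": { "input": 1.25, "output": 10.0 },
    "large_context": { "input": 2.5, "output": 15.0 }
  },
  "limit": {
    "context": 1000000,
    "output": 8192,
    "standard_context_threshold": 200000
  }
}
```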


@@ -37,6 +37,18 @@ export namespace Provider {
type Source = "env" | "config" | "custom" | "api"
// Helper function to get cost based on context size
export function getCost(model: ModelsDev.Model, use_standard_pricing_only: boolean, contextSize?: number) {
if ("standard" in model.cost && "large_context" in model.cost) {
const threshold = model.limit.standard_context_threshold ?? 200000; // Default threshold if not specified
if (contextSize && contextSize > threshold && !use_standard_pricing_only) {
return model.cost.large_context;
}
return model.cost.standard;
}
return model.cost; // Fallback for models without tiered pricing
}
const CUSTOM_LOADERS: Record<string, CustomLoader> = {
async anthropic(provider) {
const access = await AuthAnthropic.access()
@@ -99,25 +111,11 @@
})
info.access = tokens.access
}
let isAgentCall = false
try {
const body =
typeof init.body === "string"
? JSON.parse(init.body)
: init.body
if (body?.messages) {
isAgentCall = body.messages.some(
(msg: any) =>
msg.role && ["tool", "assistant"].includes(msg.role),
)
}
} catch {}
const headers = {
...init.headers,
...copilot.HEADERS,
Authorization: `Bearer ${info.access}`,
"Openai-Intent": "conversation-edits",
"X-Initiator": isAgentCall ? "agent" : "user",
}
delete headers["x-api-key"]
return fetch(input, {
@@ -205,17 +203,6 @@
},
}
},
openrouter: async () => {
return {
autoload: false,
options: {
headers: {
"HTTP-Referer": "https://opencode.ai/",
"X-Title": "opencode",
},
},
}
},
}
const state = App.state("provider", async () => {


@@ -0,0 +1,110 @@
import { expect, test, describe } from "bun:test";
// Config is no longer needed here for these specific tests
import { ModelsDev } from "../src/provider/models";
import { Provider } from "../src/provider/provider";
describe("Provider Logic", () => {
// No longer need to mock Config.get for these tests
test("should use standard pricing for small contexts", () => {
const model: ModelsDev.Model = {
id: "gemini-pro",
name: "Gemini Pro",
release_date: "2023-12-15",
attachment: false,
reasoning: true,
temperature: true,
tool_call: true,
cost: {
standard: { input: 1.25, output: 10.00 },
large_context: { input: 2.50, output: 15.00 },
},
limit: {
context: 1000000,
output: 8192,
standard_context_threshold: 200000,
},
options: {},
};
const cost = Provider.getCost(model, false, 100000); // Pass false directly
expect(cost.input).toBe(1.25);
expect(cost.output).toBe(10.00);
});
test("should use large context pricing for large contexts", () => {
const model: ModelsDev.Model = {
id: "gemini-pro",
name: "Gemini Pro",
release_date: "2023-12-15",
attachment: false,
reasoning: true,
temperature: true,
tool_call: true,
cost: {
standard: { input: 1.25, output: 10.00 },
large_context: { input: 2.50, output: 15.00 },
},
limit: {
context: 1000000,
output: 8192,
standard_context_threshold: 200000,
},
options: {},
};
const cost = Provider.getCost(model, false, 300000); // Pass false directly
expect(cost.input).toBe(2.50);
expect(cost.output).toBe(15.00);
});
test("should use standard pricing for large contexts when use_standard_pricing_only is true", () => {
const model: ModelsDev.Model = {
id: "gemini-pro",
name: "Gemini Pro",
release_date: "2023-12-15",
attachment: false,
reasoning: true,
temperature: true,
tool_call: true,
cost: {
standard: { input: 1.25, output: 10.00 },
large_context: { input: 2.50, output: 15.00 },
},
limit: {
context: 1000000,
output: 8192,
standard_context_threshold: 200000,
},
options: {},
};
const cost = Provider.getCost(model, true, 300000); // Pass true directly
expect(cost.input).toBe(1.25);
expect(cost.output).toBe(10.00);
});
test("should use standard pricing when tiered pricing is not available", () => {
const model: ModelsDev.Model = {
id: "some-other-model",
name: "Some Other Model",
release_date: "2023-12-15",
attachment: false,
reasoning: true,
temperature: true,
tool_call: true,
cost: { input: 1.00, output: 5.00 }, // No tiered pricing
limit: {
context: 1000000,
output: 8192,
},
options: {},
};
// For models without tiered pricing, the use_standard_pricing_only flag doesn't change the outcome.
// We can test with either true or false, e.g., false.
const cost = Provider.getCost(model, false, 300000);
expect(cost.input).toBe(1.00);
expect(cost.output).toBe(5.00);
});
});


@@ -1,6 +1,5 @@
---
title: Config
description: Using the opencode JSON config.
---
You can configure opencode using a JSON config file that can be placed in:
@@ -14,7 +13,8 @@ You can configure opencode using a JSON config file that can be placed in:
"theme": "opencode",
"model": "anthropic/claude-sonnet-4-20250514",
"autoshare": false,
"autoupdate": true
"autoupdate": true,
"use_standard_pricing_only": false
}
```
@@ -93,6 +93,21 @@ You can configure MCP servers you want to use through the `mcp` option.
---
### Gemini pricing tiers
Some Gemini models, such as Gemini 2.5 Pro, are priced in tiers based on context size, and opencode automatically selects the matching tier once the context exceeds the model's standard context threshold (200,000 tokens by default). You can force the standard (lower-cost) tier even for larger contexts by setting `use_standard_pricing_only` to `true`.
```json title="opencode.json"
{
"$schema": "https://opencode.ai/config.json",
"use_standard_pricing_only": true
}
```
This is useful if you want to strictly control costs and avoid the higher charges that come with large context windows, even if it means the model may not be able to process the entire context. By default this is `false`, so opencode uses the higher-cost tier for large contexts when needed.
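The tiered structure is also accepted when overriding a model under the `provider` block, in case you maintain a custom provider entry. A sketch with placeholder provider and model IDs and made-up prices:

```json title="opencode.json"
{
  "$schema": "https://opencode.ai/config.json",
  "provider": {
    "google": {
      "models": {
        "gemini-2.5-pro": {
          "cost": {
            "standard": { "input": 1.25, "output": 10.0 },
            "large_context": { "input": 2.5, "output": 15.0 }
          },
          "limit": {
            "context": 1000000,
            "output": 8192,
            "standard_context_threshold": 200000
          }
        }
      }
    }
  }
}
```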
---
### Disabled providers
You can disable providers that are loaded automatically through the `disabled_providers` option. This is useful when you want to prevent certain providers from being loaded even if their credentials are available.