mirror of
https://github.com/sst/opencode.git
synced 2025-08-09 07:48:02 +00:00

* Add support for OpenRouter as a new model provider - Introduced `ProviderOpenRouter` in the `models` package. - Added OpenRouter-specific models, including `GPT41`, `GPT41Mini`, `GPT4o`, and others, with their configurations and costs. - Updated `generateSchema` to include OpenRouter as a provider. - Added OpenRouter-specific environment variable handling (`OPENROUTER_API_KEY`) in `config.go`. - Implemented default model settings for OpenRouter agents in `setDefaultModelForAgent`. - Updated `getProviderAPIKey` to retrieve the OpenRouter API key. - Extended `SupportedModels` to include OpenRouter models. - Added OpenRouter client initialization in the `provider` package. - Modified `processGeneration` to handle `FinishReasonUnknown` in addition to `FinishReasonToolUse`. * [feature/openrouter-provider] Add new models and provider to schema - Added "deepseek-chat-free" and "deepseek-r1-free" to the list of supported models in `opencode-schema.json`. * [feature/openrouter-provider] Add OpenRouter provider support and integrate new models - Updated README.md to include OpenRouter as a supported provider and its configuration details. - Added `OPENROUTER_API_KEY` to environment variable configuration. - Introduced OpenRouter-specific models in `internal/llm/models/openrouter.go` with mappings to existing cost and token configurations. - Updated `internal/config/config.go` to set default models for OpenRouter agents. - Extended `opencode-schema.json` to include OpenRouter models in the schema definitions. - Refactored model IDs and names to align with OpenRouter naming conventions. * [feature/openrouter-provider] Refactor finish reason handling and tool call logic in agent and OpenAI provider - Simplified finish reason check in `agent.go` by removing redundant variable assignment. - Updated `openai.go` to override the finish reason to `FinishReasonToolUse` when tool calls are present. 
- Ensured consistent finish reason handling in both `send` and `stream` methods of the OpenAI provider. * **[feature/openrouter-provider] Add support for custom headers in OpenAI client configuration** - Introduced a new `extraHeaders` field in the `openaiOptions` struct to allow specifying additional HTTP headers. - Added logic in `newOpenAIClient` to apply `extraHeaders` to the OpenAI client configuration. - Implemented a new option function `WithOpenAIExtraHeaders` to set custom headers in `openaiOptions`. - Updated the OpenRouter provider configuration in `NewProvider` to include default headers (`HTTP-Referer` and `X-Title`) for OpenRouter API requests. * Update OpenRouter model config and remove unsupported models * [feature/openrouter-provider] Update OpenRouter models and default configurations - Added new OpenRouter models: `claude-3.5-sonnet`, `claude-3-haiku`, `claude-3.7-sonnet`, `claude-3.5-haiku`, and `claude-3-opus` in `openrouter.go`. - Updated default agent models in `config.go`: - `agents.coder.model` now uses `claude-3.7-sonnet`. - `agents.task.model` now uses `claude-3.7-sonnet`. - `agents.title.model` now uses `claude-3.5-haiku`. - Updated `opencode-schema.json` to include the new models in the allowed list for schema validation. - Adjusted logic in `setDefaultModelForAgent` to reflect the new default models. * [feature/openrouter-provider] Remove unused ProviderEvent emission in stream function The changes remove the emission of a `ProviderEvent` with type `EventContentStop` in the `stream` function of the `openaiClient` implementation. 
This event was sent upon successful stream completion but is no longer used.
262 lines
12 KiB
Go
package models
|
||
|
||
const (
|
||
ProviderOpenRouter ModelProvider = "openrouter"
|
||
|
||
OpenRouterGPT41 ModelID = "openrouter.gpt-4.1"
|
||
OpenRouterGPT41Mini ModelID = "openrouter.gpt-4.1-mini"
|
||
OpenRouterGPT41Nano ModelID = "openrouter.gpt-4.1-nano"
|
||
OpenRouterGPT45Preview ModelID = "openrouter.gpt-4.5-preview"
|
||
OpenRouterGPT4o ModelID = "openrouter.gpt-4o"
|
||
OpenRouterGPT4oMini ModelID = "openrouter.gpt-4o-mini"
|
||
OpenRouterO1 ModelID = "openrouter.o1"
|
||
OpenRouterO1Pro ModelID = "openrouter.o1-pro"
|
||
OpenRouterO1Mini ModelID = "openrouter.o1-mini"
|
||
OpenRouterO3 ModelID = "openrouter.o3"
|
||
OpenRouterO3Mini ModelID = "openrouter.o3-mini"
|
||
OpenRouterO4Mini ModelID = "openrouter.o4-mini"
|
||
OpenRouterGemini25Flash ModelID = "openrouter.gemini-2.5-flash"
|
||
OpenRouterGemini25 ModelID = "openrouter.gemini-2.5"
|
||
OpenRouterClaude35Sonnet ModelID = "openrouter.claude-3.5-sonnet"
|
||
OpenRouterClaude3Haiku ModelID = "openrouter.claude-3-haiku"
|
||
OpenRouterClaude37Sonnet ModelID = "openrouter.claude-3.7-sonnet"
|
||
OpenRouterClaude35Haiku ModelID = "openrouter.claude-3.5-haiku"
|
||
OpenRouterClaude3Opus ModelID = "openrouter.claude-3-opus"
|
||
)
|
||
|
||
var OpenRouterModels = map[ModelID]Model{
|
||
OpenRouterGPT41: {
|
||
ID: OpenRouterGPT41,
|
||
Name: "OpenRouter – GPT 4.1",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/gpt-4.1",
|
||
CostPer1MIn: OpenAIModels[GPT41].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[GPT41].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[GPT41].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[GPT41].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[GPT41].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[GPT41].DefaultMaxTokens,
|
||
},
|
||
OpenRouterGPT41Mini: {
|
||
ID: OpenRouterGPT41Mini,
|
||
Name: "OpenRouter – GPT 4.1 mini",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/gpt-4.1-mini",
|
||
CostPer1MIn: OpenAIModels[GPT41Mini].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[GPT41Mini].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[GPT41Mini].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[GPT41Mini].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[GPT41Mini].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[GPT41Mini].DefaultMaxTokens,
|
||
},
|
||
OpenRouterGPT41Nano: {
|
||
ID: OpenRouterGPT41Nano,
|
||
Name: "OpenRouter – GPT 4.1 nano",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/gpt-4.1-nano",
|
||
CostPer1MIn: OpenAIModels[GPT41Nano].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[GPT41Nano].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[GPT41Nano].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[GPT41Nano].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[GPT41Nano].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[GPT41Nano].DefaultMaxTokens,
|
||
},
|
||
OpenRouterGPT45Preview: {
|
||
ID: OpenRouterGPT45Preview,
|
||
Name: "OpenRouter – GPT 4.5 preview",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/gpt-4.5-preview",
|
||
CostPer1MIn: OpenAIModels[GPT45Preview].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[GPT45Preview].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[GPT45Preview].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[GPT45Preview].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[GPT45Preview].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[GPT45Preview].DefaultMaxTokens,
|
||
},
|
||
OpenRouterGPT4o: {
|
||
ID: OpenRouterGPT4o,
|
||
Name: "OpenRouter – GPT 4o",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/gpt-4o",
|
||
CostPer1MIn: OpenAIModels[GPT4o].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[GPT4o].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[GPT4o].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[GPT4o].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[GPT4o].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[GPT4o].DefaultMaxTokens,
|
||
},
|
||
OpenRouterGPT4oMini: {
|
||
ID: OpenRouterGPT4oMini,
|
||
Name: "OpenRouter – GPT 4o mini",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/gpt-4o-mini",
|
||
CostPer1MIn: OpenAIModels[GPT4oMini].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[GPT4oMini].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[GPT4oMini].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[GPT4oMini].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[GPT4oMini].ContextWindow,
|
||
},
|
||
OpenRouterO1: {
|
||
ID: OpenRouterO1,
|
||
Name: "OpenRouter – O1",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/o1",
|
||
CostPer1MIn: OpenAIModels[O1].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[O1].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[O1].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[O1].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[O1].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[O1].DefaultMaxTokens,
|
||
CanReason: OpenAIModels[O1].CanReason,
|
||
},
|
||
OpenRouterO1Pro: {
|
||
ID: OpenRouterO1Pro,
|
||
Name: "OpenRouter – o1 pro",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/o1-pro",
|
||
CostPer1MIn: OpenAIModels[O1Pro].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[O1Pro].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[O1Pro].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[O1Pro].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[O1Pro].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[O1Pro].DefaultMaxTokens,
|
||
CanReason: OpenAIModels[O1Pro].CanReason,
|
||
},
|
||
OpenRouterO1Mini: {
|
||
ID: OpenRouterO1Mini,
|
||
Name: "OpenRouter – o1 mini",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/o1-mini",
|
||
CostPer1MIn: OpenAIModels[O1Mini].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[O1Mini].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[O1Mini].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[O1Mini].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[O1Mini].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[O1Mini].DefaultMaxTokens,
|
||
CanReason: OpenAIModels[O1Mini].CanReason,
|
||
},
|
||
OpenRouterO3: {
|
||
ID: OpenRouterO3,
|
||
Name: "OpenRouter – o3",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/o3",
|
||
CostPer1MIn: OpenAIModels[O3].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[O3].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[O3].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[O3].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[O3].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[O3].DefaultMaxTokens,
|
||
CanReason: OpenAIModels[O3].CanReason,
|
||
},
|
||
OpenRouterO3Mini: {
|
||
ID: OpenRouterO3Mini,
|
||
Name: "OpenRouter – o3 mini",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/o3-mini-high",
|
||
CostPer1MIn: OpenAIModels[O3Mini].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[O3Mini].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[O3Mini].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[O3Mini].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[O3Mini].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[O3Mini].DefaultMaxTokens,
|
||
CanReason: OpenAIModels[O3Mini].CanReason,
|
||
},
|
||
OpenRouterO4Mini: {
|
||
ID: OpenRouterO4Mini,
|
||
Name: "OpenRouter – o4 mini",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "openai/o4-mini-high",
|
||
CostPer1MIn: OpenAIModels[O4Mini].CostPer1MIn,
|
||
CostPer1MInCached: OpenAIModels[O4Mini].CostPer1MInCached,
|
||
CostPer1MOut: OpenAIModels[O4Mini].CostPer1MOut,
|
||
CostPer1MOutCached: OpenAIModels[O4Mini].CostPer1MOutCached,
|
||
ContextWindow: OpenAIModels[O4Mini].ContextWindow,
|
||
DefaultMaxTokens: OpenAIModels[O4Mini].DefaultMaxTokens,
|
||
CanReason: OpenAIModels[O4Mini].CanReason,
|
||
},
|
||
OpenRouterGemini25Flash: {
|
||
ID: OpenRouterGemini25Flash,
|
||
Name: "OpenRouter – Gemini 2.5 Flash",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "google/gemini-2.5-flash-preview:thinking",
|
||
CostPer1MIn: GeminiModels[Gemini25Flash].CostPer1MIn,
|
||
CostPer1MInCached: GeminiModels[Gemini25Flash].CostPer1MInCached,
|
||
CostPer1MOut: GeminiModels[Gemini25Flash].CostPer1MOut,
|
||
CostPer1MOutCached: GeminiModels[Gemini25Flash].CostPer1MOutCached,
|
||
ContextWindow: GeminiModels[Gemini25Flash].ContextWindow,
|
||
DefaultMaxTokens: GeminiModels[Gemini25Flash].DefaultMaxTokens,
|
||
},
|
||
OpenRouterGemini25: {
|
||
ID: OpenRouterGemini25,
|
||
Name: "OpenRouter – Gemini 2.5 Pro",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "google/gemini-2.5-pro-preview-03-25",
|
||
CostPer1MIn: GeminiModels[Gemini25].CostPer1MIn,
|
||
CostPer1MInCached: GeminiModels[Gemini25].CostPer1MInCached,
|
||
CostPer1MOut: GeminiModels[Gemini25].CostPer1MOut,
|
||
CostPer1MOutCached: GeminiModels[Gemini25].CostPer1MOutCached,
|
||
ContextWindow: GeminiModels[Gemini25].ContextWindow,
|
||
DefaultMaxTokens: GeminiModels[Gemini25].DefaultMaxTokens,
|
||
},
|
||
OpenRouterClaude35Sonnet: {
|
||
ID: OpenRouterClaude35Sonnet,
|
||
Name: "OpenRouter – Claude 3.5 Sonnet",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "anthropic/claude-3.5-sonnet",
|
||
CostPer1MIn: AnthropicModels[Claude35Sonnet].CostPer1MIn,
|
||
CostPer1MInCached: AnthropicModels[Claude35Sonnet].CostPer1MInCached,
|
||
CostPer1MOut: AnthropicModels[Claude35Sonnet].CostPer1MOut,
|
||
CostPer1MOutCached: AnthropicModels[Claude35Sonnet].CostPer1MOutCached,
|
||
ContextWindow: AnthropicModels[Claude35Sonnet].ContextWindow,
|
||
DefaultMaxTokens: AnthropicModels[Claude35Sonnet].DefaultMaxTokens,
|
||
},
|
||
OpenRouterClaude3Haiku: {
|
||
ID: OpenRouterClaude3Haiku,
|
||
Name: "OpenRouter – Claude 3 Haiku",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "anthropic/claude-3-haiku",
|
||
CostPer1MIn: AnthropicModels[Claude3Haiku].CostPer1MIn,
|
||
CostPer1MInCached: AnthropicModels[Claude3Haiku].CostPer1MInCached,
|
||
CostPer1MOut: AnthropicModels[Claude3Haiku].CostPer1MOut,
|
||
CostPer1MOutCached: AnthropicModels[Claude3Haiku].CostPer1MOutCached,
|
||
ContextWindow: AnthropicModels[Claude3Haiku].ContextWindow,
|
||
DefaultMaxTokens: AnthropicModels[Claude3Haiku].DefaultMaxTokens,
|
||
},
|
||
OpenRouterClaude37Sonnet: {
|
||
ID: OpenRouterClaude37Sonnet,
|
||
Name: "OpenRouter – Claude 3.7 Sonnet",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "anthropic/claude-3.7-sonnet",
|
||
CostPer1MIn: AnthropicModels[Claude37Sonnet].CostPer1MIn,
|
||
CostPer1MInCached: AnthropicModels[Claude37Sonnet].CostPer1MInCached,
|
||
CostPer1MOut: AnthropicModels[Claude37Sonnet].CostPer1MOut,
|
||
CostPer1MOutCached: AnthropicModels[Claude37Sonnet].CostPer1MOutCached,
|
||
ContextWindow: AnthropicModels[Claude37Sonnet].ContextWindow,
|
||
DefaultMaxTokens: AnthropicModels[Claude37Sonnet].DefaultMaxTokens,
|
||
CanReason: AnthropicModels[Claude37Sonnet].CanReason,
|
||
},
|
||
OpenRouterClaude35Haiku: {
|
||
ID: OpenRouterClaude35Haiku,
|
||
Name: "OpenRouter – Claude 3.5 Haiku",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "anthropic/claude-3.5-haiku",
|
||
CostPer1MIn: AnthropicModels[Claude35Haiku].CostPer1MIn,
|
||
CostPer1MInCached: AnthropicModels[Claude35Haiku].CostPer1MInCached,
|
||
CostPer1MOut: AnthropicModels[Claude35Haiku].CostPer1MOut,
|
||
CostPer1MOutCached: AnthropicModels[Claude35Haiku].CostPer1MOutCached,
|
||
ContextWindow: AnthropicModels[Claude35Haiku].ContextWindow,
|
||
DefaultMaxTokens: AnthropicModels[Claude35Haiku].DefaultMaxTokens,
|
||
},
|
||
OpenRouterClaude3Opus: {
|
||
ID: OpenRouterClaude3Opus,
|
||
Name: "OpenRouter – Claude 3 Opus",
|
||
Provider: ProviderOpenRouter,
|
||
APIModel: "anthropic/claude-3-opus",
|
||
CostPer1MIn: AnthropicModels[Claude3Opus].CostPer1MIn,
|
||
CostPer1MInCached: AnthropicModels[Claude3Opus].CostPer1MInCached,
|
||
CostPer1MOut: AnthropicModels[Claude3Opus].CostPer1MOut,
|
||
CostPer1MOutCached: AnthropicModels[Claude3Opus].CostPer1MOutCached,
|
||
ContextWindow: AnthropicModels[Claude3Opus].ContextWindow,
|
||
DefaultMaxTokens: AnthropicModels[Claude3Opus].DefaultMaxTokens,
|
||
},
|
||
}
|