feat: support VertexAI provider (#153)

* support: vertexai

fix

fix

set default for vertexai

added comment

fix

fix

* create schema

* fix README.md

* fix order

* added popularity

* set tools only if tools exist

restore commented-out code

* fix comment

* set summarizer model
mineo 2025-05-16 03:25:21 +09:00 committed by adamdottv
parent 5f5f9dad87
commit 87237b6462
9 changed files with 272 additions and 111 deletions

@@ -74,6 +74,8 @@ You can configure OpenCode using environment variables:
 | `ANTHROPIC_API_KEY` | For Claude models |
 | `OPENAI_API_KEY` | For OpenAI models |
 | `GEMINI_API_KEY` | For Google Gemini models |
+| `VERTEXAI_PROJECT` | For Google Cloud VertexAI (Gemini) |
+| `VERTEXAI_LOCATION` | For Google Cloud VertexAI (Gemini) |
 | `GROQ_API_KEY` | For Groq models |
 | `AWS_ACCESS_KEY_ID` | For AWS Bedrock (Claude) |
 | `AWS_SECRET_ACCESS_KEY` | For AWS Bedrock (Claude) |
@@ -189,6 +191,11 @@ OpenCode supports a variety of AI models from different providers:
 - O3 family (o3, o3-mini)
 - O4 Mini
 
+### Google Cloud VertexAI
+
+- Gemini 2.5
+- Gemini 2.5 Flash
+
 ## Usage
 
 ```bash

@@ -227,6 +227,7 @@ func generateSchema() map[string]any {
 		string(models.ProviderOpenRouter),
 		string(models.ProviderBedrock),
 		string(models.ProviderAzure),
+		string(models.ProviderVertexAI),
 	}
 
 	providerSchema["additionalProperties"].(map[string]any)["properties"].(map[string]any)["provider"] = map[string]any{
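This enum addition is what later surfaces in the regenerated schema file at the bottom of this diff. A minimal sketch of the idea using only encoding/json — the surrounding schema plumbing is elided, and the variable names here are illustrative, not the file's actual identifiers:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative: the provider enum as generateSchema would emit it once
	// ProviderVertexAI is appended (the earlier entries are assumed).
	providerEnum := []string{
		"anthropic", "openai", "gemini", "groq",
		"openrouter", "bedrock", "azure", "vertexai",
	}
	b, _ := json.MarshalIndent(map[string]any{
		"enum": providerEnum,
		"type": "string",
	}, "", "  ")
	fmt.Println(string(b))
}
```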

@@ -235,6 +235,7 @@ func setProviderDefaults() {
 	// 5. OpenRouter
 	// 6. AWS Bedrock
 	// 7. Azure
+	// 8. Google Cloud VertexAI
 
 	// Anthropic configuration
 	if key := viper.GetString("providers.anthropic.apiKey"); strings.TrimSpace(key) != "" {
@@ -299,6 +300,15 @@
 		viper.SetDefault("agents.title.model", models.AzureGPT41Mini)
 		return
 	}
+
+	// Google Cloud VertexAI configuration
+	if hasVertexAICredentials() {
+		viper.SetDefault("agents.coder.model", models.VertexAIGemini25)
+		viper.SetDefault("agents.summarizer.model", models.VertexAIGemini25)
+		viper.SetDefault("agents.task.model", models.VertexAIGemini25Flash)
+		viper.SetDefault("agents.title.model", models.VertexAIGemini25Flash)
+		return
+	}
 }
 
 // hasAWSCredentials checks if AWS credentials are available in the environment.
@@ -327,6 +337,19 @@ func hasAWSCredentials() bool {
 	return false
 }
 
+// hasVertexAICredentials checks if VertexAI credentials are available in the environment.
+func hasVertexAICredentials() bool {
+	// Check for explicit VertexAI parameters
+	if os.Getenv("VERTEXAI_PROJECT") != "" && os.Getenv("VERTEXAI_LOCATION") != "" {
+		return true
+	}
+	// Check for Google Cloud project and location
+	if os.Getenv("GOOGLE_CLOUD_PROJECT") != "" && (os.Getenv("GOOGLE_CLOUD_REGION") != "" || os.Getenv("GOOGLE_CLOUD_LOCATION") != "") {
+		return true
+	}
+	return false
+}
+
 // readConfig handles the result of reading a configuration file.
 func readConfig(err error) error {
 	if err == nil {
@@ -549,6 +572,10 @@ func getProviderAPIKey(provider models.ModelProvider) string {
 		if hasAWSCredentials() {
 			return "aws-credentials-available"
 		}
+	case models.ProviderVertexAI:
+		if hasVertexAICredentials() {
+			return "vertex-ai-credentials-available"
+		}
 	}
 	return ""
 }
@@ -669,6 +696,24 @@ func setDefaultModelForAgent(agent AgentName) bool {
 		return true
 	}
 
+	if hasVertexAICredentials() {
+		var model models.ModelID
+		maxTokens := int64(5000)
+
+		if agent == AgentTitle {
+			model = models.VertexAIGemini25Flash
+			maxTokens = 80
+		} else {
+			model = models.VertexAIGemini25
+		}
+
+		cfg.Agents[agent] = Agent{
+			Model:     model,
+			MaxTokens: maxTokens,
+		}
+		return true
+	}
+
 	return false
 }
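Credential detection is the linchpin of the defaults above: VertexAI is selected only when a complete project/location pair is present, from either the explicit `VERTEXAI_*` variables or the generic Google Cloud ones. A runnable sketch of that check in isolation — the function body mirrors the helper in this diff, while `main` and its values are illustrative only:

```go
package main

import (
	"fmt"
	"os"
)

// hasVertexAICredentials mirrors the helper added in the diff above.
func hasVertexAICredentials() bool {
	// Explicit VertexAI parameters win.
	if os.Getenv("VERTEXAI_PROJECT") != "" && os.Getenv("VERTEXAI_LOCATION") != "" {
		return true
	}
	// Otherwise accept the generic Google Cloud project/location variables.
	if os.Getenv("GOOGLE_CLOUD_PROJECT") != "" &&
		(os.Getenv("GOOGLE_CLOUD_REGION") != "" || os.Getenv("GOOGLE_CLOUD_LOCATION") != "") {
		return true
	}
	return false
}

func main() {
	os.Setenv("VERTEXAI_PROJECT", "my-project")   // hypothetical values
	os.Setenv("VERTEXAI_LOCATION", "us-central1")
	fmt.Println(hasVertexAICredentials()) // true: both variables are set
}
```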

@@ -43,6 +43,7 @@ var ProviderPopularity = map[ModelProvider]int{
 	ProviderOpenRouter: 5,
 	ProviderBedrock:    6,
 	ProviderAzure:      7,
+	ProviderVertexAI:   8,
 }
 
 var SupportedModels = map[ModelID]Model{
@@ -95,4 +96,5 @@ func init() {
 	maps.Copy(SupportedModels, AzureModels)
 	maps.Copy(SupportedModels, OpenRouterModels)
 	maps.Copy(SupportedModels, XAIModels)
+	maps.Copy(SupportedModels, VertexAIGeminiModels)
 }

@@ -0,0 +1,38 @@
+package models
+
+const (
+	ProviderVertexAI ModelProvider = "vertexai"
+
+	// Models
+	VertexAIGemini25Flash ModelID = "vertexai.gemini-2.5-flash"
+	VertexAIGemini25      ModelID = "vertexai.gemini-2.5"
+)
+
+var VertexAIGeminiModels = map[ModelID]Model{
+	VertexAIGemini25Flash: {
+		ID:                  VertexAIGemini25Flash,
+		Name:                "VertexAI: Gemini 2.5 Flash",
+		Provider:            ProviderVertexAI,
+		APIModel:            "gemini-2.5-flash-preview-04-17",
+		CostPer1MIn:         GeminiModels[Gemini25Flash].CostPer1MIn,
+		CostPer1MInCached:   GeminiModels[Gemini25Flash].CostPer1MInCached,
+		CostPer1MOut:        GeminiModels[Gemini25Flash].CostPer1MOut,
+		CostPer1MOutCached:  GeminiModels[Gemini25Flash].CostPer1MOutCached,
+		ContextWindow:       GeminiModels[Gemini25Flash].ContextWindow,
+		DefaultMaxTokens:    GeminiModels[Gemini25Flash].DefaultMaxTokens,
+		SupportsAttachments: true,
+	},
+	VertexAIGemini25: {
+		ID:                  VertexAIGemini25,
+		Name:                "VertexAI: Gemini 2.5 Pro",
+		Provider:            ProviderVertexAI,
+		APIModel:            "gemini-2.5-pro-preview-03-25",
+		CostPer1MIn:         GeminiModels[Gemini25].CostPer1MIn,
+		CostPer1MInCached:   GeminiModels[Gemini25].CostPer1MInCached,
+		CostPer1MOut:        GeminiModels[Gemini25].CostPer1MOut,
+		CostPer1MOutCached:  GeminiModels[Gemini25].CostPer1MOutCached,
+		ContextWindow:       GeminiModels[Gemini25].ContextWindow,
+		DefaultMaxTokens:    GeminiModels[Gemini25].DefaultMaxTokens,
+		SupportsAttachments: true,
+	},
+}
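The VertexAI entries deliberately reuse the Gemini pricing and context-window fields instead of restating them, so the two model tables cannot drift apart. As a hedged illustration of how per-million-token pricing like `CostPer1MIn`/`CostPer1MOut` is typically consumed downstream — the helper below is hypothetical, not part of this diff:

```go
package main

import "fmt"

// estimateCostUSD is a hypothetical helper: given per-million-token input and
// output prices (the CostPer1MIn / CostPer1MOut fields above), estimate the
// cost of a single request.
func estimateCostUSD(costPer1MIn, costPer1MOut float64, inTokens, outTokens int64) float64 {
	return float64(inTokens)/1e6*costPer1MIn + float64(outTokens)/1e6*costPer1MOut
}

func main() {
	// Example prices only; real values come from GeminiModels[Gemini25Flash].
	fmt.Printf("$%.6f\n", estimateCostUSD(0.15, 0.60, 12_000, 800))
}
```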

@@ -176,13 +176,16 @@ func (g *geminiClient) send(ctx context.Context, messages []message.Message, too
 	history := geminiMessages[:len(geminiMessages)-1] // All but last message
 	lastMsg := geminiMessages[len(geminiMessages)-1]
 
-	chat, _ := g.client.Chats.Create(ctx, g.providerOptions.model.APIModel, &genai.GenerateContentConfig{
+	config := &genai.GenerateContentConfig{
 		MaxOutputTokens: int32(g.providerOptions.maxTokens),
 		SystemInstruction: &genai.Content{
 			Parts: []*genai.Part{{Text: g.providerOptions.systemMessage}},
 		},
-		Tools: g.convertTools(tools),
-	}, history)
+	}
+	if len(tools) > 0 {
+		config.Tools = g.convertTools(tools)
+	}
+	chat, _ := g.client.Chats.Create(ctx, g.providerOptions.model.APIModel, config, history)
 
 	attempts := 0
 	for {
@@ -262,13 +265,16 @@ func (g *geminiClient) stream(ctx context.Context, messages []message.Message, t
 	history := geminiMessages[:len(geminiMessages)-1] // All but last message
 	lastMsg := geminiMessages[len(geminiMessages)-1]
 
-	chat, _ := g.client.Chats.Create(ctx, g.providerOptions.model.APIModel, &genai.GenerateContentConfig{
+	config := &genai.GenerateContentConfig{
 		MaxOutputTokens: int32(g.providerOptions.maxTokens),
 		SystemInstruction: &genai.Content{
 			Parts: []*genai.Part{{Text: g.providerOptions.systemMessage}},
 		},
-		Tools: g.convertTools(tools),
-	}, history)
+	}
+	if len(tools) > 0 {
+		config.Tools = g.convertTools(tools)
+	}
+	chat, _ := g.client.Chats.Create(ctx, g.providerOptions.model.APIModel, config, history)
 
 	attempts := 0
 	eventChan := make(chan ProviderEvent)
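Both `send` and `stream` now build the config first and attach `Tools` only when the agent actually has tools: leaving the field nil omits it from the request, whereas an explicitly empty tool list can be rejected by the backend. The pattern in isolation, as a sketch using the google.golang.org/genai types from this file — `buildConfig` is a hypothetical name, not a function in the diff:

```go
// buildConfig distills the change above into a free function.
func buildConfig(systemMsg string, maxTokens int32, tools []*genai.Tool) *genai.GenerateContentConfig {
	config := &genai.GenerateContentConfig{
		MaxOutputTokens: maxTokens,
		SystemInstruction: &genai.Content{
			Parts: []*genai.Part{{Text: systemMsg}},
		},
	}
	// nil Tools omits the field entirely; an empty slice would still be
	// serialized and may be rejected.
	if len(tools) > 0 {
		config.Tools = tools
	}
	return config
}
```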

@@ -123,6 +123,11 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 			options: clientOptions,
 			client:  newAzureClient(clientOptions),
 		}, nil
+	case models.ProviderVertexAI:
+		return &baseProvider[VertexAIClient]{
+			options: clientOptions,
+			client:  newVertexAIClient(clientOptions),
+		}, nil
 	case models.ProviderOpenRouter:
 		clientOptions.openaiOptions = append(clientOptions.openaiOptions,
 			WithOpenAIBaseURL("https://openrouter.ai/api/v1"),
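With the new case in place, callers select VertexAI exactly like any other provider. A hedged usage sketch — `NewProvider` and the model IDs come from this diff, but `WithModel` is an assumed option name for illustration, not confirmed API:

```go
package main

import (
	"log"

	"github.com/opencode-ai/opencode/internal/llm/models"
	"github.com/opencode-ai/opencode/internal/llm/provider"
)

func main() {
	// WithModel is assumed for illustration; the concrete ProviderClientOption
	// set lives elsewhere in the provider package.
	p, err := provider.NewProvider(
		models.ProviderVertexAI,
		provider.WithModel(models.SupportedModels[models.VertexAIGemini25]),
	)
	if err != nil {
		log.Fatal(err) // e.g. missing VERTEXAI_PROJECT / VERTEXAI_LOCATION
	}
	_ = p // ready for send/stream calls
}
```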

@@ -0,0 +1,34 @@
+package provider
+
+import (
+	"context"
+	"os"
+
+	"github.com/opencode-ai/opencode/internal/logging"
+	"google.golang.org/genai"
+)
+
+type VertexAIClient ProviderClient
+
+func newVertexAIClient(opts providerClientOptions) VertexAIClient {
+	geminiOpts := geminiOptions{}
+	for _, o := range opts.geminiOptions {
+		o(&geminiOpts)
+	}
+
+	client, err := genai.NewClient(context.Background(), &genai.ClientConfig{
+		Project:  os.Getenv("VERTEXAI_PROJECT"),
+		Location: os.Getenv("VERTEXAI_LOCATION"),
+		Backend:  genai.BackendVertexAI,
+	})
+	if err != nil {
+		logging.Error("Failed to create VertexAI client", "error", err)
+		return nil
+	}
+
+	return &geminiClient{
+		providerOptions: opts,
+		options:         geminiOpts,
+		client:          client,
+	}
+}
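Because `newVertexAIClient` returns the existing `geminiClient` wired to the Vertex backend, everything downstream — chat history handling, the tools guard, streaming — is shared with the Gemini provider. For reference, a minimal standalone call against the same backend, assuming `VERTEXAI_PROJECT`/`VERTEXAI_LOCATION` and Application Default Credentials are set; the model string matches `APIModel` for `vertexai.gemini-2.5-flash`, and the exact `Text()` signature may vary by genai version:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"google.golang.org/genai"
)

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		Project:  os.Getenv("VERTEXAI_PROJECT"),
		Location: os.Getenv("VERTEXAI_LOCATION"),
		Backend:  genai.BackendVertexAI, // route through Vertex, not the Gemini API
	})
	if err != nil {
		panic(err)
	}
	// One-shot generation; opencode itself goes through Chats.Create instead.
	resp, err := client.Models.GenerateContent(ctx,
		"gemini-2.5-flash-preview-04-17", genai.Text("Say hello"), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Text())
}
```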

@@ -12,63 +12,74 @@
       "model": {
         "description": "Model ID for the agent",
         "enum": [
-          "azure.o1-mini",
-          "openrouter.gemini-2.5-flash",
-          "claude-3-haiku",
-          "o1-mini",
-          "qwen-qwq",
-          "llama-3.3-70b-versatile",
-          "openrouter.claude-3.5-sonnet",
-          "o3-mini",
-          "o4-mini",
           "gpt-4.1",
-          "azure.o3-mini",
-          "openrouter.gpt-4.1-nano",
-          "openrouter.gpt-4o",
-          "gemini-2.5",
-          "azure.gpt-4o",
-          "azure.gpt-4o-mini",
-          "claude-3.7-sonnet",
-          "azure.gpt-4.1-nano",
-          "openrouter.o1",
-          "openrouter.claude-3-haiku",
-          "bedrock.claude-3.7-sonnet",
-          "gemini-2.5-flash",
-          "azure.o3",
-          "openrouter.gemini-2.5",
           "openrouter.o3",
-          "openrouter.o3-mini",
-          "openrouter.gpt-4.1-mini",
-          "openrouter.gpt-4.5-preview",
-          "openrouter.gpt-4o-mini",
-          "gpt-4.1-mini",
-          "meta-llama/llama-4-scout-17b-16e-instruct",
-          "openrouter.o1-mini",
-          "gpt-4.5-preview",
-          "o3",
-          "openrouter.claude-3.5-haiku",
-          "claude-3-opus",
-          "o1-pro",
-          "gemini-2.0-flash",
-          "azure.o4-mini",
-          "openrouter.o4-mini",
-          "claude-3.5-sonnet",
-          "meta-llama/llama-4-maverick-17b-128e-instruct",
-          "azure.o1",
           "openrouter.gpt-4.1",
-          "openrouter.o1-pro",
-          "gpt-4.1-nano",
-          "azure.gpt-4.5-preview",
-          "openrouter.claude-3-opus",
-          "gpt-4o-mini",
+          "meta-llama/llama-4-scout-17b-16e-instruct",
+          "openrouter.gpt-4o",
+          "o1-pro",
+          "claude-3-haiku",
           "o1",
-          "deepseek-r1-distill-llama-70b",
-          "azure.gpt-4.1",
-          "gpt-4o",
-          "azure.gpt-4.1-mini",
-          "openrouter.claude-3.7-sonnet",
+          "gemini-2.5-flash",
+          "vertexai.gemini-2.5-flash",
           "claude-3.5-haiku",
-          "gemini-2.0-flash-lite"
+          "gpt-4o-mini",
+          "o3-mini",
+          "gpt-4.5-preview",
+          "azure.gpt-4o",
+          "azure.o4-mini",
+          "openrouter.claude-3.5-sonnet",
+          "gpt-4o",
+          "o3",
+          "gpt-4.1-mini",
+          "llama-3.3-70b-versatile",
+          "azure.gpt-4o-mini",
+          "gpt-4.1-nano",
+          "o4-mini",
+          "qwen-qwq",
+          "openrouter.claude-3.5-haiku",
+          "openrouter.qwen-3-14b",
+          "vertexai.gemini-2.5",
+          "gemini-2.5",
+          "azure.gpt-4.1-nano",
+          "openrouter.o1-mini",
+          "openrouter.qwen-3-30b",
+          "claude-3.7-sonnet",
+          "claude-3.5-sonnet",
+          "gemini-2.0-flash",
+          "meta-llama/llama-4-maverick-17b-128e-instruct",
+          "openrouter.o3-mini",
+          "openrouter.o4-mini",
+          "openrouter.gpt-4.1-mini",
+          "openrouter.o1",
+          "o1-mini",
+          "azure.gpt-4.1-mini",
+          "openrouter.o1-pro",
+          "grok-3-beta",
+          "grok-3-mini-fast-beta",
+          "openrouter.claude-3.7-sonnet",
+          "openrouter.claude-3-opus",
+          "openrouter.qwen-3-235b",
+          "openrouter.gpt-4.1-nano",
+          "bedrock.claude-3.7-sonnet",
+          "openrouter.qwen-3-8b",
+          "claude-3-opus",
+          "azure.o1-mini",
+          "deepseek-r1-distill-llama-70b",
+          "gemini-2.0-flash-lite",
+          "openrouter.qwen-3-32b",
+          "openrouter.gpt-4.5-preview",
+          "grok-3-mini-beta",
+          "grok-3-fast-beta",
+          "azure.o3-mini",
+          "openrouter.claude-3-haiku",
+          "azure.gpt-4.1",
+          "azure.o1",
+          "azure.o3",
+          "azure.gpt-4.5-preview",
+          "openrouter.gemini-2.5-flash",
+          "openrouter.gpt-4o-mini",
+          "openrouter.gemini-2.5"
         ],
         "type": "string"
       },
@@ -102,63 +113,74 @@
       "model": {
         "description": "Model ID for the agent",
         "enum": [
-          "azure.o1-mini",
-          "openrouter.gemini-2.5-flash",
-          "claude-3-haiku",
-          "o1-mini",
-          "qwen-qwq",
-          "llama-3.3-70b-versatile",
-          "openrouter.claude-3.5-sonnet",
-          "o3-mini",
-          "o4-mini",
           "gpt-4.1",
-          "azure.o3-mini",
-          "openrouter.gpt-4.1-nano",
-          "openrouter.gpt-4o",
-          "gemini-2.5",
-          "azure.gpt-4o",
-          "azure.gpt-4o-mini",
-          "claude-3.7-sonnet",
-          "azure.gpt-4.1-nano",
-          "openrouter.o1",
-          "openrouter.claude-3-haiku",
-          "bedrock.claude-3.7-sonnet",
-          "gemini-2.5-flash",
-          "azure.o3",
-          "openrouter.gemini-2.5",
           "openrouter.o3",
-          "openrouter.o3-mini",
-          "openrouter.gpt-4.1-mini",
-          "openrouter.gpt-4.5-preview",
-          "openrouter.gpt-4o-mini",
-          "gpt-4.1-mini",
-          "meta-llama/llama-4-scout-17b-16e-instruct",
-          "openrouter.o1-mini",
-          "gpt-4.5-preview",
-          "o3",
-          "openrouter.claude-3.5-haiku",
-          "claude-3-opus",
-          "o1-pro",
-          "gemini-2.0-flash",
-          "azure.o4-mini",
-          "openrouter.o4-mini",
-          "claude-3.5-sonnet",
-          "meta-llama/llama-4-maverick-17b-128e-instruct",
-          "azure.o1",
           "openrouter.gpt-4.1",
-          "openrouter.o1-pro",
-          "gpt-4.1-nano",
-          "azure.gpt-4.5-preview",
-          "openrouter.claude-3-opus",
-          "gpt-4o-mini",
+          "meta-llama/llama-4-scout-17b-16e-instruct",
+          "openrouter.gpt-4o",
+          "o1-pro",
+          "claude-3-haiku",
           "o1",
-          "deepseek-r1-distill-llama-70b",
-          "azure.gpt-4.1",
-          "gpt-4o",
-          "azure.gpt-4.1-mini",
-          "openrouter.claude-3.7-sonnet",
+          "gemini-2.5-flash",
+          "vertexai.gemini-2.5-flash",
           "claude-3.5-haiku",
-          "gemini-2.0-flash-lite"
+          "gpt-4o-mini",
+          "o3-mini",
+          "gpt-4.5-preview",
+          "azure.gpt-4o",
+          "azure.o4-mini",
+          "openrouter.claude-3.5-sonnet",
+          "gpt-4o",
+          "o3",
+          "gpt-4.1-mini",
+          "llama-3.3-70b-versatile",
+          "azure.gpt-4o-mini",
+          "gpt-4.1-nano",
+          "o4-mini",
+          "qwen-qwq",
+          "openrouter.claude-3.5-haiku",
+          "openrouter.qwen-3-14b",
+          "vertexai.gemini-2.5",
+          "gemini-2.5",
+          "azure.gpt-4.1-nano",
+          "openrouter.o1-mini",
+          "openrouter.qwen-3-30b",
+          "claude-3.7-sonnet",
+          "claude-3.5-sonnet",
+          "gemini-2.0-flash",
+          "meta-llama/llama-4-maverick-17b-128e-instruct",
+          "openrouter.o3-mini",
+          "openrouter.o4-mini",
+          "openrouter.gpt-4.1-mini",
+          "openrouter.o1",
+          "o1-mini",
+          "azure.gpt-4.1-mini",
+          "openrouter.o1-pro",
+          "grok-3-beta",
+          "grok-3-mini-fast-beta",
+          "openrouter.claude-3.7-sonnet",
+          "openrouter.claude-3-opus",
+          "openrouter.qwen-3-235b",
+          "openrouter.gpt-4.1-nano",
+          "bedrock.claude-3.7-sonnet",
+          "openrouter.qwen-3-8b",
+          "claude-3-opus",
+          "azure.o1-mini",
+          "deepseek-r1-distill-llama-70b",
+          "gemini-2.0-flash-lite",
+          "openrouter.qwen-3-32b",
+          "openrouter.gpt-4.5-preview",
+          "grok-3-mini-beta",
+          "grok-3-fast-beta",
+          "azure.o3-mini",
+          "openrouter.claude-3-haiku",
+          "azure.gpt-4.1",
+          "azure.o1",
+          "azure.o3",
+          "azure.gpt-4.5-preview",
+          "openrouter.gemini-2.5-flash",
+          "openrouter.gpt-4o-mini",
+          "openrouter.gemini-2.5"
         ],
         "type": "string"
      },
@@ -341,7 +363,8 @@
           "groq",
           "openrouter",
           "bedrock",
-          "azure"
+          "azure",
+          "vertexai"
         ],
         "type": "string"
       }