Compare commits


No commits in common. "dev" and "v0.1.194" have entirely different histories.

424 changed files with 12076 additions and 51030 deletions


@ -1,14 +0,0 @@
name: discord
on:
release:
types: [published] # fires only when a release is published
jobs:
notify:
runs-on: ubuntu-latest
steps:
- name: Send nicely-formatted embed to Discord
uses: SethCohen/github-releases-to-discord@v1
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK }}


@ -1,29 +0,0 @@
name: opencode
on:
issue_comment:
types: [created]
jobs:
opencode:
if: |
contains(github.event.comment.body, ' /oc') ||
startsWith(github.event.comment.body, '/oc') ||
contains(github.event.comment.body, ' /opencode') ||
startsWith(github.event.comment.body, '/opencode')
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run opencode
uses: sst/opencode/github@latest
env:
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
with:
model: anthropic/claude-sonnet-4-20250514


@ -1,30 +0,0 @@
name: publish-github-action
on:
workflow_dispatch:
push:
tags:
- "github-v*.*.*"
- "!github-v1"
concurrency: ${{ github.workflow }}-${{ github.ref }}
permissions:
contents: write
jobs:
publish:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- run: git fetch --force --tags
- name: Publish
run: |
git config --global user.email "opencode@sst.dev"
git config --global user.name "opencode"
./script/publish
working-directory: ./github


@ -1,36 +0,0 @@
name: publish-vscode
on:
workflow_dispatch:
push:
tags:
- "vscode-v*.*.*"
concurrency: ${{ github.workflow }}-${{ github.ref }}
permissions:
contents: write
jobs:
publish:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: oven-sh/setup-bun@v2
with:
bun-version: 1.2.17
- run: git fetch --force --tags
- run: bun install -g @vscode/vsce
- name: Publish
run: |
bun install
./script/publish
working-directory: ./sdks/vscode
env:
VSCE_PAT: ${{ secrets.VSCE_PAT }}
OPENVSX_TOKEN: ${{ secrets.OPENVSX_TOKEN }}


@ -1,17 +1,12 @@
 name: publish
-run-name: "${{ format('v{0}', inputs.version) }}"
 on:
   workflow_dispatch:
-    inputs:
-      version:
-        description: "Version to publish"
-        required: true
-        type: string
-      title:
-        description: "Custom title for this run"
-        required: false
-        type: string
+  push:
+    branches:
+      - dev
+    tags:
+      - "*"
 concurrency: ${{ github.workflow }}-${{ github.ref }}
@ -37,16 +32,7 @@ jobs:
       - uses: oven-sh/setup-bun@v2
         with:
-          bun-version: 1.2.19
+          bun-version: 1.2.17
-      - name: Cache ~/.bun
-        id: cache-bun
-        uses: actions/cache@v3
-        with:
-          path: ~/.bun
-          key: ${{ runner.os }}-bun-${{ hashFiles('bun.lock') }}
-          restore-keys: |
-            ${{ runner.os }}-bun-
       - name: Install makepkg
         run: |
@ -62,12 +48,15 @@ jobs:
           git config --global user.email "opencode@sst.dev"
           git config --global user.name "opencode"
-      - name: Install dependencies
-        run: bun install
       - name: Publish
         run: |
-          OPENCODE_VERSION=${{ inputs.version }} ./script/publish.ts
+          bun install
+          if [ "${{ startsWith(github.ref, 'refs/tags/') }}" = "true" ]; then
+            ./script/publish.ts
+          else
+            ./script/publish.ts --snapshot
+          fi
+        working-directory: ./packages/opencode
         env:
           GITHUB_TOKEN: ${{ secrets.SST_GITHUB_TOKEN }}
           AUR_KEY: ${{ secrets.AUR_KEY }}


@ -21,12 +21,12 @@ jobs:
           bun-version: latest
       - name: Run stats script
-        run: bun script/stats.ts
+        run: bun scripts/stats.ts
       - name: Commit stats
         run: |
           git config --local user.email "action@github.com"
           git config --local user.name "GitHub Action"
           git add STATS.md
-          git diff --staged --quiet || git commit -m "ignore: update download stats $(date -I)"
+          git diff --staged --quiet || git commit -m "Update download stats $(date -I)"
           git push


@ -1,24 +0,0 @@
name: Typecheck
on:
pull_request:
branches: [dev]
workflow_dispatch:
jobs:
typecheck:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Bun
uses: oven-sh/setup-bun@v1
with:
bun-version: 1.2.19
- name: Install dependencies
run: bun install
- name: Run typecheck
run: bun typecheck

.gitignore

@ -1,8 +1,8 @@
 .DS_Store
 node_modules
-.opencode
 .sst
 .env
 .idea
 .vscode
 openapi.json
+playground


@ -1,44 +0,0 @@
---
description: >-
Use this agent when you need to create or improve documentation that requires
concrete examples to illustrate every concept. Examples include:
<example>Context: User has written a new API endpoint and needs documentation.
user: 'I just created a POST /users endpoint that accepts name and email
fields. Can you document this?' assistant: 'I'll use the
example-driven-docs-writer agent to create documentation with practical
examples for your API endpoint.' <commentary>Since the user needs
documentation with examples, use the example-driven-docs-writer agent to
create comprehensive docs with code samples.</commentary></example>
<example>Context: User has a complex configuration file that needs
documentation. user: 'This config file has multiple sections and I need docs
that show how each option works' assistant: 'Let me use the
example-driven-docs-writer agent to create documentation that breaks down each
configuration option with practical examples.' <commentary>The user needs
documentation that demonstrates configuration options, perfect for the
example-driven-docs-writer agent.</commentary></example>
---
You are an expert technical documentation writer who specializes in creating clear, example-rich documentation that never leaves readers guessing. Your core principle is that every concept must be immediately illustrated with concrete examples, code samples, or practical demonstrations.
Your documentation approach:
- Never write more than one sentence in any section without providing an example, code snippet, diagram, or practical illustration
- Break up longer explanations with multiple examples showing different scenarios or use cases
- Use concrete, realistic examples rather than abstract or placeholder content
- Include both basic and advanced examples when covering complex topics
- Show expected inputs, outputs, and results for all examples
- Use code blocks, bullet points, tables, or other formatting to visually separate examples from explanatory text
Structural requirements:
- Start each section with a brief one-sentence explanation followed immediately by an example
- For multi-step processes, provide an example after each step
- Include error examples and edge cases alongside success scenarios
- Use consistent formatting and naming conventions throughout examples
- Ensure examples are copy-pasteable and functional when applicable
Quality standards:
- Verify that no paragraph exceeds one sentence without an accompanying example
- Test that examples are accurate and would work in real scenarios
- Ensure examples progress logically from simple to complex
- Include context for when and why to use different approaches shown in examples
- Provide troubleshooting examples for common issues
When you receive a documentation request, immediately identify what needs examples and plan to illustrate every single concept, feature, or instruction with concrete demonstrations. Ask for clarification if you need more context to create realistic, useful examples.


@ -1,10 +0,0 @@
import { Plugin } from "./index"
export const ExamplePlugin: Plugin = async ({ app, client, $ }) => {
return {
permission: {},
async "chat.params"(input, output) {
output.topP = 1
},
}
}


@ -1,13 +0,0 @@
## Style
- prefer single word variable/function names
- avoid try catch where possible - prefer to let exceptions bubble up
- avoid else statements where possible
- do not make useless helper functions - inline functionality unless the
function is reusable or composable
- prefer Bun apis
## Workflow
- you can regenerate the golang sdk by calling ./scripts/stainless.ts
- we use bun for everything
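
A minimal sketch of what these rules can look like in practice (hypothetical code, not taken from the repository):

```ts
// Hypothetical illustration of the style above: single-word names,
// no try/catch, no else, helpers inlined, Bun APIs preferred.
import { $ } from "bun"

export async function version(dir: string) {
  const file = Bun.file(`${dir}/package.json`)
  if (await file.exists()) return (await file.json()).version
  // no try/catch: a failing git call is left to bubble up to the caller
  const tag = await $`git -C ${dir} describe --tags`.text()
  return tag.trim()
}
```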


@ -9,7 +9,7 @@
 </p>
 <p align="center">AI coding agent, built for the terminal.</p>
 <p align="center">
-  <a href="https://opencode.ai/discord"><img alt="Discord" src="https://img.shields.io/discord/1391832426048651334?style=flat-square&label=discord" /></a>
+  <a href="https://opencode.ai/docs"><img alt="View docs" src="https://img.shields.io/badge/view-docs-blue?style=flat-square" /></a>
   <a href="https://www.npmjs.com/package/opencode-ai"><img alt="npm" src="https://img.shields.io/npm/v/opencode-ai?style=flat-square" /></a>
   <a href="https://github.com/sst/opencode/actions/workflows/publish.yml"><img alt="Build status" src="https://img.shields.io/github/actions/workflow/status/sst/opencode/publish.yml?style=flat-square&branch=dev" /></a>
 </p>
@ -30,23 +30,7 @@ brew install sst/tap/opencode # macOS
 paru -S opencode-bin # Arch Linux
 ```
-> [!TIP]
-> Remove versions older than 0.1.x before installing.
+> **Note:** Remove versions older than 0.1.x before installing
-#### Installation Directory
-The install script respects the following priority order for the installation path:
-1. `$OPENCODE_INSTALL_DIR` - Custom installation directory
-2. `$XDG_BIN_DIR` - XDG Base Directory Specification compliant path
-3. `$HOME/bin` - Standard user binary directory (if exists or can be created)
-4. `$HOME/.opencode/bin` - Default fallback
-```bash
-# Examples
-OPENCODE_INSTALL_DIR=/usr/local/bin curl -fsSL https://opencode.ai/install | bash
-XDG_BIN_DIR=$HOME/.local/bin curl -fsSL https://opencode.ai/install | bash
-```
 ### Documentation
@ -54,25 +38,10 @@ For more info on how to configure opencode [**head over to our docs**](https://o
 ### Contributing
-opencode is an opinionated tool so any fundamental feature needs to go through a
-design process with the core team.
+For any new features we'd appreciate it if you could open an issue first to discuss what you'd like to implement. We're pretty responsive there and it'll save you from working on something that we don't end up using. No need to do this for simpler fixes.
-> [!IMPORTANT]
-> We do not accept PRs for core features.
+> **Note**: Please talk to us via github issues before spending time working on
+> a new feature
-However we still merge a ton of PRs - you can contribute:
-- Bug fixes
-- Improvements to LLM performance
-- Support for new providers
-- Fixes for env specific quirks
-- Missing standard behavior
-- Documentation
-Take a look at the git history to see what kind of PRs we end up merging.
-> [!NOTE]
-> If you do not follow the above guidelines we might close your PR.
 To run opencode locally you need.
@ -97,7 +66,7 @@ $ bun run packages/opencode/src/index.ts
 It's very similar to Claude Code in terms of capability. Here are the key differences:
 - 100% open source
-- Not coupled to any provider. Although Anthropic is recommended, opencode can be used with OpenAI, Google or even local models. As models evolve the gaps between them will close and pricing will drop so being provider-agnostic is important.
+- Not coupled to any provider. Although Anthropic is recommended, opencode can be used with OpenAI, Google or even local models. As models evolve the gaps between them will close and pricing will drop so being provider agnostic is important.
 - A focus on TUI. opencode is built by neovim users and the creators of [terminal.shop](https://terminal.shop); we are going to push the limits of what's possible in the terminal.
 - A client/server architecture. This for example can allow opencode to run on your computer, while you can drive it remotely from a mobile app. Meaning that the TUI frontend is just one of the possible clients.
@ -107,4 +76,4 @@ The other confusingly named repo has no relation to this one. You can [read the
 ---
-**Join our community** [Discord](https://discord.gg/opencode) | [YouTube](https://www.youtube.com/c/sst-dev) | [X.com](https://x.com/SST_dev)
+**Join our community** [YouTube](https://www.youtube.com/c/sst-dev) | [X.com](https://x.com/SST_dev)

@ -1,40 +1,10 @@
 # Download Stats
 | Date | GitHub Downloads | npm Downloads | Total |
-| ---------- | ---------------- | ---------------- | ---------------- |
+| ---------- | ---------------- | --------------- | --------------- |
 | 2025-06-29 | 18,789 (+0) | 39,420 (+0) | 58,209 (+0) |
 | 2025-06-30 | 20,127 (+1,338) | 41,059 (+1,639) | 61,186 (+2,977) |
 | 2025-07-01 | 22,108 (+1,981) | 43,745 (+2,686) | 65,853 (+4,667) |
 | 2025-07-02 | 24,814 (+2,706) | 46,168 (+2,423) | 70,982 (+5,129) |
 | 2025-07-03 | 27,834 (+3,020) | 49,955 (+3,787) | 77,789 (+6,807) |
 | 2025-07-04 | 30,608 (+2,774) | 54,758 (+4,803) | 85,366 (+7,577) |
-| 2025-07-05 | 32,524 (+1,916) | 58,371 (+3,613) | 90,895 (+5,529) |
-| 2025-07-06 | 33,766 (+1,242) | 59,694 (+1,323) | 93,460 (+2,565) |
-| 2025-07-08 | 38,052 (+4,286) | 64,468 (+4,774) | 102,520 (+9,060) |
-| 2025-07-09 | 40,924 (+2,872) | 67,935 (+3,467) | 108,859 (+6,339) |
-| 2025-07-10 | 43,796 (+2,872) | 71,402 (+3,467) | 115,198 (+6,339) |
-| 2025-07-11 | 46,982 (+3,186) | 77,462 (+6,060) | 124,444 (+9,246) |
-| 2025-07-12 | 49,302 (+2,320) | 82,177 (+4,715) | 131,479 (+7,035) |
-| 2025-07-13 | 50,803 (+1,501) | 86,394 (+4,217) | 137,197 (+5,718) |
-| 2025-07-14 | 53,283 (+2,480) | 87,860 (+1,466) | 141,143 (+3,946) |
-| 2025-07-15 | 57,590 (+4,307) | 91,036 (+3,176) | 148,626 (+7,483) |
-| 2025-07-16 | 62,313 (+4,723) | 95,258 (+4,222) | 157,571 (+8,945) |
-| 2025-07-17 | 66,684 (+4,371) | 100,048 (+4,790) | 166,732 (+9,161) |
-| 2025-07-18 | 70,379 (+3,695) | 102,587 (+2,539) | 172,966 (+6,234) |
-| 2025-07-19 | 73,497 (+3,117) | 105,904 (+3,317) | 179,401 (+6,434) |
-| 2025-07-20 | 76,453 (+2,956) | 109,044 (+3,140) | 185,497 (+6,096) |
-| 2025-07-21 | 80,197 (+3,744) | 113,537 (+4,493) | 193,734 (+8,237) |
-| 2025-07-22 | 84,251 (+4,054) | 118,073 (+4,536) | 202,324 (+8,590) |
-| 2025-07-23 | 88,589 (+4,338) | 121,436 (+3,363) | 210,025 (+7,701) |
-| 2025-07-24 | 92,469 (+3,880) | 124,091 (+2,655) | 216,560 (+6,535) |
-| 2025-07-25 | 96,417 (+3,948) | 126,985 (+2,894) | 223,402 (+6,842) |
-| 2025-07-26 | 100,646 (+4,229) | 131,411 (+4,426) | 232,057 (+8,655) |
-| 2025-07-27 | 102,644 (+1,998) | 134,736 (+3,325) | 237,380 (+5,323) |
-| 2025-07-28 | 105,446 (+2,802) | 136,016 (+1,280) | 241,462 (+4,082) |
-| 2025-07-29 | 108,998 (+3,552) | 137,542 (+1,526) | 246,540 (+5,078) |
-| 2025-07-30 | 113,544 (+4,546) | 140,317 (+2,775) | 253,861 (+7,321) |
-| 2025-07-31 | 118,339 (+4,795) | 143,344 (+3,027) | 261,683 (+7,822) |
-| 2025-08-01 | 123,539 (+5,200) | 146,680 (+3,336) | 270,219 (+8,536) |
-| 2025-08-02 | 127,864 (+4,325) | 149,236 (+2,556) | 277,100 (+6,881) |
-| 2025-08-03 | 131,397 (+3,533) | 150,451 (+1,215) | 281,848 (+4,748) |
-| 2025-08-04 | 136,266 (+4,869) | 153,260 (+2,809) | 289,526 (+7,678) |

bun.lock

File diff suppressed because it is too large.


@ -1,137 +0,0 @@
# opencode GitHub Action
A GitHub Action that integrates [opencode](https://opencode.ai) directly into your GitHub workflow.
Mention `/opencode` in your comment, and opencode will execute tasks within your GitHub Actions runner.
## Features
#### Explain an issue
Leave the following comment on a GitHub issue. `opencode` will read the entire thread, including all comments, and reply with a clear explanation.
```
/opencode explain this issue
```
#### Fix an issue
Leave the following comment on a GitHub issue. opencode will create a new branch, implement the changes, and open a PR with the changes.
```
/opencode fix this
```
#### Review PRs and make changes
Leave the following comment on a GitHub PR. opencode will implement the requested change and commit it to the same PR.
```
Delete the attachment from S3 when the note is removed /oc
```
## Installation
Run the following command in the terminal from your GitHub repo:
```bash
opencode github install
```
This will walk you through installing the GitHub app, creating the workflow, and setting up secrets.
### Manual Setup
1. Install the GitHub app https://github.com/apps/opencode-agent. Make sure it is installed on the target repository.
2. Add the following workflow file to `.github/workflows/opencode.yml` in your repo. Set the appropriate `model` and required API keys in `env`.
```yml
name: opencode
on:
issue_comment:
types: [created]
jobs:
opencode:
if: |
contains(github.event.comment.body, '/oc') ||
contains(github.event.comment.body, '/opencode')
runs-on: ubuntu-latest
permissions:
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run opencode
uses: sst/opencode/github@latest
env:
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
with:
model: anthropic/claude-sonnet-4-20250514
```
3. Store the API keys in secrets. In your organization or project **settings**, expand **Secrets and variables** on the left and select **Actions**. Add the required API keys.
## Support
This is an early release. If you encounter issues or have feedback, please create an issue at https://github.com/sst/opencode/issues.
## Development
To test locally:
1. Navigate to a test repo (e.g. `hello-world`):
```bash
cd hello-world
```
2. Run:
```bash
MODEL=anthropic/claude-sonnet-4-20250514 \
ANTHROPIC_API_KEY=sk-ant-api03-1234567890 \
GITHUB_RUN_ID=dummy \
bun /path/to/opencode/packages/opencode/src/index.ts github run \
--token 'github_pat_1234567890' \
--event '{"eventName":"issue_comment",...}'
```
- `MODEL`: The model used by opencode. Same as the `MODEL` defined in the GitHub workflow.
- `ANTHROPIC_API_KEY`: Your model provider API key. Same as the keys defined in the GitHub workflow.
- `GITHUB_RUN_ID`: Dummy value to emulate GitHub action environment.
- `/path/to/opencode`: Path to your cloned opencode repo. `bun /path/to/opencode/packages/opencode/src/index.ts` runs your local version of `opencode`.
- `--token`: A GitHub personal access token. This token is used to verify you have `admin` or `write` access to the test repo. Generate a token [here](https://github.com/settings/personal-access-tokens).
- `--event`: Mock GitHub event payload (see templates below).
### Issue comment event
```
--event '{"eventName":"issue_comment","repo":{"owner":"sst","repo":"hello-world"},"actor":"fwang","payload":{"issue":{"number":4},"comment":{"id":1,"body":"hey opencode, summarize thread"}}}'
```
Replace:
- `"owner":"sst"` with repo owner
- `"repo":"hello-world"` with repo name
- `"actor":"fwang"` with the GitHub username of commentor
- `"number":4` with the GitHub issue id
- `"body":"hey opencode, summarize thread"` with comment body
### Issue comment with image attachment.
```
--event '{"eventName":"issue_comment","repo":{"owner":"sst","repo":"hello-world"},"actor":"fwang","payload":{"issue":{"number":4},"comment":{"id":1,"body":"hey opencode, what is in my image ![Image](https://github.com/user-attachments/assets/xxxxxxxx)"}}}'
```
Replace the image URL `https://github.com/user-attachments/assets/xxxxxxxx` with a valid GitHub attachment (you can generate one by commenting with an image in any issue).
### PR comment event
```
--event '{"eventName":"issue_comment","repo":{"owner":"sst","repo":"hello-world"},"actor":"fwang","payload":{"issue":{"number":4,"pull_request":{}},"comment":{"id":1,"body":"hey opencode, summarize thread"}}}'
```


@ -1,29 +0,0 @@
name: "opencode GitHub Action"
description: "Run opencode in GitHub Actions workflows"
branding:
icon: "code"
color: "orange"
inputs:
model:
description: "Model to use"
required: true
share:
description: "Share the opencode session (defaults to true for public repos)"
required: false
runs:
using: "composite"
steps:
- name: Install opencode
shell: bash
run: curl -fsSL https://opencode.ai/install | bash
- name: Run opencode
shell: bash
id: run_opencode
run: opencode github run
env:
MODEL: ${{ inputs.model }}
SHARE: ${{ inputs.share }}


@ -1,15 +0,0 @@
#!/usr/bin/env bash
# Get the latest Git tag
latest_tag=$(git tag --sort=committerdate | grep -E '^github-v[0-9]+\.[0-9]+\.[0-9]+$' | tail -1)
if [ -z "$latest_tag" ]; then
echo "No tags found"
exit 1
fi
echo "Latest tag: $latest_tag"
# Update latest tag
git tag -d latest
git push origin :refs/tags/latest
git tag -a latest $latest_tag -m "Update latest to $latest_tag"
git push origin latest


@ -4,8 +4,6 @@ export const domain = (() => {
   return `${$app.stage}.dev.opencode.ai`
 })()
-const GITHUB_APP_ID = new sst.Secret("GITHUB_APP_ID")
-const GITHUB_APP_PRIVATE_KEY = new sst.Secret("GITHUB_APP_PRIVATE_KEY")
 const bucket = new sst.cloudflare.Bucket("Bucket")
 export const api = new sst.cloudflare.Worker("Api", {
@ -15,7 +13,7 @@ export const api = new sst.cloudflare.Worker("Api", {
     WEB_DOMAIN: domain,
   },
   url: true,
-  link: [bucket, GITHUB_APP_ID, GITHUB_APP_PRIVATE_KEY],
+  link: [bucket],
   transform: {
     worker: (args) => {
       args.logpush = true
@ -41,8 +39,6 @@ new sst.cloudflare.x.Astro("Web", {
   domain,
   path: "packages/web",
   environment: {
-    // For astro config
-    SST_STAGE: $app.stage,
     VITE_API_URL: api.url,
   },
 })


@ -48,7 +48,7 @@ if [ -z "$requested_version" ]; then
url="https://github.com/sst/opencode/releases/latest/download/$filename" url="https://github.com/sst/opencode/releases/latest/download/$filename"
specific_version=$(curl -s https://api.github.com/repos/sst/opencode/releases/latest | awk -F'"' '/"tag_name": "/ {gsub(/^v/, "", $4); print $4}') specific_version=$(curl -s https://api.github.com/repos/sst/opencode/releases/latest | awk -F'"' '/"tag_name": "/ {gsub(/^v/, "", $4); print $4}')
if [[ $? -ne 0 || -z "$specific_version" ]]; then if [[ $? -ne 0 ]]; then
echo "${RED}Failed to fetch version information${NC}" echo "${RED}Failed to fetch version information${NC}"
exit 1 exit 1
fi fi
@ -186,3 +186,4 @@ if [ -n "${GITHUB_ACTIONS-}" ] && [ "${GITHUB_ACTIONS}" == "true" ]; then
echo "$INSTALL_DIR" >> $GITHUB_PATH echo "$INSTALL_DIR" >> $GITHUB_PATH
print_message info "Added $INSTALL_DIR to \$GITHUB_PATH" print_message info "Added $INSTALL_DIR to \$GITHUB_PATH"
fi fi


@ -1,13 +1,19 @@
 {
   "$schema": "https://opencode.ai/config.json",
-  "mcp": {
-    "context7": {
-      "type": "remote",
-      "url": "https://mcp.context7.com/sse"
-    },
-    "weather": {
-      "type": "local",
-      "command": ["opencode", "x", "@h1deya/mcp-server-weather"]
-    }
-  }
+  "experimental": {
+    "hook": {
+      "file_edited": {
+        ".json": [
+          {
+            "command": ["bun", "run", "prettier", "$FILE"]
+          }
+        ]
+      },
+      "session_completed": [
+        {
+          "command": ["touch", "./node_modules/foo"]
+        }
+      ]
+    }
+  }
 }


@ -5,24 +5,20 @@
"type": "module", "type": "module",
"packageManager": "bun@1.2.14", "packageManager": "bun@1.2.14",
"scripts": { "scripts": {
"dev": "bun run --conditions=development packages/opencode/src/index.ts", "dev": "bun run packages/opencode/src/index.ts",
"typecheck": "bun run --filter='*' typecheck", "typecheck": "bun run --filter='*' typecheck",
"stainless": "./scripts/stainless", "stainless": "bun run ./packages/opencode/src/index.ts serve ",
"postinstall": "./script/hooks" "postinstall": "./scripts/hooks"
}, },
"workspaces": { "workspaces": {
"packages": [ "packages": [
"packages/*", "packages/*"
"packages/sdk/js"
], ],
"catalog": { "catalog": {
"@types/node": "22.13.9",
"@tsconfig/node22": "22.0.2",
"ai": "5.0.0-beta.34",
"hono": "4.7.10",
"typescript": "5.8.2", "typescript": "5.8.2",
"zod": "3.25.49", "@types/node": "22.13.9",
"remeda": "2.26.0" "zod": "3.24.2",
"ai": "4.3.16"
} }
}, },
"devDependencies": { "devDependencies": {
@ -35,8 +31,10 @@
}, },
"license": "MIT", "license": "MIT",
"prettier": { "prettier": {
"semi": false, "semi": false
"printWidth": 120 },
"overrides": {
"zod": "3.24.2"
}, },
"trustedDependencies": [ "trustedDependencies": [
"esbuild", "esbuild",
@ -44,6 +42,6 @@
"sharp" "sharp"
], ],
"patchedDependencies": { "patchedDependencies": {
"marked-shiki@1.2.0": "patches/marked-shiki@1.2.0.patch" "ai@4.3.16": "patches/ai@4.3.16.patch"
} }
} }


@ -1,6 +1,6 @@
 {
   "name": "@opencode/function",
-  "version": "0.3.126",
+  "version": "0.0.1",
   "$schema": "https://json.schemastore.org/package.json",
   "private": true,
   "type": "module",
@ -8,11 +8,5 @@
     "@cloudflare/workers-types": "4.20250522.0",
     "typescript": "catalog:",
     "@types/node": "catalog:"
-  },
-  "dependencies": {
-    "@octokit/auth-app": "8.0.1",
-    "@octokit/rest": "22.0.0",
-    "hono": "catalog:",
-    "jose": "6.0.11"
   }
 }


@ -1,10 +1,5 @@
-import { Hono } from "hono"
 import { DurableObject } from "cloudflare:workers"
 import { randomUUID } from "node:crypto"
-import { jwtVerify, createRemoteJWKSet } from "jose"
-import { createAppAuth } from "@octokit/auth-app"
-import { Octokit } from "@octokit/rest"
-import { Resource } from "sst"
 type Env = {
   SYNC_SERVER: DurableObjectNamespace<SyncServer>
@ -45,8 +40,7 @@ export class SyncServer extends DurableObject<Env> {
     const sessionID = await this.getSessionID()
     if (
       !key.startsWith(`session/info/${sessionID}`) &&
-      !key.startsWith(`session/message/${sessionID}/`) &&
-      !key.startsWith(`session/part/${sessionID}/`)
+      !key.startsWith(`session/message/${sessionID}/`)
     )
       return new Response("Error: Invalid key", { status: 400 })
@ -76,7 +70,7 @@ }
   }
   public async getData() {
-    const data = (await this.ctx.storage.list()) as Map<string, any>
+    const data = await this.ctx.storage.list()
     return Array.from(data.entries())
       .filter(([key, _]) => key.startsWith("session/"))
       .map(([key, content]) => ({ key, content }))
@ -112,66 +106,96 @@ }
   }
 }
-export default new Hono<{ Bindings: Env }>()
-  .get("/", (c) => c.text("Hello, world!"))
-  .post("/share_create", async (c) => {
-    const body = await c.req.json<{ sessionID: string }>()
-    const sessionID = body.sessionID
-    const short = SyncServer.shortName(sessionID)
-    const id = c.env.SYNC_SERVER.idFromName(short)
-    const stub = c.env.SYNC_SERVER.get(id)
-    const secret = await stub.share(sessionID)
-    return c.json({
-      secret,
-      url: `https://${c.env.WEB_DOMAIN}/s/${short}`,
-    })
-  })
-  .post("/share_delete", async (c) => {
-    const body = await c.req.json<{ sessionID: string; secret: string }>()
-    const sessionID = body.sessionID
-    const secret = body.secret
-    const id = c.env.SYNC_SERVER.idFromName(SyncServer.shortName(sessionID))
-    const stub = c.env.SYNC_SERVER.get(id)
-    await stub.assertSecret(secret)
-    await stub.clear()
-    return c.json({})
-  })
-  .post("/share_delete_admin", async (c) => {
-    const id = c.env.SYNC_SERVER.idFromName("oVF8Rsiv")
-    const stub = c.env.SYNC_SERVER.get(id)
-    await stub.clear()
-    return c.json({})
-  })
-  .post("/share_sync", async (c) => {
-    const body = await c.req.json<{
-      sessionID: string
-      secret: string
-      key: string
-      content: any
-    }>()
-    const name = SyncServer.shortName(body.sessionID)
-    const id = c.env.SYNC_SERVER.idFromName(name)
-    const stub = c.env.SYNC_SERVER.get(id)
-    await stub.assertSecret(body.secret)
-    await stub.publish(body.key, body.content)
-    return c.json({})
-  })
-  .get("/share_poll", async (c) => {
-    const upgradeHeader = c.req.header("Upgrade")
-    if (!upgradeHeader || upgradeHeader !== "websocket") {
-      return c.text("Error: Upgrade header is required", { status: 426 })
-    }
-    const id = c.req.query("id")
-    console.log("share_poll", id)
-    if (!id) return c.text("Error: Share ID is required", { status: 400 })
-    const stub = c.env.SYNC_SERVER.get(c.env.SYNC_SERVER.idFromName(id))
-    return stub.fetch(c.req.raw)
-  })
-  .get("/share_data", async (c) => {
-    const id = c.req.query("id")
-    console.log("share_data", id)
-    if (!id) return c.text("Error: Share ID is required", { status: 400 })
-    const stub = c.env.SYNC_SERVER.get(c.env.SYNC_SERVER.idFromName(id))
-    const data = await stub.getData()
-    let info
+export default {
+  async fetch(request: Request, env: Env, ctx: ExecutionContext) {
+    const url = new URL(request.url)
+    const splits = url.pathname.split("/")
+    const method = splits[1]
+    if (request.method === "GET" && method === "") {
+      return new Response("Hello, world!", {
+        headers: { "Content-Type": "text/plain" },
+      })
+    }
+    if (request.method === "POST" && method === "share_create") {
+      const body = await request.json<any>()
+      const sessionID = body.sessionID
+      const short = SyncServer.shortName(sessionID)
+      const id = env.SYNC_SERVER.idFromName(short)
+      const stub = env.SYNC_SERVER.get(id)
+      const secret = await stub.share(sessionID)
+      return new Response(
+        JSON.stringify({
+          secret,
+          url: `https://${env.WEB_DOMAIN}/s/${short}`,
+        }),
+        {
+          headers: { "Content-Type": "application/json" },
+        },
+      )
+    }
+    if (request.method === "POST" && method === "share_delete") {
+      const body = await request.json<any>()
+      const sessionID = body.sessionID
+      const secret = body.secret
+      const id = env.SYNC_SERVER.idFromName(SyncServer.shortName(sessionID))
+      const stub = env.SYNC_SERVER.get(id)
+      await stub.assertSecret(secret)
+      await stub.clear()
+      return new Response(JSON.stringify({}), {
+        headers: { "Content-Type": "application/json" },
+      })
+    }
+    if (request.method === "POST" && method === "share_delete_admin") {
+      const id = env.SYNC_SERVER.idFromName("oVF8Rsiv")
+      const stub = env.SYNC_SERVER.get(id)
+      await stub.clear()
+      return new Response(JSON.stringify({}), {
+        headers: { "Content-Type": "application/json" },
+      })
+    }
+    if (request.method === "POST" && method === "share_sync") {
+      const body = await request.json<{
+        sessionID: string
+        secret: string
+        key: string
+        content: any
+      }>()
+      const name = SyncServer.shortName(body.sessionID)
+      const id = env.SYNC_SERVER.idFromName(name)
+      const stub = env.SYNC_SERVER.get(id)
+      await stub.assertSecret(body.secret)
+      await stub.publish(body.key, body.content)
+      return new Response(JSON.stringify({}), {
+        headers: { "Content-Type": "application/json" },
+      })
+    }
+    if (request.method === "GET" && method === "share_poll") {
+      const upgradeHeader = request.headers.get("Upgrade")
+      if (!upgradeHeader || upgradeHeader !== "websocket") {
+        return new Response("Error: Upgrade header is required", {
+          status: 426,
+        })
+      }
+      const id = url.searchParams.get("id")
+      console.log("share_poll", id)
+      if (!id)
+        return new Response("Error: Share ID is required", { status: 400 })
+      const stub = env.SYNC_SERVER.get(env.SYNC_SERVER.idFromName(id))
+      return stub.fetch(request)
+    }
+    if (request.method === "GET" && method === "share_data") {
+      const id = url.searchParams.get("id")
+      console.log("share_data", id)
+      if (!id)
+        return new Response("Error: Share ID is required", { status: 400 })
+      const stub = env.SYNC_SERVER.get(env.SYNC_SERVER.idFromName(id))
+      const data = await stub.getData()
+      let info
@ -184,134 +208,20 @@ export default new Hono<{ Bindings: Env }>()
         return
       }
       if (type === "message") {
-        messages[d.content.id] = {
-          parts: [],
-          ...d.content,
-        }
-      }
-      if (type === "part") {
-        messages[d.content.messageID].parts.push(d.content)
-      }
-    })
-    return c.json({ info, messages })
-  })
-  /**
-   * Used by the GitHub action to get GitHub installation access token given the OIDC token
-   */
-  .post("/exchange_github_app_token", async (c) => {
-    const EXPECTED_AUDIENCE = "opencode-github-action"
-    const GITHUB_ISSUER = "https://token.actions.githubusercontent.com"
-    const JWKS_URL = `${GITHUB_ISSUER}/.well-known/jwks`
-    // get Authorization header
-    const token = c.req.header("Authorization")?.replace(/^Bearer /, "")
-    if (!token) return c.json({ error: "Authorization header is required" }, { status: 401 })
-    // verify token
-    const JWKS = createRemoteJWKSet(new URL(JWKS_URL))
-    let owner, repo
-    try {
-      const { payload } = await jwtVerify(token, JWKS, {
-        issuer: GITHUB_ISSUER,
-        audience: EXPECTED_AUDIENCE,
-      })
-      const sub = payload.sub // e.g. 'repo:my-org/my-repo:ref:refs/heads/main'
-      const parts = sub.split(":")[1].split("/")
-      owner = parts[0]
-      repo = parts[1]
-    } catch (err) {
-      console.error("Token verification failed:", err)
-      return c.json({ error: "Invalid or expired token" }, { status: 403 })
-    }
-    // Create app JWT token
-    const auth = createAppAuth({
-      appId: Resource.GITHUB_APP_ID.value,
-      privateKey: Resource.GITHUB_APP_PRIVATE_KEY.value,
-    })
-    const appAuth = await auth({ type: "app" })
-    // Lookup installation
-    const octokit = new Octokit({ auth: appAuth.token })
-    const { data: installation } = await octokit.apps.getRepoInstallation({ owner, repo })
-    // Get installation token
-    const installationAuth = await auth({ type: "installation", installationId: installation.id })
-    return c.json({ token: installationAuth.token })
-  })
-  /**
-   * Used by the GitHub action to get GitHub installation access token given user PAT token (used when testing `opencode github run` locally)
-   */
-  .post("/exchange_github_app_token_with_pat", async (c) => {
-    const body = await c.req.json<{ owner: string; repo: string }>()
-    const owner = body.owner
-    const repo = body.repo
-    try {
-      // get Authorization header
-      const authHeader = c.req.header("Authorization")
-      const token = authHeader?.replace(/^Bearer /, "")
-      if (!token) throw new Error("Authorization header is required")
-      // Verify permissions
-      const userClient = new Octokit({ auth: token })
-      const { data: repoData } = await userClient.repos.get({ owner, repo })
-      if (!repoData.permissions.admin && !repoData.permissions.push && !repoData.permissions.maintain)
-        throw new Error("User does not have write permissions")
-      // Get installation token
-      const auth = createAppAuth({
-        appId: Resource.GITHUB_APP_ID.value,
-        privateKey: Resource.GITHUB_APP_PRIVATE_KEY.value,
-      })
-      const appAuth = await auth({ type: "app" })
-      // Lookup installation
-      const appClient = new Octokit({ auth: appAuth.token })
-      const { data: installation } = await appClient.apps.getRepoInstallation({ owner, repo })
-      // Get installation token
-      const installationAuth = await auth({ type: "installation", installationId: installation.id })
-      return c.json({ token: installationAuth.token })
-    } catch (e: any) {
-      let error = e
-      if (e instanceof Error) {
-        error = e.message
-      }
-      return c.json({ error }, { status: 401 })
-    }
-  })
-  /**
-   * Used by the opencode CLI to check if the GitHub app is installed
-   */
-  .get("/get_github_app_installation", async (c) => {
-    const owner = c.req.query("owner")
-    const repo = c.req.query("repo")
-    const auth = createAppAuth({
-      appId: Resource.GITHUB_APP_ID.value,
-      privateKey: Resource.GITHUB_APP_PRIVATE_KEY.value,
-    })
-    const appAuth = await auth({ type: "app" })
-    // Lookup installation
-    const octokit = new Octokit({ auth: appAuth.token })
-    let installation
-    try {
-      const ret = await octokit.apps.getRepoInstallation({ owner, repo })
-      installation = ret.data
-    } catch (err) {
-      if (err instanceof Error && err.message.includes("Not Found")) {
-        // not installed
-      } else {
-        throw err
-      }
-    }
-    return c.json({ installation })
-  })
-  .all("*", (c) => c.text("Not Found"))
+          const [, messageID] = splits
+          messages[messageID] = d.content
+        }
+      })
+      return new Response(
+        JSON.stringify({
+          info,
+          messages,
+        }),
+        {
+          headers: { "Content-Type": "application/json" },
+        },
+      )
+    }
+  },
+}


@ -6,26 +6,18 @@
import "sst" import "sst"
declare module "sst" { declare module "sst" {
export interface Resource { export interface Resource {
"GITHUB_APP_ID": { Web: {
"type": "sst.sst.Secret" type: "sst.cloudflare.Astro"
"value": string url: string
}
"GITHUB_APP_PRIVATE_KEY": {
"type": "sst.sst.Secret"
"value": string
}
"Web": {
"type": "sst.cloudflare.Astro"
"url": string
} }
} }
} }
// cloudflare // cloudflare
import * as cloudflare from "@cloudflare/workers-types"; import * as cloudflare from "@cloudflare/workers-types"
declare module "sst" { declare module "sst" {
export interface Resource { export interface Resource {
"Api": cloudflare.Service Api: cloudflare.Service
"Bucket": cloudflare.R2Bucket Bucket: cloudflare.R2Bucket
} }
} }


@ -0,0 +1,369 @@
{
"type": "object",
"properties": {
"$schema": {
"type": "string",
"description": "JSON schema reference for configuration validation"
},
"theme": {
"type": "string",
"description": "Theme name to use for the interface"
},
"keybinds": {
"type": "object",
"properties": {
"leader": {
"type": "string",
"description": "Leader key for keybind combinations"
},
"help": {
"type": "string",
"description": "Show help dialog"
},
"editor_open": {
"type": "string",
"description": "Open external editor"
},
"session_new": {
"type": "string",
"description": "Create a new session"
},
"session_list": {
"type": "string",
"description": "List all sessions"
},
"session_share": {
"type": "string",
"description": "Share current session"
},
"session_interrupt": {
"type": "string",
"description": "Interrupt current session"
},
"session_compact": {
"type": "string",
"description": "Toggle compact mode for session"
},
"tool_details": {
"type": "string",
"description": "Show tool details"
},
"model_list": {
"type": "string",
"description": "List available models"
},
"theme_list": {
"type": "string",
"description": "List available themes"
},
"project_init": {
"type": "string",
"description": "Initialize project configuration"
},
"input_clear": {
"type": "string",
"description": "Clear input field"
},
"input_paste": {
"type": "string",
"description": "Paste from clipboard"
},
"input_submit": {
"type": "string",
"description": "Submit input"
},
"input_newline": {
"type": "string",
"description": "Insert newline in input"
},
"history_previous": {
"type": "string",
"description": "Navigate to previous history item"
},
"history_next": {
"type": "string",
"description": "Navigate to next history item"
},
"messages_page_up": {
"type": "string",
"description": "Scroll messages up by one page"
},
"messages_page_down": {
"type": "string",
"description": "Scroll messages down by one page"
},
"messages_half_page_up": {
"type": "string",
"description": "Scroll messages up by half page"
},
"messages_half_page_down": {
"type": "string",
"description": "Scroll messages down by half page"
},
"messages_previous": {
"type": "string",
"description": "Navigate to previous message"
},
"messages_next": {
"type": "string",
"description": "Navigate to next message"
},
"messages_first": {
"type": "string",
"description": "Navigate to first message"
},
"messages_last": {
"type": "string",
"description": "Navigate to last message"
},
"app_exit": {
"type": "string",
"description": "Exit the application"
}
},
"additionalProperties": false,
"description": "Custom keybind configurations"
},
"autoshare": {
"type": "boolean",
"description": "Share newly created sessions automatically"
},
"autoupdate": {
"type": "boolean",
"description": "Automatically update to the latest version"
},
"disabled_providers": {
"type": "array",
"items": {
"type": "string"
},
"description": "Disable providers that are loaded automatically"
},
"model": {
"type": "string",
"description": "Model to use in the format of provider/model, eg anthropic/claude-2"
},
"provider": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"api": {
"type": "string"
},
"name": {
"type": "string"
},
"env": {
"type": "array",
"items": {
"type": "string"
}
},
"id": {
"type": "string"
},
"npm": {
"type": "string"
},
"models": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"attachment": {
"type": "boolean"
},
"reasoning": {
"type": "boolean"
},
"temperature": {
"type": "boolean"
},
"tool_call": {
"type": "boolean"
},
"cost": {
"type": "object",
"properties": {
"input": {
"type": "number"
},
"output": {
"type": "number"
},
"cache_read": {
"type": "number"
},
"cache_write": {
"type": "number"
}
},
"required": ["input", "output"],
"additionalProperties": false
},
"limit": {
"type": "object",
"properties": {
"context": {
"type": "number"
},
"output": {
"type": "number"
}
},
"required": ["context", "output"],
"additionalProperties": false
},
"id": {
"type": "string"
},
"options": {
"type": "object",
"additionalProperties": {}
}
},
"additionalProperties": false
}
},
"options": {
"type": "object",
"additionalProperties": {}
}
},
"required": ["models"],
"additionalProperties": false
},
"description": "Custom provider configurations and model overrides"
},
"mcp": {
"type": "object",
"additionalProperties": {
"anyOf": [
{
"type": "object",
"properties": {
"type": {
"type": "string",
"const": "local",
"description": "Type of MCP server connection"
},
"command": {
"type": "array",
"items": {
"type": "string"
},
"description": "Command and arguments to run the MCP server"
},
"environment": {
"type": "object",
"additionalProperties": {
"type": "string"
},
"description": "Environment variables to set when running the MCP server"
},
"enabled": {
"type": "boolean",
"description": "Enable or disable the MCP server on startup"
}
},
"required": ["type", "command"],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"type": {
"type": "string",
"const": "remote",
"description": "Type of MCP server connection"
},
"url": {
"type": "string",
"description": "URL of the remote MCP server"
},
"enabled": {
"type": "boolean",
"description": "Enable or disable the MCP server on startup"
}
},
"required": ["type", "url"],
"additionalProperties": false
}
]
},
"description": "MCP (Model Context Protocol) server configurations"
},
"instructions": {
"type": "array",
"items": {
"type": "string"
},
"description": "Additional instruction files or patterns to include"
},
"experimental": {
"type": "object",
"properties": {
"hook": {
"type": "object",
"properties": {
"file_edited": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"properties": {
"command": {
"type": "array",
"items": {
"type": "string"
}
},
"environment": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"required": ["command"],
"additionalProperties": false
}
}
},
"session_completed": {
"type": "array",
"items": {
"type": "object",
"properties": {
"command": {
"type": "array",
"items": {
"type": "string"
}
},
"environment": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"required": ["command"],
"additionalProperties": false
}
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"additionalProperties": false,
"$schema": "http://json-schema.org/draft-07/schema#"
}


@ -1,12 +1,12 @@
 {
   "$schema": "https://json.schemastore.org/package.json",
-  "version": "0.3.126",
+  "version": "0.0.5",
   "name": "opencode",
   "type": "module",
   "private": true,
   "scripts": {
     "typecheck": "tsc --noEmit",
-    "dev": "bun run --conditions=development ./src/index.ts"
+    "dev": "bun run ./src/index.ts"
   },
   "bin": {
     "opencode": "./bin/opencode"
@ -17,47 +17,37 @@
   "devDependencies": {
     "@ai-sdk/amazon-bedrock": "2.2.10",
     "@ai-sdk/anthropic": "1.2.12",
-    "@octokit/webhooks-types": "7.6.1",
-    "@standard-schema/spec": "1.0.0",
     "@tsconfig/bun": "1.0.7",
     "@types/bun": "latest",
     "@types/turndown": "5.0.5",
     "@types/yargs": "17.0.33",
     "typescript": "catalog:",
-    "vscode-languageserver-types": "3.17.5",
     "zod-to-json-schema": "3.24.5"
   },
   "dependencies": {
-    "@actions/core": "1.11.1",
-    "@actions/github": "6.0.1",
-    "@clack/prompts": "1.0.0-alpha.1",
-    "@hono/zod-validator": "0.4.2",
-    "@modelcontextprotocol/sdk": "1.15.1",
-    "@octokit/graphql": "9.0.1",
-    "@octokit/rest": "22.0.0",
+    "@clack/prompts": "0.11.0",
+    "@flystorage/file-storage": "1.1.0",
+    "@flystorage/local-fs": "1.1.0",
+    "@hono/zod-validator": "0.5.0",
     "@openauthjs/openauth": "0.4.3",
-    "@opencode-ai/plugin": "workspace:*",
-    "@opencode-ai/sdk": "workspace:*",
     "@standard-schema/spec": "1.0.0",
-    "@zip.js/zip.js": "2.7.62",
     "ai": "catalog:",
     "decimal.js": "10.5.0",
     "diff": "8.0.2",
-    "gray-matter": "4.0.3",
-    "hono": "catalog:",
+    "env-paths": "3.0.0",
+    "hono": "4.7.10",
     "hono-openapi": "0.4.8",
     "isomorphic-git": "1.32.1",
-    "jsonc-parser": "3.3.1",
-    "minimatch": "10.0.3",
     "open": "10.1.2",
-    "remeda": "catalog:",
-    "tree-sitter": "0.22.4",
-    "tree-sitter-bash": "0.23.3",
+    "remeda": "2.22.3",
+    "ts-lsp-client": "1.0.3",
     "turndown": "7.2.0",
     "vscode-jsonrpc": "8.2.1",
-    "vscode-languageclient": "8",
     "xdg-basedir": "5.1.0",
     "yargs": "18.0.0",
     "zod": "catalog:",
-    "zod-openapi": "4.1.0"
+    "zod-openapi": "4.2.4",
+    "zod-validation-error": "3.5.2"
   }
 }


@ -1,30 +1,35 @@
 #!/usr/bin/env bun
-const dir = new URL("..", import.meta.url).pathname
-process.chdir(dir)
 import { $ } from "bun"
 import pkg from "../package.json"
-const dry = process.env["OPENCODE_DRY"] === "true"
-const version = process.env["OPENCODE_VERSION"]!
-const snapshot = process.env["OPENCODE_SNAPSHOT"] === "true"
+const dry = process.argv.includes("--dry")
+const snapshot = process.argv.includes("--snapshot")
+const version = snapshot
+  ? `0.0.0-${new Date().toISOString().slice(0, 16).replace(/[-:T]/g, "")}`
+  : await $`git describe --tags --exact-match HEAD`
+      .text()
+      .then((x) => x.substring(1).trim())
+      .catch(() => {
+        console.error("tag not found")
+        process.exit(1)
+      })
 console.log(`publishing ${version}`)
 const GOARCH: Record<string, string> = {
   arm64: "arm64",
   x64: "amd64",
-  "x64-baseline": "amd64",
 }
 const targets = [
-  ["windows", "x64"],
   ["linux", "arm64"],
   ["linux", "x64"],
-  ["linux", "x64-baseline"],
   ["darwin", "x64"],
-  ["darwin", "x64-baseline"],
   ["darwin", "arm64"],
+  ["windows", "x64"],
 ]
 await $`rm -rf dist`
@ -38,7 +43,7 @@ for (const [os, arch] of targets) {
   await $`CGO_ENABLED=0 GOOS=${os} GOARCH=${GOARCH[arch]} go build -ldflags="-s -w -X main.Version=${version}" -o ../opencode/dist/${name}/bin/tui ../tui/cmd/opencode/main.go`.cwd(
     "../tui",
   )
-  await $`bun build --define OPENCODE_TUI_PATH="'../../../dist/${name}/bin/tui'" --define OPENCODE_VERSION="'${version}'" --compile --target=bun-${os}-${arch} --outfile=dist/${name}/bin/opencode ./src/index.ts`
+  await $`bun build --define OPENCODE_VERSION="'${version}'" --compile --minify --target=bun-${os}-${arch} --outfile=dist/${name}/bin/opencode ./src/index.ts ./dist/${name}/bin/tui`
   await $`rm -rf ./dist/${name}/bin/tui`
   await Bun.file(`dist/${name}/package.json`).write(
     JSON.stringify(
@ -52,7 +57,8 @@ for (const [os, arch] of targets) {
       2,
     ),
   )
-  if (!dry) await $`cd dist/${name} && chmod 777 -R . && bun publish --access public --tag ${npmTag}`
+  if (!dry)
+    await $`cd dist/${name} && bun publish --access public --tag ${npmTag}`
   optionalDependencies[name] = version
 }
@ -76,7 +82,8 @@ await Bun.file(`./dist/${pkg.name}/package.json`).write(
     2,
   ),
 )
-if (!dry) await $`cd ./dist/${pkg.name} && bun publish --access public --tag ${npmTag}`
+if (!dry)
+  await $`cd ./dist/${pkg.name} && bun publish --access public --tag ${npmTag}`
 if (!snapshot) {
   // Github Release
@ -84,44 +91,52 @@ if (!snapshot) {
     await $`cd dist/${key}/bin && zip -r ../../${key}.zip *`
   }
-  const previous = await fetch("https://api.github.com/repos/sst/opencode/releases/latest")
-    .then((res) => {
-      if (!res.ok) throw new Error(res.statusText)
-      return res.json()
-    })
+  const previous = await fetch(
+    "https://api.github.com/repos/sst/opencode/releases/latest",
+  )
+    .then((res) => res.json())
     .then((data) => data.tag_name)
-  console.log("finding commits between", previous, "and", "HEAD")
-  const commits = await fetch(`https://api.github.com/repos/sst/opencode/compare/${previous}...HEAD`)
+  const commits = await fetch(
+    `https://api.github.com/repos/sst/opencode/compare/${previous}...HEAD`,
+  )
    .then((res) => res.json())
    .then((data) => data.commits || [])
-  const raw = commits.map((commit: any) => `- ${commit.commit.message.split("\n").join(" ")}`)
-  console.log(raw)
-  const notes =
-    raw
+  const notes = commits
+    .map((commit: any) => `- ${commit.commit.message.split("\n")[0]}`)
     .filter((x: string) => {
       const lower = x.toLowerCase()
       return (
-        !lower.includes("release:") &&
        !lower.includes("ignore:") &&
-        !lower.includes("chore:") &&
        !lower.includes("ci:") &&
        !lower.includes("wip:") &&
        !lower.includes("docs:") &&
        !lower.includes("doc:")
      )
    })
-    .join("\n") || "No notable changes"
+    .join("\n")
-  if (!dry) await $`gh release create v${version} --title "v${version}" --notes ${notes} ./dist/*.zip`
+  if (!dry)
+    await $`gh release create v${version} --title "v${version}" --notes ${notes} ./dist/*.zip`
   // Calculate SHA values
-  const arm64Sha = await $`sha256sum ./dist/opencode-linux-arm64.zip | cut -d' ' -f1`.text().then((x) => x.trim())
-  const x64Sha = await $`sha256sum ./dist/opencode-linux-x64.zip | cut -d' ' -f1`.text().then((x) => x.trim())
-  const macX64Sha = await $`sha256sum ./dist/opencode-darwin-x64.zip | cut -d' ' -f1`.text().then((x) => x.trim())
-  const macArm64Sha = await $`sha256sum ./dist/opencode-darwin-arm64.zip | cut -d' ' -f1`.text().then((x) => x.trim())
+  const arm64Sha =
+    await $`sha256sum ./dist/opencode-linux-arm64.zip | cut -d' ' -f1`
+      .text()
+      .then((x) => x.trim())
+  const x64Sha =
+    await $`sha256sum ./dist/opencode-linux-x64.zip | cut -d' ' -f1`
+      .text()
+      .then((x) => x.trim())
+  const macX64Sha =
+    await $`sha256sum ./dist/opencode-darwin-x64.zip | cut -d' ' -f1`
+      .text()
+      .then((x) => x.trim())
+  const macArm64Sha =
+    await $`sha256sum ./dist/opencode-darwin-arm64.zip | cut -d' ' -f1`
+      .text()
+      .then((x) => x.trim())
   // AUR package
   const pkgbuild = [
@ -155,8 +170,9 @@ if (!snapshot) {
   for (const pkg of ["opencode", "opencode-bin"]) {
     await $`rm -rf ./dist/aur-${pkg}`
     await $`git clone ssh://aur@aur.archlinux.org/${pkg}.git ./dist/aur-${pkg}`
-    await $`cd ./dist/aur-${pkg} && git checkout master`
-    await Bun.file(`./dist/aur-${pkg}/PKGBUILD`).write(pkgbuild.replace("${pkg}", pkg))
+    await Bun.file(`./dist/aur-${pkg}/PKGBUILD`).write(
+      pkgbuild.replace("${pkg}", pkg),
+    )
     await $`cd ./dist/aur-${pkg} && makepkg --printsrcinfo > .SRCINFO`
     await $`cd ./dist/aur-${pkg} && git add PKGBUILD .SRCINFO`
     await $`cd ./dist/aur-${pkg} && git commit -m "Update to v${version}"`


@ -4,32 +4,5 @@ import "zod-openapi/extend"
 import { Config } from "../src/config/config"
 import { zodToJsonSchema } from "zod-to-json-schema"
-const file = process.argv[2]
-const result = zodToJsonSchema(Config.Info, {
-  /**
-   * We'll use the `default` values of the field as the only value in `examples`.
-   * This will ensure no docs are needed to be read, as the configuration is
-   * self-documenting.
-   *
-   * See https://json-schema.org/draft/2020-12/draft-bhutton-json-schema-validation-00#rfc.section.9.5
-   */
-  postProcess(jsonSchema) {
-    const schema = jsonSchema as typeof jsonSchema & {
-      examples?: unknown[]
-    }
-    if (schema && typeof schema === "object" && "type" in schema && schema.type === "string" && schema?.default) {
-      if (!schema.examples) {
-        schema.examples = [schema.default]
-      }
-      schema.description = [schema.description || "", `default: \`${schema.default}\``]
-        .filter(Boolean)
-        .join("\n\n")
-        .trim()
-    }
-    return jsonSchema
-  },
-})
-await Bun.write(file, JSON.stringify(result, null, 2))
+const result = zodToJsonSchema(Config.Info)
+await Bun.write("config.schema.json", JSON.stringify(result, null, 2))


@ -1,102 +0,0 @@
import { App } from "../app/app"
import { Config } from "../config/config"
import z from "zod"
import { Provider } from "../provider/provider"
import { generateObject, type ModelMessage } from "ai"
import PROMPT_GENERATE from "./generate.txt"
import { SystemPrompt } from "../session/system"
export namespace Agent {
export const Info = z
.object({
name: z.string(),
model: z
.object({
modelID: z.string(),
providerID: z.string(),
})
.optional(),
description: z.string(),
prompt: z.string().optional(),
tools: z.record(z.boolean()),
})
.openapi({
ref: "Agent",
})
export type Info = z.infer<typeof Info>
const state = App.state("agent", async () => {
const cfg = await Config.get()
const result: Record<string, Info> = {
general: {
name: "general",
description:
"General-purpose agent for researching complex questions, searching for code, and executing multi-step tasks. When you are searching for a keyword or file and are not confident that you will find the right match in the first few tries use this agent to perform the search for you.",
tools: {
todoread: false,
todowrite: false,
},
},
}
for (const [key, value] of Object.entries(cfg.agent ?? {})) {
if (value.disable) continue
let item = result[key]
if (!item)
item = result[key] = {
name: key,
description: "",
tools: {
todowrite: false,
todoread: false,
},
}
const model = value.model ?? cfg.model
if (model) item.model = Provider.parseModel(model)
if (value.prompt) item.prompt = value.prompt
if (value.tools)
item.tools = {
...item.tools,
...value.tools,
}
if (value.description) item.description = value.description
}
return result
})
export async function get(agent: string) {
return state().then((x) => x[agent])
}
export async function list() {
return state().then((x) => Object.values(x))
}
export async function generate(input: { description: string }) {
const defaultModel = await Provider.defaultModel()
const model = await Provider.getModel(defaultModel.providerID, defaultModel.modelID)
const system = SystemPrompt.header(defaultModel.providerID)
system.push(PROMPT_GENERATE)
const existing = await list()
const result = await generateObject({
temperature: 0.3,
prompt: [
...system.map(
(item): ModelMessage => ({
role: "system",
content: item,
}),
),
{
role: "user",
content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`,
},
],
model: model.language,
schema: z.object({
identifier: z.string(),
whenToUse: z.string(),
systemPrompt: z.string(),
}),
})
return result.object
}
}
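The merge loop in state() means a user-defined agent comes straight out of config and is layered over the built-in general agent. A hedged sketch of an opencode.json agent block that would add one more entry, written here as a TypeScript object; all values are illustrative:
const config = {
  agent: {
    reviewer: {
      description: "Reviews recently written code for correctness and style",
      model: "anthropic/claude-sonnet-4-20250514",   // parsed by Provider.parseModel as provider/model
      prompt: "You are a meticulous code reviewer...",
      tools: { write: false, edit: false },          // merged over the todoread/todowrite defaults
    },
  },
}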

View file

@ -1,75 +0,0 @@
You are an elite AI agent architect specializing in crafting high-performance agent configurations. Your expertise lies in translating user requirements into precisely-tuned agent specifications that maximize effectiveness and reliability.
**Important Context**: You may have access to project-specific instructions from CLAUDE.md files and other context that may include coding standards, project structure, and custom requirements. Consider this context when creating agents to ensure they align with the project's established patterns and practices.
When a user describes what they want an agent to do, you will:
1. **Extract Core Intent**: Identify the fundamental purpose, key responsibilities, and success criteria for the agent. Look for both explicit requirements and implicit needs. Consider any project-specific context from CLAUDE.md files. For agents that are meant to review code, you should assume that the user is asking to review recently written code and not the whole codebase, unless the user has explicitly instructed you otherwise.
2. **Design Expert Persona**: Create a compelling expert identity that embodies deep domain knowledge relevant to the task. The persona should inspire confidence and guide the agent's decision-making approach.
3. **Architect Comprehensive Instructions**: Develop a system prompt that:
- Establishes clear behavioral boundaries and operational parameters
- Provides specific methodologies and best practices for task execution
- Anticipates edge cases and provides guidance for handling them
- Incorporates any specific requirements or preferences mentioned by the user
- Defines output format expectations when relevant
- Aligns with project-specific coding standards and patterns from CLAUDE.md
4. **Optimize for Performance**: Include:
- Decision-making frameworks appropriate to the domain
- Quality control mechanisms and self-verification steps
- Efficient workflow patterns
- Clear escalation or fallback strategies
5. **Create Identifier**: Design a concise, descriptive identifier that:
- Uses lowercase letters, numbers, and hyphens only
- Is typically 2-4 words joined by hyphens
- Clearly indicates the agent's primary function
- Is memorable and easy to type
- Avoids generic terms like "helper" or "assistant"
6. **Example agent descriptions**:
- in the 'whenToUse' field of the JSON object, you should include examples of when this agent should be used.
- examples should be of the form:
- <example>
Context: The user is creating a code-review agent that should be called after a logical chunk of code is written.
user: "Please write a function that checks if a number is prime"
assistant: "Here is the relevant function: "
<function call omitted for brevity only for this example>
<commentary>
Since a logical chunk of code has just been written, use the Task tool to launch the code-reviewer agent to review it.
</commentary>
assistant: "Now let me use the code-reviewer agent to review the code"
</example>
- <example>
Context: User is creating an agent to respond to the word "hello" with a friendly joke.
user: "Hello"
assistant: "I'm going to use the Task tool to launch the greeting-responder agent to respond with a friendly joke"
<commentary>
Since the user is greeting, use the greeting-responder agent to respond with a friendly joke.
</commentary>
</example>
- If the user mentioned or implied that the agent should be used proactively, you should include examples of this.
- NOTE: Ensure that in the examples, you are making the assistant use the Agent tool and not simply respond directly to the task.
Your output must be a valid JSON object with exactly these fields:
{
"identifier": "A unique, descriptive identifier using lowercase letters, numbers, and hyphens (e.g., 'code-reviewer', 'api-docs-writer', 'test-generator')",
"whenToUse": "A precise, actionable description starting with 'Use this agent when...' that clearly defines the triggering conditions and use cases. Ensure you include examples as described above.",
"systemPrompt": "The complete system prompt that will govern the agent's behavior, written in second person ('You are...', 'You will...') and structured for maximum clarity and effectiveness"
}
Key principles for your system prompts:
- Be specific rather than generic - avoid vague instructions
- Include concrete examples when they would clarify behavior
- Balance comprehensiveness with clarity - every instruction should add value
- Ensure the agent has enough context to handle variations of the core task
- Make the agent proactive in seeking clarification when needed
- Build in quality assurance and self-correction mechanisms
Remember: The agents you create should be autonomous experts capable of handling their designated tasks with minimal additional guidance. Your system prompts are their complete operational manual.
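Putting those requirements together, a response the generator is expected to return looks roughly like the following (abridged and purely illustrative):
const example = {
  identifier: "api-docs-writer",
  whenToUse:
    "Use this agent when public API surfaces change and reference documentation needs to be drafted or updated. <example>...</example>",
  systemPrompt:
    "You are an expert API documentation writer... You will produce reference docs that follow the project's established conventions.",
}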

View file

@ -12,6 +12,7 @@ export namespace App {
export const Info = z export const Info = z
.object({ .object({
user: z.string(),
hostname: z.string(), hostname: z.string(),
git: z.boolean(), git: z.boolean(),
path: z.object({ path: z.object({
@ -44,14 +45,23 @@ export namespace App {
} }
export const provideExisting = ctx.provide export const provideExisting = ctx.provide
export async function provide<T>(input: Input, cb: (app: App.Info) => Promise<T>) { export async function provide<T>(
input: Input,
cb: (app: App.Info) => Promise<T>,
) {
log.info("creating", { log.info("creating", {
cwd: input.cwd, cwd: input.cwd,
}) })
const git = await Filesystem.findUp(".git", input.cwd).then(([x]) => (x ? path.dirname(x) : undefined)) const git = await Filesystem.findUp(".git", input.cwd).then(([x]) =>
x ? path.dirname(x) : undefined,
)
log.info("git", { git }) log.info("git", { git })
const data = path.join(Global.Path.data, "project", git ? directory(git) : "global") const data = path.join(
Global.Path.data,
"project",
git ? directory(git) : "global",
)
const stateFile = Bun.file(path.join(data, APP_JSON)) const stateFile = Bun.file(path.join(data, APP_JSON))
const state = (await stateFile.json().catch(() => ({}))) as { const state = (await stateFile.json().catch(() => ({}))) as {
initialized: number initialized: number
@ -69,6 +79,7 @@ export namespace App {
const root = git ?? input.cwd const root = git ?? input.cwd
const info: Info = { const info: Info = {
user: os.userInfo().username,
hostname: os.hostname(), hostname: os.hostname(),
time: { time: {
initialized: state.initialized, initialized: state.initialized,

View file

@ -4,18 +4,20 @@ import { Auth } from "./index"
export namespace AuthAnthropic { export namespace AuthAnthropic {
const CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e" const CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"
export async function authorize(mode: "max" | "console") { export async function authorize() {
const pkce = await generatePKCE() const pkce = await generatePKCE()
const url = new URL("https://claude.ai/oauth/authorize", import.meta.url)
const url = new URL(
`https://${mode === "console" ? "console.anthropic.com" : "claude.ai"}/oauth/authorize`,
import.meta.url,
)
url.searchParams.set("code", "true") url.searchParams.set("code", "true")
url.searchParams.set("client_id", CLIENT_ID) url.searchParams.set("client_id", CLIENT_ID)
url.searchParams.set("response_type", "code") url.searchParams.set("response_type", "code")
url.searchParams.set("redirect_uri", "https://console.anthropic.com/oauth/code/callback") url.searchParams.set(
url.searchParams.set("scope", "org:create_api_key user:profile user:inference") "redirect_uri",
"https://console.anthropic.com/oauth/code/callback",
)
url.searchParams.set(
"scope",
"org:create_api_key user:profile user:inference",
)
url.searchParams.set("code_challenge", pkce.challenge) url.searchParams.set("code_challenge", pkce.challenge)
url.searchParams.set("code_challenge_method", "S256") url.searchParams.set("code_challenge_method", "S256")
url.searchParams.set("state", pkce.verifier) url.searchParams.set("state", pkce.verifier)
@ -43,18 +45,21 @@ export namespace AuthAnthropic {
}) })
if (!result.ok) throw new ExchangeFailed() if (!result.ok) throw new ExchangeFailed()
const json = await result.json() const json = await result.json()
return { await Auth.set("anthropic", {
type: "oauth",
refresh: json.refresh_token as string, refresh: json.refresh_token as string,
access: json.access_token as string, access: json.access_token as string,
expires: Date.now() + json.expires_in * 1000, expires: Date.now() + json.expires_in * 1000,
} })
} }
export async function access() { export async function access() {
const info = await Auth.get("anthropic") const info = await Auth.get("anthropic")
if (!info || info.type !== "oauth") return if (!info || info.type !== "oauth") return
if (info.access && info.expires > Date.now()) return info.access if (info.access && info.expires > Date.now()) return info.access
const response = await fetch("https://console.anthropic.com/v1/oauth/token", { const response = await fetch(
"https://console.anthropic.com/v1/oauth/token",
{
method: "POST", method: "POST",
headers: { headers: {
"Content-Type": "application/json", "Content-Type": "application/json",
@ -64,7 +69,8 @@ export namespace AuthAnthropic {
refresh_token: info.refresh, refresh_token: info.refresh,
client_id: CLIENT_ID, client_id: CLIENT_ID,
}), }),
}) },
)
if (!response.ok) return if (!response.ok) return
const json = await response.json() const json = await response.json()
await Auth.set("anthropic", { await Auth.set("anthropic", {

View file

@ -4,12 +4,13 @@ import path from "path"
export const AuthCopilot = lazy(async () => { export const AuthCopilot = lazy(async () => {
const file = Bun.file(path.join(Global.Path.state, "plugin", "copilot.ts")) const file = Bun.file(path.join(Global.Path.state, "plugin", "copilot.ts"))
const exists = await file.exists() const response = fetch(
const response = fetch("https://raw.githubusercontent.com/sst/opencode-github-copilot/refs/heads/main/auth.ts") "https://raw.githubusercontent.com/sst/opencode-github-copilot/refs/heads/main/auth.ts",
)
.then((x) => Bun.write(file, x)) .then((x) => Bun.write(file, x))
.catch(() => {}) .catch(() => {})
if (!exists) { if (!file.exists()) {
const worked = await response const worked = await response
if (!worked) return if (!worked) return
} }

View file

@ -122,7 +122,10 @@ export namespace AuthGithubCopilot {
return tokenData.token return tokenData.token
} }
export const DeviceCodeError = NamedError.create("DeviceCodeError", z.object({})) export const DeviceCodeError = NamedError.create(
"DeviceCodeError",
z.object({}),
)
export const TokenExchangeError = NamedError.create( export const TokenExchangeError = NamedError.create(
"TokenExchangeError", "TokenExchangeError",

View file

@ -16,13 +16,7 @@ export namespace Auth {
key: z.string(), key: z.string(),
}) })
export const WellKnown = z.object({ export const Info = z.discriminatedUnion("type", [Oauth, Api])
type: z.literal("wellknown"),
key: z.string(),
token: z.string(),
})
export const Info = z.discriminatedUnion("type", [Oauth, Api, WellKnown])
export type Info = z.infer<typeof Info> export type Info = z.infer<typeof Info>
const filepath = path.join(Global.Path.data, "auth.json") const filepath = path.join(Global.Path.data, "auth.json")

View file

@ -8,7 +8,10 @@ import { readableStreamToText } from "bun"
export namespace BunProc { export namespace BunProc {
const log = Log.create({ service: "bun" }) const log = Log.create({ service: "bun" })
export async function run(cmd: string[], options?: Bun.SpawnOptions.OptionsObject<any, any, any>) { export async function run(
cmd: string[],
options?: Bun.SpawnOptions.OptionsObject<any, any, any>,
) {
log.info("running", { log.info("running", {
cmd: [which(), ...cmd], cmd: [which(), ...cmd],
...options, ...options,
@ -23,17 +26,9 @@ export namespace BunProc {
BUN_BE_BUN: "1", BUN_BE_BUN: "1",
}, },
}) })
const code = await result.exited const code = await result.exited;
const stdout = result.stdout const stdout = result.stdout ? typeof result.stdout === "number" ? result.stdout : await readableStreamToText(result.stdout) : undefined
? typeof result.stdout === "number" const stderr = result.stderr ? typeof result.stderr === "number" ? result.stderr : await readableStreamToText(result.stderr) : undefined
? result.stdout
: await readableStreamToText(result.stdout)
: undefined
const stderr = result.stderr
? typeof result.stderr === "number"
? result.stderr
: await readableStreamToText(result.stderr)
: undefined
log.info("done", { log.info("done", {
code, code,
stdout, stdout,
@ -60,23 +55,13 @@ export namespace BunProc {
export async function install(pkg: string, version = "latest") { export async function install(pkg: string, version = "latest") {
const mod = path.join(Global.Path.cache, "node_modules", pkg) const mod = path.join(Global.Path.cache, "node_modules", pkg)
const pkgjson = Bun.file(path.join(Global.Path.cache, "package.json")) const pkgjson = Bun.file(path.join(Global.Path.cache, "package.json"))
const parsed = await pkgjson.json().catch(async () => { const parsed = await pkgjson.json().catch(() => ({
const result = { dependencies: {} } dependencies: {},
await Bun.write(pkgjson.name!, JSON.stringify(result, null, 2)) }))
return result
})
if (parsed.dependencies[pkg] === version) return mod if (parsed.dependencies[pkg] === version) return mod
parsed.dependencies[pkg] = version
// Build command arguments await Bun.write(pkgjson, JSON.stringify(parsed, null, 2))
const args = ["add", "--force", "--exact", "--cwd", Global.Path.cache, pkg + "@" + version] await BunProc.run(["install", "--registry=https://registry.npmjs.org"], {
// Let Bun handle registry resolution:
// - If .npmrc files exist, Bun will use them automatically
// - If no .npmrc files exist, Bun will default to https://registry.npmjs.org
// - No need to pass --registry flag
log.info("installing package using Bun's default registry resolution", { pkg, version })
await BunProc.run(args, {
cwd: Global.Path.cache, cwd: Global.Path.cache,
}).catch((e) => { }).catch((e) => {
throw new InstallFailedError( throw new InstallFailedError(
@ -86,8 +71,6 @@ export namespace BunProc {
}, },
) )
}) })
parsed.dependencies[pkg] = version
await Bun.write(pkgjson.name!, JSON.stringify(parsed, null, 2))
return mod return mod
} }
} }
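A quick sketch of how install() is typically consumed elsewhere in the codebase; the package name, version, and resulting path are illustrative. It records the dependency in the cache's package.json, runs Bun against Global.Path.cache, and returns the module directory, so a repeat call with the same version is a no-op:
const dir = await BunProc.install("prettier", "3.3.3")   // e.g. <cache>/node_modules/prettier (path illustrative)
const prettier = await import(dir)                        // consumers then load the package from the returned path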

View file

@ -18,7 +18,10 @@ export namespace Bus {
const registry = new Map<string, EventDefinition>() const registry = new Map<string, EventDefinition>()
export function event<Type extends string, Properties extends ZodType>(type: Type, properties: Properties) { export function event<Type extends string, Properties extends ZodType>(
type: Type,
properties: Properties,
) {
const result = { const result = {
type, type,
properties, properties,
@ -69,7 +72,10 @@ export namespace Bus {
export function subscribe<Definition extends EventDefinition>( export function subscribe<Definition extends EventDefinition>(
def: Definition, def: Definition,
callback: (event: { type: Definition["type"]; properties: z.infer<Definition["properties"]> }) => void, callback: (event: {
type: Definition["type"]
properties: z.infer<Definition["properties"]>
}) => void,
) { ) {
return raw(def.type, callback) return raw(def.type, callback)
} }
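These signatures give a typed pub/sub surface: event() registers a definition whose Zod schema flows through to subscribe(), so the callback payload is fully typed. A minimal sketch under those signatures, with an illustrative event name and shape (assumes Bus and z from zod are imported as elsewhere in the codebase):
const FileChanged = Bus.event("file.changed", z.object({ path: z.string() }))

Bus.subscribe(FileChanged, (event) => {
  // event.properties is inferred as { path: string }
  console.log("changed:", event.properties.path)
})

Bus.publish(FileChanged, { path: "src/index.ts" })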

View file

@ -1,19 +1,20 @@
import { App } from "../app/app" import { App } from "../app/app"
import { ConfigHooks } from "../config/hooks" import { ConfigHooks } from "../config/hooks"
import { FileWatcher } from "../file/watch"
import { Format } from "../format" import { Format } from "../format"
import { LSP } from "../lsp" import { LSP } from "../lsp"
import { Plugin } from "../plugin"
import { Share } from "../share/share" import { Share } from "../share/share"
import { Snapshot } from "../snapshot"
export async function bootstrap<T>(input: App.Input, cb: (app: App.Info) => Promise<T>) { export async function bootstrap<T>(
input: App.Input,
cb: (app: App.Info) => Promise<T>,
) {
return App.provide(input, async (app) => { return App.provide(input, async (app) => {
Share.init() Share.init()
Format.init() Format.init()
Plugin.init()
ConfigHooks.init() ConfigHooks.init()
LSP.init() LSP.init()
Snapshot.init() FileWatcher.init()
return cb(app) return cb(app)
}) })

View file

@ -1,110 +0,0 @@
import { cmd } from "./cmd"
import * as prompts from "@clack/prompts"
import { UI } from "../ui"
import { Global } from "../../global"
import { Agent } from "../../agent/agent"
import path from "path"
import matter from "gray-matter"
import { App } from "../../app/app"
const AgentCreateCommand = cmd({
command: "create",
describe: "create a new agent",
async handler() {
await App.provide({ cwd: process.cwd() }, async (app) => {
UI.empty()
prompts.intro("Create agent")
let scope: "global" | "project" = "global"
if (app.git) {
const scopeResult = await prompts.select({
message: "Location",
options: [
{
label: "Current project",
value: "project" as const,
hint: app.path.root,
},
{
label: "Global",
value: "global" as const,
hint: Global.Path.config,
},
],
})
if (prompts.isCancel(scopeResult)) throw new UI.CancelledError()
scope = scopeResult
}
const query = await prompts.text({
message: "Description",
placeholder: "What should this agent do?",
validate: (x) => x && (x.length > 0 ? undefined : "Required"),
})
if (prompts.isCancel(query)) throw new UI.CancelledError()
const spinner = prompts.spinner()
spinner.start("Generating agent configuration...")
const generated = await Agent.generate({ description: query })
spinner.stop(`Agent ${generated.identifier} generated`)
const availableTools = [
"bash",
"read",
"write",
"edit",
"list",
"glob",
"grep",
"webfetch",
"task",
"todowrite",
"todoread",
]
const selectedTools = await prompts.multiselect({
message: "Select tools to enable",
options: availableTools.map((tool) => ({
label: tool,
value: tool,
})),
initialValues: availableTools,
})
if (prompts.isCancel(selectedTools)) throw new UI.CancelledError()
const tools: Record<string, boolean> = {}
for (const tool of availableTools) {
if (!selectedTools.includes(tool)) {
tools[tool] = false
}
}
const frontmatter: any = {
description: generated.whenToUse,
}
if (Object.keys(tools).length > 0) {
frontmatter.tools = tools
}
const content = matter.stringify(generated.systemPrompt, frontmatter)
const filePath = path.join(
scope === "global" ? Global.Path.config : path.join(app.path.root, ".opencode"),
`agent`,
`${generated.identifier}.md`,
)
await Bun.write(filePath, content)
prompts.log.success(`Agent created: ${filePath}`)
prompts.outro("Done")
})
},
})
export const AgentCommand = cmd({
command: "agent",
describe: "manage agents",
builder: (yargs) => yargs.command(AgentCreateCommand).demandCommand(),
async handler() {},
})

View file

@ -15,7 +15,11 @@ export const AuthCommand = cmd({
command: "auth", command: "auth",
describe: "manage credentials", describe: "manage credentials",
builder: (yargs) => builder: (yargs) =>
yargs.command(AuthLoginCommand).command(AuthLogoutCommand).command(AuthListCommand).demandCommand(), yargs
.command(AuthLoginCommand)
.command(AuthLogoutCommand)
.command(AuthListCommand)
.demandCommand(),
async handler() {}, async handler() {},
}) })
@ -27,7 +31,9 @@ export const AuthListCommand = cmd({
UI.empty() UI.empty()
const authPath = path.join(Global.Path.data, "auth.json") const authPath = path.join(Global.Path.data, "auth.json")
const homedir = os.homedir() const homedir = os.homedir()
const displayPath = authPath.startsWith(homedir) ? authPath.replace(homedir, "~") : authPath const displayPath = authPath.startsWith(homedir)
? authPath.replace(homedir, "~")
: authPath
prompts.intro(`Credentials ${UI.Style.TEXT_DIM}${displayPath}`) prompts.intro(`Credentials ${UI.Style.TEXT_DIM}${displayPath}`)
const results = await Auth.all().then((x) => Object.entries(x)) const results = await Auth.all().then((x) => Object.entries(x))
const database = await ModelsDev.get() const database = await ModelsDev.get()
@ -61,56 +67,25 @@ export const AuthListCommand = cmd({
prompts.log.info(`${provider} ${UI.Style.TEXT_DIM}${envVar}`) prompts.log.info(`${provider} ${UI.Style.TEXT_DIM}${envVar}`)
} }
prompts.outro(`${activeEnvVars.length} environment variable` + (activeEnvVars.length === 1 ? "" : "s")) prompts.outro(`${activeEnvVars.length} environment variables`)
} }
}, },
}) })
export const AuthLoginCommand = cmd({ export const AuthLoginCommand = cmd({
command: "login [url]", command: "login",
describe: "log in to a provider", describe: "log in to a provider",
builder: (yargs) => async handler() {
yargs.positional("url", {
describe: "opencode auth provider",
type: "string",
}),
async handler(args) {
UI.empty() UI.empty()
prompts.intro("Add credential") prompts.intro("Add credential")
if (args.url) {
const wellknown = await fetch(`${args.url}/.well-known/opencode`).then((x) => x.json())
prompts.log.info(`Running \`${wellknown.auth.command.join(" ")}\``)
const proc = Bun.spawn({
cmd: wellknown.auth.command,
stdout: "pipe",
})
const exit = await proc.exited
if (exit !== 0) {
prompts.log.error("Failed")
prompts.outro("Done")
return
}
const token = await new Response(proc.stdout).text()
await Auth.set(args.url, {
type: "wellknown",
key: wellknown.auth.env,
token: token.trim(),
})
prompts.log.success("Logged into " + args.url)
prompts.outro("Done")
return
}
await ModelsDev.refresh().catch(() => {})
const providers = await ModelsDev.get() const providers = await ModelsDev.get()
const priority: Record<string, number> = { const priority: Record<string, number> = {
anthropic: 0, anthropic: 0,
"github-copilot": 1, "github-copilot": 1,
openai: 2, openai: 2,
google: 3, google: 3,
openrouter: 4,
vercel: 5,
} }
let provider = await prompts.autocomplete({ let provider = await prompts.select({
message: "Select provider", message: "Select provider",
maxItems: 8, maxItems: 8,
options: [ options: [
@ -139,7 +114,8 @@ export const AuthLoginCommand = cmd({
if (provider === "other") { if (provider === "other") {
provider = await prompts.text({ provider = await prompts.text({
message: "Enter provider id", message: "Enter provider id",
validate: (x) => x && (x.match(/^[0-9a-z-]+$/) ? undefined : "a-z, 0-9 and hyphens only"), validate: (x) =>
x.match(/^[a-z-]+$/) ? undefined : "a-z and hyphens only",
}) })
if (prompts.isCancel(provider)) throw new UI.CancelledError() if (prompts.isCancel(provider)) throw new UI.CancelledError()
provider = provider.replace(/^@ai-sdk\//, "") provider = provider.replace(/^@ai-sdk\//, "")
@ -151,7 +127,7 @@ export const AuthLoginCommand = cmd({
if (provider === "amazon-bedrock") { if (provider === "amazon-bedrock") {
prompts.log.info( prompts.log.info(
"Amazon bedrock can be configured with standard AWS environment variables like AWS_BEARER_TOKEN_BEDROCK, AWS_PROFILE or AWS_ACCESS_KEY_ID", "Amazon bedrock can be configured with standard AWS environment variables like AWS_PROFILE or AWS_ACCESS_KEY_ID",
) )
prompts.outro("Done") prompts.outro("Done")
return return
@ -163,24 +139,20 @@ export const AuthLoginCommand = cmd({
options: [ options: [
{ {
label: "Claude Pro/Max", label: "Claude Pro/Max",
value: "max", value: "oauth",
}, },
{ {
label: "Create API Key", label: "API Key",
value: "console",
},
{
label: "Manually enter API Key",
value: "api", value: "api",
}, },
], ],
}) })
if (prompts.isCancel(method)) throw new UI.CancelledError() if (prompts.isCancel(method)) throw new UI.CancelledError()
if (method === "max") { if (method === "oauth") {
// some weird bug where program exits without this // some weird bug where program exits without this
await new Promise((resolve) => setTimeout(resolve, 10)) await new Promise((resolve) => setTimeout(resolve, 10))
const { url, verifier } = await AuthAnthropic.authorize("max") const { url, verifier } = await AuthAnthropic.authorize()
prompts.note("Trying to open browser...") prompts.note("Trying to open browser...")
try { try {
await open(url) await open(url)
@ -193,70 +165,17 @@ export const AuthLoginCommand = cmd({
const code = await prompts.text({ const code = await prompts.text({
message: "Paste the authorization code here: ", message: "Paste the authorization code here: ",
validate: (x) => x && (x.length > 0 ? undefined : "Required"), validate: (x) => (x.length > 0 ? undefined : "Required"),
}) })
if (prompts.isCancel(code)) throw new UI.CancelledError() if (prompts.isCancel(code)) throw new UI.CancelledError()
try { await AuthAnthropic.exchange(code, verifier)
const credentials = await AuthAnthropic.exchange(code, verifier) .then(() => {
await Auth.set("anthropic", {
type: "oauth",
refresh: credentials.refresh,
access: credentials.access,
expires: credentials.expires,
})
prompts.log.success("Login successful") prompts.log.success("Login successful")
} catch { })
.catch(() => {
prompts.log.error("Invalid code") prompts.log.error("Invalid code")
}
prompts.outro("Done")
return
}
if (method === "console") {
// some weird bug where program exits without this
await new Promise((resolve) => setTimeout(resolve, 10))
const { url, verifier } = await AuthAnthropic.authorize("console")
prompts.note("Trying to open browser...")
try {
await open(url)
} catch (e) {
prompts.log.error(
"Failed to open browser perhaps you are running without a display or X server, please open the following URL in your browser:",
)
}
prompts.log.info(url)
const code = await prompts.text({
message: "Paste the authorization code here: ",
validate: (x) => x && (x.length > 0 ? undefined : "Required"),
}) })
if (prompts.isCancel(code)) throw new UI.CancelledError()
try {
const credentials = await AuthAnthropic.exchange(code, verifier)
const accessToken = credentials.access
const response = await fetch("https://api.anthropic.com/api/oauth/claude_cli/create_api_key", {
method: "POST",
headers: {
Authorization: `Bearer ${accessToken}`,
"Content-Type": "application/x-www-form-urlencoded",
Accept: "application/json, text/plain, */*",
},
})
if (!response.ok) {
throw new Error("Failed to create API key")
}
const json = await response.json()
await Auth.set("anthropic", {
type: "api",
key: json.raw_key,
})
prompts.log.success("Login successful - API key created and saved")
} catch (error) {
prompts.log.error("Invalid code or failed to create API key")
}
prompts.outro("Done") prompts.outro("Done")
return return
} }
@ -267,13 +186,17 @@ export const AuthLoginCommand = cmd({
await new Promise((resolve) => setTimeout(resolve, 10)) await new Promise((resolve) => setTimeout(resolve, 10))
const deviceInfo = await copilot.authorize() const deviceInfo = await copilot.authorize()
prompts.note(`Please visit: ${deviceInfo.verification}\nEnter code: ${deviceInfo.user}`) prompts.note(
`Please visit: ${deviceInfo.verification}\nEnter code: ${deviceInfo.user}`,
)
const spinner = prompts.spinner() const spinner = prompts.spinner()
spinner.start("Waiting for authorization...") spinner.start("Waiting for authorization...")
while (true) { while (true) {
await new Promise((resolve) => setTimeout(resolve, deviceInfo.interval * 1000)) await new Promise((resolve) =>
setTimeout(resolve, deviceInfo.interval * 1000),
)
const response = await copilot.poll(deviceInfo.device) const response = await copilot.poll(deviceInfo.device)
if (response.status === "pending") continue if (response.status === "pending") continue
if (response.status === "success") { if (response.status === "success") {
@ -296,13 +219,9 @@ export const AuthLoginCommand = cmd({
return return
} }
if (provider === "vercel") {
prompts.log.info("You can create an api key in the dashboard")
}
const key = await prompts.password({ const key = await prompts.password({
message: "Enter your API key", message: "Enter your API key",
validate: (x) => x && (x.length > 0 ? undefined : "Required"), validate: (x) => (x.length > 0 ? undefined : "Required"),
}) })
if (prompts.isCancel(key)) throw new UI.CancelledError() if (prompts.isCancel(key)) throw new UI.CancelledError()
await Auth.set(provider, { await Auth.set(provider, {
@ -329,7 +248,12 @@ export const AuthLogoutCommand = cmd({
const providerID = await prompts.select({ const providerID = await prompts.select({
message: "Select provider", message: "Select provider",
options: credentials.map(([key, value]) => ({ options: credentials.map(([key, value]) => ({
label: (database[key]?.name || key) + UI.Style.TEXT_DIM + " (" + value.type + ")", label:
(database[key]?.name || key) +
UI.Style.TEXT_DIM +
" (" +
value.type +
")",
value: key, value: key,
})), })),
}) })

View file

@ -31,6 +31,7 @@ const FileStatusCommand = cmd({
export const FileCommand = cmd({ export const FileCommand = cmd({
command: "file", command: "file",
builder: (yargs) => yargs.command(FileReadCommand).command(FileStatusCommand).demandCommand(), builder: (yargs) =>
yargs.command(FileReadCommand).command(FileStatusCommand).demandCommand(),
async handler() {}, async handler() {},
}) })

View file

@ -1,10 +1,8 @@
import { Global } from "../../../global"
import { bootstrap } from "../../bootstrap" import { bootstrap } from "../../bootstrap"
import { cmd } from "../cmd" import { cmd } from "../cmd"
import { FileCommand } from "./file" import { FileCommand } from "./file"
import { LSPCommand } from "./lsp" import { LSPCommand } from "./lsp"
import { RipgrepCommand } from "./ripgrep" import { RipgrepCommand } from "./ripgrep"
import { ScrapCommand } from "./scrap"
import { SnapshotCommand } from "./snapshot" import { SnapshotCommand } from "./snapshot"
export const DebugCommand = cmd({ export const DebugCommand = cmd({
@ -14,26 +12,17 @@ export const DebugCommand = cmd({
.command(LSPCommand) .command(LSPCommand)
.command(RipgrepCommand) .command(RipgrepCommand)
.command(FileCommand) .command(FileCommand)
.command(ScrapCommand)
.command(SnapshotCommand) .command(SnapshotCommand)
.command(PathsCommand)
.command({ .command({
command: "wait", command: "wait",
async handler() { async handler() {
await bootstrap({ cwd: process.cwd() }, async () => { await bootstrap({ cwd: process.cwd() }, async () => {
await new Promise((resolve) => setTimeout(resolve, 1_000 * 60 * 60 * 24)) await new Promise((resolve) =>
setTimeout(resolve, 1_000 * 60 * 60 * 24),
)
}) })
}, },
}) })
.demandCommand(), .demandCommand(),
async handler() {}, async handler() {},
}) })
const PathsCommand = cmd({
command: "paths",
handler() {
for (const [key, value] of Object.entries(Global.Path)) {
console.log(key.padEnd(10), value)
}
},
})

View file

@ -6,13 +6,14 @@ import { Log } from "../../../util/log"
export const LSPCommand = cmd({ export const LSPCommand = cmd({
command: "lsp", command: "lsp",
builder: (yargs) => builder: (yargs) =>
yargs.command(DiagnosticsCommand).command(SymbolsCommand).command(DocumentSymbolsCommand).demandCommand(), yargs.command(DiagnosticsCommand).command(SymbolsCommand).demandCommand(),
async handler() {}, async handler() {},
}) })
const DiagnosticsCommand = cmd({ const DiagnosticsCommand = cmd({
command: "diagnostics <file>", command: "diagnostics <file>",
builder: (yargs) => yargs.positional("file", { type: "string", demandOption: true }), builder: (yargs) =>
yargs.positional("file", { type: "string", demandOption: true }),
async handler(args) { async handler(args) {
await bootstrap({ cwd: process.cwd() }, async () => { await bootstrap({ cwd: process.cwd() }, async () => {
await LSP.touchFile(args.file, true) await LSP.touchFile(args.file, true)
@ -23,24 +24,14 @@ const DiagnosticsCommand = cmd({
export const SymbolsCommand = cmd({ export const SymbolsCommand = cmd({
command: "symbols <query>", command: "symbols <query>",
builder: (yargs) => yargs.positional("query", { type: "string", demandOption: true }), builder: (yargs) =>
yargs.positional("query", { type: "string", demandOption: true }),
async handler(args) { async handler(args) {
await bootstrap({ cwd: process.cwd() }, async () => { await bootstrap({ cwd: process.cwd() }, async () => {
await LSP.touchFile("./src/index.ts", true)
using _ = Log.Default.time("symbols") using _ = Log.Default.time("symbols")
const results = await LSP.workspaceSymbol(args.query) const results = await LSP.workspaceSymbol(args.query)
console.log(JSON.stringify(results, null, 2)) console.log(JSON.stringify(results, null, 2))
}) })
}, },
}) })
export const DocumentSymbolsCommand = cmd({
command: "document-symbols <uri>",
builder: (yargs) => yargs.positional("uri", { type: "string", demandOption: true }),
async handler(args) {
await bootstrap({ cwd: process.cwd() }, async () => {
using _ = Log.Default.time("document-symbols")
const results = await LSP.documentSymbol(args.uri)
console.log(JSON.stringify(results, null, 2))
})
},
})

View file

@ -5,7 +5,12 @@ import { cmd } from "../cmd"
export const RipgrepCommand = cmd({ export const RipgrepCommand = cmd({
command: "rg", command: "rg",
builder: (yargs) => yargs.command(TreeCommand).command(FilesCommand).command(SearchCommand).demandCommand(), builder: (yargs) =>
yargs
.command(TreeCommand)
.command(FilesCommand)
.command(SearchCommand)
.demandCommand(),
async handler() {}, async handler() {},
}) })
@ -45,7 +50,7 @@ const FilesCommand = cmd({
const files = await Ripgrep.files({ const files = await Ripgrep.files({
cwd: app.path.cwd, cwd: app.path.cwd,
query: args.query, query: args.query,
glob: args.glob ? [args.glob] : undefined, glob: args.glob,
limit: args.limit, limit: args.limit,
}) })
console.log(files.join("\n")) console.log(files.join("\n"))

View file

@ -1,7 +0,0 @@
import { cmd } from "../cmd"
export const ScrapCommand = cmd({
command: "scrap",
builder: (yargs) => yargs,
async handler() {},
})

View file

@ -4,45 +4,36 @@ import { cmd } from "../cmd"
export const SnapshotCommand = cmd({ export const SnapshotCommand = cmd({
command: "snapshot", command: "snapshot",
builder: (yargs) => yargs.command(TrackCommand).command(PatchCommand).command(DiffCommand).demandCommand(), builder: (yargs) =>
yargs
.command(SnapshotCreateCommand)
.command(SnapshotRestoreCommand)
.demandCommand(),
async handler() {}, async handler() {},
}) })
const TrackCommand = cmd({ export const SnapshotCreateCommand = cmd({
command: "track", command: "create",
async handler() { async handler() {
await bootstrap({ cwd: process.cwd() }, async () => { await bootstrap({ cwd: process.cwd() }, async () => {
console.log(await Snapshot.track()) const result = await Snapshot.create("test")
console.log(result)
}) })
}, },
}) })
const PatchCommand = cmd({ export const SnapshotRestoreCommand = cmd({
command: "patch <hash>", command: "restore <commit>",
builder: (yargs) => builder: (yargs) =>
yargs.positional("hash", { yargs.positional("commit", {
type: "string", type: "string",
description: "hash", description: "commit",
demandOption: true, demandOption: true,
}), }),
async handler(args) { async handler(args) {
await bootstrap({ cwd: process.cwd() }, async () => { await bootstrap({ cwd: process.cwd() }, async () => {
console.log(await Snapshot.patch(args.hash)) await Snapshot.restore("test", args.commit)
}) console.log("restored")
},
})
const DiffCommand = cmd({
command: "diff <hash>",
builder: (yargs) =>
yargs.positional("hash", {
type: "string",
description: "hash",
demandOption: true,
}),
async handler(args) {
await bootstrap({ cwd: process.cwd() }, async () => {
console.log(await Snapshot.diff(args.hash))
}) })
}, },
}) })

View file

@ -1,10 +1,18 @@
import { Server } from "../../server/server" import { Server } from "../../server/server"
import fs from "fs/promises"
import path from "path"
import type { CommandModule } from "yargs" import type { CommandModule } from "yargs"
export const GenerateCommand = { export const GenerateCommand = {
command: "generate", command: "generate",
handler: async () => { handler: async () => {
const specs = await Server.openapi() const specs = await Server.openapi()
process.stdout.write(JSON.stringify(specs, null, 2)) const dir = "gen"
await fs.rmdir(dir, { recursive: true }).catch(() => {})
await fs.mkdir(dir, { recursive: true })
await Bun.write(
path.join(dir, "openapi.json"),
JSON.stringify(specs, null, 2),
)
}, },
} satisfies CommandModule } satisfies CommandModule

File diff suppressed because it is too large

View file

@ -1,80 +0,0 @@
import { cmd } from "./cmd"
import { Client } from "@modelcontextprotocol/sdk/client/index.js"
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js"
import * as prompts from "@clack/prompts"
import { UI } from "../ui"
export const McpCommand = cmd({
command: "mcp",
builder: (yargs) => yargs.command(McpAddCommand).demandCommand(),
async handler() {},
})
export const McpAddCommand = cmd({
command: "add",
describe: "add an MCP server",
async handler() {
UI.empty()
prompts.intro("Add MCP server")
const name = await prompts.text({
message: "Enter MCP server name",
validate: (x) => x && (x.length > 0 ? undefined : "Required"),
})
if (prompts.isCancel(name)) throw new UI.CancelledError()
const type = await prompts.select({
message: "Select MCP server type",
options: [
{
label: "Local",
value: "local",
hint: "Run a local command",
},
{
label: "Remote",
value: "remote",
hint: "Connect to a remote URL",
},
],
})
if (prompts.isCancel(type)) throw new UI.CancelledError()
if (type === "local") {
const command = await prompts.text({
message: "Enter command to run",
placeholder: "e.g., opencode x @modelcontextprotocol/server-filesystem",
validate: (x) => x && (x.length > 0 ? undefined : "Required"),
})
if (prompts.isCancel(command)) throw new UI.CancelledError()
prompts.log.info(`Local MCP server "${name}" configured with command: ${command}`)
prompts.outro("MCP server added successfully")
return
}
if (type === "remote") {
const url = await prompts.text({
message: "Enter MCP server URL",
placeholder: "e.g., https://example.com/mcp",
validate: (x) => {
if (!x) return "Required"
if (x.length === 0) return "Required"
const isValid = URL.canParse(x)
return isValid ? undefined : "Invalid URL"
},
})
if (prompts.isCancel(url)) throw new UI.CancelledError()
const client = new Client({
name: "opencode",
version: "1.0.0",
})
const transport = new StreamableHTTPClientTransport(new URL(url))
await client.connect(transport)
prompts.log.info(`Remote MCP server "${name}" configured with URL: ${url}`)
}
prompts.outro("MCP server added successfully")
},
})

View file

@ -2,14 +2,12 @@ import type { Argv } from "yargs"
import { Bus } from "../../bus" import { Bus } from "../../bus"
import { Provider } from "../../provider/provider" import { Provider } from "../../provider/provider"
import { Session } from "../../session" import { Session } from "../../session"
import { Message } from "../../session/message"
import { UI } from "../ui" import { UI } from "../ui"
import { cmd } from "./cmd" import { cmd } from "./cmd"
import { Flag } from "../../flag/flag" import { Flag } from "../../flag/flag"
import { Config } from "../../config/config" import { Config } from "../../config/config"
import { bootstrap } from "../bootstrap" import { bootstrap } from "../bootstrap"
import { MessageV2 } from "../../session/message-v2"
import { Mode } from "../../session/mode"
import { Identifier } from "../../id/id"
const TOOL: Record<string, [string, string]> = { const TOOL: Record<string, [string, string]> = {
todowrite: ["Todo", UI.Style.TEXT_WARNING_BOLD], todowrite: ["Todo", UI.Style.TEXT_WARNING_BOLD],
@ -54,22 +52,13 @@ export const RunCommand = cmd({
alias: ["m"], alias: ["m"],
describe: "model to use in the format of provider/model", describe: "model to use in the format of provider/model",
}) })
.option("mode", {
type: "string",
describe: "mode to use",
})
}, },
handler: async (args) => { handler: async (args) => {
let message = args.message.join(" ") const message = args.message.join(" ")
if (!process.stdin.isTTY) message += "\n" + (await Bun.stdin.text())
await bootstrap({ cwd: process.cwd() }, async () => { await bootstrap({ cwd: process.cwd() }, async () => {
const session = await (async () => { const session = await (async () => {
if (args.continue) { if (args.continue) {
const list = Session.list() const first = await Session.list().next()
const first = await list.next()
await list.return()
if (first.done) return if (first.done) return
return first.value return first.value
} }
@ -84,27 +73,32 @@ export const RunCommand = cmd({
return return
} }
const isPiped = !process.stdout.isTTY
UI.empty() UI.empty()
UI.println(UI.logo()) UI.println(UI.logo())
UI.empty() UI.empty()
UI.println(UI.Style.TEXT_NORMAL_BOLD + "> ", message)
UI.empty()
const cfg = await Config.get() const cfg = await Config.get()
if (cfg.share === "auto" || Flag.OPENCODE_AUTO_SHARE || args.share) { if (cfg.autoshare || Flag.OPENCODE_AUTO_SHARE || args.share) {
try {
await Session.share(session.id) await Session.share(session.id)
UI.println(UI.Style.TEXT_INFO_BOLD + "~ https://opencode.ai/s/" + session.id.slice(-8)) UI.println(
} catch (error) { UI.Style.TEXT_INFO_BOLD +
if (error instanceof Error && error.message.includes("disabled")) { "~ https://opencode.ai/s/" +
UI.println(UI.Style.TEXT_DANGER_BOLD + "! " + error.message) session.id.slice(-8),
} else { )
throw error
}
}
} }
UI.empty() UI.empty()
const { providerID, modelID } = args.model ? Provider.parseModel(args.model) : await Provider.defaultModel() const { providerID, modelID } = args.model
UI.println(UI.Style.TEXT_NORMAL_BOLD + "@ ", UI.Style.TEXT_NORMAL + `${providerID}/${modelID}`) ? Provider.parseModel(args.model)
: await Provider.defaultModel()
UI.println(
UI.Style.TEXT_NORMAL_BOLD + "@ ",
UI.Style.TEXT_NORMAL + `${providerID}/${modelID}`,
)
UI.empty() UI.empty()
function printEvent(color: string, type: string, title: string) { function printEvent(color: string, type: string, title: string) {
@ -116,73 +110,52 @@ export const RunCommand = cmd({
) )
} }
let text = "" Bus.subscribe(Message.Event.PartUpdated, async (evt) => {
Bus.subscribe(MessageV2.Event.PartUpdated, async (evt) => { if (evt.properties.sessionID !== session.id) return
if (evt.properties.part.sessionID !== session.id) return
if (evt.properties.part.messageID === messageID) return
const part = evt.properties.part const part = evt.properties.part
const message = await Session.getMessage(
evt.properties.sessionID,
evt.properties.messageID,
)
if (part.type === "tool" && part.state.status === "completed") { if (
const [tool, color] = TOOL[part.tool] ?? [part.tool, UI.Style.TEXT_INFO_BOLD] part.type === "tool-invocation" &&
const title = part.toolInvocation.state === "result"
part.state.title || Object.keys(part.state.input).length > 0 ? JSON.stringify(part.state.input) : "Unknown" ) {
printEvent(color, tool, title) const metadata = message.metadata.tool[part.toolInvocation.toolCallId]
const [tool, color] = TOOL[part.toolInvocation.toolName] ?? [
part.toolInvocation.toolName,
UI.Style.TEXT_INFO_BOLD,
]
printEvent(color, tool, metadata?.title || "Unknown")
} }
if (part.type === "text") { if (part.type === "text") {
text = part.text if (part.text.includes("\n")) {
if (part.time?.end) {
UI.empty() UI.empty()
UI.println(UI.markdown(text)) UI.println(part.text)
UI.empty() UI.empty()
text = ""
return return
} }
printEvent(UI.Style.TEXT_NORMAL_BOLD, "Text", part.text)
} }
}) })
let errorMsg: string | undefined
Bus.subscribe(Session.Event.Error, async (evt) => {
const { sessionID, error } = evt.properties
if (sessionID !== session.id || !error) return
let err = String(error.name)
if ("data" in error && error.data && "message" in error.data) {
err = error.data.message
}
errorMsg = errorMsg ? errorMsg + "\n" + err : err
UI.error(err)
})
const mode = args.mode ? await Mode.get(args.mode) : await Mode.list().then((x) => x[0])
const messageID = Identifier.ascending("message")
const result = await Session.chat({ const result = await Session.chat({
sessionID: session.id, sessionID: session.id,
messageID,
...(mode.model
? mode.model
: {
providerID, providerID,
modelID, modelID,
}),
mode: mode.name,
parts: [ parts: [
{ {
id: Identifier.ascending("part"),
type: "text", type: "text",
text: message, text: message,
}, },
], ],
}) })
const isPiped = !process.stdout.isTTY
if (isPiped) { if (isPiped) {
const match = result.parts.findLast((x) => x.type === "text") const match = result.parts.findLast((x) => x.type === "text")
if (match) process.stdout.write(UI.markdown(match.text)) if (match) process.stdout.write(match.text)
if (errorMsg) process.stdout.write(errorMsg)
} }
UI.empty() UI.empty()
}) })

View file

@ -1,5 +1,6 @@
import { Provider } from "../../provider/provider" import { Provider } from "../../provider/provider"
import { Server } from "../../server/server" import { Server } from "../../server/server"
import { Share } from "../../share/share"
import { bootstrap } from "../bootstrap" import { bootstrap } from "../bootstrap"
import { cmd } from "./cmd" import { cmd } from "./cmd"
@ -31,12 +32,15 @@ export const ServeCommand = cmd({
const hostname = args.hostname const hostname = args.hostname
const port = args.port const port = args.port
await Share.init()
const server = Server.listen({ const server = Server.listen({
port, port,
hostname, hostname,
}) })
console.log(`opencode server listening on http://${server.hostname}:${server.port}`) console.log(
`opencode server listening on http://${server.hostname}:${server.port}`,
)
await new Promise(() => {}) await new Promise(() => {})

View file

@ -1,98 +0,0 @@
import { cmd } from "./cmd"
interface SessionStats {
totalSessions: number
totalMessages: number
totalCost: number
totalTokens: {
input: number
output: number
reasoning: number
cache: {
read: number
write: number
}
}
toolUsage: Record<string, number>
dateRange: {
earliest: number
latest: number
}
days: number
costPerDay: number
}
export const StatsCommand = cmd({
command: "stats",
handler: async () => {},
})
export function displayStats(stats: SessionStats) {
const width = 56
function renderRow(label: string, value: string): string {
const availableWidth = width - 1
const paddingNeeded = availableWidth - label.length - value.length
const padding = Math.max(0, paddingNeeded)
return `${label}${" ".repeat(padding)}${value}`
}
// Overview section
console.log("┌────────────────────────────────────────────────────────┐")
console.log("│ OVERVIEW │")
console.log("├────────────────────────────────────────────────────────┤")
console.log(renderRow("Sessions", stats.totalSessions.toLocaleString()))
console.log(renderRow("Messages", stats.totalMessages.toLocaleString()))
console.log(renderRow("Days", stats.days.toString()))
console.log("└────────────────────────────────────────────────────────┘")
console.log()
// Cost & Tokens section
console.log("┌────────────────────────────────────────────────────────┐")
console.log("│ COST & TOKENS │")
console.log("├────────────────────────────────────────────────────────┤")
const cost = isNaN(stats.totalCost) ? 0 : stats.totalCost
const costPerDay = isNaN(stats.costPerDay) ? 0 : stats.costPerDay
console.log(renderRow("Total Cost", `$${cost.toFixed(2)}`))
console.log(renderRow("Cost/Day", `$${costPerDay.toFixed(2)}`))
console.log(renderRow("Input", formatNumber(stats.totalTokens.input)))
console.log(renderRow("Output", formatNumber(stats.totalTokens.output)))
console.log(renderRow("Cache Read", formatNumber(stats.totalTokens.cache.read)))
console.log(renderRow("Cache Write", formatNumber(stats.totalTokens.cache.write)))
console.log("└────────────────────────────────────────────────────────┘")
console.log()
// Tool Usage section
if (Object.keys(stats.toolUsage).length > 0) {
const sortedTools = Object.entries(stats.toolUsage)
.sort(([, a], [, b]) => b - a)
.slice(0, 10)
console.log("┌────────────────────────────────────────────────────────┐")
console.log("│ TOOL USAGE │")
console.log("├────────────────────────────────────────────────────────┤")
const maxCount = Math.max(...sortedTools.map(([, count]) => count))
const totalToolUsage = Object.values(stats.toolUsage).reduce((a, b) => a + b, 0)
for (const [tool, count] of sortedTools) {
const barLength = Math.max(1, Math.floor((count / maxCount) * 20))
const bar = "█".repeat(barLength)
const percentage = ((count / totalToolUsage) * 100).toFixed(1)
const content = ` ${tool.padEnd(10)} ${bar.padEnd(20)} ${count.toString().padStart(3)} (${percentage.padStart(4)}%)`
const padding = Math.max(0, width - content.length)
console.log(`${content}${" ".repeat(padding)}`)
}
console.log("└────────────────────────────────────────────────────────┘")
}
console.log()
}
function formatNumber(num: number): string {
if (num >= 1000000) {
return (num / 1000000).toFixed(1) + "M"
} else if (num >= 1000) {
return (num / 1000).toFixed(1) + "K"
}
return num.toString()
}
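StatsCommand's handler is a no-op here, so displayStats only runs if invoked directly; a sketch of calling it with a hand-built SessionStats value (all numbers are made up):
displayStats({
  totalSessions: 12,
  totalMessages: 340,
  totalCost: 4.2,
  totalTokens: { input: 120_000, output: 45_000, reasoning: 0, cache: { read: 80_000, write: 10_000 } },
  toolUsage: { bash: 40, edit: 25, read: 60 },
  dateRange: { earliest: 1717200000000, latest: 1719800000000 },
  days: 30,
  costPerDay: 0.14,
})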

View file

@ -10,53 +10,14 @@ import { Installation } from "../../installation"
import { Config } from "../../config/config" import { Config } from "../../config/config"
import { Bus } from "../../bus" import { Bus } from "../../bus"
import { Log } from "../../util/log" import { Log } from "../../util/log"
import { FileWatcher } from "../../file/watch"
import { Mode } from "../../session/mode"
import { Ide } from "../../ide"
declare global {
const OPENCODE_TUI_PATH: string
}
if (typeof OPENCODE_TUI_PATH !== "undefined") {
await import(OPENCODE_TUI_PATH as string, {
with: { type: "file" },
})
}
export const TuiCommand = cmd({ export const TuiCommand = cmd({
command: "$0 [project]", command: "$0 [project]",
describe: "start opencode tui", describe: "start opencode tui",
builder: (yargs) => builder: (yargs) =>
yargs yargs.positional("project", {
.positional("project", {
type: "string", type: "string",
describe: "path to start opencode in", describe: "path to start opencode in",
})
.option("model", {
type: "string",
alias: ["m"],
describe: "model to use in the format of provider/model",
})
.option("prompt", {
alias: ["p"],
type: "string",
describe: "prompt to use",
})
.option("mode", {
type: "string",
describe: "mode to use",
})
.option("port", {
type: "number",
describe: "port to listen on",
default: 0,
})
.option("hostname", {
alias: ["h"],
type: "string",
describe: "hostname to listen on",
default: "127.0.0.1",
}), }),
handler: async (args) => { handler: async (args) => {
while (true) { while (true) {
@ -68,29 +29,30 @@ export const TuiCommand = cmd({
return return
} }
const result = await bootstrap({ cwd }, async (app) => { const result = await bootstrap({ cwd }, async (app) => {
FileWatcher.init()
const providers = await Provider.list() const providers = await Provider.list()
if (Object.keys(providers).length === 0) { if (Object.keys(providers).length === 0) {
return "needs_provider" return "needs_provider"
} }
const server = Server.listen({ const server = Server.listen({
port: args.port, port: 0,
hostname: args.hostname, hostname: "127.0.0.1",
}) })
let cmd = ["go", "run", "./main.go"] let cmd = ["go", "run", "./main.go"]
let cwd = Bun.fileURLToPath(new URL("../../../../tui/cmd/opencode", import.meta.url)) let cwd = Bun.fileURLToPath(
const tui = Bun.embeddedFiles.find((item) => (item as File).name.includes("tui")) as File new URL("../../../../tui/cmd/opencode", import.meta.url),
if (tui) { )
let binaryName = tui.name if (Bun.embeddedFiles.length > 0) {
const blob = Bun.embeddedFiles[0] as File
let binaryName = blob.name
if (process.platform === "win32" && !binaryName.endsWith(".exe")) { if (process.platform === "win32" && !binaryName.endsWith(".exe")) {
binaryName += ".exe" binaryName += ".exe"
} }
const binary = path.join(Global.Path.cache, "tui", binaryName) const binary = path.join(Global.Path.cache, "tui", binaryName)
const file = Bun.file(binary) const file = Bun.file(binary)
if (!(await file.exists())) { if (!(await file.exists())) {
await Bun.write(file, tui, { mode: 0o755 }) await Bun.write(file, blob, { mode: 0o755 })
await fs.chmod(binary, 0o755) await fs.chmod(binary, 0o755)
} }
cwd = process.cwd() cwd = process.cwd()
@ -100,22 +62,15 @@ export const TuiCommand = cmd({
cmd, cmd,
}) })
const proc = Bun.spawn({ const proc = Bun.spawn({
cmd: [ cmd: [...cmd, ...process.argv.slice(2)],
...cmd,
...(args.model ? ["--model", args.model] : []),
...(args.prompt ? ["--prompt", args.prompt] : []),
...(args.mode ? ["--mode", args.mode] : []),
],
cwd, cwd,
stdout: "inherit", stdout: "inherit",
stderr: "inherit", stderr: "inherit",
stdin: "inherit", stdin: "inherit",
env: { env: {
...process.env, ...process.env,
CGO_ENABLED: "0",
OPENCODE_SERVER: server.url.toString(), OPENCODE_SERVER: server.url.toString(),
OPENCODE_APP_INFO: JSON.stringify(app), OPENCODE_APP_INFO: JSON.stringify(app),
OPENCODE_MODES: JSON.stringify(await Mode.list()),
}, },
onExit: () => { onExit: () => {
server.stop() server.stop()
@ -123,7 +78,7 @@ export const TuiCommand = cmd({
}) })
;(async () => { ;(async () => {
if (Installation.isDev()) return if (Installation.VERSION === "dev") return
if (Installation.isSnapshot()) return if (Installation.isSnapshot()) return
const config = await Config.global() const config = await Config.global()
if (config.autoupdate === false) return if (config.autoupdate === false) return
@ -133,15 +88,9 @@ export const TuiCommand = cmd({
const method = await Installation.method() const method = await Installation.method()
if (method === "unknown") return if (method === "unknown") return
await Installation.upgrade(method, latest) await Installation.upgrade(method, latest)
.then(() => Bus.publish(Installation.Event.Updated, { version: latest })) .then(() => {
.catch(() => {}) Bus.publish(Installation.Event.Updated, { version: latest })
})() })
;(async () => {
if (Ide.alreadyInstalled()) return
const ide = Ide.ide()
if (ide === "unknown") return
await Ide.install(ide)
.then(() => Bus.publish(Ide.Event.Installed, { ide }))
.catch(() => {}) .catch(() => {})
})() })()

View file

@ -27,26 +27,22 @@ export const UpgradeCommand = {
const detectedMethod = await Installation.method() const detectedMethod = await Installation.method()
const method = (args.method as Installation.Method) ?? detectedMethod const method = (args.method as Installation.Method) ?? detectedMethod
if (method === "unknown") { if (method === "unknown") {
prompts.log.error(`opencode is installed to ${process.execPath} and seems to be managed by a package manager`) prompts.log.error(
`opencode is installed to ${process.execPath} and seems to be managed by a package manager`,
)
prompts.outro("Done") prompts.outro("Done")
return return
} }
prompts.log.info("Using method: " + method) prompts.log.info("Using method: " + method)
const target = args.target ? args.target.replace(/^v/, "") : await Installation.latest() const target = args.target ?? (await Installation.latest())
if (Installation.VERSION === target) {
prompts.log.warn(`opencode upgrade skipped: ${target} is already installed`)
prompts.outro("Done")
return
}
prompts.log.info(`From ${Installation.VERSION}${target}`) prompts.log.info(`From ${Installation.VERSION}${target}`)
const spinner = prompts.spinner() const spinner = prompts.spinner()
spinner.start("Upgrading...") spinner.start("Upgrading...")
const err = await Installation.upgrade(method, target).catch((err) => err) const err = await Installation.upgrade(method, target).catch((err) => err)
if (err) { if (err) {
spinner.stop("Upgrade failed") spinner.stop("Upgrade failed")
if (err instanceof Installation.UpgradeFailedError) prompts.log.error(err.data.stderr) if (err instanceof Installation.UpgradeFailedError)
prompts.log.error(err.data.stderr)
else if (err instanceof Error) prompts.log.error(err.message) else if (err instanceof Error) prompts.log.error(err.message)
prompts.outro("Done") prompts.outro("Done")
return return

View file

@ -5,15 +5,14 @@ import { UI } from "./ui"
export function FormatError(input: unknown) { export function FormatError(input: unknown) {
if (MCP.Failed.isInstance(input)) if (MCP.Failed.isInstance(input))
return `MCP server "${input.data.name}" failed. Note, opencode does not support MCP authentication yet.` return `MCP server "${input.data.name}" failed. Note, opencode does not support MCP authentication yet.`
if (Config.JsonError.isInstance(input)) { if (Config.JsonError.isInstance(input))
return ( return `Config file at ${input.data.path} is not valid JSON`
`Config file at ${input.data.path} is not valid JSON(C)` + (input.data.message ? `: ${input.data.message}` : "")
)
}
if (Config.InvalidError.isInstance(input)) if (Config.InvalidError.isInstance(input))
return [ return [
`Config file at ${input.data.path} is invalid`, `Config file at ${input.data.path} is invalid`,
...(input.data.issues?.map((issue) => "↳ " + issue.message + " " + issue.path.join(".")) ?? []), ...(input.data.issues?.map(
(issue) => "↳ " + issue.message + " " + issue.path.join("."),
) ?? []),
].join("\n") ].join("\n")
if (UI.CancelledError.isInstance(input)) return "" if (UI.CancelledError.isInstance(input)) return ""

View file

@ -76,8 +76,4 @@ export namespace UI {
export function error(message: string) { export function error(message: string) {
println(Style.TEXT_DANGER_BOLD + "Error: " + Style.TEXT_NORMAL + message) println(Style.TEXT_DANGER_BOLD + "Error: " + Style.TEXT_NORMAL + message)
} }
export function markdown(text: string): string {
return text
}
} }

View file

@ -1,119 +1,26 @@
import { Log } from "../util/log" import { Log } from "../util/log"
import path from "path" import path from "path"
import os from "os"
import { z } from "zod" import { z } from "zod"
import { App } from "../app/app" import { App } from "../app/app"
import { Filesystem } from "../util/filesystem" import { Filesystem } from "../util/filesystem"
import { ModelsDev } from "../provider/models" import { ModelsDev } from "../provider/models"
import { mergeDeep, pipe } from "remeda" import { mergeDeep } from "remeda"
import { Global } from "../global" import { Global } from "../global"
import fs from "fs/promises" import fs from "fs/promises"
import { lazy } from "../util/lazy" import { lazy } from "../util/lazy"
import { NamedError } from "../util/error" import { NamedError } from "../util/error"
import matter from "gray-matter"
import { Flag } from "../flag/flag"
import { Auth } from "../auth"
import { type ParseError as JsoncParseError, parse as parseJsonc, printParseErrorCode } from "jsonc-parser"
export namespace Config { export namespace Config {
const log = Log.create({ service: "config" }) const log = Log.create({ service: "config" })
export const state = App.state("config", async (app) => { export const state = App.state("config", async (app) => {
const auth = await Auth.all()
let result = await global() let result = await global()
for (const file of ["opencode.jsonc", "opencode.json"]) { for (const file of ["opencode.jsonc", "opencode.json"]) {
const found = await Filesystem.findUp(file, app.path.cwd, app.path.root) const found = await Filesystem.findUp(file, app.path.cwd, app.path.root)
for (const resolved of found.toReversed()) { for (const resolved of found.toReversed()) {
result = mergeDeep(result, await loadFile(resolved)) result = mergeDeep(result, await load(resolved))
} }
} }
// Override with custom config if provided
if (Flag.OPENCODE_CONFIG) {
result = mergeDeep(result, await loadFile(Flag.OPENCODE_CONFIG))
log.debug("loaded custom config", { path: Flag.OPENCODE_CONFIG })
}
for (const [key, value] of Object.entries(auth)) {
if (value.type === "wellknown") {
process.env[value.key] = value.token
const wellknown = await fetch(`${key}/.well-known/opencode`).then((x) => x.json())
result = mergeDeep(result, await load(JSON.stringify(wellknown.config ?? {}), process.cwd()))
}
}
result.agent = result.agent || {}
const markdownAgents = [
...(await Filesystem.globUp("agent/*.md", Global.Path.config, Global.Path.config)),
...(await Filesystem.globUp(".opencode/agent/*.md", app.path.cwd, app.path.root)),
]
for (const item of markdownAgents) {
const content = await Bun.file(item).text()
const md = matter(content)
if (!md.data) continue
const config = {
name: path.basename(item, ".md"),
...md.data,
prompt: md.content.trim(),
}
const parsed = Agent.safeParse(config)
if (parsed.success) {
result.agent = mergeDeep(result.agent, {
[config.name]: parsed.data,
})
continue
}
throw new InvalidError({ path: item }, { cause: parsed.error })
}
// Load mode markdown files
result.mode = result.mode || {}
const markdownModes = [
...(await Filesystem.globUp("mode/*.md", Global.Path.config, Global.Path.config)),
...(await Filesystem.globUp(".opencode/mode/*.md", app.path.cwd, app.path.root)),
]
for (const item of markdownModes) {
const content = await Bun.file(item).text()
const md = matter(content)
if (!md.data) continue
const config = {
name: path.basename(item, ".md"),
...md.data,
prompt: md.content.trim(),
}
const parsed = Mode.safeParse(config)
if (parsed.success) {
result.mode = mergeDeep(result.mode, {
[config.name]: parsed.data,
})
continue
}
throw new InvalidError({ path: item }, { cause: parsed.error })
}
result.plugin = result.plugin || []
result.plugin.push(
...[
...(await Filesystem.globUp("plugin/*.ts", Global.Path.config, Global.Path.config)),
...(await Filesystem.globUp(".opencode/plugin/*.ts", app.path.cwd, app.path.root)),
].map((x) => "file://" + x),
)
// Handle migration from autoshare to share field
if (result.autoshare === true && !result.share) {
result.share = "auto"
}
if (result.keybinds?.messages_revert && !result.keybinds.messages_undo) {
result.keybinds.messages_undo = result.keybinds.messages_revert
}
if (!result.username) {
const os = await import("os")
result.username = os.userInfo().username
}
log.info("loaded", result) log.info("loaded", result)
return result return result
@ -122,12 +29,18 @@ export namespace Config {
export const McpLocal = z export const McpLocal = z
.object({ .object({
type: z.literal("local").describe("Type of MCP server connection"), type: z.literal("local").describe("Type of MCP server connection"),
command: z.string().array().describe("Command and arguments to run the MCP server"), command: z
.string()
.array()
.describe("Command and arguments to run the MCP server"),
environment: z environment: z
.record(z.string(), z.string()) .record(z.string(), z.string())
.optional() .optional()
.describe("Environment variables to set when running the MCP server"), .describe("Environment variables to set when running the MCP server"),
enabled: z.boolean().optional().describe("Enable or disable the MCP server on startup"), enabled: z
.boolean()
.optional()
.describe("Enable or disable the MCP server on startup"),
}) })
.strict() .strict()
.openapi({ .openapi({
@ -138,8 +51,10 @@ export namespace Config {
.object({ .object({
type: z.literal("remote").describe("Type of MCP server connection"), type: z.literal("remote").describe("Type of MCP server connection"),
url: z.string().describe("URL of the remote MCP server"), url: z.string().describe("URL of the remote MCP server"),
enabled: z.boolean().optional().describe("Enable or disable the MCP server on startup"), enabled: z
headers: z.record(z.string(), z.string()).optional().describe("Headers to send with the request"), .boolean()
.optional()
.describe("Enable or disable the MCP server on startup"),
}) })
.strict() .strict()
.openapi({ .openapi({
@ -149,182 +64,122 @@ export namespace Config {
export const Mcp = z.discriminatedUnion("type", [McpLocal, McpRemote]) export const Mcp = z.discriminatedUnion("type", [McpLocal, McpRemote])
export type Mcp = z.infer<typeof Mcp> export type Mcp = z.infer<typeof Mcp>
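A minimal sketch (not from the repo) of values the dev-side Mcp union above accepts; the import path, command, URL, and token are illustrative.

import { Config } from "../config/config" // path assumed

const local: Config.Mcp = {
  type: "local",
  command: ["bun", "x", "example-mcp-server"], // command plus arguments, per McpLocal
  environment: { EXAMPLE_TOKEN: "secret" },
  enabled: true,
}

const remote: Config.Mcp = {
  type: "remote",
  url: "https://mcp.example.com",
  headers: { Authorization: "Bearer example" }, // headers exists only on the dev side of McpRemote
}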
export const Mode = z
.object({
model: z.string().optional(),
temperature: z.number().optional(),
top_p: z.number().optional(),
prompt: z.string().optional(),
tools: z.record(z.string(), z.boolean()).optional(),
disable: z.boolean().optional(),
})
.openapi({
ref: "ModeConfig",
})
export type Mode = z.infer<typeof Mode>
export const Agent = Mode.extend({
description: z.string(),
}).openapi({
ref: "AgentConfig",
})
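A sketch of ModeConfig and AgentConfig values that should satisfy the dev-side schemas above; the model string reuses the "provider/model" example given further down in this file, and the tool names are assumptions rather than names defined here.

const plan: Config.Mode = {
  model: "anthropic/claude-2",
  temperature: 0.2,
  tools: { write: false, edit: false }, // tool names assumed
  prompt: "Focus on planning; do not modify files.",
}

const reviewer = Config.Agent.parse({
  description: "Reviews diffs and suggests changes", // description is the one field Agent adds over Mode
  model: "anthropic/claude-2",
})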
export const Keybinds = z export const Keybinds = z
.object({ .object({
leader: z.string().optional().default("ctrl+x").describe("Leader key for keybind combinations"), leader: z
app_help: z.string().optional().default("<leader>h").describe("Show help dialog"), .string()
switch_mode: z.string().optional().default("tab").describe("Next mode"), .optional()
switch_mode_reverse: z.string().optional().default("shift+tab").describe("Previous Mode"), .describe("Leader key for keybind combinations"),
editor_open: z.string().optional().default("<leader>e").describe("Open external editor"), help: z.string().optional().describe("Show help dialog"),
session_export: z.string().optional().default("<leader>x").describe("Export session to editor"), editor_open: z.string().optional().describe("Open external editor"),
session_new: z.string().optional().default("<leader>n").describe("Create a new session"), session_new: z.string().optional().describe("Create a new session"),
session_list: z.string().optional().default("<leader>l").describe("List all sessions"), session_list: z.string().optional().describe("List all sessions"),
session_share: z.string().optional().default("<leader>s").describe("Share current session"), session_share: z.string().optional().describe("Share current session"),
session_unshare: z.string().optional().default("none").describe("Unshare current session"), session_interrupt: z
session_interrupt: z.string().optional().default("esc").describe("Interrupt current session"), .string()
session_compact: z.string().optional().default("<leader>c").describe("Compact the session"), .optional()
tool_details: z.string().optional().default("<leader>d").describe("Toggle tool details"), .describe("Interrupt current session"),
model_list: z.string().optional().default("<leader>m").describe("List available models"), session_compact: z
theme_list: z.string().optional().default("<leader>t").describe("List available themes"), .string()
file_list: z.string().optional().default("<leader>f").describe("List files"), .optional()
file_close: z.string().optional().default("esc").describe("Close file"), .describe("Toggle compact mode for session"),
file_search: z.string().optional().default("<leader>/").describe("Search file"), tool_details: z.string().optional().describe("Show tool details"),
file_diff_toggle: z.string().optional().default("<leader>v").describe("Split/unified diff"), model_list: z.string().optional().describe("List available models"),
project_init: z.string().optional().default("<leader>i").describe("Create/update AGENTS.md"), theme_list: z.string().optional().describe("List available themes"),
input_clear: z.string().optional().default("ctrl+c").describe("Clear input field"), project_init: z
input_paste: z.string().optional().default("ctrl+v").describe("Paste from clipboard"), .string()
input_submit: z.string().optional().default("enter").describe("Submit input"), .optional()
input_newline: z.string().optional().default("shift+enter,ctrl+j").describe("Insert newline in input"), .describe("Initialize project configuration"),
messages_page_up: z.string().optional().default("pgup").describe("Scroll messages up by one page"), input_clear: z.string().optional().describe("Clear input field"),
messages_page_down: z.string().optional().default("pgdown").describe("Scroll messages down by one page"), input_paste: z.string().optional().describe("Paste from clipboard"),
messages_half_page_up: z.string().optional().default("ctrl+alt+u").describe("Scroll messages up by half page"), input_submit: z.string().optional().describe("Submit input"),
input_newline: z.string().optional().describe("Insert newline in input"),
history_previous: z
.string()
.optional()
.describe("Navigate to previous history item"),
history_next: z
.string()
.optional()
.describe("Navigate to next history item"),
messages_page_up: z
.string()
.optional()
.describe("Scroll messages up by one page"),
messages_page_down: z
.string()
.optional()
.describe("Scroll messages down by one page"),
messages_half_page_up: z
.string()
.optional()
.describe("Scroll messages up by half page"),
messages_half_page_down: z messages_half_page_down: z
.string() .string()
.optional() .optional()
.default("ctrl+alt+d")
.describe("Scroll messages down by half page"), .describe("Scroll messages down by half page"),
messages_previous: z.string().optional().default("ctrl+up").describe("Navigate to previous message"), messages_previous: z
messages_next: z.string().optional().default("ctrl+down").describe("Navigate to next message"), .string()
messages_first: z.string().optional().default("ctrl+g").describe("Navigate to first message"), .optional()
messages_last: z.string().optional().default("ctrl+alt+g").describe("Navigate to last message"), .describe("Navigate to previous message"),
messages_layout_toggle: z.string().optional().default("<leader>p").describe("Toggle layout"), messages_next: z.string().optional().describe("Navigate to next message"),
messages_copy: z.string().optional().default("<leader>y").describe("Copy message"), messages_first: z
messages_revert: z.string().optional().default("none").describe("@deprecated use messages_undo. Revert message"), .string()
messages_undo: z.string().optional().default("<leader>u").describe("Undo message"), .optional()
messages_redo: z.string().optional().default("<leader>r").describe("Redo message"), .describe("Navigate to first message"),
app_exit: z.string().optional().default("ctrl+c,<leader>q").describe("Exit the application"), messages_last: z.string().optional().describe("Navigate to last message"),
app_exit: z.string().optional().describe("Exit the application"),
}) })
.strict() .strict()
.openapi({ .openapi({
ref: "KeybindsConfig", ref: "KeybindsConfig",
}) })
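A sketch of a partial user override for the dev-side keybinds above, assuming the "<leader>" prefix in the defaults expands to the configured leader key and that "none" disables a binding (as in session_unshare's default).

import { z } from "zod"
import { Config } from "../config/config" // path assumed

const keybinds: Partial<z.infer<typeof Config.Keybinds>> = {
  leader: "ctrl+a",           // default is "ctrl+x"
  messages_undo: "<leader>z", // default is "<leader>u"; messages_revert is deprecated in its favor
  session_export: "none",     // assumed to disable the "<leader>x" default
}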
export const Layout = z.enum(["auto", "stretch"]).openapi({
ref: "LayoutConfig",
})
export type Layout = z.infer<typeof Layout>
export const Permission = z.union([z.literal("ask"), z.literal("allow")])
export type Permission = z.infer<typeof Permission>
export const Info = z export const Info = z
.object({ .object({
$schema: z.string().optional().describe("JSON schema reference for configuration validation"), $schema: z
theme: z.string().optional().describe("Theme name to use for the interface"), .string()
keybinds: Keybinds.optional().describe("Custom keybind configurations"),
plugin: z.string().array().optional(),
share: z
.enum(["manual", "auto", "disabled"])
.optional() .optional()
.describe( .describe("JSON schema reference for configuration validation"),
"Control sharing behavior:'manual' allows manual sharing via commands, 'auto' enables automatic sharing, 'disabled' disables all sharing", theme: z
), .string()
.optional()
.describe("Theme name to use for the interface"),
keybinds: Keybinds.optional().describe("Custom keybind configurations"),
autoshare: z autoshare: z
.boolean() .boolean()
.optional() .optional()
.describe("@deprecated Use 'share' field instead. Share newly created sessions automatically"), .describe("Share newly created sessions automatically"),
autoupdate: z.boolean().optional().describe("Automatically update to the latest version"), autoupdate: z
disabled_providers: z.array(z.string()).optional().describe("Disable providers that are loaded automatically"), .boolean()
model: z.string().describe("Model to use in the format of provider/model, eg anthropic/claude-2").optional(), .optional()
small_model: z .describe("Automatically update to the latest version"),
disabled_providers: z
.array(z.string())
.optional()
.describe("Disable providers that are loaded automatically"),
model: z
.string() .string()
.describe( .describe(
"Small model to use for tasks like summarization and title generation in the format of provider/model", "Model to use in the format of provider/model, eg anthropic/claude-2",
) )
.optional(), .optional(),
username: z
.string()
.optional()
.describe("Custom username to display in conversations instead of system username"),
mode: z
.object({
build: Mode.optional(),
plan: Mode.optional(),
})
.catchall(Mode)
.optional()
.describe("Modes configuration, see https://opencode.ai/docs/modes"),
agent: z
.object({
general: Agent.optional(),
})
.catchall(Agent)
.optional()
.describe("Modes configuration, see https://opencode.ai/docs/modes"),
provider: z provider: z
.record( .record(
ModelsDev.Provider.partial() ModelsDev.Provider.partial().extend({
.extend({
models: z.record(ModelsDev.Model.partial()), models: z.record(ModelsDev.Model.partial()),
options: z options: z.record(z.any()).optional(),
.object({ }),
apiKey: z.string().optional(),
baseURL: z.string().optional(),
})
.catchall(z.any())
.optional(),
})
.strict(),
) )
.optional() .optional()
.describe("Custom provider configurations and model overrides"), .describe("Custom provider configurations and model overrides"),
mcp: z.record(z.string(), Mcp).optional().describe("MCP (Model Context Protocol) server configurations"), mcp: z
formatter: z .record(z.string(), Mcp)
.record( .optional()
z.string(), .describe("MCP (Model Context Protocol) server configurations"),
z.object({ instructions: z
disabled: z.boolean().optional(), .array(z.string())
command: z.array(z.string()).optional(), .optional()
environment: z.record(z.string(), z.string()).optional(), .describe("Additional instruction files or patterns to include"),
extensions: z.array(z.string()).optional(),
}),
)
.optional(),
lsp: z
.record(
z.string(),
z.union([
z.object({
disabled: z.literal(true),
}),
z.object({
command: z.array(z.string()),
extensions: z.array(z.string()).optional(),
disabled: z.boolean().optional(),
env: z.record(z.string(), z.string()).optional(),
initialization: z.record(z.string(), z.any()).optional(),
}),
]),
)
.optional(),
instructions: z.array(z.string()).optional().describe("Additional instruction files or patterns to include"),
layout: Layout.optional().describe("@deprecated Always uses stretch layout."),
permission: z
.object({
edit: Permission.optional(),
bash: z.union([Permission, z.record(z.string(), Permission)]).optional(),
})
.optional(),
experimental: z experimental: z
.object({ .object({
hook: z hook: z
@ -360,12 +215,7 @@ export namespace Config {
export type Info = z.output<typeof Info> export type Info = z.output<typeof Info>
export const global = lazy(async () => { export const global = lazy(async () => {
let result: Info = pipe( let result = await load(path.join(Global.Path.config, "config.json"))
{},
mergeDeep(await loadFile(path.join(Global.Path.config, "config.json"))),
mergeDeep(await loadFile(path.join(Global.Path.config, "opencode.json"))),
mergeDeep(await loadFile(path.join(Global.Path.config, "opencode.jsonc"))),
)
await import(path.join(Global.Path.config, "config"), { await import(path.join(Global.Path.config, "config"), {
with: { with: {
@ -377,7 +227,10 @@ export namespace Config {
if (provider && model) result.model = `${provider}/${model}` if (provider && model) result.model = `${provider}/${model}`
result["$schema"] = "https://opencode.ai/config.json" result["$schema"] = "https://opencode.ai/config.json"
result = mergeDeep(result, rest) result = mergeDeep(result, rest)
await Bun.write(path.join(Global.Path.config, "config.json"), JSON.stringify(result, null, 2)) await Bun.write(
path.join(Global.Path.config, "config.json"),
JSON.stringify(result, null, 2),
)
await fs.unlink(path.join(Global.Path.config, "config")) await fs.unlink(path.join(Global.Path.config, "config"))
}) })
.catch(() => {}) .catch(() => {})
@ -385,93 +238,23 @@ export namespace Config {
return result return result
}) })
async function loadFile(filepath: string): Promise<Info> { async function load(path: string) {
log.info("loading", { path: filepath }) const data = await Bun.file(path)
let text = await Bun.file(filepath) .json()
.text()
.catch((err) => { .catch((err) => {
if (err.code === "ENOENT") return if (err.code === "ENOENT") return {}
throw new JsonError({ path: filepath }, { cause: err }) throw new JsonError({ path }, { cause: err })
}) })
if (!text) return {}
return load(text, filepath)
}
async function load(text: string, filepath: string) {
text = text.replace(/\{env:([^}]+)\}/g, (_, varName) => {
return process.env[varName] || ""
})
const fileMatches = text.match(/\{file:[^}]+\}/g)
if (fileMatches) {
const configDir = path.dirname(filepath)
const lines = text.split("\n")
for (const match of fileMatches) {
const lineIndex = lines.findIndex((line) => line.includes(match))
if (lineIndex !== -1 && lines[lineIndex].trim().startsWith("//")) {
continue // Skip if line is commented
}
let filePath = match.replace(/^\{file:/, "").replace(/\}$/, "")
if (filePath.startsWith("~/")) {
filePath = path.join(os.homedir(), filePath.slice(2))
}
const resolvedPath = path.isAbsolute(filePath) ? filePath : path.resolve(configDir, filePath)
const fileContent = (await Bun.file(resolvedPath).text()).trim()
// escape newlines/quotes, strip outer quotes
text = text.replace(match, JSON.stringify(fileContent).slice(1, -1))
}
}
const errors: JsoncParseError[] = []
const data = parseJsonc(text, errors, { allowTrailingComma: true })
if (errors.length) {
const lines = text.split("\n")
const errorDetails = errors
.map((e) => {
const beforeOffset = text.substring(0, e.offset).split("\n")
const line = beforeOffset.length
const column = beforeOffset[beforeOffset.length - 1].length + 1
const problemLine = lines[line - 1]
const error = `${printParseErrorCode(e.error)} at line ${line}, column ${column}`
if (!problemLine) return error
return `${error}\n Line ${line}: ${problemLine}\n${"".padStart(column + 9)}^`
})
.join("\n")
throw new JsonError({
path: filepath,
message: `\n--- JSONC Input ---\n${text}\n--- Errors ---\n${errorDetails}\n--- End ---`,
})
}
const parsed = Info.safeParse(data) const parsed = Info.safeParse(data)
if (parsed.success) { if (parsed.success) return parsed.data
if (!parsed.data.$schema) { throw new InvalidError({ path, issues: parsed.error.issues })
parsed.data.$schema = "https://opencode.ai/config.json"
await Bun.write(filepath, JSON.stringify(parsed.data, null, 2))
}
const data = parsed.data
if (data.plugin) {
for (let i = 0; i < data.plugin?.length; i++) {
const plugin = data.plugin[i]
try {
data.plugin[i] = import.meta.resolve(plugin, filepath)
} catch (err) {}
}
}
return data
} }
throw new InvalidError({ path: filepath, issues: parsed.error.issues })
}
export const JsonError = NamedError.create( export const JsonError = NamedError.create(
"ConfigJsonError", "ConfigJsonError",
z.object({ z.object({
path: z.string(), path: z.string(),
message: z.string().optional(),
}), }),
) )
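To make the substitution step in load() above concrete, a hedged illustration follows: the env var name, file path, and config path are made up, and the field names come from the Info schema in this file. {env:NAME} becomes process.env.NAME (or an empty string), and {file:...} is replaced by the trimmed, JSON-escaped contents of the referenced file, with relative paths resolved against the config file's directory and "~/" against the home directory, before the JSONC parser runs.

const raw = `{
  "$schema": "https://opencode.ai/config.json",
  "provider": {
    "anthropic": { "options": { "apiKey": "{env:EXAMPLE_API_KEY}" } }
  },
  "instructions": ["{file:~/prompts/review.md}"]
}`
// load(raw, "/home/user/.config/opencode/opencode.jsonc") inlines both placeholders first;
// if the resulting text is not valid JSONC, it throws ConfigJsonError with the annotated
// line/column details assembled above.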

View file

@ -22,7 +22,9 @@ export namespace ConfigHooks {
command: item.command, command: item.command,
}) })
Bun.spawn({ Bun.spawn({
cmd: item.command.map((x) => x.replace("$FILE", payload.properties.file)), cmd: item.command.map((x) =>
x.replace("$FILE", payload.properties.file),
),
env: item.environment, env: item.environment,
cwd: app.path.cwd, cwd: app.path.cwd,
stdout: "ignore", stdout: "ignore",
@ -31,13 +33,9 @@ export namespace ConfigHooks {
} }
}) })
Bus.subscribe(Session.Event.Idle, async (payload) => { Bus.subscribe(Session.Event.Idle, async () => {
const cfg = await Config.get() const cfg = await Config.get()
if (cfg.experimental?.hook?.session_completed) { if (cfg.experimental?.hook?.session_completed) {
const session = await Session.get(payload.properties.sessionID)
// Only fire hook for top-level sessions (not subagent sessions)
if (session.parentID) return
for (const item of cfg.experimental.hook.session_completed) { for (const item of cfg.experimental.hook.session_completed) {
log.info("session_completed", { log.info("session_completed", {
command: item.command, command: item.command,

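The hook config schema itself is cut off by the hunk boundaries above, so the shape below is only inferred from how the entries are consumed (each session_completed entry's command is handed to Bun.spawn); the command is illustrative.

const experimental = {
  hook: {
    session_completed: [
      { command: ["notify-send", "opencode", "session finished"] },
    ],
  },
}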
View file

@ -5,7 +5,6 @@ import { z } from "zod"
import { NamedError } from "../util/error" import { NamedError } from "../util/error"
import { lazy } from "../util/lazy" import { lazy } from "../util/lazy"
import { Log } from "../util/log" import { Log } from "../util/log"
import { ZipReader, BlobReader, BlobWriter } from "@zip.js/zip.js"
export namespace Fzf { export namespace Fzf {
const log = Log.create({ service: "fzf" }) const log = Log.create({ service: "fzf" })
@ -87,32 +86,20 @@ export namespace Fzf {
}) })
} }
if (config.extension === "zip") { if (config.extension === "zip") {
const zipFileReader = new ZipReader(new BlobReader(new Blob([await Bun.file(archivePath).arrayBuffer()]))); const proc = Bun.spawn(
const entries = await zipFileReader.getEntries(); ["unzip", "-j", archivePath, "fzf.exe", "-d", Global.Path.bin],
let fzfEntry: any; {
for (const entry of entries) { cwd: Global.Path.bin,
if (entry.filename === "fzf.exe") { stderr: "pipe",
fzfEntry = entry; stdout: "ignore",
break; },
} )
} await proc.exited
if (proc.exitCode !== 0)
if (!fzfEntry) {
throw new ExtractionFailedError({ throw new ExtractionFailedError({
filepath: archivePath, filepath: archivePath,
stderr: "fzf.exe not found in zip archive", stderr: await Bun.readableStreamToText(proc.stderr),
}); })
}
const fzfBlob = await fzfEntry.getData(new BlobWriter());
if (!fzfBlob) {
throw new ExtractionFailedError({
filepath: archivePath,
stderr: "Failed to extract fzf.exe from zip archive",
});
}
await Bun.write(filepath, await fzfBlob.arrayBuffer());
await zipFileReader.close();
} }
await fs.unlink(archivePath) await fs.unlink(archivePath)
if (process.platform !== "win32") await fs.chmod(filepath, 0o755) if (process.platform !== "win32") await fs.chmod(filepath, 0o755)

View file

@ -11,19 +11,6 @@ import { Log } from "../util/log"
export namespace File { export namespace File {
const log = Log.create({ service: "file" }) const log = Log.create({ service: "file" })
export const Info = z
.object({
path: z.string(),
added: z.number().int(),
removed: z.number().int(),
status: z.enum(["added", "deleted", "modified"]),
})
.openapi({
ref: "File",
})
export type Info = z.infer<typeof Info>
export const Event = { export const Event = {
Edited: Bus.event( Edited: Bus.event(
"file.edited", "file.edited",
@ -37,16 +24,20 @@ export namespace File {
const app = App.info() const app = App.info()
if (!app.git) return [] if (!app.git) return []
const diffOutput = await $`git diff --numstat HEAD`.cwd(app.path.cwd).quiet().nothrow().text() const diffOutput = await $`git diff --numstat HEAD`
.cwd(app.path.cwd)
.quiet()
.nothrow()
.text()
const changedFiles: Info[] = [] const changedFiles = []
if (diffOutput.trim()) { if (diffOutput.trim()) {
const lines = diffOutput.trim().split("\n") const lines = diffOutput.trim().split("\n")
for (const line of lines) { for (const line of lines) {
const [added, removed, filepath] = line.split("\t") const [added, removed, filepath] = line.split("\t")
changedFiles.push({ changedFiles.push({
path: filepath, file: filepath,
added: added === "-" ? 0 : parseInt(added, 10), added: added === "-" ? 0 : parseInt(added, 10),
removed: removed === "-" ? 0 : parseInt(removed, 10), removed: removed === "-" ? 0 : parseInt(removed, 10),
status: "modified", status: "modified",
@ -54,16 +45,22 @@ export namespace File {
} }
} }
const untrackedOutput = await $`git ls-files --others --exclude-standard`.cwd(app.path.cwd).quiet().nothrow().text() const untrackedOutput = await $`git ls-files --others --exclude-standard`
.cwd(app.path.cwd)
.quiet()
.nothrow()
.text()
if (untrackedOutput.trim()) { if (untrackedOutput.trim()) {
const untrackedFiles = untrackedOutput.trim().split("\n") const untrackedFiles = untrackedOutput.trim().split("\n")
for (const filepath of untrackedFiles) { for (const filepath of untrackedFiles) {
try { try {
const content = await Bun.file(path.join(app.path.root, filepath)).text() const content = await Bun.file(
path.join(app.path.root, filepath),
).text()
const lines = content.split("\n").length const lines = content.split("\n").length
changedFiles.push({ changedFiles.push({
path: filepath, file: filepath,
added: lines, added: lines,
removed: 0, removed: 0,
status: "added", status: "added",
@ -75,13 +72,17 @@ export namespace File {
} }
// Get deleted files // Get deleted files
const deletedOutput = await $`git diff --name-only --diff-filter=D HEAD`.cwd(app.path.cwd).quiet().nothrow().text() const deletedOutput = await $`git diff --name-only --diff-filter=D HEAD`
.cwd(app.path.cwd)
.quiet()
.nothrow()
.text()
if (deletedOutput.trim()) { if (deletedOutput.trim()) {
const deletedFiles = deletedOutput.trim().split("\n") const deletedFiles = deletedOutput.trim().split("\n")
for (const filepath of deletedFiles) { for (const filepath of deletedFiles) {
changedFiles.push({ changedFiles.push({
path: filepath, file: filepath,
added: 0, added: 0,
removed: 0, // Could get original line count but would require another git command removed: 0, // Could get original line count but would require another git command
status: "deleted", status: "deleted",
@ -91,7 +92,7 @@ export namespace File {
return changedFiles.map((x) => ({ return changedFiles.map((x) => ({
...x, ...x,
path: path.relative(app.path.cwd, path.join(app.path.root, x.path)), file: path.relative(app.path.cwd, path.join(app.path.root, x.file)),
})) }))
} }
@ -111,7 +112,11 @@ export namespace File {
filepath: rel, filepath: rel,
}) })
if (diff !== "unmodified") { if (diff !== "unmodified") {
const original = await $`git show HEAD:${rel}`.cwd(app.path.root).quiet().nothrow().text() const original = await $`git show HEAD:${rel}`
.cwd(app.path.root)
.quiet()
.nothrow()
.text()
const patch = createPatch(file, original, content, "old", "new", { const patch = createPatch(file, original, content, "old", "new", {
context: Infinity, context: Infinity,
}) })

View file

@ -7,7 +7,6 @@ import { NamedError } from "../util/error"
import { lazy } from "../util/lazy" import { lazy } from "../util/lazy"
import { $ } from "bun" import { $ } from "bun"
import { Fzf } from "./fzf" import { Fzf } from "./fzf"
import { ZipReader, BlobReader, BlobWriter } from "@zip.js/zip.js"
export namespace Ripgrep { export namespace Ripgrep {
const Stats = z.object({ const Stats = z.object({
@ -123,11 +122,15 @@ export namespace Ripgrep {
const state = lazy(async () => { const state = lazy(async () => {
let filepath = Bun.which("rg") let filepath = Bun.which("rg")
if (filepath) return { filepath } if (filepath) return { filepath }
filepath = path.join(Global.Path.bin, "rg" + (process.platform === "win32" ? ".exe" : "")) filepath = path.join(
Global.Path.bin,
"rg" + (process.platform === "win32" ? ".exe" : ""),
)
const file = Bun.file(filepath) const file = Bun.file(filepath)
if (!(await file.exists())) { if (!(await file.exists())) {
const platformKey = `${process.arch}-${process.platform}` as keyof typeof PLATFORM const platformKey =
`${process.arch}-${process.platform}` as keyof typeof PLATFORM
const config = PLATFORM[platformKey] const config = PLATFORM[platformKey]
if (!config) throw new UnsupportedPlatformError({ platform: platformKey }) if (!config) throw new UnsupportedPlatformError({ platform: platformKey })
@ -136,7 +139,8 @@ export namespace Ripgrep {
const url = `https://github.com/BurntSushi/ripgrep/releases/download/${version}/${filename}` const url = `https://github.com/BurntSushi/ripgrep/releases/download/${version}/${filename}`
const response = await fetch(url) const response = await fetch(url)
if (!response.ok) throw new DownloadFailedError({ url, status: response.status }) if (!response.ok)
throw new DownloadFailedError({ url, status: response.status })
const buffer = await response.arrayBuffer() const buffer = await response.arrayBuffer()
const archivePath = path.join(Global.Path.bin, filename) const archivePath = path.join(Global.Path.bin, filename)
@ -160,35 +164,21 @@ export namespace Ripgrep {
}) })
} }
if (config.extension === "zip") { if (config.extension === "zip") {
if (config.extension === "zip") { const proc = Bun.spawn(
const zipFileReader = new ZipReader(new BlobReader(new Blob([await Bun.file(archivePath).arrayBuffer()]))) ["unzip", "-j", archivePath, "*/rg.exe", "-d", Global.Path.bin],
const entries = await zipFileReader.getEntries() {
let rgEntry: any cwd: Global.Path.bin,
for (const entry of entries) { stderr: "pipe",
if (entry.filename.endsWith("rg.exe")) { stdout: "ignore",
rgEntry = entry },
break )
} await proc.exited
} if (proc.exitCode !== 0)
if (!rgEntry) {
throw new ExtractionFailedError({ throw new ExtractionFailedError({
filepath: archivePath, filepath: archivePath,
stderr: "rg.exe not found in zip archive", stderr: await Bun.readableStreamToText(proc.stderr),
}) })
} }
const rgBlob = await rgEntry.getData(new BlobWriter())
if (!rgBlob) {
throw new ExtractionFailedError({
filepath: archivePath,
stderr: "Failed to extract rg.exe from zip archive",
})
}
await Bun.write(filepath, await rgBlob.arrayBuffer())
await zipFileReader.close()
}
}
await fs.unlink(archivePath) await fs.unlink(archivePath)
if (!platformKey.endsWith("-win32")) await fs.chmod(filepath, 0o755) if (!platformKey.endsWith("-win32")) await fs.chmod(filepath, 0o755)
} }
@ -203,16 +193,17 @@ export namespace Ripgrep {
return filepath return filepath
} }
export async function files(input: { cwd: string; query?: string; glob?: string[]; limit?: number }) { export async function files(input: {
const commands = [`${$.escape(await filepath())} --files --follow --hidden --glob='!.git/*'`] cwd: string
query?: string
if (input.glob) { glob?: string
for (const g of input.glob) { limit?: number
commands[0] += ` --glob='${g}'` }) {
} const commands = [
} `${await filepath()} --files --hidden --glob='!.git/*' ${input.glob ? `--glob='${input.glob}'` : ``}`,
]
if (input.query) commands.push(`${await Fzf.filepath()} --filter=${input.query}`) if (input.query)
commands.push(`${await Fzf.filepath()} --filter=${input.query}`)
if (input.limit) commands.push(`head -n ${input.limit}`) if (input.limit) commands.push(`head -n ${input.limit}`)
const joined = commands.join(" | ") const joined = commands.join(" | ")
const result = await $`${{ raw: joined }}`.cwd(input.cwd).nothrow().text() const result = await $`${{ raw: joined }}`.cwd(input.cwd).nothrow().text()
@ -249,7 +240,6 @@ export namespace Ripgrep {
children: [], children: [],
} }
for (const file of files) { for (const file of files) {
if (file.includes(".opencode")) continue
const parts = file.split(path.sep) const parts = file.split(path.sep)
getPath(root, parts, true) getPath(root, parts, true)
} }
@ -320,8 +310,18 @@ export namespace Ripgrep {
return lines.join("\n") return lines.join("\n")
} }
export async function search(input: { cwd: string; pattern: string; glob?: string[]; limit?: number }) { export async function search(input: {
const args = [`${await filepath()}`, "--json", "--hidden", "--glob='!.git/*'"] cwd: string
pattern: string
glob?: string[]
limit?: number
}) {
const args = [
`${await filepath()}`,
"--json",
"--hidden",
"--glob='!.git/*'",
]
if (input.glob) { if (input.glob) {
for (const g of input.glob) { for (const g of input.glob) {

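A usage sketch for the dev-side files() signature above: query is fuzzy-filtered through fzf --filter, each glob entry becomes an additional --glob argument, and limit is applied with head. The import path is assumed, and the hunk ends before the return value, so its exact shape is not asserted here.

import { Ripgrep } from "./ripgrep" // path assumed

const hits = await Ripgrep.files({
  cwd: process.cwd(),
  query: "config",
  glob: ["*.ts", "!*.test.ts"], // negated globs are passed straight through to rg
  limit: 20,
})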
View file

@ -27,7 +27,10 @@ export namespace FileTime {
export async function assert(sessionID: string, filepath: string) { export async function assert(sessionID: string, filepath: string) {
const time = get(sessionID, filepath) const time = get(sessionID, filepath)
if (!time) throw new Error(`You must read the file ${filepath} before overwriting it. Use the Read tool first`) if (!time)
throw new Error(
`You must read the file ${filepath} before overwriting it. Use the Read tool first`,
)
const stats = await Bun.file(filepath).stat() const stats = await Bun.file(filepath).stat()
if (stats.mtime.getTime() > time.getTime()) { if (stats.mtime.getTime() > time.getTime()) {
throw new Error( throw new Error(

View file

@ -21,9 +21,11 @@ export namespace FileWatcher {
"file.watcher", "file.watcher",
() => { () => {
const app = App.use() const app = App.use()
if (!app.info.git) return {}
try { try {
const watcher = fs.watch(app.info.path.cwd, { recursive: true }, (event, file) => { const watcher = fs.watch(
app.info.path.cwd,
{ recursive: true },
(event, file) => {
log.info("change", { file, event }) log.info("change", { file, event })
if (!file) return if (!file) return
// for some reason async local storage is lost here // for some reason async local storage is lost here
@ -34,7 +36,8 @@ export namespace FileWatcher {
event, event,
}) })
}) })
}) },
)
return { watcher } return { watcher }
} catch { } catch {
return {} return {}
@ -46,7 +49,7 @@ export namespace FileWatcher {
) )
export function init() { export function init() {
if (Flag.OPENCODE_DISABLE_WATCHER || true) return if (Flag.OPENCODE_DISABLE_WATCHER) return
state() state()
} }
} }

View file

@ -1,7 +1,6 @@
export namespace Flag { export namespace Flag {
export const OPENCODE_AUTO_SHARE = truthy("OPENCODE_AUTO_SHARE") export const OPENCODE_AUTO_SHARE = truthy("OPENCODE_AUTO_SHARE")
export const OPENCODE_DISABLE_WATCHER = truthy("OPENCODE_DISABLE_WATCHER") export const OPENCODE_DISABLE_WATCHER = truthy("OPENCODE_DISABLE_WATCHER")
export const OPENCODE_CONFIG = process.env["OPENCODE_CONFIG"]
function truthy(key: string) { function truthy(key: string) {
const value = process.env[key]?.toLowerCase() const value = process.env[key]?.toLowerCase()

View file

@ -1,6 +1,5 @@
import { App } from "../app/app" import { App } from "../app/app"
import { BunProc } from "../bun" import { BunProc } from "../bun"
import { Filesystem } from "../util/filesystem"
export interface Info { export interface Info {
name: string name: string
@ -30,7 +29,7 @@ export const mix: Info = {
export const prettier: Info = { export const prettier: Info = {
name: "prettier", name: "prettier",
command: [BunProc.which(), "x", "prettier", "--write", "$FILE"], command: [BunProc.which(), "run", "prettier", "--write", "$FILE"],
environment: { environment: {
BUN_BE_BUN: "1", BUN_BE_BUN: "1",
}, },
@ -63,55 +62,23 @@ export const prettier: Info = {
".gql", ".gql",
], ],
async enabled() { async enabled() {
const app = App.info() // this is more complicated because we only want to use prettier if it's
const items = await Filesystem.findUp("package.json", app.path.cwd, app.path.root) // being used with the current project
for (const item of items) { try {
const json = await Bun.file(item).json() const proc = Bun.spawn({
if (json.dependencies?.prettier) return true cmd: [BunProc.which(), "run", "prettier", "--version"],
if (json.devDependencies?.prettier) return true cwd: App.info().path.cwd,
} env: {
BUN_BE_BUN: "1",
},
stdout: "ignore",
stderr: "ignore",
})
const exit = await proc.exited
return exit === 0
} catch {
return false return false
},
} }
export const biome: Info = {
name: "biome",
command: [BunProc.which(), "x", "biome", "format", "--write", "$FILE"],
environment: {
BUN_BE_BUN: "1",
},
extensions: [
".js",
".jsx",
".mjs",
".cjs",
".ts",
".tsx",
".mts",
".cts",
".html",
".htm",
".css",
".scss",
".sass",
".less",
".vue",
".svelte",
".json",
".jsonc",
".yaml",
".yml",
".toml",
".xml",
".md",
".mdx",
".graphql",
".gql",
],
async enabled() {
const app = App.info()
const items = await Filesystem.findUp("biome.json", app.path.cwd, app.path.root)
return items.length > 0
}, },
} }
@ -127,11 +94,23 @@ export const zig: Info = {
export const clang: Info = { export const clang: Info = {
name: "clang-format", name: "clang-format",
command: ["clang-format", "-i", "$FILE"], command: ["clang-format", "-i", "$FILE"],
extensions: [".c", ".cc", ".cpp", ".cxx", ".c++", ".h", ".hh", ".hpp", ".hxx", ".h++", ".ino", ".C", ".H"], extensions: [
".c",
".cc",
".cpp",
".cxx",
".c++",
".h",
".hh",
".hpp",
".hxx",
".h++",
".ino",
".C",
".H",
],
async enabled() { async enabled() {
const app = App.info() return Bun.which("clang-format") !== null
const items = await Filesystem.findUp(".clang-format", app.path.cwd, app.path.root)
return items.length > 0
}, },
} }
@ -149,29 +128,7 @@ export const ruff: Info = {
command: ["ruff", "format", "$FILE"], command: ["ruff", "format", "$FILE"],
extensions: [".py", ".pyi"], extensions: [".py", ".pyi"],
async enabled() { async enabled() {
if (!Bun.which("ruff")) return false return Bun.which("ruff") !== null
const app = App.info()
const configs = ["pyproject.toml", "ruff.toml", ".ruff.toml"]
for (const config of configs) {
const found = await Filesystem.findUp(config, app.path.cwd, app.path.root)
if (found.length > 0) {
if (config === "pyproject.toml") {
const content = await Bun.file(found[0]).text()
if (content.includes("[tool.ruff]")) return true
} else {
return true
}
}
}
const deps = ["requirements.txt", "pyproject.toml", "Pipfile"]
for (const dep of deps) {
const found = await Filesystem.findUp(dep, app.path.cwd, app.path.root)
if (found.length > 0) {
const content = await Bun.file(found[0]).text()
if (content.includes("ruff")) return true
}
}
return false
}, },
} }

View file

@ -5,40 +5,20 @@ import { Log } from "../util/log"
import path from "path" import path from "path"
import * as Formatter from "./formatter" import * as Formatter from "./formatter"
import { Config } from "../config/config"
import { mergeDeep } from "remeda"
export namespace Format { export namespace Format {
const log = Log.create({ service: "format" }) const log = Log.create({ service: "format" })
const state = App.state("format", async () => { const state = App.state("format", () => {
const enabled: Record<string, boolean> = {} const enabled: Record<string, boolean> = {}
const cfg = await Config.get()
const formatters = { ...Formatter } as Record<string, Formatter.Info>
for (const [name, item] of Object.entries(cfg.formatter ?? {})) {
if (item.disabled) {
delete formatters[name]
continue
}
const result: Formatter.Info = mergeDeep(formatters[name] ?? {}, {
command: [],
extensions: [],
...item,
})
result.enabled = async () => true
result.name = name
formatters[name] = result
}
return { return {
enabled, enabled,
formatters,
} }
}) })
async function isEnabled(item: Formatter.Info) { async function isEnabled(item: Formatter.Info) {
const s = await state() const s = state()
let status = s.enabled[item.name] let status = s.enabled[item.name]
if (status === undefined) { if (status === undefined) {
status = await item.enabled() status = await item.enabled()
@ -48,10 +28,8 @@ export namespace Format {
} }
async function getFormatter(ext: string) { async function getFormatter(ext: string) {
const formatters = await state().then((x) => x.formatters)
const result = [] const result = []
for (const item of Object.values(formatters)) { for (const item of Object.values(Formatter)) {
log.info("checking", { name: item.name, ext })
if (!item.extensions.includes(ext)) continue if (!item.extensions.includes(ext)) continue
if (!(await isEnabled(item))) continue if (!(await isEnabled(item))) continue
result.push(item) result.push(item)

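A sketch of the formatter overrides the dev-side state() above consumes, matching the formatter record added to the Config Info schema earlier in this diff: an entry with disabled: true removes a built-in, and any other entry is merged over the built-in of the same name (or registers a new one) with enabled() forced to true. The custom name and command are hypothetical.

const formatter = {
  prettier: { disabled: true },        // drop the built-in prettier formatter
  "deno-fmt": {                        // hypothetical custom formatter
    command: ["deno", "fmt", "$FILE"], // $FILE placeholder, as in the built-in commands above
    extensions: [".ts", ".tsx"],
  },
}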
View file

@ -13,7 +13,7 @@ export namespace Global {
export const Path = { export const Path = {
data, data,
bin: path.join(data, "bin"), bin: path.join(data, "bin"),
log: path.join(data, "log"), providers: path.join(config, "providers"),
cache, cache,
config, config,
state, state,
@ -23,17 +23,7 @@ export namespace Global {
await Promise.all([ await Promise.all([
fs.mkdir(Global.Path.data, { recursive: true }), fs.mkdir(Global.Path.data, { recursive: true }),
fs.mkdir(Global.Path.config, { recursive: true }), fs.mkdir(Global.Path.config, { recursive: true }),
fs.mkdir(Global.Path.cache, { recursive: true }),
fs.mkdir(Global.Path.providers, { recursive: true }),
fs.mkdir(Global.Path.state, { recursive: true }), fs.mkdir(Global.Path.state, { recursive: true }),
fs.mkdir(Global.Path.log, { recursive: true }),
]) ])
const CACHE_VERSION = "4"
const version = await Bun.file(path.join(Global.Path.cache, "version"))
.text()
.catch(() => "0")
if (version !== CACHE_VERSION) {
await fs.rm(Global.Path.cache, { recursive: true, force: true })
await Bun.file(path.join(Global.Path.cache, "version")).write(CACHE_VERSION)
}

View file

@ -5,9 +5,7 @@ export namespace Identifier {
const prefixes = { const prefixes = {
session: "ses", session: "ses",
message: "msg", message: "msg",
permission: "per",
user: "usr", user: "usr",
part: "prt",
} as const } as const
export function schema(prefix: keyof typeof prefixes) { export function schema(prefix: keyof typeof prefixes) {
@ -28,7 +26,11 @@ export namespace Identifier {
return generateID(prefix, true, given) return generateID(prefix, true, given)
} }
function generateID(prefix: keyof typeof prefixes, descending: boolean, given?: string): string { function generateID(
prefix: keyof typeof prefixes,
descending: boolean,
given?: string,
): string {
if (!given) { if (!given) {
return generateNewID(prefix, descending) return generateNewID(prefix, descending)
} }
@ -40,7 +42,8 @@ export namespace Identifier {
} }
function randomBase62(length: number): string { function randomBase62(length: number): string {
const chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" const chars =
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
let result = "" let result = ""
const bytes = randomBytes(length) const bytes = randomBytes(length)
for (let i = 0; i < length; i++) { for (let i = 0; i < length; i++) {
@ -49,7 +52,10 @@ export namespace Identifier {
return result return result
} }
function generateNewID(prefix: keyof typeof prefixes, descending: boolean): string { function generateNewID(
prefix: keyof typeof prefixes,
descending: boolean,
): string {
const currentTimestamp = Date.now() const currentTimestamp = Date.now()
if (currentTimestamp !== lastTimestamp) { if (currentTimestamp !== lastTimestamp) {
@ -67,6 +73,11 @@ export namespace Identifier {
timeBytes[i] = Number((now >> BigInt(40 - 8 * i)) & BigInt(0xff)) timeBytes[i] = Number((now >> BigInt(40 - 8 * i)) & BigInt(0xff))
} }
return prefixes[prefix] + "_" + timeBytes.toString("hex") + randomBase62(LENGTH - 12) return (
prefixes[prefix] +
"_" +
timeBytes.toString("hex") +
randomBase62(LENGTH - 12)
)
} }
} }

View file

@ -1,74 +0,0 @@
import { spawn } from "bun"
import { z } from "zod"
import { NamedError } from "../util/error"
import { Log } from "../util/log"
import { Bus } from "../bus"
const SUPPORTED_IDES = [
{ name: "Windsurf" as const, cmd: "windsurf" },
{ name: "Visual Studio Code" as const, cmd: "code" },
{ name: "Cursor" as const, cmd: "cursor" },
{ name: "VSCodium" as const, cmd: "codium" },
]
export namespace Ide {
const log = Log.create({ service: "ide" })
export const Event = {
Installed: Bus.event(
"ide.installed",
z.object({
ide: z.string(),
}),
),
}
export const AlreadyInstalledError = NamedError.create("AlreadyInstalledError", z.object({}))
export const InstallFailedError = NamedError.create(
"InstallFailedError",
z.object({
stderr: z.string(),
}),
)
export function ide() {
if (process.env["TERM_PROGRAM"] === "vscode") {
const v = process.env["GIT_ASKPASS"]
for (const ide of SUPPORTED_IDES) {
if (v?.includes(ide.name)) return ide.name
}
}
return "unknown"
}
export function alreadyInstalled() {
return process.env["OPENCODE_CALLER"] === "vscode"
}
export async function install(ide: (typeof SUPPORTED_IDES)[number]["name"]) {
const cmd = SUPPORTED_IDES.find((i) => i.name === ide)?.cmd
if (!cmd) throw new Error(`Unknown IDE: ${ide}`)
const p = spawn([cmd, "--install-extension", "sst-dev.opencode"], {
stdout: "pipe",
stderr: "pipe",
})
await p.exited
const stdout = await new Response(p.stdout).text()
const stderr = await new Response(p.stderr).text()
log.info("installed", {
ide,
stdout,
stderr,
})
if (p.exitCode !== 0) {
throw new InstallFailedError({ stderr })
}
if (stdout.includes("already installed")) {
throw new AlreadyInstalledError({})
}
}
}

View file

@ -5,7 +5,6 @@ import { RunCommand } from "./cli/cmd/run"
import { GenerateCommand } from "./cli/cmd/generate" import { GenerateCommand } from "./cli/cmd/generate"
import { Log } from "./util/log" import { Log } from "./util/log"
import { AuthCommand } from "./cli/cmd/auth" import { AuthCommand } from "./cli/cmd/auth"
import { AgentCommand } from "./cli/cmd/agent"
import { UpgradeCommand } from "./cli/cmd/upgrade" import { UpgradeCommand } from "./cli/cmd/upgrade"
import { ModelsCommand } from "./cli/cmd/models" import { ModelsCommand } from "./cli/cmd/models"
import { UI } from "./cli/ui" import { UI } from "./cli/ui"
@ -15,12 +14,6 @@ import { FormatError } from "./cli/error"
import { ServeCommand } from "./cli/cmd/serve" import { ServeCommand } from "./cli/cmd/serve"
import { TuiCommand } from "./cli/cmd/tui" import { TuiCommand } from "./cli/cmd/tui"
import { DebugCommand } from "./cli/cmd/debug" import { DebugCommand } from "./cli/cmd/debug"
import { StatsCommand } from "./cli/cmd/stats"
import { McpCommand } from "./cli/cmd/mcp"
import { GithubCommand } from "./cli/cmd/github"
import { Trace } from "./trace"
Trace.init()
const cancel = new AbortController() const cancel = new AbortController()
@ -45,45 +38,29 @@ const cli = yargs(hideBin(process.argv))
describe: "print logs to stderr", describe: "print logs to stderr",
type: "boolean", type: "boolean",
}) })
.option("log-level", { .middleware(async () => {
describe: "log level", await Log.init({ print: process.argv.includes("--print-logs") })
type: "string",
choices: ["DEBUG", "INFO", "WARN", "ERROR"],
})
.middleware(async (opts) => {
await Log.init({
print: process.argv.includes("--print-logs"),
dev: Installation.isDev(),
level: (() => {
if (opts.logLevel) return opts.logLevel as Log.Level
if (Installation.isDev()) return "DEBUG"
return "INFO"
})(),
})
Log.Default.info("opencode", { Log.Default.info("opencode", {
version: Installation.VERSION, version: Installation.VERSION,
args: process.argv.slice(2), args: process.argv.slice(2),
}) })
}) })
.usage("\n" + UI.logo()) .usage("\n" + UI.logo())
.command(McpCommand)
.command(TuiCommand) .command(TuiCommand)
.command(RunCommand) .command(RunCommand)
.command(GenerateCommand) .command(GenerateCommand)
.command(DebugCommand) .command(DebugCommand)
.command(AuthCommand) .command(AuthCommand)
.command(AgentCommand)
.command(UpgradeCommand) .command(UpgradeCommand)
.command(ServeCommand) .command(ServeCommand)
.command(ModelsCommand) .command(ModelsCommand)
.command(StatsCommand)
.command(GithubCommand)
.fail((msg) => { .fail((msg) => {
if (msg.startsWith("Unknown argument") || msg.startsWith("Not enough non-option arguments")) { if (
msg.startsWith("Unknown argument") ||
msg.startsWith("Not enough non-option arguments")
) {
cli.showHelp("log") cli.showHelp("log")
} }
process.exit(1)
}) })
.strict() .strict()
@ -120,7 +97,10 @@ try {
Log.Default.error("fatal", data) Log.Default.error("fatal", data)
const formatted = FormatError(e) const formatted = FormatError(e)
if (formatted) UI.error(formatted) if (formatted) UI.error(formatted)
if (formatted === undefined) UI.error("Unexpected error, check log file at " + Log.file() + " for more details") if (formatted === undefined)
UI.error(
"Unexpected error, check log file at " + Log.file() + " for more details",
)
process.exitCode = 1 process.exitCode = 1
} }

View file

@ -135,18 +135,12 @@ export namespace Installation {
}) })
} }
export const VERSION = typeof OPENCODE_VERSION === "string" ? OPENCODE_VERSION : "dev" export const VERSION =
export const USER_AGENT = `opencode/${VERSION}` typeof OPENCODE_VERSION === "string" ? OPENCODE_VERSION : "dev"
export async function latest() { export async function latest() {
return fetch("https://api.github.com/repos/sst/opencode/releases/latest") return fetch("https://api.github.com/repos/sst/opencode/releases/latest")
.then((res) => res.json()) .then((res) => res.json())
.then((data) => { .then((data) => data.tag_name.slice(1) as string)
if (typeof data.tag_name !== "string") {
log.error("GitHub API error", data)
throw new Error("failed to fetch latest version")
}
return data.tag_name.slice(1) as string
})
} }
} }

View file

@ -1,5 +1,9 @@
import path from "path" import path from "path"
import { createMessageConnection, StreamMessageReader, StreamMessageWriter } from "vscode-jsonrpc/node" import {
createMessageConnection,
StreamMessageReader,
StreamMessageWriter,
} from "vscode-jsonrpc/node"
import type { Diagnostic as VSCodeDiagnostic } from "vscode-languageserver-types" import type { Diagnostic as VSCodeDiagnostic } from "vscode-languageserver-types"
import { App } from "../app/app" import { App } from "../app/app"
import { Log } from "../util/log" import { Log } from "../util/log"
@ -34,54 +38,45 @@ export namespace LSPClient {
), ),
} }
export async function create(input: { serverID: string; server: LSPServer.Handle; root: string }) { export async function create(serverID: string, server: LSPServer.Handle) {
const app = App.info() const app = App.info()
const l = log.clone().tag("serverID", input.serverID) log.info("starting client", { id: serverID })
l.info("starting client")
const connection = createMessageConnection( const connection = createMessageConnection(
new StreamMessageReader(input.server.process.stdout), new StreamMessageReader(server.process.stdout),
new StreamMessageWriter(input.server.process.stdin), new StreamMessageWriter(server.process.stdin),
) )
const diagnostics = new Map<string, Diagnostic[]>() const diagnostics = new Map<string, Diagnostic[]>()
connection.onNotification("textDocument/publishDiagnostics", (params) => { connection.onNotification("textDocument/publishDiagnostics", (params) => {
const path = new URL(params.uri).pathname const path = new URL(params.uri).pathname
l.info("textDocument/publishDiagnostics", { log.info("textDocument/publishDiagnostics", {
path, path,
}) })
const exists = diagnostics.has(path) const exists = diagnostics.has(path)
diagnostics.set(path, params.diagnostics) diagnostics.set(path, params.diagnostics)
if (!exists && input.serverID === "typescript") return if (!exists && serverID === "typescript") return
Bus.publish(Event.Diagnostics, { path, serverID: input.serverID }) Bus.publish(Event.Diagnostics, { path, serverID })
})
connection.onRequest("window/workDoneProgress/create", (params) => {
l.info("window/workDoneProgress/create", params)
return null
}) })
connection.onRequest("workspace/configuration", async () => { connection.onRequest("workspace/configuration", async () => {
return [{}] return [{}]
}) })
connection.listen() connection.listen()
l.info("sending initialize") log.info("sending initialize", { id: serverID })
await withTimeout( await withTimeout(
connection.sendRequest("initialize", { connection.sendRequest("initialize", {
rootUri: "file://" + input.root, processId: server.process.pid,
processId: input.server.process.pid,
workspaceFolders: [ workspaceFolders: [
{ {
name: "workspace", name: "workspace",
uri: "file://" + input.root, uri: "file://" + app.path.cwd,
}, },
], ],
initializationOptions: { initializationOptions: {
...input.server.initialization, ...server.initialization,
}, },
capabilities: { capabilities: {
window: {
workDoneProgress: true,
},
workspace: { workspace: {
configuration: true, configuration: true,
}, },
@ -98,9 +93,9 @@ export namespace LSPClient {
}), }),
5_000, 5_000,
).catch((err) => { ).catch((err) => {
l.error("initialize error", { error: err }) log.error("initialize error", { error: err })
throw new InitializeError( throw new InitializeError(
{ serverID: input.serverID }, { serverID },
{ {
cause: err, cause: err,
}, },
@ -108,22 +103,26 @@ export namespace LSPClient {
}) })
await connection.sendNotification("initialized", {}) await connection.sendNotification("initialized", {})
log.info("initialized", {
serverID,
})
const files: { const files: {
[path: string]: number [path: string]: number
} = {} } = {}
const result = { const result = {
root: input.root,
get serverID() { get serverID() {
return input.serverID return serverID
}, },
get connection() { get connection() {
return connection return connection
}, },
notify: { notify: {
async open(input: { path: string }) { async open(input: { path: string }) {
input.path = path.isAbsolute(input.path) ? input.path : path.resolve(app.path.cwd, input.path) input.path = path.isAbsolute(input.path)
? input.path
: path.resolve(app.path.cwd, input.path)
const file = Bun.file(input.path) const file = Bun.file(input.path)
const text = await file.text() const text = await file.text()
const version = files[input.path] const version = files[input.path]
@ -155,13 +154,18 @@ export namespace LSPClient {
return diagnostics return diagnostics
}, },
async waitForDiagnostics(input: { path: string }) { async waitForDiagnostics(input: { path: string }) {
input.path = path.isAbsolute(input.path) ? input.path : path.resolve(app.path.cwd, input.path) input.path = path.isAbsolute(input.path)
? input.path
: path.resolve(app.path.cwd, input.path)
log.info("waiting for diagnostics", input) log.info("waiting for diagnostics", input)
let unsub: () => void let unsub: () => void
return await withTimeout( return await withTimeout(
new Promise<void>((resolve) => { new Promise<void>((resolve) => {
unsub = Bus.subscribe(Event.Diagnostics, (event) => { unsub = Bus.subscribe(Event.Diagnostics, (event) => {
if (event.properties.path === input.path && event.properties.serverID === result.serverID) { if (
event.properties.path === input.path &&
event.properties.serverID === result.serverID
) {
log.info("got diagnostics", input) log.info("got diagnostics", input)
unsub?.() unsub?.()
resolve() resolve()
@ -176,16 +180,13 @@ export namespace LSPClient {
}) })
}, },
async shutdown() { async shutdown() {
l.info("shutting down") log.info("shutting down", { serverID })
connection.end() connection.end()
connection.dispose() connection.dispose()
input.server.process.kill() log.info("shutdown", { serverID })
l.info("shutdown")
}, },
} }
l.info("initialized")
return result return result
} }
} }
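
Note: the `waitForDiagnostics` change above follows a subscribe, match, unsubscribe pattern bounded by a timeout. A minimal standalone sketch of that pattern is below; the `subscribe`/`Unsubscribe` shapes are illustrative assumptions, not the repo's actual `Bus` API.

```ts
type Unsubscribe = () => void

// Reject if the wrapped promise does not settle within `ms`.
function withTimeout<T>(promise: Promise<T>, ms: number): Promise<T> {
  return new Promise<T>((resolve, reject) => {
    const timer = setTimeout(() => reject(new Error(`timed out after ${ms}ms`)), ms)
    promise.then(
      (value) => { clearTimeout(timer); resolve(value) },
      (err) => { clearTimeout(timer); reject(err) },
    )
  })
}

// Resolve with the first event matching `matches`, then unsubscribe either way.
function waitForEvent<E>(
  subscribe: (handler: (event: E) => void) => Unsubscribe,
  matches: (event: E) => boolean,
  ms: number,
): Promise<E> {
  let unsub: Unsubscribe | undefined
  return withTimeout(
    new Promise<E>((resolve) => {
      unsub = subscribe((event) => {
        if (!matches(event)) return
        resolve(event)
      })
    }),
    ms,
  ).finally(() => unsub?.())
}
```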


@ -3,15 +3,19 @@ import { Log } from "../util/log"
import { LSPClient } from "./client" import { LSPClient } from "./client"
import path from "path" import path from "path"
import { LSPServer } from "./server" import { LSPServer } from "./server"
import { Ripgrep } from "../file/ripgrep"
import { z } from "zod" import { z } from "zod"
import { Config } from "../config/config"
import { spawn } from "child_process"
export namespace LSP { export namespace LSP {
const log = Log.create({ service: "lsp" }) const log = Log.create({ service: "lsp" })
export const Range = z export const Symbol = z
.object({ .object({
name: z.string(),
kind: z.number(),
location: z.object({
uri: z.string(),
range: z.object({
start: z.object({ start: z.object({
line: z.number(), line: z.number(),
character: z.number(), character: z.number(),
@ -20,76 +24,43 @@ export namespace LSP {
line: z.number(), line: z.number(),
character: z.number(), character: z.number(),
}), }),
}) }),
.openapi({
ref: "Range",
})
export type Range = z.infer<typeof Range>
export const Symbol = z
.object({
name: z.string(),
kind: z.number(),
location: z.object({
uri: z.string(),
range: Range,
}), }),
}) })
.openapi({ .openapi({
ref: "Symbol", ref: "LSP.Symbol",
}) })
export type Symbol = z.infer<typeof Symbol> export type Symbol = z.infer<typeof Symbol>
export const DocumentSymbol = z
.object({
name: z.string(),
detail: z.string().optional(),
kind: z.number(),
range: Range,
selectionRange: Range,
})
.openapi({
ref: "DocumentSymbol",
})
export type DocumentSymbol = z.infer<typeof DocumentSymbol>
const state = App.state( const state = App.state(
"lsp", "lsp",
async () => { async (app) => {
const clients: LSPClient.Info[] = [] log.info("initializing")
const servers: Record<string, LSPServer.Info> = LSPServer const clients = new Map<string, LSPClient.Info>()
const cfg = await Config.get() for (const server of Object.values(LSPServer)) {
for (const [name, item] of Object.entries(cfg.lsp ?? {})) { for (const extension of server.extensions) {
const existing = servers[name] const [file] = await Ripgrep.files({
if (item.disabled) { cwd: app.path.cwd,
delete servers[name] glob: "*" + extension,
continue })
if (!file) continue
const handle = await server.spawn(App.info())
if (!handle) break
const client = await LSPClient.create(server.id, handle).catch(
(err) => log.error("", { error: err }),
)
if (!client) break
clients.set(server.id, client)
break
} }
servers[name] = { }
...existing, log.info("initialized")
extensions: item.extensions ?? existing.extensions,
spawn: async (_app, root) => {
return { return {
process: spawn(item.command[0], item.command.slice(1), {
cwd: root,
env: {
...process.env,
...item.env,
},
}),
initialization: item.initialization,
}
},
}
}
return {
broken: new Set<string>(),
servers,
clients, clients,
} }
}, },
async (state) => { async (state) => {
for (const client of state.clients) { for (const client of state.clients.values()) {
await client.shutdown() await client.shutdown()
} }
}, },
@ -99,44 +70,16 @@ export namespace LSP {
return state() return state()
} }
async function getClients(file: string) {
const s = await state()
const extension = path.parse(file).ext
const result: LSPClient.Info[] = []
for (const server of Object.values(LSPServer)) {
if (server.extensions.length && !server.extensions.includes(extension)) continue
const root = await server.root(file, App.info())
if (!root) continue
if (s.broken.has(root + server.id)) continue
const match = s.clients.find((x) => x.root === root && x.serverID === server.id)
if (match) {
result.push(match)
continue
}
const handle = await server.spawn(App.info(), root)
if (!handle) continue
const client = await LSPClient.create({
serverID: server.id,
server: handle,
root,
}).catch((err) => {
s.broken.add(root + server.id)
handle.process.kill()
log.error("", { error: err })
})
if (!client) continue
s.clients.push(client)
result.push(client)
}
return result
}
export async function touchFile(input: string, waitForDiagnostics?: boolean) { export async function touchFile(input: string, waitForDiagnostics?: boolean) {
const clients = await getClients(input) const extension = path.parse(input).ext
const matches = Object.values(LSPServer)
.filter((x) => x.extensions.includes(extension))
.map((x) => x.id)
await run(async (client) => { await run(async (client) => {
if (!clients.includes(client)) return if (!matches.includes(client.serverID)) return
const wait = waitForDiagnostics ? client.waitForDiagnostics({ path: input }) : Promise.resolve() const wait = waitForDiagnostics
? client.waitForDiagnostics({ path: input })
: Promise.resolve()
await client.notify.open({ path: input }) await client.notify.open({ path: input })
return wait return wait
}) })
@ -154,7 +97,11 @@ export namespace LSP {
return results return results
} }
export async function hover(input: { file: string; line: number; character: number }) { export async function hover(input: {
file: string
line: number
character: number
}) {
return run((client) => { return run((client) => {
return client.connection.sendRequest("textDocument/hover", { return client.connection.sendRequest("textDocument/hover", {
textDocument: { textDocument: {
@ -168,74 +115,18 @@ export namespace LSP {
}) })
} }
enum SymbolKind {
File = 1,
Module = 2,
Namespace = 3,
Package = 4,
Class = 5,
Method = 6,
Property = 7,
Field = 8,
Constructor = 9,
Enum = 10,
Interface = 11,
Function = 12,
Variable = 13,
Constant = 14,
String = 15,
Number = 16,
Boolean = 17,
Array = 18,
Object = 19,
Key = 20,
Null = 21,
EnumMember = 22,
Struct = 23,
Event = 24,
Operator = 25,
TypeParameter = 26,
}
const kinds = [
SymbolKind.Class,
SymbolKind.Function,
SymbolKind.Method,
SymbolKind.Interface,
SymbolKind.Variable,
SymbolKind.Constant,
SymbolKind.Struct,
SymbolKind.Enum,
]
export async function workspaceSymbol(query: string) { export async function workspaceSymbol(query: string) {
return run((client) => return run((client) =>
client.connection client.connection.sendRequest("workspace/symbol", {
.sendRequest("workspace/symbol", {
query, query,
}) }),
.then((result: any) => result.filter((x: LSP.Symbol) => kinds.includes(x.kind)))
.then((result: any) => result.slice(0, 10))
.catch(() => []),
).then((result) => result.flat() as LSP.Symbol[]) ).then((result) => result.flat() as LSP.Symbol[])
} }
export async function documentSymbol(uri: string) { async function run<T>(
return run((client) => input: (client: LSPClient.Info) => Promise<T>,
client.connection ): Promise<T[]> {
.sendRequest("textDocument/documentSymbol", { const clients = await state().then((x) => [...x.clients.values()])
textDocument: {
uri,
},
})
.catch(() => []),
)
.then((result) => result.flat() as (LSP.DocumentSymbol | LSP.Symbol)[])
.then((result) => result.filter(Boolean))
}
async function run<T>(input: (client: LSPClient.Info) => Promise<T>): Promise<T[]> {
const clients = await state().then((x) => x.clients)
const tasks = clients.map((x) => input(x)) const tasks = clients.map((x) => input(x))
return Promise.all(tasks) return Promise.all(tasks)
} }


@ -94,6 +94,4 @@ export const LANGUAGE_EXTENSIONS: Record<string, string> = {
".yml": "yaml", ".yml": "yaml",
".mjs": "javascript", ".mjs": "javascript",
".cjs": "javascript", ".cjs": "javascript",
".zig": "zig",
".zon": "zig",
} as const } as const


@ -6,7 +6,6 @@ import { Log } from "../util/log"
import { BunProc } from "../bun" import { BunProc } from "../bun"
import { $ } from "bun" import { $ } from "bun"
import fs from "fs/promises" import fs from "fs/promises"
import { Filesystem } from "../util/filesystem"
export namespace LSPServer { export namespace LSPServer {
const log = Log.create({ service: "lsp.server" }) const log = Log.create({ service: "lsp.server" })
@ -16,44 +15,31 @@ export namespace LSPServer {
initialization?: Record<string, any> initialization?: Record<string, any>
} }
type RootFunction = (file: string, app: App.Info) => Promise<string | undefined>
const NearestRoot = (patterns: string[]): RootFunction => {
return async (file, app) => {
const files = Filesystem.up({
targets: patterns,
start: path.dirname(file),
stop: app.path.root,
})
const first = await files.next()
await files.return()
if (!first.value) return app.path.root
return path.dirname(first.value)
}
}
export interface Info { export interface Info {
id: string id: string
extensions: string[] extensions: string[]
global?: boolean spawn(app: App.Info): Promise<Handle | undefined>
root: RootFunction
spawn(app: App.Info, root: string): Promise<Handle | undefined>
} }
export const Typescript: Info = { export const Typescript: Info = {
id: "typescript", id: "typescript",
root: NearestRoot(["tsconfig.json", "package.json", "jsconfig.json"]),
extensions: [".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs", ".mts", ".cts"], extensions: [".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs", ".mts", ".cts"],
async spawn(app, root) { async spawn(app) {
const tsserver = await Bun.resolve("typescript/lib/tsserver.js", app.path.cwd).catch(() => {}) const tsserver = await Bun.resolve(
"typescript/lib/tsserver.js",
app.path.cwd,
).catch(() => {})
if (!tsserver) return if (!tsserver) return
const proc = spawn(BunProc.which(), ["x", "typescript-language-server", "--stdio"], { const proc = spawn(
cwd: root, BunProc.which(),
["x", "typescript-language-server", "--stdio"],
{
env: { env: {
...process.env, ...process.env,
BUN_BE_BUN: "1", BUN_BE_BUN: "1",
}, },
}) },
)
return { return {
process: proc, process: proc,
initialization: { initialization: {
@ -67,13 +53,8 @@ export namespace LSPServer {
export const Gopls: Info = { export const Gopls: Info = {
id: "golang", id: "golang",
root: async (file, app) => {
const work = await NearestRoot(["go.work"])(file, app)
if (work) return work
return NearestRoot(["go.mod", "go.sum"])(file, app)
},
extensions: [".go"], extensions: [".go"],
async spawn(_, root) { async spawn() {
let bin = Bun.which("gopls", { let bin = Bun.which("gopls", {
PATH: process.env["PATH"] + ":" + Global.Path.bin, PATH: process.env["PATH"] + ":" + Global.Path.bin,
}) })
@ -92,24 +73,24 @@ export namespace LSPServer {
log.error("Failed to install gopls") log.error("Failed to install gopls")
return return
} }
bin = path.join(Global.Path.bin, "gopls" + (process.platform === "win32" ? ".exe" : "")) bin = path.join(
Global.Path.bin,
"gopls" + (process.platform === "win32" ? ".exe" : ""),
)
log.info(`installed gopls`, { log.info(`installed gopls`, {
bin, bin,
}) })
} }
return { return {
process: spawn(bin!, { process: spawn(bin!),
cwd: root,
}),
} }
}, },
} }
export const RubyLsp: Info = { export const RubyLsp: Info = {
id: "ruby-lsp", id: "ruby-lsp",
root: NearestRoot(["Gemfile"]),
extensions: [".rb", ".rake", ".gemspec", ".ru"], extensions: [".rb", ".rake", ".gemspec", ".ru"],
async spawn(_, root) { async spawn() {
let bin = Bun.which("ruby-lsp", { let bin = Bun.which("ruby-lsp", {
PATH: process.env["PATH"] + ":" + Global.Path.bin, PATH: process.env["PATH"] + ":" + Global.Path.bin,
}) })
@ -132,15 +113,16 @@ export namespace LSPServer {
log.error("Failed to install ruby-lsp") log.error("Failed to install ruby-lsp")
return return
} }
bin = path.join(Global.Path.bin, "ruby-lsp" + (process.platform === "win32" ? ".exe" : "")) bin = path.join(
Global.Path.bin,
"ruby-lsp" + (process.platform === "win32" ? ".exe" : ""),
)
log.info(`installed ruby-lsp`, { log.info(`installed ruby-lsp`, {
bin, bin,
}) })
} }
return { return {
process: spawn(bin!, ["--stdio"], { process: spawn(bin!, ["--stdio"]),
cwd: root,
}),
} }
}, },
} }
@ -148,15 +130,17 @@ export namespace LSPServer {
export const Pyright: Info = { export const Pyright: Info = {
id: "pyright", id: "pyright",
extensions: [".py", ".pyi"], extensions: [".py", ".pyi"],
root: NearestRoot(["pyproject.toml", "setup.py", "setup.cfg", "requirements.txt", "Pipfile", "pyrightconfig.json"]), async spawn() {
async spawn(_, root) { const proc = spawn(
const proc = spawn(BunProc.which(), ["x", "pyright-langserver", "--stdio"], { BunProc.which(),
cwd: root, ["x", "pyright-langserver", "--stdio"],
{
env: { env: {
...process.env, ...process.env,
BUN_BE_BUN: "1", BUN_BE_BUN: "1",
}, },
}) },
)
return { return {
process: proc, process: proc,
} }
@ -166,8 +150,7 @@ export namespace LSPServer {
export const ElixirLS: Info = { export const ElixirLS: Info = {
id: "elixir-ls", id: "elixir-ls",
extensions: [".ex", ".exs"], extensions: [".ex", ".exs"],
root: NearestRoot(["mix.exs", "mix.lock"]), async spawn() {
async spawn(_, root) {
let binary = Bun.which("elixir-ls") let binary = Bun.which("elixir-ls")
if (!binary) { if (!binary) {
const elixirLsPath = path.join(Global.Path.bin, "elixir-ls") const elixirLsPath = path.join(Global.Path.bin, "elixir-ls")
@ -175,7 +158,9 @@ export namespace LSPServer {
Global.Path.bin, Global.Path.bin,
"elixir-ls-master", "elixir-ls-master",
"release", "release",
process.platform === "win32" ? "language_server.bat" : "language_server.sh", process.platform === "win32"
? "language_server.bat"
: "language_server.sh",
) )
if (!(await Bun.file(binary).exists())) { if (!(await Bun.file(binary).exists())) {
@ -187,7 +172,9 @@ export namespace LSPServer {
log.info("downloading elixir-ls from GitHub releases") log.info("downloading elixir-ls from GitHub releases")
const response = await fetch("https://github.com/elixir-lsp/elixir-ls/archive/refs/heads/master.zip") const response = await fetch(
"https://github.com/elixir-lsp/elixir-ls/archive/refs/heads/master.zip",
)
if (!response.ok) return if (!response.ok) return
const zipPath = path.join(Global.Path.bin, "elixir-ls.zip") const zipPath = path.join(Global.Path.bin, "elixir-ls.zip")
await Bun.file(zipPath).write(response) await Bun.file(zipPath).write(response)
@ -211,153 +198,7 @@ export namespace LSPServer {
} }
return { return {
process: spawn(binary, { process: spawn(binary),
cwd: root,
}),
}
},
}
export const Zls: Info = {
id: "zls",
extensions: [".zig", ".zon"],
root: NearestRoot(["build.zig"]),
async spawn(_, root) {
let bin = Bun.which("zls", {
PATH: process.env["PATH"] + ":" + Global.Path.bin,
})
if (!bin) {
const zig = Bun.which("zig")
if (!zig) {
log.error("Zig is required to use zls. Please install Zig first.")
return
}
log.info("downloading zls from GitHub releases")
const releaseResponse = await fetch("https://api.github.com/repos/zigtools/zls/releases/latest")
if (!releaseResponse.ok) {
log.error("Failed to fetch zls release info")
return
}
const release = await releaseResponse.json()
const platform = process.platform
const arch = process.arch
let assetName = ""
let zlsArch: string = arch
if (arch === "arm64") zlsArch = "aarch64"
else if (arch === "x64") zlsArch = "x86_64"
else if (arch === "ia32") zlsArch = "x86"
let zlsPlatform: string = platform
if (platform === "darwin") zlsPlatform = "macos"
else if (platform === "win32") zlsPlatform = "windows"
const ext = platform === "win32" ? "zip" : "tar.xz"
assetName = `zls-${zlsArch}-${zlsPlatform}.${ext}`
const supportedCombos = [
"zls-x86_64-linux.tar.xz",
"zls-x86_64-macos.tar.xz",
"zls-x86_64-windows.zip",
"zls-aarch64-linux.tar.xz",
"zls-aarch64-macos.tar.xz",
"zls-aarch64-windows.zip",
"zls-x86-linux.tar.xz",
"zls-x86-windows.zip",
]
if (!supportedCombos.includes(assetName)) {
log.error(`Platform ${platform} and architecture ${arch} is not supported by zls`)
return
}
const asset = release.assets.find((a: any) => a.name === assetName)
if (!asset) {
log.error(`Could not find asset ${assetName} in latest zls release`)
return
}
const downloadUrl = asset.browser_download_url
const downloadResponse = await fetch(downloadUrl)
if (!downloadResponse.ok) {
log.error("Failed to download zls")
return
}
const tempPath = path.join(Global.Path.bin, assetName)
await Bun.file(tempPath).write(downloadResponse)
if (ext === "zip") {
await $`unzip -o -q ${tempPath}`.cwd(Global.Path.bin).nothrow()
} else {
await $`tar -xf ${tempPath}`.cwd(Global.Path.bin).nothrow()
}
await fs.rm(tempPath, { force: true })
bin = path.join(Global.Path.bin, "zls" + (platform === "win32" ? ".exe" : ""))
if (!(await Bun.file(bin).exists())) {
log.error("Failed to extract zls binary")
return
}
if (platform !== "win32") {
await $`chmod +x ${bin}`.nothrow()
}
log.info(`installed zls`, { bin })
}
return {
process: spawn(bin, {
cwd: root,
}),
}
},
}
export const CSharp: Info = {
id: "csharp",
root: NearestRoot([".sln", ".csproj", "global.json"]),
extensions: [".cs"],
async spawn(_, root) {
let bin = Bun.which("csharp-ls", {
PATH: process.env["PATH"] + ":" + Global.Path.bin,
})
if (!bin) {
if (!Bun.which("dotnet")) {
log.error(".NET SDK is required to install csharp-ls")
return
}
log.info("installing csharp-ls via dotnet tool")
const proc = Bun.spawn({
cmd: ["dotnet", "tool", "install", "csharp-ls", "--tool-path", Global.Path.bin],
stdout: "pipe",
stderr: "pipe",
stdin: "pipe",
})
const exit = await proc.exited
if (exit !== 0) {
log.error("Failed to install csharp-ls")
return
}
bin = path.join(Global.Path.bin, "csharp-ls" + (process.platform === "win32" ? ".exe" : ""))
log.info(`installed csharp-ls`, { bin })
}
return {
process: spawn(bin, {
cwd: root,
}),
} }
}, },
} }
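
Note: the `NearestRoot` helper added above resolves a server's workspace root by walking up from the file's directory until a marker file is found, falling back to the project root. A self-contained sketch of that walk using plain Node APIs (the real helper goes through `Filesystem.up`):

```ts
import path from "node:path"
import fs from "node:fs/promises"

// Walk up from `file` looking for any of `markers`; stop at `stop` and fall back to it.
async function nearestRoot(file: string, stop: string, markers: string[]): Promise<string> {
  let dir = path.dirname(path.resolve(file))
  const boundary = path.resolve(stop)
  while (true) {
    for (const marker of markers) {
      const exists = await fs
        .access(path.join(dir, marker))
        .then(() => true)
        .catch(() => false)
      if (exists) return dir
    }
    if (dir === boundary) break
    const parent = path.dirname(dir)
    if (parent === dir) break // reached the filesystem root
    dir = parent
  }
  return boundary
}
```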


@ -1,7 +1,5 @@
import { experimental_createMCPClient, type Tool } from "ai" import { experimental_createMCPClient, type Tool } from "ai"
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js" import { Experimental_StdioMCPTransport } from "ai/mcp-stdio"
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js"
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js"
import { App } from "../app/app" import { App } from "../app/app"
import { Config } from "../config/config" import { Config } from "../config/config"
import { Log } from "../util/log" import { Log } from "../util/log"
@ -34,28 +32,14 @@ export namespace MCP {
} }
log.info("found", { key, type: mcp.type }) log.info("found", { key, type: mcp.type })
if (mcp.type === "remote") { if (mcp.type === "remote") {
const transports = [
new StreamableHTTPClientTransport(new URL(mcp.url), {
requestInit: {
headers: mcp.headers,
},
}),
new SSEClientTransport(new URL(mcp.url), {
requestInit: {
headers: mcp.headers,
},
}),
]
for (const transport of transports) {
const client = await experimental_createMCPClient({ const client = await experimental_createMCPClient({
name: key, name: key,
transport, transport: {
type: "sse",
url: mcp.url,
},
}).catch(() => {}) }).catch(() => {})
if (!client) continue if (!client) {
clients[key] = client
break
}
if (!clients[key])
Bus.publish(Session.Event.Error, { Bus.publish(Session.Event.Error, {
error: { error: {
name: "UnknownError", name: "UnknownError",
@ -64,13 +48,16 @@ export namespace MCP {
}, },
}, },
}) })
continue
}
clients[key] = client
} }
if (mcp.type === "local") { if (mcp.type === "local") {
const [cmd, ...args] = mcp.command const [cmd, ...args] = mcp.command
const client = await experimental_createMCPClient({ const client = await experimental_createMCPClient({
name: key, name: key,
transport: new StdioClientTransport({ transport: new Experimental_StdioMCPTransport({
stderr: "ignore", stderr: "ignore",
command: cmd, command: cmd,
args, args,
@ -115,8 +102,7 @@ export namespace MCP {
const result: Record<string, Tool> = {} const result: Record<string, Tool> = {}
for (const [clientName, client] of Object.entries(await clients())) { for (const [clientName, client] of Object.entries(await clients())) {
for (const [toolName, tool] of Object.entries(await client.tools())) { for (const [toolName, tool] of Object.entries(await client.tools())) {
const sanitizedClientName = clientName.replace(/\s+/g, "_") result[clientName + "_" + toolName] = tool
result[sanitizedClientName + "_" + toolName] = tool
} }
} }
return result return result
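
Note: for remote MCP servers the new code tries a Streamable HTTP transport first and falls back to SSE, keeping whichever connects. That fallback reduces to a first-success loop like the sketch below; the factory names in the usage comment are placeholders, not actual SDK calls.

```ts
// Try each candidate factory in order and return the first client that connects.
async function firstAvailable<T>(candidates: Array<() => Promise<T>>): Promise<T | undefined> {
  for (const create of candidates) {
    const client = await create().catch(() => undefined)
    if (client) return client
  }
  return undefined
}

// usage (hypothetical factories):
// const client = await firstAvailable([connectStreamableHttp, connectSse])
// if (!client) reportUnreachable(key)
```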


@ -2,8 +2,6 @@ import { App } from "../app/app"
import { z } from "zod" import { z } from "zod"
import { Bus } from "../bus" import { Bus } from "../bus"
import { Log } from "../util/log" import { Log } from "../util/log"
import { Identifier } from "../id/id"
import { Plugin } from "../plugin"
export namespace Permission { export namespace Permission {
const log = Log.create({ service: "permission" }) const log = Log.create({ service: "permission" })
@ -11,11 +9,7 @@ export namespace Permission {
export const Info = z export const Info = z
.object({ .object({
id: z.string(), id: z.string(),
type: z.string(),
pattern: z.string().optional(),
sessionID: z.string(), sessionID: z.string(),
messageID: z.string(),
callID: z.string().optional(),
title: z.string(), title: z.string(),
metadata: z.record(z.any()), metadata: z.record(z.any()),
time: z.object({ time: z.object({
@ -23,16 +17,12 @@ export namespace Permission {
}), }),
}) })
.openapi({ .openapi({
ref: "Permission", ref: "permission.info",
}) })
export type Info = z.infer<typeof Info> export type Info = z.infer<typeof Info>
export const Event = { export const Event = {
Updated: Bus.event("permission.updated", Info), Updated: Bus.event("permission.updated", Info),
Replied: Bus.event(
"permission.replied",
z.object({ sessionID: z.string(), permissionID: z.string(), response: z.string() }),
),
} }
const state = App.state( const state = App.state(
@ -50,7 +40,7 @@ export namespace Permission {
const approved: { const approved: {
[sessionID: string]: { [sessionID: string]: {
[permissionID: string]: boolean [permissionID: string]: Info
} }
} = {} } = {}
@ -62,90 +52,76 @@ export namespace Permission {
async (state) => { async (state) => {
for (const pending of Object.values(state.pending)) { for (const pending of Object.values(state.pending)) {
for (const item of Object.values(pending)) { for (const item of Object.values(pending)) {
item.reject(new RejectedError(item.info.sessionID, item.info.id, item.info.callID)) item.reject(new RejectedError(item.info.sessionID, item.info.id))
} }
} }
}, },
) )
export async function ask(input: { export function ask(input: {
type: Info["type"] id: Info["id"]
title: Info["title"]
pattern?: Info["pattern"]
callID?: Info["callID"]
sessionID: Info["sessionID"] sessionID: Info["sessionID"]
messageID: Info["messageID"] title: Info["title"]
metadata: Info["metadata"] metadata: Info["metadata"]
}) { }) {
return
const { pending, approved } = state() const { pending, approved } = state()
log.info("asking", { log.info("asking", {
sessionID: input.sessionID, sessionID: input.sessionID,
messageID: input.messageID, permissionID: input.id,
toolCallID: input.callID,
}) })
if (approved[input.sessionID]?.[input.pattern ?? input.type]) return if (approved[input.sessionID]?.[input.id]) {
const info: Info = { log.info("previously approved", {
id: Identifier.ascending("permission"), sessionID: input.sessionID,
type: input.type, permissionID: input.id,
})
return
}
const info: Info = {
id: input.id,
sessionID: input.sessionID, sessionID: input.sessionID,
messageID: input.messageID,
callID: input.callID,
title: input.title, title: input.title,
metadata: input.metadata, metadata: input.metadata,
time: { time: {
created: Date.now(), created: Date.now(),
}, },
} }
switch (
await Plugin.trigger("permission.ask", info, {
status: "ask",
}).then((x) => x.status)
) {
case "deny":
throw new RejectedError(info.sessionID, info.id, info.callID)
case "allow":
return
}
pending[input.sessionID] = pending[input.sessionID] || {} pending[input.sessionID] = pending[input.sessionID] || {}
return new Promise<void>((resolve, reject) => { return new Promise<void>((resolve, reject) => {
pending[input.sessionID][info.id] = { pending[input.sessionID][input.id] = {
info, info,
resolve, resolve,
reject, reject,
} }
setTimeout(() => {
respond({
sessionID: input.sessionID,
permissionID: input.id,
response: "always",
})
}, 1000)
Bus.publish(Event.Updated, info) Bus.publish(Event.Updated, info)
}) })
} }
export const Response = z.enum(["once", "always", "reject"]) export function respond(input: {
export type Response = z.infer<typeof Response> sessionID: Info["sessionID"]
permissionID: Info["id"]
export function respond(input: { sessionID: Info["sessionID"]; permissionID: Info["id"]; response: Response }) { response: "once" | "always" | "reject"
}) {
log.info("response", input) log.info("response", input)
const { pending, approved } = state() const { pending, approved } = state()
const match = pending[input.sessionID]?.[input.permissionID] const match = pending[input.sessionID]?.[input.permissionID]
if (!match) return if (!match) return
delete pending[input.sessionID][input.permissionID] delete pending[input.sessionID][input.permissionID]
if (input.response === "reject") { if (input.response === "reject") {
match.reject(new RejectedError(input.sessionID, input.permissionID, match.info.callID)) match.reject(new RejectedError(input.sessionID, input.permissionID))
return return
} }
match.resolve() match.resolve()
Bus.publish(Event.Replied, {
sessionID: input.sessionID,
permissionID: input.permissionID,
response: input.response,
})
if (input.response === "always") { if (input.response === "always") {
approved[input.sessionID] = approved[input.sessionID] || {} approved[input.sessionID] = approved[input.sessionID] || {}
approved[input.sessionID][match.info.pattern ?? match.info.type] = true approved[input.sessionID][input.permissionID] = match.info
for (const item of Object.values(pending[input.sessionID])) {
if ((item.info.pattern ?? item.info.type) === (match.info.pattern ?? match.info.type)) {
respond({ sessionID: item.info.sessionID, permissionID: item.info.id, response: input.response })
}
}
} }
} }
@ -153,7 +129,6 @@ export namespace Permission {
constructor( constructor(
public readonly sessionID: string, public readonly sessionID: string,
public readonly permissionID: string, public readonly permissionID: string,
public readonly toolCallID?: string,
) { ) {
super(`The user rejected permission to use this functionality`) super(`The user rejected permission to use this functionality`)
} }
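
Note: in the new `Permission` flow an `always` response is keyed on `pattern ?? type` and drains any other pending requests with the same key. A toy model of that control flow (an illustration only, not the repo's API):

```ts
type Pending = { key: string; resolve: () => void; reject: (err: Error) => void }

const pending = new Map<string, Pending>()
const approved = new Set<string>()

// Resolve immediately if the key was already approved, otherwise park the request.
function ask(id: string, key: string): Promise<void> {
  if (approved.has(key)) return Promise.resolve()
  return new Promise((resolve, reject) => {
    pending.set(id, { key, resolve, reject })
  })
}

function respond(id: string, response: "once" | "always" | "reject") {
  const match = pending.get(id)
  if (!match) return
  pending.delete(id)
  if (response === "reject") {
    match.reject(new Error("permission rejected"))
    return
  }
  match.resolve()
  if (response === "always") {
    approved.add(match.key)
    // Drain every other pending request sharing the same key.
    for (const [otherId, other] of pending) {
      if (other.key === match.key) respond(otherId, "always")
    }
  }
}
```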


@ -1,69 +0,0 @@
import type { Hooks, Plugin as PluginInstance } from "@opencode-ai/plugin"
import { App } from "../app/app"
import { Config } from "../config/config"
import { Bus } from "../bus"
import { Log } from "../util/log"
import { createOpencodeClient } from "@opencode-ai/sdk"
import { Server } from "../server/server"
import { BunProc } from "../bun"
export namespace Plugin {
const log = Log.create({ service: "plugin" })
const state = App.state("plugin", async (app) => {
const client = createOpencodeClient({
baseUrl: "http://localhost:4096",
fetch: async (...args) => Server.app().fetch(...args),
})
const config = await Config.get()
const hooks = []
for (let plugin of config.plugin ?? []) {
log.info("loading plugin", { path: plugin })
if (!plugin.startsWith("file://")) {
const [pkg, version] = plugin.split("@")
plugin = await BunProc.install(pkg, version ?? "latest")
}
const mod = await import(plugin)
for (const [_name, fn] of Object.entries<PluginInstance>(mod)) {
const init = await fn({
client,
app,
$: Bun.$,
})
hooks.push(init)
}
}
return {
hooks,
}
})
export async function trigger<
Name extends keyof Required<Hooks>,
Input = Parameters<Required<Hooks>[Name]>[0],
Output = Parameters<Required<Hooks>[Name]>[1],
>(name: Name, input: Input, output: Output): Promise<Output> {
if (!name) return output
for (const hook of await state().then((x) => x.hooks)) {
const fn = hook[name]
if (!fn) continue
// @ts-expect-error if you feel adventurous, please fix the typing, make sure to bump the try-counter if you
// give up.
// try-counter: 2
await fn(input, output)
}
return output
}
export function init() {
Bus.subscribeAll(async (input) => {
const hooks = await state().then((x) => x.hooks)
for (const hook of hooks) {
hook["event"]?.({
event: input,
})
}
})
}
}
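
Note: the plugin loader above imports each configured module and calls every exported function with `{ client, app, $ }`, collecting the returned hooks; `Plugin.trigger` then invokes a named hook with an input and a mutable output. A hedged sketch of what such a plugin module might export, using only the hook names visible in this diff (`permission.ask`, `event`); everything else is assumed.

```ts
type PermissionDecision = { status: "ask" | "allow" | "deny" }

// Hypothetical plugin: receives the injected context, returns a hooks object.
export const ExamplePlugin = async (_ctx: { client: unknown; app: unknown; $: unknown }) => {
  return {
    "permission.ask": async (_info: unknown, output: PermissionDecision) => {
      // auto-allow everything in this toy example
      output.status = "allow"
    },
    event: async (_input: { event: unknown }) => {
      // observe bus events here
    },
  }
}
```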


@ -3,7 +3,6 @@ import { Log } from "../util/log"
import path from "path" import path from "path"
import { z } from "zod" import { z } from "zod"
import { data } from "./models-macro" with { type: "macro" } import { data } from "./models-macro" with { type: "macro" }
import { Installation } from "../installation"
export namespace ModelsDev { export namespace ModelsDev {
const log = Log.create({ service: "models.dev" }) const log = Log.create({ service: "models.dev" })
@ -51,30 +50,21 @@ export namespace ModelsDev {
export type Provider = z.infer<typeof Provider> export type Provider = z.infer<typeof Provider>
export async function get() { export async function get() {
refresh()
const file = Bun.file(filepath) const file = Bun.file(filepath)
const result = await file.json().catch(() => {}) const result = await file.json().catch(() => {})
if (result) return result as Record<string, Provider> if (result) {
refresh()
return result as Record<string, Provider>
}
refresh()
const json = await data() const json = await data()
return JSON.parse(json) as Record<string, Provider> return JSON.parse(json) as Record<string, Provider>
} }
export async function refresh() { async function refresh() {
const file = Bun.file(filepath) const file = Bun.file(filepath)
log.info("refreshing", { log.info("refreshing")
file, const result = await fetch("https://models.dev/api.json").catch(() => {})
}) if (result && result.ok) await Bun.write(file, result)
const result = await fetch("https://models.dev/api.json", {
headers: {
"User-Agent": Installation.USER_AGENT,
},
}).catch((e) => {
log.error("Failed to fetch models.dev", {
error: e,
})
})
if (result && result.ok) await Bun.write(file, await result.text())
} }
} }
setInterval(() => ModelsDev.refresh(), 60 * 1000 * 60).unref()
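
Note: `get()` above serves the cached models.dev snapshot when present, kicks off a background `refresh()`, and otherwise falls back to the bundled data, while the `setInterval` keeps the cache warm hourly. The cache-then-refresh shape, reduced to a generic helper (names are illustrative):

```ts
// Return the cached value when available, refreshing in the background either way;
// fall back to bundled data only when no cache exists yet.
async function cachedJson<T>(
  readCache: () => Promise<T | undefined>,
  refresh: () => Promise<void>,
  fallback: () => Promise<T>,
): Promise<T> {
  void refresh().catch(() => {}) // fire-and-forget background update
  const cached = await readCache()
  if (cached !== undefined) return cached
  return fallback()
}
```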


@ -5,11 +5,23 @@ import { mergeDeep, sortBy } from "remeda"
import { NoSuchModelError, type LanguageModel, type Provider as SDK } from "ai" import { NoSuchModelError, type LanguageModel, type Provider as SDK } from "ai"
import { Log } from "../util/log" import { Log } from "../util/log"
import { BunProc } from "../bun" import { BunProc } from "../bun"
import { BashTool } from "../tool/bash"
import { EditTool } from "../tool/edit"
import { WebFetchTool } from "../tool/webfetch"
import { GlobTool } from "../tool/glob"
import { GrepTool } from "../tool/grep"
import { ListTool } from "../tool/ls"
import { PatchTool } from "../tool/patch"
import { ReadTool } from "../tool/read"
import type { Tool } from "../tool/tool"
import { WriteTool } from "../tool/write"
import { TodoReadTool, TodoWriteTool } from "../tool/todo"
import { AuthAnthropic } from "../auth/anthropic" import { AuthAnthropic } from "../auth/anthropic"
import { AuthCopilot } from "../auth/copilot" import { AuthCopilot } from "../auth/copilot"
import { ModelsDev } from "./models" import { ModelsDev } from "./models"
import { NamedError } from "../util/error" import { NamedError } from "../util/error"
import { Auth } from "../auth" import { Auth } from "../auth"
// import { TaskTool } from "../tool/task"
export namespace Provider { export namespace Provider {
const log = Log.create({ service: "provider" }) const log = Log.create({ service: "provider" })
@ -79,7 +91,8 @@ export namespace Provider {
if (!info || info.type !== "oauth") return if (!info || info.type !== "oauth") return
if (!info.access || info.expires < Date.now()) { if (!info.access || info.expires < Date.now()) {
const tokens = await copilot.access(info.refresh) const tokens = await copilot.access(info.refresh)
if (!tokens) throw new Error("GitHub Copilot authentication expired") if (!tokens)
throw new Error("GitHub Copilot authentication expired")
await Auth.set("github-copilot", { await Auth.set("github-copilot", {
type: "oauth", type: "oauth",
...tokens, ...tokens,
@ -87,27 +100,25 @@ export namespace Provider {
info.access = tokens.access info.access = tokens.access
} }
let isAgentCall = false let isAgentCall = false
let isVisionRequest = false
try { try {
const body = typeof init.body === "string" ? JSON.parse(init.body) : init.body const body =
typeof init.body === "string"
? JSON.parse(init.body)
: init.body
if (body?.messages) { if (body?.messages) {
isAgentCall = body.messages.some((msg: any) => msg.role && ["tool", "assistant"].includes(msg.role)) isAgentCall = body.messages.some(
isVisionRequest = body.messages.some(
(msg: any) => (msg: any) =>
Array.isArray(msg.content) && msg.content.some((part: any) => part.type === "image_url"), msg.role && ["tool", "assistant"].includes(msg.role),
) )
} }
} catch {} } catch {}
const headers: Record<string, string> = { const headers = {
...init.headers, ...init.headers,
...copilot.HEADERS, ...copilot.HEADERS,
Authorization: `Bearer ${info.access}`, Authorization: `Bearer ${info.access}`,
"Openai-Intent": "conversation-edits", "Openai-Intent": "conversation-edits",
"X-Initiator": isAgentCall ? "agent" : "user", "X-Initiator": isAgentCall ? "agent" : "user",
} }
if (isVisionRequest) {
headers["Copilot-Vision-Request"] = "true"
}
delete headers["x-api-key"] delete headers["x-api-key"]
return fetch(input, { return fetch(input, {
...init, ...init,
@ -126,22 +137,15 @@ export namespace Provider {
options: {}, options: {},
} }
}, },
azure: async () => {
return {
autoload: false,
async getModel(sdk: any, modelID: string) {
return sdk.responses(modelID)
},
options: {},
}
},
"amazon-bedrock": async () => { "amazon-bedrock": async () => {
if (!process.env["AWS_PROFILE"] && !process.env["AWS_ACCESS_KEY_ID"] && !process.env["AWS_BEARER_TOKEN_BEDROCK"]) if (!process.env["AWS_PROFILE"] && !process.env["AWS_ACCESS_KEY_ID"])
return { autoload: false } return { autoload: false }
const region = process.env["AWS_REGION"] ?? "us-east-1" const region = process.env["AWS_REGION"] ?? "us-east-1"
const { fromNodeProviderChain } = await import(await BunProc.install("@aws-sdk/credential-providers")) const { fromNodeProviderChain } = await import(
await BunProc.install("@aws-sdk/credential-providers")
)
return { return {
autoload: true, autoload: true,
options: { options: {
@ -153,7 +157,9 @@ export namespace Provider {
switch (regionPrefix) { switch (regionPrefix) {
case "us": { case "us": {
const modelRequiresPrefix = ["claude", "deepseek"].some((m) => modelID.includes(m)) const modelRequiresPrefix = ["claude", "deepseek"].some((m) =>
modelID.includes(m),
)
if (modelRequiresPrefix) { if (modelRequiresPrefix) {
modelID = `${regionPrefix}.${modelID}` modelID = `${regionPrefix}.${modelID}`
} }
@ -168,18 +174,25 @@ export namespace Provider {
"eu-south-1", "eu-south-1",
"eu-south-2", "eu-south-2",
].some((r) => region.includes(r)) ].some((r) => region.includes(r))
const modelRequiresPrefix = ["claude", "nova-lite", "nova-micro", "llama3", "pixtral"].some((m) => const modelRequiresPrefix = [
modelID.includes(m), "claude",
) "nova-lite",
"nova-micro",
"llama3",
"pixtral",
].some((m) => modelID.includes(m))
if (regionRequiresPrefix && modelRequiresPrefix) { if (regionRequiresPrefix && modelRequiresPrefix) {
modelID = `${regionPrefix}.${modelID}` modelID = `${regionPrefix}.${modelID}`
} }
break break
} }
case "ap": { case "ap": {
const modelRequiresPrefix = ["claude", "nova-lite", "nova-micro", "nova-pro"].some((m) => const modelRequiresPrefix = [
modelID.includes(m), "claude",
) "nova-lite",
"nova-micro",
"nova-pro",
].some((m) => modelID.includes(m))
if (modelRequiresPrefix) { if (modelRequiresPrefix) {
regionPrefix = "apac" regionPrefix = "apac"
modelID = `${regionPrefix}.${modelID}` modelID = `${regionPrefix}.${modelID}`
@ -203,17 +216,6 @@ export namespace Provider {
}, },
} }
}, },
vercel: async () => {
return {
autoload: false,
options: {
headers: {
"http-referer": "https://opencode.ai/",
"x-title": "opencode",
},
},
}
},
} }
const state = App.state("provider", async () => { const state = App.state("provider", async () => {
@ -228,7 +230,10 @@ export namespace Provider {
options: Record<string, any> options: Record<string, any>
} }
} = {} } = {}
const models = new Map<string, { info: ModelsDev.Model; language: LanguageModel }>() const models = new Map<
string,
{ info: ModelsDev.Model; language: LanguageModel }
>()
const sdk = new Map<string, SDK>() const sdk = new Map<string, SDK>()
log.info("init") log.info("init")
@ -243,7 +248,7 @@ export namespace Provider {
if (!provider) { if (!provider) {
const info = database[id] const info = database[id]
if (!info) return if (!info) return
if (info.api && !options["baseURL"]) options["baseURL"] = info.api if (info.api) options["baseURL"] = info.api
providers[id] = { providers[id] = {
source, source,
info, info,
@ -280,19 +285,13 @@ export namespace Provider {
reasoning: model.reasoning ?? existing?.reasoning ?? false, reasoning: model.reasoning ?? existing?.reasoning ?? false,
temperature: model.temperature ?? existing?.temperature ?? false, temperature: model.temperature ?? existing?.temperature ?? false,
tool_call: model.tool_call ?? existing?.tool_call ?? true, tool_call: model.tool_call ?? existing?.tool_call ?? true,
cost: cost: {
!model.cost && !existing?.cost ...existing?.cost,
? { ...model.cost,
input: 0, input: 0,
output: 0, output: 0,
cache_read: 0, cache_read: 0,
cache_write: 0, cache_write: 0,
}
: {
cache_read: 0,
cache_write: 0,
...existing?.cost,
...model.cost,
}, },
options: { options: {
...existing?.options, ...existing?.options,
@ -309,7 +308,9 @@ export namespace Provider {
database[providerID] = parsed database[providerID] = parsed
} }
const disabled = await Config.get().then((cfg) => new Set(cfg.disabled_providers ?? [])) const disabled = await Config.get().then(
(cfg) => new Set(cfg.disabled_providers ?? []),
)
// load env // load env
for (const [providerID, provider] of Object.entries(database)) { for (const [providerID, provider] of Object.entries(database)) {
if (disabled.has(providerID)) continue if (disabled.has(providerID)) continue
@ -336,7 +337,12 @@ export namespace Provider {
if (disabled.has(providerID)) continue if (disabled.has(providerID)) continue
const result = await fn(database[providerID]) const result = await fn(database[providerID])
if (result && (result.autoload || providers[providerID])) { if (result && (result.autoload || providers[providerID])) {
mergeProvider(providerID, result.options ?? {}, "custom", result.getModel) mergeProvider(
providerID,
result.options ?? {},
"custom",
result.getModel,
)
} }
} }
@ -373,12 +379,9 @@ export namespace Provider {
const existing = s.sdk.get(provider.id) const existing = s.sdk.get(provider.id)
if (existing) return existing if (existing) return existing
const pkg = provider.npm ?? provider.id const pkg = provider.npm ?? provider.id
const mod = await import(await BunProc.install(pkg, "beta")) const mod = await import(await BunProc.install(pkg, "latest"))
const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!] const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!]
const loaded = fn({ const loaded = fn(s.providers[provider.id]?.options)
name: provider.id,
...s.providers[provider.id]?.options,
})
s.sdk.set(provider.id, loaded) s.sdk.set(provider.id, loaded)
return loaded as SDK return loaded as SDK
})().catch((e) => { })().catch((e) => {
@ -386,10 +389,6 @@ export namespace Provider {
}) })
} }
export async function getProvider(providerID: string) {
return state().then((s) => s.providers[providerID])
}
export async function getModel(providerID: string, modelID: string) { export async function getModel(providerID: string, modelID: string) {
const key = `${providerID}/${modelID}` const key = `${providerID}/${modelID}`
const s = await state() const s = await state()
@ -407,7 +406,9 @@ export namespace Provider {
const sdk = await getSDK(provider.info) const sdk = await getSDK(provider.info)
try { try {
const language = provider.getModel ? await provider.getModel(sdk, modelID) : sdk.languageModel(modelID) const language = provider.getModel
? await provider.getModel(sdk, modelID)
: sdk.languageModel(modelID)
log.info("found", { providerID, modelID }) log.info("found", { providerID, modelID })
s.models.set(key, { s.models.set(key, {
info, info,
@ -430,29 +431,14 @@ export namespace Provider {
} }
} }
export async function getSmallModel(providerID: string) {
const cfg = await Config.get()
if (cfg.small_model) {
const parsed = parseModel(cfg.small_model)
return getModel(parsed.providerID, parsed.modelID)
}
const provider = await state().then((state) => state.providers[providerID])
if (!provider) return
const priority = ["3-5-haiku", "3.5-haiku", "gemini-2.5-flash"]
for (const item of priority) {
for (const model of Object.keys(provider.info.models)) {
if (model.includes(item)) return getModel(providerID, model)
}
}
}
const priority = ["gemini-2.5-pro-preview", "codex-mini", "claude-sonnet-4"] const priority = ["gemini-2.5-pro-preview", "codex-mini", "claude-sonnet-4"]
export function sort(models: ModelsDev.Model[]) { export function sort(models: ModelsDev.Model[]) {
return sortBy( return sortBy(
models, models,
[(model) => priority.findIndex((filter) => model.id.includes(filter)), "desc"], [
(model) => priority.findIndex((filter) => model.id.includes(filter)),
"desc",
],
[(model) => (model.id.includes("latest") ? 0 : 1), "asc"], [(model) => (model.id.includes("latest") ? 0 : 1), "asc"],
[(model) => model.id, "desc"], [(model) => model.id, "desc"],
) )
@ -463,7 +449,11 @@ export namespace Provider {
if (cfg.model) return parseModel(cfg.model) if (cfg.model) return parseModel(cfg.model)
const provider = await list() const provider = await list()
.then((val) => Object.values(val)) .then((val) => Object.values(val))
.then((x) => x.find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.info.id))) .then((x) =>
x.find(
(p) => !cfg.provider || Object.keys(cfg.provider).includes(p.info.id),
),
)
if (!provider) throw new Error("no providers found") if (!provider) throw new Error("no providers found")
const [model] = sort(Object.values(provider.info.models)) const [model] = sort(Object.values(provider.info.models))
if (!model) throw new Error("no models found") if (!model) throw new Error("no models found")
@ -481,6 +471,80 @@ export namespace Provider {
} }
} }
const TOOLS = [
BashTool,
EditTool,
WebFetchTool,
GlobTool,
GrepTool,
ListTool,
// LspDiagnosticTool,
// LspHoverTool,
PatchTool,
ReadTool,
// MultiEditTool,
WriteTool,
TodoWriteTool,
TodoReadTool,
// TaskTool,
]
const TOOL_MAPPING: Record<string, Tool.Info[]> = {
anthropic: TOOLS.filter((t) => t.id !== "patch"),
openai: TOOLS.map((t) => ({
...t,
parameters: optionalToNullable(t.parameters),
})),
azure: TOOLS.map((t) => ({
...t,
parameters: optionalToNullable(t.parameters),
})),
google: TOOLS,
}
export async function tools(providerID: string) {
/*
const cfg = await Config.get()
if (cfg.tool?.provider?.[providerID])
return cfg.tool.provider[providerID].map(
(id) => TOOLS.find((t) => t.id === id)!,
)
*/
return TOOL_MAPPING[providerID] ?? TOOLS
}
function optionalToNullable(schema: z.ZodTypeAny): z.ZodTypeAny {
if (schema instanceof z.ZodObject) {
const shape = schema.shape
const newShape: Record<string, z.ZodTypeAny> = {}
for (const [key, value] of Object.entries(shape)) {
const zodValue = value as z.ZodTypeAny
if (zodValue instanceof z.ZodOptional) {
newShape[key] = zodValue.unwrap().nullable()
} else {
newShape[key] = optionalToNullable(zodValue)
}
}
return z.object(newShape)
}
if (schema instanceof z.ZodArray) {
return z.array(optionalToNullable(schema.element))
}
if (schema instanceof z.ZodUnion) {
return z.union(
schema.options.map((option: z.ZodTypeAny) =>
optionalToNullable(option),
) as [z.ZodTypeAny, z.ZodTypeAny, ...z.ZodTypeAny[]],
)
}
return schema
}
export const ModelNotFoundError = NamedError.create( export const ModelNotFoundError = NamedError.create(
"ProviderModelNotFoundError", "ProviderModelNotFoundError",
z.object({ z.object({
@ -495,4 +559,12 @@ export namespace Provider {
providerID: z.string(), providerID: z.string(),
}), }),
) )
export const AuthError = NamedError.create(
"ProviderAuthError",
z.object({
providerID: z.string(),
message: z.string(),
}),
)
} }
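
Note: `getSmallModel` above scans a fixed preference list and returns the first model whose id contains one of the preferred substrings. Reduced to a standalone function with the same priority list as the diff:

```ts
// Pick the first model id that matches the priority list, or undefined if none do.
function pickSmallModel(
  models: string[],
  priority = ["3-5-haiku", "3.5-haiku", "gemini-2.5-flash"],
): string | undefined {
  for (const want of priority) {
    const hit = models.find((id) => id.includes(want))
    if (hit) return hit
  }
  return undefined
}

// pickSmallModel(["claude-3-5-haiku-latest", "claude-sonnet-4"]) -> "claude-3-5-haiku-latest"
```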


@ -1,84 +1,38 @@
import type { ModelMessage } from "ai" import type { LanguageModelV1Prompt } from "ai"
import { unique } from "remeda" import { unique } from "remeda"
export namespace ProviderTransform { export namespace ProviderTransform {
function normalizeToolCallIds(msgs: ModelMessage[]): ModelMessage[] { export function message(
return msgs.map((msg) => { msgs: LanguageModelV1Prompt,
if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) { providerID: string,
msg.content = msg.content.map((part) => { modelID: string,
if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) { ) {
return { if (providerID === "anthropic" || modelID.includes("anthropic")) {
...part,
toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
}
}
return part
})
}
return msg
})
}
function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
const system = msgs.filter((msg) => msg.role === "system").slice(0, 2) const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
const final = msgs.filter((msg) => msg.role !== "system").slice(-2) const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
const providerOptions = { for (const msg of unique([...system, ...final])) {
msg.providerMetadata = {
...msg.providerMetadata,
anthropic: { anthropic: {
cacheControl: { type: "ephemeral" }, cacheControl: { type: "ephemeral" },
}, },
openrouter: { }
cache_control: { type: "ephemeral" }, }
}, }
if (providerID === "amazon-bedrock" || modelID.includes("anthropic")) {
const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
for (const msg of unique([...system, ...final])) {
msg.providerMetadata = {
...msg.providerMetadata,
bedrock: { bedrock: {
cachePoint: { type: "ephemeral" }, cachePoint: { type: "ephemeral" },
}, },
openaiCompatible: {
cache_control: { type: "ephemeral" },
},
}
for (const msg of unique([...system, ...final])) {
const shouldUseContentOptions = providerID !== "anthropic" && Array.isArray(msg.content) && msg.content.length > 0
if (shouldUseContentOptions) {
const lastContent = msg.content[msg.content.length - 1]
if (lastContent && typeof lastContent === "object") {
lastContent.providerOptions = {
...lastContent.providerOptions,
...providerOptions,
}
continue
} }
} }
msg.providerOptions = {
...msg.providerOptions,
...providerOptions,
} }
}
return msgs return msgs
} }
export function message(msgs: ModelMessage[], providerID: string, modelID: string) {
if (modelID.includes("claude")) {
msgs = normalizeToolCallIds(msgs)
}
if (providerID === "anthropic" || modelID.includes("anthropic") || modelID.includes("claude")) {
msgs = applyCaching(msgs, providerID)
}
return msgs
}
export function temperature(_providerID: string, modelID: string) {
if (modelID.toLowerCase().includes("qwen")) return 0.55
return 0
}
export function topP(_providerID: string, modelID: string) {
if (modelID.toLowerCase().includes("qwen")) return 1
return undefined
}
} }
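
Note: `normalizeToolCallIds` above replaces any character outside `[a-zA-Z0-9_-]` in a tool call id with an underscore. As a standalone check:

```ts
const normalizeId = (id: string) => id.replace(/[^a-zA-Z0-9_-]/g, "_")

console.log(normalizeId("call:123/abc")) // "call_123_abc"
```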


@ -6,6 +6,7 @@ import { streamSSE } from "hono/streaming"
import { Session } from "../session" import { Session } from "../session"
import { resolver, validator as zValidator } from "hono-openapi/zod" import { resolver, validator as zValidator } from "hono-openapi/zod"
import { z } from "zod" import { z } from "zod"
import { Message } from "../session/message"
import { Provider } from "../provider/provider" import { Provider } from "../provider/provider"
import { App } from "../app/app" import { App } from "../app/app"
import { mapValues } from "remeda" import { mapValues } from "remeda"
@ -15,11 +16,6 @@ import { Ripgrep } from "../file/ripgrep"
import { Config } from "../config/config" import { Config } from "../config/config"
import { File } from "../file" import { File } from "../file"
import { LSP } from "../lsp" import { LSP } from "../lsp"
import { MessageV2 } from "../session/message-v2"
import { Mode } from "../session/mode"
import { callTui, TuiRoute } from "./tui"
import { Permission } from "../permission"
import { lazy } from "../util/lazy"
const ERRORS = { const ERRORS = {
400: { 400: {
@ -45,11 +41,7 @@ export namespace Server {
export type Routes = ReturnType<typeof app> export type Routes = ReturnType<typeof app>
export const Event = { function app() {
Connected: Bus.event("server.connected", z.object({})),
}
export const app = lazy(() => {
const app = new Hono() const app = new Hono()
const result = app const result = app
@ -59,25 +51,23 @@ export namespace Server {
status: 400, status: 400,
}) })
} }
return c.json(new NamedError.Unknown({ message: err.toString() }).toObject(), { return c.json(
new NamedError.Unknown({ message: err.toString() }).toObject(),
{
status: 400, status: 400,
}) },
)
}) })
.use(async (c, next) => { .use(async (c, next) => {
const skipLogging = c.req.path === "/log"
if (!skipLogging) {
log.info("request", { log.info("request", {
method: c.req.method, method: c.req.method,
path: c.req.path, path: c.req.path,
}) })
}
const start = Date.now() const start = Date.now()
await next() await next()
if (!skipLogging) {
log.info("response", { log.info("response", {
duration: Date.now() - start, duration: Date.now() - start,
}) })
}
}) })
.get( .get(
"/doc", "/doc",
@ -96,7 +86,6 @@ export namespace Server {
"/event", "/event",
describeRoute({ describeRoute({
description: "Get events", description: "Get events",
operationId: "event.subscribe",
responses: { responses: {
200: { 200: {
description: "Event stream", description: "Event stream",
@ -116,10 +105,7 @@ export namespace Server {
log.info("event connected") log.info("event connected")
return streamSSE(c, async (stream) => { return streamSSE(c, async (stream) => {
stream.writeSSE({ stream.writeSSE({
data: JSON.stringify({ data: JSON.stringify({}),
type: "server.connected",
properties: {},
}),
}) })
const unsub = Bus.subscribeAll(async (event) => { const unsub = Bus.subscribeAll(async (event) => {
await stream.writeSSE({ await stream.writeSSE({
@ -140,7 +126,6 @@ export namespace Server {
"/app", "/app",
describeRoute({ describeRoute({
description: "Get app info", description: "Get app info",
operationId: "app.get",
responses: { responses: {
200: { 200: {
description: "200", description: "200",
@ -160,7 +145,6 @@ export namespace Server {
"/app/init", "/app/init",
describeRoute({ describeRoute({
description: "Initialize the app", description: "Initialize the app",
operationId: "app.init",
responses: { responses: {
200: { 200: {
description: "Initialize the app", description: "Initialize the app",
@ -181,7 +165,6 @@ export namespace Server {
"/config", "/config",
describeRoute({ describeRoute({
description: "Get config info", description: "Get config info",
operationId: "config.get",
responses: { responses: {
200: { 200: {
description: "Get config info", description: "Get config info",
@ -201,7 +184,6 @@ export namespace Server {
"/session", "/session",
describeRoute({ describeRoute({
description: "List all sessions", description: "List all sessions",
operationId: "session.list",
responses: { responses: {
200: { 200: {
description: "List of sessions", description: "List of sessions",
@ -215,7 +197,6 @@ export namespace Server {
}), }),
async (c) => { async (c) => {
const sessions = await Array.fromAsync(Session.list()) const sessions = await Array.fromAsync(Session.list())
sessions.sort((a, b) => b.time.updated - a.time.updated)
return c.json(sessions) return c.json(sessions)
}, },
) )
@ -223,7 +204,6 @@ export namespace Server {
"/session", "/session",
describeRoute({ describeRoute({
description: "Create a new session", description: "Create a new session",
operationId: "session.create",
responses: { responses: {
...ERRORS, ...ERRORS,
200: { 200: {
@ -245,7 +225,6 @@ export namespace Server {
"/session/:id", "/session/:id",
describeRoute({ describeRoute({
description: "Delete a session and all its data", description: "Delete a session and all its data",
operationId: "session.delete",
responses: { responses: {
200: { 200: {
description: "Successfully deleted session", description: "Successfully deleted session",
@ -272,7 +251,6 @@ export namespace Server {
"/session/:id/init", "/session/:id/init",
describeRoute({ describeRoute({
description: "Analyze the app and create an AGENTS.md file", description: "Analyze the app and create an AGENTS.md file",
operationId: "session.init",
responses: { responses: {
200: { 200: {
description: "200", description: "200",
@ -293,7 +271,6 @@ export namespace Server {
zValidator( zValidator(
"json", "json",
z.object({ z.object({
messageID: z.string(),
providerID: z.string(), providerID: z.string(),
modelID: z.string(), modelID: z.string(),
}), }),
@ -309,7 +286,6 @@ export namespace Server {
"/session/:id/abort", "/session/:id/abort",
describeRoute({ describeRoute({
description: "Abort a session", description: "Abort a session",
operationId: "session.abort",
responses: { responses: {
200: { 200: {
description: "Aborted session", description: "Aborted session",
@ -335,7 +311,6 @@ export namespace Server {
"/session/:id/share", "/session/:id/share",
describeRoute({ describeRoute({
description: "Share a session", description: "Share a session",
operationId: "session.share",
responses: { responses: {
200: { 200: {
description: "Successfully shared session", description: "Successfully shared session",
@ -364,7 +339,6 @@ export namespace Server {
"/session/:id/share", "/session/:id/share",
describeRoute({ describeRoute({
description: "Unshare the session", description: "Unshare the session",
operationId: "session.unshare",
responses: { responses: {
200: { 200: {
description: "Successfully unshared session", description: "Successfully unshared session",
@ -393,7 +367,6 @@ export namespace Server {
"/session/:id/summarize", "/session/:id/summarize",
describeRoute({ describeRoute({
description: "Summarize the session", description: "Summarize the session",
operationId: "session.summarize",
responses: { responses: {
200: { 200: {
description: "Summarized session", description: "Summarized session",
@ -429,20 +402,12 @@ export namespace Server {
"/session/:id/message", "/session/:id/message",
describeRoute({ describeRoute({
description: "List messages for a session", description: "List messages for a session",
operationId: "session.messages",
responses: { responses: {
200: { 200: {
description: "List of messages", description: "List of messages",
content: { content: {
"application/json": { "application/json": {
schema: resolver( schema: resolver(Message.Info.array()),
z
.object({
info: MessageV2.Info,
parts: MessageV2.Part.array(),
})
.array(),
),
}, },
}, },
}, },
@ -459,51 +424,16 @@ export namespace Server {
return c.json(messages) return c.json(messages)
}, },
) )
.get(
"/session/:id/message/:messageID",
describeRoute({
description: "Get a message from a session",
operationId: "session.message",
responses: {
200: {
description: "Message",
content: {
"application/json": {
schema: resolver(
z.object({
info: MessageV2.Info,
parts: MessageV2.Part.array(),
}),
),
},
},
},
},
}),
zValidator(
"param",
z.object({
id: z.string().openapi({ description: "Session ID" }),
messageID: z.string().openapi({ description: "Message ID" }),
}),
),
async (c) => {
const params = c.req.valid("param")
const message = await Session.getMessage(params.id, params.messageID)
return c.json(message)
},
)
    .post(
      "/session/:id/message",
      describeRoute({
        description: "Create and send a new message to a session",
-       operationId: "session.chat",
        responses: {
          200: {
            description: "Created message",
            content: {
              "application/json": {
-               schema: resolver(MessageV2.Assistant),
+               schema: resolver(Message.Info),
              },
            },
          },
@@ -515,7 +445,14 @@ export namespace Server {
          id: z.string().openapi({ description: "Session ID" }),
        }),
      ),
-     zValidator("json", Session.ChatInput.omit({ sessionID: true })),
+     zValidator(
+       "json",
+       z.object({
+         providerID: z.string(),
+         modelID: z.string(),
+         parts: Message.MessagePart.array(),
+       }),
+     ),
      async (c) => {
        const sessionID = c.req.valid("param").id
        const body = c.req.valid("json")
@@ -523,100 +460,10 @@ export namespace Server {
        return c.json(msg)
      },
    )
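As a quick orientation for the v0.1.194 side of this hunk, here is a minimal sketch of a request the new inline `zValidator` schema would accept. The base URL, port, session ID, and the text-part shape are assumptions for illustration; only `providerID`, `modelID`, and `parts` come from the schema above.

```ts
// Hypothetical client call against the reshaped chat route; URL and IDs are placeholders.
const sessionID = "example-session-id"
const res = await fetch(`http://localhost:4096/session/${sessionID}/message`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    providerID: "anthropic", // must name a configured provider
    modelID: "claude-sonnet-4-20250514", // must name a model of that provider
    parts: [{ type: "text", text: "Summarize the open TODOs in this repo." }],
  }),
})
const created = await res.json() // documented as Message.Info in the route above
```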
.post(
"/session/:id/revert",
describeRoute({
description: "Revert a message",
operationId: "session.revert",
responses: {
200: {
description: "Updated session",
content: {
"application/json": {
schema: resolver(Session.Info),
},
},
},
},
}),
zValidator(
"param",
z.object({
id: z.string(),
}),
),
zValidator("json", Session.RevertInput.omit({ sessionID: true })),
async (c) => {
const id = c.req.valid("param").id
log.info("revert", c.req.valid("json"))
const session = await Session.revert({ sessionID: id, ...c.req.valid("json") })
return c.json(session)
},
)
.post(
"/session/:id/unrevert",
describeRoute({
description: "Restore all reverted messages",
operationId: "session.unrevert",
responses: {
200: {
description: "Updated session",
content: {
"application/json": {
schema: resolver(Session.Info),
},
},
},
},
}),
zValidator(
"param",
z.object({
id: z.string(),
}),
),
async (c) => {
const id = c.req.valid("param").id
const session = await Session.unrevert({ sessionID: id })
return c.json(session)
},
)
.post(
"/session/:id/permissions/:permissionID",
describeRoute({
description: "Respond to a permission request",
responses: {
200: {
description: "Permission processed successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
zValidator(
"param",
z.object({
id: z.string(),
permissionID: z.string(),
}),
),
zValidator("json", z.object({ response: Permission.Response })),
async (c) => {
const params = c.req.valid("param")
const id = params.id
const permissionID = params.permissionID
Permission.respond({ sessionID: id, permissionID, response: c.req.valid("json").response })
return c.json(true)
},
)
    .get(
      "/config/providers",
      describeRoute({
        description: "List all providers",
-       operationId: "config.providers",
        responses: {
          200: {
            description: "List of providers",
@@ -634,10 +481,15 @@ export namespace Server {
        },
      }),
      async (c) => {
-       const providers = await Provider.list().then((x) => mapValues(x, (item) => item.info))
+       const providers = await Provider.list().then((x) =>
+         mapValues(x, (item) => item.info),
+       )
        return c.json({
          providers: Object.values(providers),
-         default: mapValues(providers, (item) => Provider.sort(Object.values(item.models))[0].id),
+         default: mapValues(
+           providers,
+           (item) => Provider.sort(Object.values(item.models))[0].id,
+         ),
        })
      },
    )
@@ -645,7 +497,6 @@ export namespace Server {
      "/find",
      describeRoute({
        description: "Find text in files",
-       operationId: "find.text",
        responses: {
          200: {
            description: "Matches",
@@ -678,7 +529,6 @@ export namespace Server {
      "/find/file",
      describeRoute({
        description: "Find files",
-       operationId: "find.files",
        responses: {
          200: {
            description: "File paths",
@@ -711,13 +561,12 @@ export namespace Server {
      "/find/symbol",
      describeRoute({
        description: "Find workspace symbols",
-       operationId: "find.symbols",
        responses: {
          200: {
            description: "Symbols",
            content: {
              "application/json": {
-               schema: resolver(LSP.Symbol.array()),
+               schema: resolver(z.unknown().array()),
              },
            },
          },
@@ -739,7 +588,6 @@ export namespace Server {
      "/file",
      describeRoute({
        description: "Read a file",
-       operationId: "file.read",
        responses: {
          200: {
            description: "File content",
@@ -776,13 +624,21 @@ export namespace Server {
      "/file/status",
      describeRoute({
        description: "Get file status",
-       operationId: "file.status",
        responses: {
          200: {
            description: "File status",
            content: {
              "application/json": {
-               schema: resolver(File.Info.array()),
+               schema: resolver(
+                 z
+                   .object({
+                     file: z.string(),
+                     added: z.number().int(),
+                     removed: z.number().int(),
+                     status: z.enum(["added", "deleted", "modified"]),
+                   })
+                   .array(),
+               ),
              },
            },
          },
@@ -793,237 +649,9 @@ export namespace Server {
        return c.json(content)
      },
    )
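The inline `/file/status` schema on the v0.1.194 side pins down the per-file git status shape. A sketch of what one response entry would look like under it; the path and counts are invented for illustration.

```ts
// Illustrative /file/status payload matching the inline z.object schema above.
type FileStatus = {
  file: string
  added: number
  removed: number
  status: "added" | "deleted" | "modified"
}

const example: FileStatus[] = [
  { file: "src/server/server.ts", added: 12, removed: 3, status: "modified" },
]
```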
.post(
"/log",
describeRoute({
description: "Write a log entry to the server logs",
operationId: "app.log",
responses: {
200: {
description: "Log entry written successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
zValidator(
"json",
z.object({
service: z.string().openapi({ description: "Service name for the log entry" }),
level: z.enum(["debug", "info", "error", "warn"]).openapi({ description: "Log level" }),
message: z.string().openapi({ description: "Log message" }),
extra: z
.record(z.string(), z.any())
.optional()
.openapi({ description: "Additional metadata for the log entry" }),
}),
),
async (c) => {
const { service, level, message, extra } = c.req.valid("json")
const logger = Log.create({ service })
switch (level) {
case "debug":
logger.debug(message, extra)
break
case "info":
logger.info(message, extra)
break
case "error":
logger.error(message, extra)
break
case "warn":
logger.warn(message, extra)
break
}
return c.json(true)
},
)
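The removed `/log` route accepted a structured log entry; a minimal sketch of a body that would pass its validator, with the service name, metadata, and server address as placeholders:

```ts
// Hypothetical log forwarding from a client; the base URL and port are assumptions.
await fetch("http://localhost:4096/log", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    service: "tui", // placeholder service name
    level: "info", // one of "debug" | "info" | "error" | "warn"
    message: "keybinds reloaded",
    extra: { file: "keybinds.json" }, // optional metadata record
  }),
})
```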
.get(
"/mode",
describeRoute({
description: "List all modes",
operationId: "app.modes",
responses: {
200: {
description: "List of modes",
content: {
"application/json": {
schema: resolver(Mode.Info.array()),
},
},
},
},
}),
async (c) => {
const modes = await Mode.list()
return c.json(modes)
},
)
.post(
"/tui/append-prompt",
describeRoute({
description: "Append prompt to the TUI",
operationId: "tui.appendPrompt",
responses: {
200: {
description: "Prompt processed successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
zValidator(
"json",
z.object({
text: z.string(),
}),
),
async (c) => c.json(await callTui(c)),
)
.post(
"/tui/open-help",
describeRoute({
description: "Open the help dialog",
operationId: "tui.openHelp",
responses: {
200: {
description: "Help dialog opened successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
async (c) => c.json(await callTui(c)),
)
.post(
"/tui/open-sessions",
describeRoute({
description: "Open the session dialog",
operationId: "tui.openSessions",
responses: {
200: {
description: "Session dialog opened successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
async (c) => c.json(await callTui(c)),
)
.post(
"/tui/open-themes",
describeRoute({
description: "Open the theme dialog",
operationId: "tui.openThemes",
responses: {
200: {
description: "Theme dialog opened successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
async (c) => c.json(await callTui(c)),
)
.post(
"/tui/open-models",
describeRoute({
description: "Open the model dialog",
operationId: "tui.openModels",
responses: {
200: {
description: "Model dialog opened successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
async (c) => c.json(await callTui(c)),
)
.post(
"/tui/submit-prompt",
describeRoute({
description: "Submit the prompt",
operationId: "tui.submitPrompt",
responses: {
200: {
description: "Prompt submitted successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
async (c) => c.json(await callTui(c)),
)
.post(
"/tui/clear-prompt",
describeRoute({
description: "Clear the prompt",
operationId: "tui.clearPrompt",
responses: {
200: {
description: "Prompt cleared successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
async (c) => c.json(await callTui(c)),
)
.post(
"/tui/execute-command",
describeRoute({
description: "Execute a TUI command (e.g. switch_mode)",
operationId: "tui.executeCommand",
responses: {
200: {
description: "Command executed successfully",
content: {
"application/json": {
schema: resolver(z.boolean()),
},
},
},
},
}),
zValidator(
"json",
z.object({
command: z.string(),
}),
),
async (c) => c.json(await callTui(c)),
)
.route("/tui/control", TuiRoute)
        return result
-     })
+     }
      export async function openapi() {
        const a = app()

View file

@@ -1,30 +0,0 @@
import { Hono, type Context } from "hono"
import { AsyncQueue } from "../util/queue"
interface Request {
path: string
body: any
}
const request = new AsyncQueue<Request>()
const response = new AsyncQueue<any>()
export async function callTui(ctx: Context) {
const body = await ctx.req.json()
request.push({
path: ctx.req.path,
body,
})
return response.next()
}
export const TuiRoute = new Hono()
.get("/next", async (c) => {
const req = await request.next()
return c.json(req)
})
.post("/response", async (c) => {
const body = await c.req.json()
response.push(body)
return c.json(true)
})
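For context on how this relay was meant to be driven: the server file above mounts these routes at `/tui/control`, so an attached TUI process would long-poll `/next` and answer on `/response`. A sketch of that loop follows; the base URL, port, and the prompt handler are assumptions.

```ts
// Sketch of the consumer side of the AsyncQueue relay defined above.
const base = "http://localhost:4096" // assumed server address

// Placeholder for whatever the TUI actually does with appended prompt text.
function handleAppendPrompt(text: string): boolean {
  console.log("append to prompt:", text)
  return true
}

async function pumpTuiRequests() {
  while (true) {
    // Waits until callTui() has queued a request such as /tui/append-prompt.
    const req = await fetch(`${base}/tui/control/next`).then((r) => r.json())
    let result: unknown = true
    if (req.path === "/tui/append-prompt") result = handleAppendPrompt(req.body.text)
    // Pushing the result resolves response.next() inside callTui, completing the HTTP call.
    await fetch(`${base}/tui/control/response`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(result),
    })
  }
}

pumpTuiRequests()
```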

File diff suppressed because it is too large

View file

@@ -1,515 +0,0 @@
import z from "zod"
import { Bus } from "../bus"
import { NamedError } from "../util/error"
import { Message } from "./message"
import { convertToModelMessages, type ModelMessage, type UIMessage } from "ai"
import { Identifier } from "../id/id"
import { LSP } from "../lsp"
export namespace MessageV2 {
export const OutputLengthError = NamedError.create("MessageOutputLengthError", z.object({}))
export const AbortedError = NamedError.create("MessageAbortedError", z.object({}))
export const AuthError = NamedError.create(
"ProviderAuthError",
z.object({
providerID: z.string(),
message: z.string(),
}),
)
export const ToolStatePending = z
.object({
status: z.literal("pending"),
})
.openapi({
ref: "ToolStatePending",
})
export type ToolStatePending = z.infer<typeof ToolStatePending>
export const ToolStateRunning = z
.object({
status: z.literal("running"),
input: z.any(),
title: z.string().optional(),
metadata: z.record(z.any()).optional(),
time: z.object({
start: z.number(),
}),
})
.openapi({
ref: "ToolStateRunning",
})
export type ToolStateRunning = z.infer<typeof ToolStateRunning>
export const ToolStateCompleted = z
.object({
status: z.literal("completed"),
input: z.record(z.any()),
output: z.string(),
title: z.string(),
metadata: z.record(z.any()),
time: z.object({
start: z.number(),
end: z.number(),
}),
})
.openapi({
ref: "ToolStateCompleted",
})
export type ToolStateCompleted = z.infer<typeof ToolStateCompleted>
export const ToolStateError = z
.object({
status: z.literal("error"),
input: z.record(z.any()),
error: z.string(),
time: z.object({
start: z.number(),
end: z.number(),
}),
})
.openapi({
ref: "ToolStateError",
})
export type ToolStateError = z.infer<typeof ToolStateError>
export const ToolState = z
.discriminatedUnion("status", [ToolStatePending, ToolStateRunning, ToolStateCompleted, ToolStateError])
.openapi({
ref: "ToolState",
})
const PartBase = z.object({
id: z.string(),
sessionID: z.string(),
messageID: z.string(),
})
export const SnapshotPart = PartBase.extend({
type: z.literal("snapshot"),
snapshot: z.string(),
}).openapi({
ref: "SnapshotPart",
})
export type SnapshotPart = z.infer<typeof SnapshotPart>
export const PatchPart = PartBase.extend({
type: z.literal("patch"),
hash: z.string(),
files: z.string().array(),
}).openapi({
ref: "PatchPart",
})
export type PatchPart = z.infer<typeof PatchPart>
export const TextPart = PartBase.extend({
type: z.literal("text"),
text: z.string(),
synthetic: z.boolean().optional(),
time: z
.object({
start: z.number(),
end: z.number().optional(),
})
.optional(),
}).openapi({
ref: "TextPart",
})
export type TextPart = z.infer<typeof TextPart>
export const ToolPart = PartBase.extend({
type: z.literal("tool"),
callID: z.string(),
tool: z.string(),
state: ToolState,
}).openapi({
ref: "ToolPart",
})
export type ToolPart = z.infer<typeof ToolPart>
const FilePartSourceBase = z.object({
text: z
.object({
value: z.string(),
start: z.number().int(),
end: z.number().int(),
})
.openapi({
ref: "FilePartSourceText",
}),
})
export const FileSource = FilePartSourceBase.extend({
type: z.literal("file"),
path: z.string(),
}).openapi({
ref: "FileSource",
})
export const SymbolSource = FilePartSourceBase.extend({
type: z.literal("symbol"),
path: z.string(),
range: LSP.Range,
name: z.string(),
kind: z.number().int(),
}).openapi({
ref: "SymbolSource",
})
export const FilePartSource = z.discriminatedUnion("type", [FileSource, SymbolSource]).openapi({
ref: "FilePartSource",
})
export const FilePart = PartBase.extend({
type: z.literal("file"),
mime: z.string(),
filename: z.string().optional(),
url: z.string(),
source: FilePartSource.optional(),
}).openapi({
ref: "FilePart",
})
export type FilePart = z.infer<typeof FilePart>
export const StepStartPart = PartBase.extend({
type: z.literal("step-start"),
}).openapi({
ref: "StepStartPart",
})
export type StepStartPart = z.infer<typeof StepStartPart>
export const StepFinishPart = PartBase.extend({
type: z.literal("step-finish"),
cost: z.number(),
tokens: z.object({
input: z.number(),
output: z.number(),
reasoning: z.number(),
cache: z.object({
read: z.number(),
write: z.number(),
}),
}),
}).openapi({
ref: "StepFinishPart",
})
export type StepFinishPart = z.infer<typeof StepFinishPart>
const Base = z.object({
id: z.string(),
sessionID: z.string(),
})
export const User = Base.extend({
role: z.literal("user"),
time: z.object({
created: z.number(),
}),
}).openapi({
ref: "UserMessage",
})
export type User = z.infer<typeof User>
export const Part = z
.discriminatedUnion("type", [TextPart, FilePart, ToolPart, StepStartPart, StepFinishPart, SnapshotPart, PatchPart])
.openapi({
ref: "Part",
})
export type Part = z.infer<typeof Part>
export const Assistant = Base.extend({
role: z.literal("assistant"),
time: z.object({
created: z.number(),
completed: z.number().optional(),
}),
error: z
.discriminatedUnion("name", [
AuthError.Schema,
NamedError.Unknown.Schema,
OutputLengthError.Schema,
AbortedError.Schema,
])
.optional(),
system: z.string().array(),
modelID: z.string(),
providerID: z.string(),
mode: z.string(),
path: z.object({
cwd: z.string(),
root: z.string(),
}),
summary: z.boolean().optional(),
cost: z.number(),
tokens: z.object({
input: z.number(),
output: z.number(),
reasoning: z.number(),
cache: z.object({
read: z.number(),
write: z.number(),
}),
}),
}).openapi({
ref: "AssistantMessage",
})
export type Assistant = z.infer<typeof Assistant>
export const Info = z.discriminatedUnion("role", [User, Assistant]).openapi({
ref: "Message",
})
export type Info = z.infer<typeof Info>
export const Event = {
Updated: Bus.event(
"message.updated",
z.object({
info: Info,
}),
),
Removed: Bus.event(
"message.removed",
z.object({
sessionID: z.string(),
messageID: z.string(),
}),
),
PartUpdated: Bus.event(
"message.part.updated",
z.object({
part: Part,
}),
),
PartRemoved: Bus.event(
"message.part.removed",
z.object({
sessionID: z.string(),
messageID: z.string(),
partID: z.string(),
}),
),
}
export function fromV1(v1: Message.Info) {
if (v1.role === "assistant") {
const info: Assistant = {
id: v1.id,
sessionID: v1.metadata.sessionID,
role: "assistant",
time: {
created: v1.metadata.time.created,
completed: v1.metadata.time.completed,
},
cost: v1.metadata.assistant!.cost,
path: v1.metadata.assistant!.path,
summary: v1.metadata.assistant!.summary,
tokens: v1.metadata.assistant!.tokens,
modelID: v1.metadata.assistant!.modelID,
providerID: v1.metadata.assistant!.providerID,
system: v1.metadata.assistant!.system,
mode: "build",
error: v1.metadata.error,
}
const parts = v1.parts.flatMap((part): Part[] => {
const base = {
id: Identifier.ascending("part"),
messageID: v1.id,
sessionID: v1.metadata.sessionID,
}
if (part.type === "text") {
return [
{
...base,
type: "text",
text: part.text,
},
]
}
if (part.type === "step-start") {
return [
{
...base,
type: "step-start",
},
]
}
if (part.type === "tool-invocation") {
return [
{
...base,
type: "tool",
callID: part.toolInvocation.toolCallId,
tool: part.toolInvocation.toolName,
state: (() => {
if (part.toolInvocation.state === "partial-call") {
return {
status: "pending",
}
}
const { title, time, ...metadata } = v1.metadata.tool[part.toolInvocation.toolCallId] ?? {}
if (part.toolInvocation.state === "call") {
return {
status: "running",
input: part.toolInvocation.args,
time: {
start: time?.start,
},
}
}
if (part.toolInvocation.state === "result") {
return {
status: "completed",
input: part.toolInvocation.args,
output: part.toolInvocation.result,
title,
time,
metadata,
}
}
throw new Error("unknown tool invocation state")
})(),
},
]
}
return []
})
return {
info,
parts,
}
}
if (v1.role === "user") {
const info: User = {
id: v1.id,
sessionID: v1.metadata.sessionID,
role: "user",
time: {
created: v1.metadata.time.created,
},
}
const parts = v1.parts.flatMap((part): Part[] => {
const base = {
id: Identifier.ascending("part"),
messageID: v1.id,
sessionID: v1.metadata.sessionID,
}
if (part.type === "text") {
return [
{
...base,
type: "text",
text: part.text,
},
]
}
if (part.type === "file") {
return [
{
...base,
type: "file",
mime: part.mediaType,
filename: part.filename,
url: part.url,
},
]
}
return []
})
return { info, parts }
}
throw new Error("unknown message type")
}
export function toModelMessage(
input: {
info: Info
parts: Part[]
}[],
): ModelMessage[] {
const result: UIMessage[] = []
for (const msg of input) {
if (msg.parts.length === 0) continue
if (msg.info.role === "user") {
result.push({
id: msg.info.id,
role: "user",
parts: msg.parts.flatMap((part): UIMessage["parts"] => {
if (part.type === "text")
return [
{
type: "text",
text: part.text,
},
]
// text/plain files are converted into text parts, ignore them
if (part.type === "file" && part.mime !== "text/plain")
return [
{
type: "file",
url: part.url,
mediaType: part.mime,
filename: part.filename,
},
]
return []
}),
})
}
if (msg.info.role === "assistant") {
result.push({
id: msg.info.id,
role: "assistant",
parts: msg.parts.flatMap((part): UIMessage["parts"] => {
if (part.type === "text")
return [
{
type: "text",
text: part.text,
},
]
if (part.type === "step-start")
return [
{
type: "step-start",
},
]
if (part.type === "tool") {
if (part.state.status === "completed")
return [
{
type: ("tool-" + part.tool) as `tool-${string}`,
state: "output-available",
toolCallId: part.callID,
input: part.state.input,
output: part.state.output,
},
]
if (part.state.status === "error")
return [
{
type: ("tool-" + part.tool) as `tool-${string}`,
state: "output-error",
toolCallId: part.callID,
input: part.state.input,
errorText: part.state.error,
},
]
}
return []
}),
})
}
}
return convertToModelMessages(result)
}
}
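Since this removed module is the bridge between stored messages and the `ai` SDK, a short sketch of how `toModelMessage` would be fed. The IDs and the import path are placeholders; the object shapes follow `MessageV2.User` and `MessageV2.TextPart` as defined above.

```ts
import { MessageV2 } from "./message-v2" // assumed path to the module above

// One stored user message with a single text part, shaped per the schemas above.
const modelMessages = MessageV2.toModelMessage([
  {
    info: {
      id: "msg_1",
      sessionID: "ses_1",
      role: "user",
      time: { created: Date.now() },
    },
    parts: [
      {
        id: "prt_1",
        sessionID: "ses_1",
        messageID: "msg_1",
        type: "text",
        text: "List the routes defined in server.ts",
      },
    ],
  },
])
// modelMessages is a ModelMessage[] ready to hand to the ai SDK.
```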

View file

@@ -1,14 +1,12 @@
  import z from "zod"
+ import { Bus } from "../bus"
+ import { Provider } from "../provider/provider"
  import { NamedError } from "../util/error"
  export namespace Message {
-   export const OutputLengthError = NamedError.create("MessageOutputLengthError", z.object({}))
-   export const AuthError = NamedError.create(
-     "ProviderAuthError",
-     z.object({
-       providerID: z.string(),
-       message: z.string(),
-     }),
+   export const OutputLengthError = NamedError.create(
+     "MessageOutputLengthError",
+     z.object({}),
    )
    export const ToolCall = z
@@ -51,7 +49,9 @@ export namespace Message {
    })
    export type ToolResult = z.infer<typeof ToolResult>
-   export const ToolInvocation = z.discriminatedUnion("state", [ToolCall, ToolPartialCall, ToolResult]).openapi({
+   export const ToolInvocation = z
+     .discriminatedUnion("state", [ToolCall, ToolPartialCall, ToolResult])
+     .openapi({
      ref: "ToolInvocation",
    })
    export type ToolInvocation = z.infer<typeof ToolInvocation>
@@ -122,7 +122,14 @@ export namespace Message {
    export type StepStartPart = z.infer<typeof StepStartPart>
    export const MessagePart = z
-     .discriminatedUnion("type", [TextPart, ReasoningPart, ToolInvocationPart, SourceUrlPart, FilePart, StepStartPart])
+     .discriminatedUnion("type", [
+       TextPart,
+       ReasoningPart,
+       ToolInvocationPart,
+       SourceUrlPart,
+       FilePart,
+       StepStartPart,
+     ])
      .openapi({
        ref: "MessagePart",
      })
@@ -140,7 +147,11 @@ export namespace Message {
        completed: z.number().optional(),
      }),
      error: z
-       .discriminatedUnion("name", [AuthError.Schema, NamedError.Unknown.Schema, OutputLengthError.Schema])
+       .discriminatedUnion("name", [
+         Provider.AuthError.Schema,
+         NamedError.Unknown.Schema,
+         OutputLengthError.Schema,
+       ])
        .optional(),
      sessionID: z.string(),
      tool: z.record(
@@ -186,4 +197,28 @@ export namespace Message {
      ref: "Message",
    })
    export type Info = z.infer<typeof Info>
+   export const Event = {
+     Updated: Bus.event(
+       "message.updated",
+       z.object({
+         info: Info,
+       }),
+     ),
+     Removed: Bus.event(
+       "message.removed",
+       z.object({
+         sessionID: z.string(),
+         messageID: z.string(),
+       }),
+     ),
+     PartUpdated: Bus.event(
+       "message.part.updated",
+       z.object({
+         part: MessagePart,
+         sessionID: z.string(),
+         messageID: z.string(),
+       }),
+     ),
+   }
  }

View file

@@ -1,74 +0,0 @@
import { App } from "../app/app"
import { Config } from "../config/config"
import z from "zod"
import { Provider } from "../provider/provider"
export namespace Mode {
export const Info = z
.object({
name: z.string(),
temperature: z.number().optional(),
topP: z.number().optional(),
model: z
.object({
modelID: z.string(),
providerID: z.string(),
})
.optional(),
prompt: z.string().optional(),
tools: z.record(z.boolean()),
})
.openapi({
ref: "Mode",
})
export type Info = z.infer<typeof Info>
const state = App.state("mode", async () => {
const cfg = await Config.get()
const model = cfg.model ? Provider.parseModel(cfg.model) : undefined
const result: Record<string, Info> = {
build: {
model,
name: "build",
tools: {},
},
plan: {
name: "plan",
model,
tools: {
write: false,
edit: false,
patch: false,
},
},
}
for (const [key, value] of Object.entries(cfg.mode ?? {})) {
if (value.disable) continue
let item = result[key]
if (!item)
item = result[key] = {
name: key,
tools: {},
}
item.name = key
if (value.model) item.model = Provider.parseModel(value.model)
if (value.prompt) item.prompt = value.prompt
if (value.temperature != undefined) item.temperature = value.temperature
if (value.top_p != undefined) item.topP = value.top_p
if (value.tools)
item.tools = {
...value.tools,
...item.tools,
}
}
return result
})
export async function get(mode: string) {
return state().then((x) => x[mode])
}
export async function list() {
return state().then((x) => Object.values(x))
}
}
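To make the merge logic in `state()` concrete, here is a sketch of a custom `mode` entry in the opencode config and the `Mode.Info` it would roughly produce. The `provider/model` string format assumed for `Provider.parseModel` and the exact config layout are assumptions.

```ts
// Hypothetical config fragment (cfg.mode) and the resulting Mode.Info, per state() above.
const cfgMode = {
  review: {
    model: "anthropic/claude-sonnet-4-20250514", // parsed by Provider.parseModel
    prompt: "Review the diff and call out risky changes.",
    temperature: 0.2,
    top_p: 0.9, // becomes topP on the Info object
    tools: { write: false, edit: false },
  },
}

// Roughly what Mode.get("review") would return after the merge loop:
const reviewMode = {
  name: "review",
  model: { providerID: "anthropic", modelID: "claude-sonnet-4-20250514" },
  prompt: "Review the diff and call out risky changes.",
  temperature: 0.2,
  topP: 0.9,
  tools: { write: false, edit: false },
}
```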

View file

@@ -1,159 +0,0 @@
You are opencode, an agent - please keep going until the user's query is completely resolved before ending your turn and yielding back to the user.
Your thinking should be thorough and so it's fine if it's very long. However, avoid unnecessary repetition and verbosity. You should be concise, but thorough.
You MUST iterate and keep going until the problem is solved.
You have everything you need to resolve this problem. I want you to fully solve this autonomously before coming back to me.
Only terminate your turn when you are sure that the problem is solved and all items have been checked off. Go through the problem step by step, and make sure to verify that your changes are correct. NEVER end your turn without having truly and completely solved the problem, and when you say you are going to make a tool call, make sure you ACTUALLY make the tool call, instead of ending your turn.
THE PROBLEM CAN NOT BE SOLVED WITHOUT EXTENSIVE INTERNET RESEARCH.
You must use the webfetch tool to recursively gather all information from URLs provided to you by the user, as well as any links you find in the content of those pages.
Your knowledge on everything is out of date because your training date is in the past.
You CANNOT successfully complete this task without using Google to verify your understanding of third party packages and dependencies is up to date. You must use the webfetch tool to search Google for how to properly use libraries, packages, frameworks, dependencies, etc. every single time you install or implement one. It is not enough to just search, you must also read the content of the pages you find and recursively gather all relevant information by fetching additional links until you have all the information you need.
Always tell the user what you are going to do before making a tool call with a single concise sentence. This will help them understand what you are doing and why.
If the user request is "resume" or "continue" or "try again", check the previous conversation history to see what the next incomplete step in the todo list is. Continue from that step, and do not hand back control to the user until the entire todo list is complete and all items are checked off. Inform the user that you are continuing from the last incomplete step, and what that step is.
Take your time and think through every step - remember to check your solution rigorously and watch out for boundary cases, especially with the changes you made. Use the sequential thinking tool if available. Your solution must be perfect. If not, continue working on it. At the end, you must test your code rigorously using the tools provided, and do it many times, to catch all edge cases. If it is not robust, iterate more and make it perfect. Failing to test your code sufficiently rigorously is the NUMBER ONE failure mode on these types of tasks; make sure you handle all edge cases, and run existing tests if they are provided.
You MUST plan extensively before each function call, and reflect extensively on the outcomes of the previous function calls. DO NOT do this entire process by making function calls only, as this can impair your ability to solve the problem and think insightfully.
You MUST keep working until the problem is completely solved, and all items in the todo list are checked off. Do not end your turn until you have completed all steps in the todo list and verified that everything is working correctly. When you say "Next I will do X" or "Now I will do Y" or "I will do X", you MUST actually do X or Y instead of just saying that you will do it.
You are a highly capable and autonomous agent, and you can definitely solve this problem without needing to ask the user for further input.
# Workflow
1. Fetch any URLs provided by the user using the `webfetch` tool.
2. Understand the problem deeply. Carefully read the issue and think critically about what is required. Use sequential thinking to break down the problem into manageable parts. Consider the following:
- What is the expected behavior?
- What are the edge cases?
- What are the potential pitfalls?
- How does this fit into the larger context of the codebase?
- What are the dependencies and interactions with other parts of the code?
3. Investigate the codebase. Explore relevant files, search for key functions, and gather context.
4. Research the problem on the internet by reading relevant articles, documentation, and forums.
5. Develop a clear, step-by-step plan. Break down the fix into manageable, incremental steps. Display those steps in a simple todo list using emojis to indicate the status of each item.
6. Implement the fix incrementally. Make small, testable code changes.
7. Debug as needed. Use debugging techniques to isolate and resolve issues.
8. Test frequently. Run tests after each change to verify correctness.
9. Iterate until the root cause is fixed and all tests pass.
10. Reflect and validate comprehensively. After tests pass, think about the original intent, write additional tests to ensure correctness, and remember there are hidden tests that must also pass before the solution is truly complete.
Refer to the detailed sections below for more information on each step.
## 1. Fetch Provided URLs
- If the user provides a URL, use the `webfetch` tool to retrieve the content of the provided URL.
- After fetching, review the content returned by the webfetch tool.
- If you find any additional URLs or links that are relevant, use the `webfetch` tool again to retrieve those links.
- Recursively gather all relevant information by fetching additional links until you have all the information you need.
## 2. Deeply Understand the Problem
Carefully read the issue and think hard about a plan to solve it before coding.
## 3. Codebase Investigation
- Explore relevant files and directories.
- Search for key functions, classes, or variables related to the issue.
- Read and understand relevant code snippets.
- Identify the root cause of the problem.
- Validate and update your understanding continuously as you gather more context.
## 4. Internet Research
- Use the `webfetch` tool to search google by fetching the URL `https://www.google.com/search?q=your+search+query`.
- After fetching, review the content returned by the fetch tool.
- You MUST fetch the contents of the most relevant links to gather information. Do not rely on the summary that you find in the search results.
- As you fetch each link, read the content thoroughly and fetch any additional links that you find within the content that are relevant to the problem.
- Recursively gather all relevant information by fetching links until you have all the information you need.
## 5. Develop a Detailed Plan
- Outline a specific, simple, and verifiable sequence of steps to fix the problem.
- Create a todo list in markdown format to track your progress.
- Each time you complete a step, check it off using `[x]` syntax.
- Each time you check off a step, display the updated todo list to the user.
- Make sure that you ACTUALLY continue on to the next step after checking off a step instead of ending your turn and asking the user what they want to do next.
## 6. Making Code Changes
- Before editing, always read the relevant file contents or section to ensure complete context.
- Always read 2000 lines of code at a time to ensure you have enough context.
- If a patch is not applied correctly, attempt to reapply it.
- Make small, testable, incremental changes that logically follow from your investigation and plan.
- Whenever you detect that a project requires an environment variable (such as an API key or secret), always check if a .env file exists in the project root. If it does not exist, automatically create a .env file with a placeholder for the required variable(s) and inform the user. Do this proactively, without waiting for the user to request it.
## 7. Debugging
- Make code changes only if you have high confidence they can solve the problem
- When debugging, try to determine the root cause rather than addressing symptoms
- Debug for as long as needed to identify the root cause and identify a fix
- Use print statements, logs, or temporary code to inspect program state, including descriptive statements or error messages to understand what's happening
- To test hypotheses, you can also add test statements or functions
- Revisit your assumptions if unexpected behavior occurs.
# How to create a Todo List
Use the following format to create a todo list:
```markdown
- [ ] Step 1: Description of the first step
- [ ] Step 2: Description of the second step
- [ ] Step 3: Description of the third step
```
Do not ever use HTML tags or any other formatting for the todo list, as it will not be rendered correctly. Always use the markdown format shown above. Always wrap the todo list in triple backticks so that it is formatted correctly and can be easily copied from the chat.
Always show the completed todo list to the user as the last item in your message, so that they can see that you have addressed all of the steps.
# Communication Guidelines
Always communicate clearly and concisely in a casual, friendly yet professional tone.
<examples>
"Let me fetch the URL you provided to gather more information."
"Ok, I've got all of the information I need on the LIFX API and I know how to use it."
"Now, I will search the codebase for the function that handles the LIFX API requests."
"I need to update several files here - stand by"
"OK! Now let's run the tests to make sure everything is working correctly."
"Whelp - I see we have some problems. Let's fix those up."
</examples>
- Respond with clear, direct answers. Use bullet points and code blocks for structure.
- Avoid unnecessary explanations, repetition, and filler.
- Always write code directly to the correct files.
- Do not display code to the user unless they specifically ask for it.
- Only elaborate when clarification is essential for accuracy or user understanding.
# Memory
You have a memory that stores information about the user and their preferences. This memory is used to provide a more personalized experience. You can access and update this memory as needed. The memory is stored in a file called `.github/instructions/memory.instruction.md`. If the file is empty, you'll need to create it.
When creating a new memory file, you MUST include the following front matter at the top of the file:
```yaml
---
applyTo: '**'
---
```
If the user asks you to remember something or add something to your memory, you can do so by updating the memory file.
# Reading Files and Folders
**Always check if you have already read a file, folder, or workspace structure before reading it again.**
- If you have already read the content and it has not changed, do NOT re-read it.
- Only re-read files or folders if:
- You suspect the content has changed since your last read.
- You have made edits to the file or folder.
- You encounter an error that suggests the context may be stale or incomplete.
- Use your internal memory and previous context to avoid redundant reads.
- This will save time, reduce unnecessary operations, and make your workflow more efficient.
# Writing Prompts
If you are asked to write a prompt, you should always generate the prompt in markdown format.
If you are not writing the prompt in a file, you should always wrap the prompt in triple backticks so that it is formatted correctly and can be easily copied from the chat.
Remember that todo lists must always be written in markdown format and must always be wrapped in triple backticks.
# Git
If the user tells you to stage and commit, you may do so.
You are NEVER allowed to stage and commit files automatically.

View file

@@ -1,155 +0,0 @@
You are opencode, an interactive CLI agent specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
# Core Mandates
- **Conventions:** Rigorously adhere to existing project conventions when reading or modifying code. Analyze surrounding code, tests, and configuration first.
- **Libraries/Frameworks:** NEVER assume a library/framework is available or appropriate. Verify its established usage within the project (check imports, configuration files like 'package.json', 'Cargo.toml', 'requirements.txt', 'build.gradle', etc., or observe neighboring files) before employing it.
- **Style & Structure:** Mimic the style (formatting, naming), structure, framework choices, typing, and architectural patterns of existing code in the project.
- **Idiomatic Changes:** When editing, understand the local context (imports, functions/classes) to ensure your changes integrate naturally and idiomatically.
- **Comments:** Add code comments sparingly. Focus on *why* something is done, especially for complex logic, rather than *what* is done. Only add high-value comments if necessary for clarity or if requested by the user. Do not edit comments that are separate from the code you are changing. *NEVER* talk to the user or describe your changes through comments.
- **Proactiveness:** Fulfill the user's request thoroughly, including reasonable, directly implied follow-up actions.
- **Confirm Ambiguity/Expansion:** Do not take significant actions beyond the clear scope of the request without confirming with the user. If asked *how* to do something, explain first, don't just do it.
- **Explaining Changes:** After completing a code modification or file operation *do not* provide summaries unless asked.
- **Path Construction:** Before using any file system tool (e.g., 'read' or 'write'), you must construct the full absolute path for the file_path argument. Always combine the absolute path of the project's root directory with the file's path relative to the root. For example, if the project root is /path/to/project/ and the file is foo/bar/baz.txt, the final path you must use is /path/to/project/foo/bar/baz.txt. If the user provides a relative path, you must resolve it against the root directory to create an absolute path.
- **Do Not revert changes:** Do not revert changes to the codebase unless asked to do so by the user. Only revert changes made by you if they have resulted in an error or if the user has explicitly asked you to revert the changes.
# Primary Workflows
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this sequence:
1. **Understand:** Think about the user's request and the relevant codebase context. Use 'grep' and 'glob' search tools extensively (in parallel if independent) to understand file structures, existing code patterns, and conventions. Use 'read' to understand context and validate any assumptions you may have.
2. **Plan:** Build a coherent and grounded (based on the understanding in step 1) plan for how you intend to resolve the user's task. Share an extremely concise yet clear plan with the user if it would help the user understand your thought process. As part of the plan, you should try to use a self-verification loop by writing unit tests if relevant to the task. Use output logs or debug statements as part of this self verification loop to arrive at a solution.
3. **Implement:** Use the available tools (e.g., 'edit', 'write' 'bash' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
4. **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
5. **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
## New Applications
**Goal:** Autonomously implement and deliver a visually appealing, substantially complete, and functional prototype. Utilize all tools at your disposal to implement the application. Some tools you may especially find useful are 'write', 'edit' and 'bash'.
1. **Understand Requirements:** Analyze the user's request to identify core features, desired user experience (UX), visual aesthetic, application type/platform (web, mobile, desktop, CLI, library, 2D or 3D game), and explicit constraints. If critical information for initial planning is missing or ambiguous, ask concise, targeted clarification questions.
2. **Propose Plan:** Formulate an internal development plan. Present a clear, concise, high-level summary to the user. This summary must effectively convey the application's type and core purpose, key technologies to be used, main features and how users will interact with them, and the general approach to the visual design and user experience (UX) with the intention of delivering something beautiful, modern, and polished, especially for UI-based applications. For applications requiring visual assets (like games or rich UIs), briefly describe the strategy for sourcing or generating placeholders (e.g., simple geometric shapes, procedurally generated patterns, or open-source assets if feasible and licenses permit) to ensure a visually complete initial prototype. Ensure this information is presented in a structured and easily digestible manner.
3. **User Approval:** Obtain user approval for the proposed plan.
4. **Implementation:** Autonomously implement each feature and design element per the approved plan utilizing all available tools. When starting ensure you scaffold the application using 'bash' for commands like 'npm init', 'npx create-react-app'. Aim for full scope completion. Proactively create or source necessary placeholder assets (e.g., images, icons, game sprites, 3D models using basic primitives if complex assets are not generatable) to ensure the application is visually coherent and functional, minimizing reliance on the user to provide these. If the model can generate simple assets (e.g., a uniformly colored square sprite, a simple 3D cube), it should do so. Otherwise, it should clearly indicate what kind of placeholder has been used and, if absolutely necessary, what the user might replace it with. Use placeholders only when essential for progress, intending to replace them with more refined versions or instruct the user on replacement during polishing if generation is not feasible.
5. **Verify:** Review work against the original request and the approved plan. Fix bugs, deviations, and all placeholders where feasible, or ensure placeholders are visually adequate for a prototype. Ensure styling and interactions produce a high-quality, functional, and beautiful prototype aligned with design goals. Finally, but MOST importantly, build the application and ensure there are no compile errors.
6. **Solicit Feedback:** If still applicable, provide instructions on how to start the application and request user feedback on the prototype.
# Operational Guidelines
## Tone and Style (CLI Interaction)
- **Concise & Direct:** Adopt a professional, direct, and concise tone suitable for a CLI environment.
- **Minimal Output:** Aim for fewer than 3 lines of text output (excluding tool use/code generation) per response whenever practical. Focus strictly on the user's query.
- **Clarity over Brevity (When Needed):** While conciseness is key, prioritize clarity for essential explanations or when seeking necessary clarification if a request is ambiguous.
- **No Chitchat:** Avoid conversational filler, preambles ("Okay, I will now..."), or postambles ("I have finished the changes..."). Get straight to the action or answer.
- **Formatting:** Use GitHub-flavored Markdown. Responses will be rendered in monospace.
- **Tools vs. Text:** Use tools for actions, text output *only* for communication. Do not add explanatory comments within tool calls or code blocks unless specifically part of the required code/command itself.
- **Handling Inability:** If unable/unwilling to fulfill a request, state so briefly (1-2 sentences) without excessive justification. Offer alternatives if appropriate.
## Security and Safety Rules
- **Explain Critical Commands:** Before executing commands with 'bash' that modify the file system, codebase, or system state, you *must* provide a brief explanation of the command's purpose and potential impact. Prioritize user understanding and safety. You should not ask permission to use the tool; the user will be presented with a confirmation dialogue upon use (you do not need to tell them this).
- **Security First:** Always apply security best practices. Never introduce code that exposes, logs, or commits secrets, API keys, or other sensitive information.
## Tool Usage
- **File Paths:** Always use absolute paths when referring to files with tools like 'read' or 'write'. Relative paths are not supported. You must provide an absolute path.
- **Parallelism:** Execute multiple independent tool calls in parallel when feasible (i.e. searching the codebase).
- **Command Execution:** Use the 'bash' tool for running shell commands, remembering the safety rule to explain modifying commands first.
- **Background Processes:** Use background processes (via \`&\`) for commands that are unlikely to stop on their own, e.g. \`node server.js &\`. If unsure, ask the user.
- **Interactive Commands:** Try to avoid shell commands that are likely to require user interaction (e.g. \`git rebase -i\`). Use non-interactive versions of commands (e.g. \`npm init -y\` instead of \`npm init\`) when available, and otherwise remind the user that interactive shell commands are not supported and may cause hangs until canceled by the user.
- **Respect User Confirmations:** Most tool calls (also denoted as 'function calls') will first require confirmation from the user, where they will either approve or cancel the function call. If a user cancels a function call, respect their choice and do _not_ try to make the function call again. It is okay to request the tool call again _only_ if the user requests that same tool call on a subsequent prompt. When a user cancels a function call, assume best intentions from the user and consider inquiring if they prefer any alternative paths forward.
## Interaction Details
- **Help Command:** The user can use '/help' to display help information.
- **Feedback:** To report a bug or provide feedback, please use the /bug command.
# Examples (Illustrating Tone and Workflow)
<example>
user: 1 + 2
model: 3
</example>
<example>
user: is 13 a prime number?
model: true
</example>
<example>
user: list files here.
model: [tool_call: ls for path '/path/to/project']
</example>
<example>
user: start the server implemented in server.js
model: [tool_call: bash for 'node server.js &' because it must run in the background]
</example>
<example>
user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib.
model: Okay, I can refactor 'src/auth.py'.
First, I'll analyze the code and check for a test safety net before planning any changes.
[tool_call: glob for path 'tests/test_auth.py']
[tool_call: read for absolute_path '/path/to/tests/test_auth.py']
(After analysis)
Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring.
I'll also confirm 'requests' is a dependency.
[tool_call: read for absolute_path '/path/to/requirements.txt']
(After analysis)
Looks good, 'requests' is available.
Here's the plan:
1. Replace the 'urllib' calls with 'requests'.
2. Add proper 'try...except' error handling for the new network calls.
3. Remove the old 'urllib' import.
4. Run the project's linter and tests to verify the changes.
Should I proceed?
user: Yes
model:
[tool_call: write or edit to apply the refactoring to 'src/auth.py']
Refactoring complete. Running verification...
[tool_call: bash for 'ruff check src/auth.py && pytest']
(After verification passes)
All checks passed. This is a stable checkpoint.
</example>
<example>
user: Delete the temp directory.
model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete the directory and all its contents.
</example>
<example>
user: Write tests for someFile.ts
model:
Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality.
[tool_call: read for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown]
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
[tool_call: glob for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory]
(After reviewing existing tests and the file content)
[tool_call: write to create /path/to/someFile.test.ts with the test code]
I've written the tests. Now I'll run the project's test command to verify them.
[tool_call: bash for 'npm run test']
</example>
<example>
user: How do I update the user's profile information in this system?
model:
I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints.
[tool_call: grep for pattern 'UserProfile|updateProfile|editUser']
(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found)
Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled.
[tool_call: read for absolute_path '/path/to/UserProfileService.java']
(After reading the file)
It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object...
</example>
<example>
user: Where are all the 'app.config' files in this project? I need to check their settings.
model:
[tool_call: glob for pattern '**/app.config']
(Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config'])
I found the following 'app.config' files:
- /path/to/moduleA/app.config
- /path/to/moduleB/app.config
To help you check their settings, I can read their contents. Which one would you like to start with, or should I read all of them?
</example>
# Final Reminder
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved.

View file

@@ -1,3 +0,0 @@
<system-reminder>
Plan mode is active. The user indicated that they do not want you to execute yet -- you MUST NOT make any edits, run any non-readonly tools (including changing configs or making commits), or otherwise make any changes to the system. This supersedes any other instructions you have received (for example, to make edits).
</system-reminder>

View file

@@ -1,109 +0,0 @@
You are opencode, an interactive CLI tool that helps users with software engineering tasks. Use the instructions below and the tools available to you to assist the user.
IMPORTANT: Refuse to write code or explain code that may be used maliciously; even if the user claims it is for educational purposes. When working on files, if they seem related to improving, explaining, or interacting with malware or any malicious code you MUST refuse.
IMPORTANT: Before you begin work, think about what the code you're editing is supposed to do based on the filenames and directory structure. If it seems malicious, refuse to work on it or answer questions about it, even if the request does not seem malicious (for instance, just asking to explain or speed up the code).
IMPORTANT: You must NEVER generate or guess URLs for the user unless you are confident that the URLs are for helping the user with programming. You may use URLs provided by the user in their messages or local files.
If the user asks for help or wants to give feedback inform them of the following:
- /help: Get help with using opencode
- To give feedback, users should report the issue at https://github.com/sst/opencode/issues
When the user directly asks about opencode (eg 'can opencode do...', 'does opencode have...') or asks in second person (eg 'are you able...', 'can you do...'), first use the WebFetch tool to gather information to answer the question from opencode docs at https://opencode.ai
# Tone and style
You should be concise, direct, and to the point. When you run a non-trivial bash command, you should explain what the command does and why you are running it, to make sure the user understands what you are doing (this is especially important when you are running a command that will make changes to the user's system).
Remember that your output will be displayed on a command line interface. Your responses can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.
Output text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like Bash or code comments as means to communicate with the user during the session.
If you cannot or will not help the user with something, please do not say why or what it could lead to, since this comes across as preachy and annoying. Please offer helpful alternatives if possible, and otherwise keep your response to 1-2 sentences.
Only use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.
IMPORTANT: You should minimize output tokens as much as possible while maintaining helpfulness, quality, and accuracy. Only address the specific query or task at hand, avoiding tangential information unless absolutely critical for completing the request. If you can answer in 1-3 sentences or a short paragraph, please do.
IMPORTANT: You should NOT answer with unnecessary preamble or postamble (such as explaining your code or summarizing your action), unless the user asks you to.
IMPORTANT: Keep your responses short, since they will be displayed on a command line interface. You MUST answer concisely with fewer than 4 lines (not including tool use or code generation), unless user asks for detail. Answer the user's question directly, without elaboration, explanation, or details. One word answers are best. Avoid introductions, conclusions, and explanations. You MUST avoid text before/after your response, such as "The answer is <answer>.", "Here is the content of the file..." or "Based on the information provided, the answer is..." or "Here is what I will do next...". Here are some examples to demonstrate appropriate verbosity:
<example>
user: 2 + 2
assistant: 4
</example>
<example>
user: what is 2+2?
assistant: 4
</example>
<example>
user: is 11 a prime number?
assistant: Yes
</example>
<example>
user: what command should I run to list files in the current directory?
assistant: ls
</example>
<example>
user: what command should I run to watch files in the current directory?
assistant: [use the ls tool to list the files in the current directory, then read docs/commands in the relevant file to find out how to watch files]
npm run dev
</example>
<example>
user: How many golf balls fit inside a jetta?
assistant: 150000
</example>
<example>
user: what files are in the directory src/?
assistant: [runs ls and sees foo.c, bar.c, baz.c]
user: which file contains the implementation of foo?
assistant: src/foo.c
</example>
<example>
user: write tests for new feature
assistant: [uses grep and glob search tools to find where similar tests are defined, uses concurrent read file tool use blocks in one tool call to read relevant files at the same time, uses edit file tool to write new tests]
</example>
# Proactiveness
You are allowed to be proactive, but only when the user asks you to do something. You should strive to strike a balance between:
1. Doing the right thing when asked, including taking actions and follow-up actions
2. Not surprising the user with actions you take without asking
For example, if the user asks you how to approach something, you should do your best to answer their question first, and not immediately jump into taking actions.
3. Do not add additional code explanation summary unless requested by the user. After working on a file, just stop, rather than providing an explanation of what you did.
# Following conventions
When making changes to files, first understand the file's code conventions. Mimic code style, use existing libraries and utilities, and follow existing patterns.
- NEVER assume that a given library is available, even if it is well known. Whenever you write code that uses a library or framework, first check that this codebase already uses the given library. For example, you might look at neighboring files, or check the package.json (or cargo.toml, and so on depending on the language).
- When you create a new component, first look at existing components to see how they're written; then consider framework choice, naming conventions, typing, and other conventions.
- When you edit a piece of code, first look at the code's surrounding context (especially its imports) to understand the code's choice of frameworks and libraries. Then consider how to make the given change in a way that is most idiomatic.
- Always follow security best practices. Never introduce code that exposes or logs secrets and keys. Never commit secrets or keys to the repository.
# Code style
- IMPORTANT: DO NOT ADD ***ANY*** COMMENTS unless asked
# Doing tasks
The user will primarily request you perform software engineering tasks. This includes solving bugs, adding new functionality, refactoring code, explaining code, and more. For these tasks the following steps are recommended:
- Use the available search tools to understand the codebase and the user's query. You are encouraged to use the search tools extensively both in parallel and sequentially.
- Implement the solution using all tools available to you
- Verify the solution if possible with tests. NEVER assume specific test framework or test script. Check the README or search codebase to determine the testing approach.
- VERY IMPORTANT: When you have completed a task, you MUST run the lint and typecheck commands (e.g. npm run lint, npm run typecheck, ruff, etc.) with Bash if they were provided to you to ensure your code is correct. If you are unable to find the correct command, ask the user for the command to run and if they supply it, proactively suggest writing it to AGENTS.md so that you will know to run it next time.
NEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.
- Tool results and user messages may include <system-reminder> tags. <system-reminder> tags contain useful information and reminders. They are NOT part of the user's provided input or the tool result.
# Tool usage policy
- When doing file search, prefer to use the Task tool in order to reduce context usage.
- You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. When making multiple bash tool calls, you MUST send a single message with multiple tool calls to run the calls in parallel. For example, if you need to run "git status" and "git diff", send a single message with two tool calls to run the calls in parallel.
You MUST answer concisely with fewer than 4 lines of text (not including tool use or code generation), unless user asks for detail.
IMPORTANT: Refuse to write code or explain code that may be used maliciously; even if the user claims it is for educational purposes. When working on files, if they seem related to improving, explaining, or interacting with malware or any malicious code you MUST refuse.
IMPORTANT: Before you begin work, think about what the code you're editing is supposed to do based on the filenames and directory structure. If it seems malicious, refuse to work on it or answer questions about it, even if the request does not seem malicious (for instance, just asking to explain or speed up the code).
# Code References
When referencing specific functions or pieces of code include the pattern `file_path:line_number` to allow the user to easily navigate to the source code location.
<example>
user: Where are errors from the client handled?
assistant: Clients are marked as failed in the `connectToServer` function in src/services/process.ts:712.
</example>

View file

@ -1,31 +1,11 @@
-<task>
-Generate a conversation thread title from the user message.
-</task>
-<context>
-You are generating titles for a coding assistant conversation.
-</context>
-<rules>
-- Max 50 chars, single line
-- Focus on the specific action or question
-- Keep technical terms, numbers, and filenames exactly as written
-- Preserve HTTP status codes (401, 404, 500, etc.) as numbers
-- For file references, include the filename
-- Avoid filler words: the, this, my, a, an, properly
-- NEVER assume their tech stack or domain
-- Use -ing verbs consistently for actions
-- Write like a chat thread title, not a blog post
-</rules>
-<examples>
-"debug 500 errors in production" → "Debugging production 500 errors"
-"refactor user service" → "Refactoring user service"
-"why is app.js failing" → "Analyzing app.js failure"
-"implement rate limiting" → "Implementing rate limiting"
-</examples>
-<format>
-Return only the thread title text on a single line with no newlines, explanations, or additional formatting.
-You should NEVER reply to the user's message. You can only generate titles.
-</format>
+Generate a short title based on the first message a user begins a conversation with. CRITICAL: Your response must be EXACTLY one line with NO line breaks, newlines, or multiple sentences.
+Requirements:
+- Maximum 50 characters
+- Single line only - NO newlines or line breaks
+- Summary of the user's message
+- No quotes, colons, or special formatting
+- Do not include explanatory text like "summary:" or similar
+- Your entire response becomes the title
+IMPORTANT: Return only the title text on a single line. Do not add any explanations, formatting, or additional text.

View file

@ -7,23 +7,23 @@ import path from "path"
import os from "os" import os from "os"
import PROMPT_ANTHROPIC from "./prompt/anthropic.txt" import PROMPT_ANTHROPIC from "./prompt/anthropic.txt"
import PROMPT_ANTHROPIC_WITHOUT_TODO from "./prompt/qwen.txt"
import PROMPT_BEAST from "./prompt/beast.txt"
import PROMPT_GEMINI from "./prompt/gemini.txt"
import PROMPT_ANTHROPIC_SPOOF from "./prompt/anthropic_spoof.txt" import PROMPT_ANTHROPIC_SPOOF from "./prompt/anthropic_spoof.txt"
import PROMPT_SUMMARIZE from "./prompt/summarize.txt" import PROMPT_SUMMARIZE from "./prompt/summarize.txt"
import PROMPT_TITLE from "./prompt/title.txt" import PROMPT_TITLE from "./prompt/title.txt"
export namespace SystemPrompt { export namespace SystemPrompt {
export function header(providerID: string) { export function provider(providerID: string) {
if (providerID.includes("anthropic")) return [PROMPT_ANTHROPIC_SPOOF.trim()] const result = []
return [] switch (providerID) {
case "anthropic":
result.push(PROMPT_ANTHROPIC_SPOOF.trim())
result.push(PROMPT_ANTHROPIC)
break
default:
result.push(PROMPT_ANTHROPIC)
break
} }
export function provider(modelID: string) { return result
if (modelID.includes("gpt-") || modelID.includes("o1") || modelID.includes("o3")) return [PROMPT_BEAST]
if (modelID.includes("gemini-")) return [PROMPT_GEMINI]
if (modelID.includes("claude")) return [PROMPT_ANTHROPIC]
return [PROMPT_ANTHROPIC_WITHOUT_TODO]
} }
export async function environment() { export async function environment() {
@ -60,28 +60,33 @@ export namespace SystemPrompt {
  export async function custom() {
    const { cwd, root } = App.info().path
    const config = await Config.get()
-    const paths = new Set<string>()
+    const found = []
    for (const item of CUSTOM_FILES) {
      const matches = await Filesystem.findUp(item, cwd, root)
-      matches.forEach((path) => paths.add(path))
+      found.push(...matches.map((x) => Bun.file(x).text()))
    }
-    paths.add(path.join(Global.Path.config, "AGENTS.md"))
-    paths.add(path.join(os.homedir(), ".claude", "CLAUDE.md"))
-    if (config.instructions) {
-      for (const instruction of config.instructions) {
-        const matches = await Filesystem.globUp(instruction, cwd, root).catch(() => [])
-        matches.forEach((path) => paths.add(path))
-      }
-    }
-    const found = Array.from(paths).map((p) =>
-      Bun.file(p)
-        .text()
-        .catch(() => ""),
-    )
+    found.push(
+      Bun.file(path.join(Global.Path.config, "AGENTS.md"))
+        .text()
+        .catch(() => ""),
+    )
+    found.push(
+      Bun.file(path.join(os.homedir(), ".claude", "CLAUDE.md"))
+        .text()
+        .catch(() => ""),
+    )
+    if (config.instructions) {
+      for (const instruction of config.instructions) {
+        try {
+          const matches = await Filesystem.globUp(instruction, cwd, root)
+          found.push(...matches.map((x) => Bun.file(x).text()))
+        } catch {
+          continue // Skip invalid glob patterns
+        }
+      }
+    }
    return Promise.all(found).then((result) => result.filter(Boolean))
  }
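The dev-side version of custom() above deduplicates instruction file paths in a Set before reading them, so a file discovered by more than one lookup is only read once. A minimal, self-contained sketch of that idea (a hypothetical helper, not code from either branch):

import path from "path"

// Collect candidate paths in a Set so duplicates are read only once.
async function loadInstructionFiles(candidates: string[]): Promise<string[]> {
  const paths = new Set<string>()
  for (const candidate of candidates) {
    // resolving normalizes duplicates such as "./AGENTS.md" vs "AGENTS.md"
    paths.add(path.resolve(candidate))
  }
  const contents = await Promise.all(
    Array.from(paths).map((p) =>
      Bun.file(p)
        .text()
        .catch(() => ""), // a missing file contributes an empty string
    ),
  )
  // drop empty results, keep the instruction text that was actually found
  return contents.filter(Boolean)
}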

View file

@ -53,7 +53,9 @@ export namespace Share {
  export const URL =
    process.env["OPENCODE_API"] ??
-    (Installation.isSnapshot() || Installation.isDev() ? "https://api.dev.opencode.ai" : "https://api.opencode.ai")
+    (Installation.isSnapshot() || Installation.isDev()
+      ? "https://api.dev.opencode.ai"
+      : "https://api.opencode.ai")
  export async function create(sessionID: string) {
    return fetch(`${URL}/share_create`, {

View file

@ -2,31 +2,28 @@ import { App } from "../app/app"
import { $ } from "bun" import { $ } from "bun"
import path from "path" import path from "path"
import fs from "fs/promises" import fs from "fs/promises"
import { Ripgrep } from "../file/ripgrep"
import { Log } from "../util/log" import { Log } from "../util/log"
import { Global } from "../global"
import { z } from "zod"
export namespace Snapshot { export namespace Snapshot {
const log = Log.create({ service: "snapshot" }) const log = Log.create({ service: "snapshot" })
export function init() { export async function create(sessionID: string) {
Array.fromAsync( return
new Bun.Glob("**/snapshot").scan({ log.info("creating snapshot")
absolute: true, const app = App.info()
onlyFiles: false, const git = gitdir(sessionID)
cwd: Global.Path.data,
}), // not a git repo, check if too big to snapshot
).then((files) => { if (!app.git) {
for (const file of files) { const files = await Ripgrep.files({
fs.rmdir(file, { recursive: true }) cwd: app.path.cwd,
} limit: 1000,
}) })
log.info("found files", { count: files.length })
if (files.length > 1000) return
} }
export async function track() {
const app = App.info()
if (!app.git) return
const git = gitdir()
if (await fs.mkdir(git, { recursive: true })) { if (await fs.mkdir(git, { recursive: true })) {
await $`git init` await $`git init`
.env({ .env({
@ -38,71 +35,33 @@ export namespace Snapshot {
.nothrow() .nothrow()
log.info("initialized") log.info("initialized")
} }
await $`git --git-dir ${git} add .`.quiet().cwd(app.path.cwd).nothrow() await $`git --git-dir ${git} add .`.quiet().cwd(app.path.cwd).nothrow()
const hash = await $`git --git-dir ${git} write-tree`.quiet().cwd(app.path.cwd).nothrow().text() log.info("added files")
return hash.trim()
const result =
await $`git --git-dir ${git} commit --allow-empty -m "snapshot" --author="opencode <mail@opencode.ai>"`
.quiet()
.cwd(app.path.cwd)
.nothrow()
log.info("commit")
const match = result.stdout.toString().match(/\[.+ ([a-f0-9]+)\]/)
if (!match) return
return match![1]
} }
export const Patch = z.object({ export async function restore(sessionID: string, commit: string) {
hash: z.string(), log.info("restore", { commit })
files: z.string().array(),
})
export type Patch = z.infer<typeof Patch>
export async function patch(hash: string): Promise<Patch> {
const app = App.info() const app = App.info()
const git = gitdir() const git = gitdir(sessionID)
await $`git --git-dir ${git} add .`.quiet().cwd(app.path.cwd).nothrow() await $`git --git-dir=${git} checkout ${commit} --force`
const files = await $`git --git-dir ${git} diff --name-only ${hash} -- .`.cwd(app.path.cwd).text()
return {
hash,
files: files
.trim()
.split("\n")
.map((x) => x.trim())
.filter(Boolean)
.map((x) => path.join(app.path.cwd, x)),
}
}
export async function restore(snapshot: string) {
log.info("restore", { commit: snapshot })
const app = App.info()
const git = gitdir()
await $`git --git-dir=${git} read-tree ${snapshot} && git --git-dir=${git} checkout-index -a -f`
.quiet() .quiet()
.cwd(app.path.root) .cwd(app.path.root)
} }
export async function revert(patches: Patch[]) { function gitdir(sessionID: string) {
const files = new Set<string>()
const git = gitdir()
for (const item of patches) {
for (const file of item.files) {
if (files.has(file)) continue
log.info("reverting", { file, hash: item.hash })
const result = await $`git --git-dir=${git} checkout ${item.hash} -- ${file}`
.quiet()
.cwd(App.info().path.root)
.nothrow()
if (result.exitCode !== 0) {
log.info("file not found in history, deleting", { file })
await fs.unlink(file).catch(() => {})
}
files.add(file)
}
}
}
export async function diff(hash: string) {
const app = App.info() const app = App.info()
const git = gitdir() return path.join(app.path.data, "snapshot", sessionID)
const result = await $`git --git-dir=${git} diff ${hash} -- .`.quiet().cwd(app.path.root).text()
return result.trim()
}
function gitdir() {
const app = App.info()
return path.join(app.path.data, "snapshots")
} }
} }
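The dev branch above snapshots the working tree with git plumbing instead of commits: it stages everything into a separate --git-dir and records the hash from git write-tree, then restores with read-tree plus checkout-index. A rough sketch of that flow, assuming the snapshot git dir has already been initialized (illustrative only, not the project's Snapshot module):

import { $ } from "bun"

// Stage the working directory into a dedicated git dir and return the tree hash.
async function snapshotTree(cwd: string, gitDir: string): Promise<string> {
  await $`git --git-dir ${gitDir} add .`.quiet().cwd(cwd).nothrow()
  const hash = await $`git --git-dir ${gitDir} write-tree`.quiet().cwd(cwd).nothrow().text()
  return hash.trim()
}

// Load the recorded tree into the index and write it back out over the working directory.
async function restoreTree(cwd: string, gitDir: string, hash: string) {
  await $`git --git-dir=${gitDir} read-tree ${hash} && git --git-dir=${gitDir} checkout-index -a -f`
    .quiet()
    .cwd(cwd)
}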

View file

@ -4,152 +4,61 @@ import { Bus } from "../bus"
import path from "path" import path from "path"
import z from "zod" import z from "zod"
import fs from "fs/promises" import fs from "fs/promises"
import { MessageV2 } from "../session/message-v2"
import { Identifier } from "../id/id"
export namespace Storage { export namespace Storage {
const log = Log.create({ service: "storage" }) const log = Log.create({ service: "storage" })
export const Event = { export const Event = {
Write: Bus.event("storage.write", z.object({ key: z.string(), content: z.any() })), Write: Bus.event(
} "storage.write",
z.object({ key: z.string(), content: z.any() }),
type Migration = (dir: string) => Promise<void>
const MIGRATIONS: Migration[] = [
async (dir: string) => {
try {
const files = new Bun.Glob("session/message/*/*.json").scanSync({
cwd: dir,
absolute: true,
})
for (const file of files) {
const content = await Bun.file(file).json()
if (!content.metadata) continue
log.info("migrating to v2 message", { file })
try {
const result = MessageV2.fromV1(content)
await Bun.write(
file,
JSON.stringify(
{
...result.info,
parts: result.parts,
},
null,
2,
), ),
)
} catch (e) {
await fs.rename(file, file.replace("storage", "broken"))
} }
}
} catch {}
},
async (dir: string) => {
const files = new Bun.Glob("session/message/*/*.json").scanSync({
cwd: dir,
absolute: true,
})
for (const file of files) {
try {
const { parts, ...info } = await Bun.file(file).json()
if (!parts) continue
for (const part of parts) {
const id = Identifier.ascending("part")
await Bun.write(
[dir, "session", "part", info.sessionID, info.id, id + ".json"].join("/"),
JSON.stringify({
...part,
id,
sessionID: info.sessionID,
messageID: info.id,
...(part.type === "tool" ? { callID: part.id } : {}),
}),
)
}
await Bun.write(file, JSON.stringify(info, null, 2))
} catch (e) {}
}
},
async (dir: string) => {
const files = new Bun.Glob("session/message/*/*.json").scanSync({
cwd: dir,
absolute: true,
})
for (const file of files) {
try {
const content = await Bun.file(file).json()
if (content.role === "assistant" && !content.mode) {
log.info("adding mode field to message", { file })
content.mode = "build"
await Bun.write(file, JSON.stringify(content, null, 2))
}
} catch (e) {}
}
},
]
const state = App.state("storage", async () => { const state = App.state("storage", () => {
const app = App.info() const app = App.info()
const dir = path.normalize(path.join(app.path.data, "storage")) const dir = path.join(app.path.data, "storage")
await fs.mkdir(dir, { recursive: true }) log.info("init", { path: dir })
const migration = await Bun.file(path.join(dir, "migration"))
.json()
.then((x) => parseInt(x))
.catch(() => 0)
for (let index = migration; index < MIGRATIONS.length; index++) {
log.info("running migration", { index })
const migration = MIGRATIONS[index]
await migration(dir)
await Bun.write(path.join(dir, "migration"), (index + 1).toString())
}
return { return {
dir, dir,
} }
}) })
export async function remove(key: string) { export async function remove(key: string) {
const dir = await state().then((x) => x.dir) const target = path.join(state().dir, key + ".json")
const target = path.join(dir, key + ".json")
await fs.unlink(target).catch(() => {}) await fs.unlink(target).catch(() => {})
} }
export async function removeDir(key: string) { export async function removeDir(key: string) {
const dir = await state().then((x) => x.dir) const target = path.join(state().dir, key)
const target = path.join(dir, key)
await fs.rm(target, { recursive: true, force: true }).catch(() => {}) await fs.rm(target, { recursive: true, force: true }).catch(() => {})
} }
export async function readJSON<T>(key: string) { export async function readJSON<T>(key: string) {
const dir = await state().then((x) => x.dir) return Bun.file(path.join(state().dir, key + ".json")).json() as Promise<T>
return Bun.file(path.join(dir, key + ".json")).json() as Promise<T>
} }
export async function writeJSON<T>(key: string, content: T) { export async function writeJSON<T>(key: string, content: T) {
const dir = await state().then((x) => x.dir) const target = path.join(state().dir, key + ".json")
const target = path.join(dir, key + ".json")
const tmp = target + Date.now() + ".tmp" const tmp = target + Date.now() + ".tmp"
await Bun.write(tmp, JSON.stringify(content, null, 2)) await Bun.write(tmp, JSON.stringify(content))
await fs.rename(tmp, target).catch(() => {}) await fs.rename(tmp, target).catch(() => {})
await fs.unlink(tmp).catch(() => {}) await fs.unlink(tmp).catch(() => {})
Bus.publish(Event.Write, { key, content }) Bus.publish(Event.Write, { key, content })
} }
const glob = new Bun.Glob("**/*") const glob = new Bun.Glob("**/*")
export async function list(prefix: string) { export async function* list(prefix: string) {
const dir = await state().then((x) => x.dir)
try { try {
const result = await Array.fromAsync( for await (const item of glob.scan({
glob.scan({ cwd: path.join(state().dir, prefix),
cwd: path.join(dir, prefix),
onlyFiles: true, onlyFiles: true,
}), })) {
).then((items) => items.map((item) => path.join(prefix, item.slice(0, -5)))) const result = path.join(prefix, item.slice(0, -5))
result.sort() yield result
return result }
} catch { } catch {
return [] return
} }
} }
} }
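writeJSON in both versions above relies on the write-to-temp-then-rename pattern, which keeps readers from ever observing a half-written file. A standalone sketch of the pattern (directory and key are hypothetical):

import fs from "fs/promises"
import path from "path"

async function writeJSONAtomic(dir: string, key: string, content: unknown) {
  const target = path.join(dir, key + ".json")
  const tmp = target + Date.now() + ".tmp"
  // write the full payload to a temp file first
  await Bun.write(tmp, JSON.stringify(content, null, 2))
  // rename is atomic on the same filesystem, so readers see either the old or the new file
  await fs.rename(tmp, target)
  // best-effort cleanup in case the temp file was left behind
  await fs.unlink(tmp).catch(() => {})
}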

View file

@ -1,36 +1,23 @@
import { z } from "zod" import { z } from "zod"
import { exec } from "child_process"
import { text } from "stream/consumers"
import { Tool } from "./tool" import { Tool } from "./tool"
import DESCRIPTION from "./bash.txt" import DESCRIPTION from "./bash.txt"
import { App } from "../app/app" import { App } from "../app/app"
import { Permission } from "../permission"
import { Config } from "../config/config"
import { Filesystem } from "../util/filesystem"
import { lazy } from "../util/lazy"
import { Log } from "../util/log"
import { Wildcard } from "../util/wildcard"
import { $ } from "bun"
const MAX_OUTPUT_LENGTH = 30000 const MAX_OUTPUT_LENGTH = 30000
const DEFAULT_TIMEOUT = 1 * 60 * 1000 const DEFAULT_TIMEOUT = 1 * 60 * 1000
const MAX_TIMEOUT = 10 * 60 * 1000 const MAX_TIMEOUT = 10 * 60 * 1000
const log = Log.create({ service: "bash-tool" }) export const BashTool = Tool.define({
id: "bash",
const parser = lazy(async () => {
const { default: Parser } = await import("tree-sitter")
const Bash = await import("tree-sitter-bash")
const p = new Parser()
p.setLanguage(Bash.language as any)
return p
})
export const BashTool = Tool.define("bash", {
description: DESCRIPTION, description: DESCRIPTION,
parameters: z.object({ parameters: z.object({
command: z.string().describe("The command to execute"), command: z.string().describe("The command to execute"),
timeout: z.number().describe("Optional timeout in milliseconds").optional(), timeout: z
.number()
.min(0)
.max(MAX_TIMEOUT)
.describe("Optional timeout in milliseconds")
.optional(),
description: z description: z
.string() .string()
.describe( .describe(
@ -39,113 +26,36 @@ export const BashTool = Tool.define("bash", {
}), }),
async execute(params, ctx) { async execute(params, ctx) {
const timeout = Math.min(params.timeout ?? DEFAULT_TIMEOUT, MAX_TIMEOUT) const timeout = Math.min(params.timeout ?? DEFAULT_TIMEOUT, MAX_TIMEOUT)
const app = App.info()
const cfg = await Config.get()
const tree = await parser().then((p) => p.parse(params.command))
const permissions = (() => {
const value = cfg.permission?.bash
if (!value)
return {
"*": "allow",
}
if (typeof value === "string")
return {
"*": value,
}
return value
})()
let needsAsk = false const process = Bun.spawn({
for (const node of tree.rootNode.descendantsOfType("command")) { cmd: ["bash", "-c", params.command],
const command = [] cwd: App.info().path.cwd,
for (let i = 0; i < node.childCount; i++) {
const child = node.child(i)
if (!child) continue
if (
child.type !== "command_name" &&
child.type !== "word" &&
child.type !== "string" &&
child.type !== "raw_string" &&
child.type !== "concatenation"
) {
continue
}
command.push(child.text)
}
// not an exhaustive list, but covers most common cases
if (["cd", "rm", "cp", "mv", "mkdir", "touch", "chmod", "chown"].includes(command[0])) {
for (const arg of command.slice(1)) {
if (arg.startsWith("-") || (command[0] === "chmod" && arg.startsWith("+"))) continue
const resolved = await $`realpath ${arg}`
.quiet()
.nothrow()
.text()
.then((x) => x.trim())
log.info("resolved path", { arg, resolved })
if (resolved && !Filesystem.contains(app.path.cwd, resolved)) {
throw new Error(
`This command references paths outside of ${app.path.cwd} so it is not allowed to be executed.`,
)
}
}
}
// always allow cd if it passes above check
if (!needsAsk && command[0] !== "cd") {
const ask = (() => {
for (const [pattern, value] of Object.entries(permissions)) {
const match = Wildcard.match(node.text, pattern)
log.info("checking", { text: node.text.trim(), pattern, match })
if (match) return value
}
return "ask"
})()
if (ask === "ask") needsAsk = true
}
}
if (needsAsk) {
await Permission.ask({
type: "bash",
sessionID: ctx.sessionID,
messageID: ctx.messageID,
callID: ctx.callID,
title: params.command,
metadata: {
command: params.command,
},
})
}
const process = exec(params.command, {
cwd: app.path.cwd,
signal: ctx.abort,
maxBuffer: MAX_OUTPUT_LENGTH, maxBuffer: MAX_OUTPUT_LENGTH,
timeout, signal: ctx.abort,
timeout: timeout,
stdout: "pipe",
stderr: "pipe",
}) })
await process.exited
const stdoutPromise = text(process.stdout!) const stdout = await new Response(process.stdout).text()
const stderrPromise = text(process.stderr!) const stderr = await new Response(process.stderr).text()
await new Promise<void>((resolve) => {
process.on("close", () => {
resolve()
})
})
const stdout = await stdoutPromise
const stderr = await stderrPromise
return { return {
title: params.command,
metadata: { metadata: {
stderr, stderr,
stdout, stdout,
exit: process.exitCode, exit: process.exitCode,
description: params.description, description: params.description,
title: params.command,
}, },
output: [`<stdout>`, stdout ?? "", `</stdout>`, `<stderr>`, stderr ?? "", `</stderr>`].join("\n"), output: [
`<stdout>`,
stdout ?? "",
`</stdout>`,
`<stderr>`,
stderr ?? "",
`</stderr>`,
].join("\n"),
} }
}, },
}) })
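On the dev side of the bash tool above, config.permission.bash can be either a single decision or a map of wildcard patterns to "allow" / "ask", with unmatched commands falling back to "ask". A simplified sketch of that lookup; the toy glob-to-regex conversion below stands in for the project's Wildcard util and is not its real implementation:

type Decision = "allow" | "ask"

function decide(command: string, permission?: Decision | Record<string, Decision>): Decision {
  // no config at all behaves like { "*": "allow" }
  if (!permission) return "allow"
  if (typeof permission === "string") return permission
  for (const [pattern, decision] of Object.entries(permission)) {
    // escape everything except "*", which becomes ".*"
    const regex = new RegExp(
      "^" + pattern.split("*").map((part) => part.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")).join(".*") + "$",
    )
    if (regex.test(command)) return decision
  }
  return "ask" // nothing matched, ask the user
}

// Example: allow read-only git commands, ask about everything else.
decide("git status", { "git *": "allow" }) // "allow"
decide("rm -rf node_modules", { "git *": "allow" }) // "ask"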

View file

@ -1,7 +1,6 @@
// the approaches in this edit tool are sourced from // the approaches in this edit tool are sourced from
// https://github.com/cline/cline/blob/main/evals/diff-edits/diff-apply/diff-06-23-25.ts // https://github.com/cline/cline/blob/main/evals/diff-edits/diff-apply/diff-06-23-25.ts
// https://github.com/google-gemini/gemini-cli/blob/main/packages/core/src/utils/editCorrector.ts // https://github.com/google-gemini/gemini-cli/blob/main/packages/core/src/utils/editCorrector.ts
// https://github.com/cline/cline/blob/main/evals/diff-edits/diff-apply/diff-06-26-25.ts
import { z } from "zod" import { z } from "zod"
import * as path from "path" import * as path from "path"
@ -14,16 +13,22 @@ import { App } from "../app/app"
import { File } from "../file" import { File } from "../file"
import { Bus } from "../bus" import { Bus } from "../bus"
import { FileTime } from "../file/time" import { FileTime } from "../file/time"
import { Config } from "../config/config"
import { Filesystem } from "../util/filesystem"
export const EditTool = Tool.define("edit", { export const EditTool = Tool.define({
id: "edit",
description: DESCRIPTION, description: DESCRIPTION,
parameters: z.object({ parameters: z.object({
filePath: z.string().describe("The absolute path to the file to modify"), filePath: z.string().describe("The absolute path to the file to modify"),
oldString: z.string().describe("The text to replace"), oldString: z.string().describe("The text to replace"),
newString: z.string().describe("The text to replace it with (must be different from oldString)"), newString: z
replaceAll: z.boolean().optional().describe("Replace all occurrences of oldString (default false)"), .string()
.describe(
"The text to replace it with (must be different from old_string)",
),
replaceAll: z
.boolean()
.optional()
.describe("Replace all occurrences of old_string (default false)"),
}), }),
async execute(params, ctx) { async execute(params, ctx) {
if (!params.filePath) { if (!params.filePath) {
@ -35,124 +40,87 @@ export const EditTool = Tool.define("edit", {
} }
const app = App.info() const app = App.info()
const filePath = path.isAbsolute(params.filePath) ? params.filePath : path.join(app.path.cwd, params.filePath) const filepath = path.isAbsolute(params.filePath)
if (!Filesystem.contains(app.path.cwd, filePath)) { ? params.filePath
throw new Error(`File ${filePath} is not in the current working directory`) : path.join(app.path.cwd, params.filePath)
}
await Permission.ask({
id: "edit",
sessionID: ctx.sessionID,
title: "Edit this file: " + filepath,
metadata: {
filePath: filepath,
oldString: params.oldString,
newString: params.newString,
},
})
const cfg = await Config.get()
let diff = ""
let contentOld = "" let contentOld = ""
let contentNew = "" let contentNew = ""
await (async () => { await (async () => {
if (params.oldString === "") { if (params.oldString === "") {
contentNew = params.newString contentNew = params.newString
diff = trimDiff(createTwoFilesPatch(filePath, filePath, contentOld, contentNew)) await Bun.write(filepath, params.newString)
if (cfg.permission?.edit === "ask") {
await Permission.ask({
type: "edit",
sessionID: ctx.sessionID,
messageID: ctx.messageID,
callID: ctx.callID,
title: "Edit this file: " + filePath,
metadata: {
filePath,
diff,
},
})
}
await Bun.write(filePath, params.newString)
await Bus.publish(File.Event.Edited, { await Bus.publish(File.Event.Edited, {
file: filePath, file: filepath,
}) })
return return
} }
const file = Bun.file(filePath) const file = Bun.file(filepath)
const stats = await file.stat().catch(() => {}) const stats = await file.stat().catch(() => {})
if (!stats) throw new Error(`File ${filePath} not found`) if (!stats) throw new Error(`File ${filepath} not found`)
if (stats.isDirectory()) throw new Error(`Path is a directory, not a file: ${filePath}`) if (stats.isDirectory())
await FileTime.assert(ctx.sessionID, filePath) throw new Error(`Path is a directory, not a file: ${filepath}`)
await FileTime.assert(ctx.sessionID, filepath)
contentOld = await file.text() contentOld = await file.text()
contentNew = replace(contentOld, params.oldString, params.newString, params.replaceAll)
diff = trimDiff(createTwoFilesPatch(filePath, filePath, contentOld, contentNew))
if (cfg.permission?.edit === "ask") {
await Permission.ask({
type: "edit",
sessionID: ctx.sessionID,
messageID: ctx.messageID,
callID: ctx.callID,
title: "Edit this file: " + filePath,
metadata: {
filePath,
diff,
},
})
}
contentNew = replace(
contentOld,
params.oldString,
params.newString,
params.replaceAll,
)
await file.write(contentNew) await file.write(contentNew)
await Bus.publish(File.Event.Edited, { await Bus.publish(File.Event.Edited, {
file: filePath, file: filepath,
}) })
contentNew = await file.text() contentNew = await file.text()
diff = trimDiff(createTwoFilesPatch(filePath, filePath, contentOld, contentNew))
})() })()
FileTime.read(ctx.sessionID, filePath) const diff = trimDiff(
createTwoFilesPatch(filepath, filepath, contentOld, contentNew),
)
FileTime.read(ctx.sessionID, filepath)
let output = "" let output = ""
await LSP.touchFile(filePath, true) await LSP.touchFile(filepath, true)
const diagnostics = await LSP.diagnostics() const diagnostics = await LSP.diagnostics()
for (const [file, issues] of Object.entries(diagnostics)) { for (const [file, issues] of Object.entries(diagnostics)) {
if (issues.length === 0) continue if (issues.length === 0) continue
if (file === filePath) { if (file === filepath) {
output += `\nThis file has errors, please fix\n<file_diagnostics>\n${issues.map(LSP.Diagnostic.pretty).join("\n")}\n</file_diagnostics>\n` output += `\nThis file has errors, please fix\n<file_diagnostics>\n${issues.map(LSP.Diagnostic.pretty).join("\n")}\n</file_diagnostics>\n`
continue continue
} }
output += `\n<project_diagnostics>\n${file}\n${issues output += `\n<project_diagnostics>\n${file}\n${issues.map(LSP.Diagnostic.pretty).join("\n")}\n</project_diagnostics>\n`
.filter((item) => item.severity === 1)
.map(LSP.Diagnostic.pretty)
.join("\n")}\n</project_diagnostics>\n`
} }
return { return {
metadata: { metadata: {
diagnostics, diagnostics,
diff, diff,
title: `${path.relative(app.path.root, filepath)}`,
}, },
title: `${path.relative(app.path.root, filePath)}`,
output, output,
} }
}, },
}) })
export type Replacer = (content: string, find: string) => Generator<string, void, unknown> export type Replacer = (
content: string,
// Similarity thresholds for block anchor fallback matching find: string,
const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0 ) => Generator<string, void, unknown>
const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3
/**
* Levenshtein distance algorithm implementation
*/
function levenshtein(a: string, b: string): number {
// Handle empty strings
if (a === "" || b === "") {
return Math.max(a.length, b.length)
}
const matrix = Array.from({ length: a.length + 1 }, (_, i) =>
Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0)),
)
for (let i = 1; i <= a.length; i++) {
for (let j = 1; j <= b.length; j++) {
const cost = a[i - 1] === b[j - 1] ? 0 : 1
matrix[i][j] = Math.min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + cost)
}
}
return matrix[a.length][b.length]
}
export const SimpleReplacer: Replacer = function* (_content, find) { export const SimpleReplacer: Replacer = function* (_content, find) {
yield find yield find
@ -209,10 +177,8 @@ export const BlockAnchorReplacer: Replacer = function* (content, find) {
const firstLineSearch = searchLines[0].trim() const firstLineSearch = searchLines[0].trim()
const lastLineSearch = searchLines[searchLines.length - 1].trim() const lastLineSearch = searchLines[searchLines.length - 1].trim()
const searchBlockSize = searchLines.length
// Collect all candidate positions where both anchors match // Find blocks where first line matches the search first line
const candidates: Array<{ startLine: number; endLine: number }> = []
for (let i = 0; i < originalLines.length; i++) { for (let i = 0; i < originalLines.length; i++) {
if (originalLines[i].trim() !== firstLineSearch) { if (originalLines[i].trim() !== firstLineSearch) {
continue continue
@ -221,116 +187,31 @@ export const BlockAnchorReplacer: Replacer = function* (content, find) {
// Look for the matching last line after this first line // Look for the matching last line after this first line
for (let j = i + 2; j < originalLines.length; j++) { for (let j = i + 2; j < originalLines.length; j++) {
if (originalLines[j].trim() === lastLineSearch) { if (originalLines[j].trim() === lastLineSearch) {
candidates.push({ startLine: i, endLine: j }) // Found a potential block from i to j
let matchStartIndex = 0
for (let k = 0; k < i; k++) {
matchStartIndex += originalLines[k].length + 1
}
let matchEndIndex = matchStartIndex
for (let k = 0; k <= j - i; k++) {
matchEndIndex += originalLines[i + k].length
if (k < j - i) {
matchEndIndex += 1 // Add newline character except for the last line
}
}
yield content.substring(matchStartIndex, matchEndIndex)
break // Only match the first occurrence of the last line break // Only match the first occurrence of the last line
} }
} }
} }
// Return immediately if no candidates
if (candidates.length === 0) {
return
} }
// Handle single candidate scenario (using relaxed threshold) export const WhitespaceNormalizedReplacer: Replacer = function* (
if (candidates.length === 1) { content,
const { startLine, endLine } = candidates[0] find,
const actualBlockSize = endLine - startLine + 1 ) {
let similarity = 0
let linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2) // Middle lines only
if (linesToCheck > 0) {
for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
const originalLine = originalLines[startLine + j].trim()
const searchLine = searchLines[j].trim()
const maxLen = Math.max(originalLine.length, searchLine.length)
if (maxLen === 0) {
continue
}
const distance = levenshtein(originalLine, searchLine)
similarity += (1 - distance / maxLen) / linesToCheck
// Exit early when threshold is reached
if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
break
}
}
} else {
// No middle lines to compare, just accept based on anchors
similarity = 1.0
}
if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
let matchStartIndex = 0
for (let k = 0; k < startLine; k++) {
matchStartIndex += originalLines[k].length + 1
}
let matchEndIndex = matchStartIndex
for (let k = startLine; k <= endLine; k++) {
matchEndIndex += originalLines[k].length
if (k < endLine) {
matchEndIndex += 1 // Add newline character except for the last line
}
}
yield content.substring(matchStartIndex, matchEndIndex)
}
return
}
// Calculate similarity for multiple candidates
let bestMatch: { startLine: number; endLine: number } | null = null
let maxSimilarity = -1
for (const candidate of candidates) {
const { startLine, endLine } = candidate
const actualBlockSize = endLine - startLine + 1
let similarity = 0
let linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2) // Middle lines only
if (linesToCheck > 0) {
for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
const originalLine = originalLines[startLine + j].trim()
const searchLine = searchLines[j].trim()
const maxLen = Math.max(originalLine.length, searchLine.length)
if (maxLen === 0) {
continue
}
const distance = levenshtein(originalLine, searchLine)
similarity += 1 - distance / maxLen
}
similarity /= linesToCheck // Average similarity
} else {
// No middle lines to compare, just accept based on anchors
similarity = 1.0
}
if (similarity > maxSimilarity) {
maxSimilarity = similarity
bestMatch = candidate
}
}
// Threshold judgment
if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) {
const { startLine, endLine } = bestMatch
let matchStartIndex = 0
for (let k = 0; k < startLine; k++) {
matchStartIndex += originalLines[k].length + 1
}
let matchEndIndex = matchStartIndex
for (let k = startLine; k <= endLine; k++) {
matchEndIndex += originalLines[k].length
if (k < endLine) {
matchEndIndex += 1
}
}
yield content.substring(matchStartIndex, matchEndIndex)
}
}
export const WhitespaceNormalizedReplacer: Replacer = function* (content, find) {
const normalizeWhitespace = (text: string) => text.replace(/\s+/g, " ").trim() const normalizeWhitespace = (text: string) => text.replace(/\s+/g, " ").trim()
const normalizedFind = normalizeWhitespace(find) const normalizedFind = normalizeWhitespace(find)
@ -340,14 +221,17 @@ export const WhitespaceNormalizedReplacer: Replacer = function* (content, find)
const line = lines[i] const line = lines[i]
if (normalizeWhitespace(line) === normalizedFind) { if (normalizeWhitespace(line) === normalizedFind) {
yield line yield line
} else { }
// Only check for substring matches if the full line doesn't match
// Also check for substring matches within lines
const normalizedLine = normalizeWhitespace(line) const normalizedLine = normalizeWhitespace(line)
if (normalizedLine.includes(normalizedFind)) { if (normalizedLine.includes(normalizedFind)) {
// Find the actual substring in the original line that matches // Find the actual substring in the original line that matches
const words = find.trim().split(/\s+/) const words = find.trim().split(/\s+/)
if (words.length > 0) { if (words.length > 0) {
const pattern = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")).join("\\s+") const pattern = words
.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"))
.join("\\s+")
try { try {
const regex = new RegExp(pattern) const regex = new RegExp(pattern)
const match = line.match(regex) const match = line.match(regex)
@ -360,7 +244,6 @@ export const WhitespaceNormalizedReplacer: Replacer = function* (content, find)
} }
} }
} }
}
// Handle multi-line matches // Handle multi-line matches
const findLines = find.split("\n") const findLines = find.split("\n")
@ -387,7 +270,9 @@ export const IndentationFlexibleReplacer: Replacer = function* (content, find) {
}), }),
) )
return lines.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent))).join("\n") return lines
.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent)))
.join("\n")
} }
const normalizedFind = removeIndentation(find) const normalizedFind = removeIndentation(find)
@ -538,7 +423,10 @@ export const ContextAwareReplacer: Replacer = function* (content, find) {
} }
} }
if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) { if (
totalNonEmptyLines === 0 ||
matchingLines / totalNonEmptyLines >= 0.5
) {
yield block yield block
break // Only match the first occurrence break // Only match the first occurrence
} }
@ -585,7 +473,12 @@ function trimDiff(diff: string): string {
return trimmedLines.join("\n") return trimmedLines.join("\n")
} }
export function replace(content: string, oldString: string, newString: string, replaceAll = false): string { export function replace(
content: string,
oldString: string,
newString: string,
replaceAll = false,
): string {
if (oldString === newString) { if (oldString === newString) {
throw new Error("oldString and newString must be different") throw new Error("oldString and newString must be different")
} }
@ -596,7 +489,7 @@ export function replace(content: string, oldString: string, newString: string, r
BlockAnchorReplacer, BlockAnchorReplacer,
WhitespaceNormalizedReplacer, WhitespaceNormalizedReplacer,
IndentationFlexibleReplacer, IndentationFlexibleReplacer,
EscapeNormalizedReplacer, // EscapeNormalizedReplacer,
// TrimmedBoundaryReplacer, // TrimmedBoundaryReplacer,
// ContextAwareReplacer, // ContextAwareReplacer,
// MultiOccurrenceReplacer, // MultiOccurrenceReplacer,
@ -609,7 +502,11 @@ export function replace(content: string, oldString: string, newString: string, r
} }
const lastIndex = content.lastIndexOf(search) const lastIndex = content.lastIndexOf(search)
if (index !== lastIndex) continue if (index !== lastIndex) continue
return content.substring(0, index) + newString + content.substring(index + search.length) return (
content.substring(0, index) +
newString +
content.substring(index + search.length)
)
} }
} }
throw new Error("oldString not found in content or was found multiple times") throw new Error("oldString not found in content or was found multiple times")

View file

@ -2,8 +2,8 @@ Performs exact string replacements in files.
 Usage:
 - You must use your `Read` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file.
-- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + tab. Everything after that tab is the actual file content to match. Never include any part of the line number prefix in the oldString or newString.
+- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + tab. Everything after that tab is the actual file content to match. Never include any part of the line number prefix in the old_string or new_string.
 - ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.
 - Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked.
-- The edit will FAIL if `oldString` is not unique in the file. Either provide a larger string with more surrounding context to make it unique or use `replaceAll` to change every instance of `oldString`.
-- Use `replaceAll` for replacing and renaming strings across the file. This parameter is useful if you want to rename a variable for instance.
+- The edit will FAIL if `old_string` is not unique in the file. Either provide a larger string with more surrounding context to make it unique or use `replace_all` to change every instance of `old_string`.
+- Use `replace_all` for replacing and renaming strings across the file. This parameter is useful if you want to rename a variable for instance.
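Both parameter spellings describe the same replace() helper shown in the edit tool diff above: the edit fails when the search string is ambiguous, and the replace-all flag rewrites every occurrence instead. A hedged usage sketch (the import path is hypothetical):

import { replace } from "./edit"

const content = "const port = 3000\nconst host = 'localhost'\nconst timeout = 3000\n"

// unique match: succeeds and returns the rewritten content
replace(content, "const host = 'localhost'", "const host = '0.0.0.0'")

// "3000" appears twice, so a plain replace throws; the replace-all flag rewrites both
replace(content, "3000", "8080", true)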

View file

@ -5,7 +5,8 @@ import { App } from "../app/app"
import DESCRIPTION from "./glob.txt"
import { Ripgrep } from "../file/ripgrep"
-export const GlobTool = Tool.define("glob", {
+export const GlobTool = Tool.define({
+  id: "glob",
  description: DESCRIPTION,
  parameters: z.object({
    pattern: z.string().describe("The glob pattern to match files against"),
@ -19,14 +20,16 @@ export const GlobTool = Tool.define("glob", {
  async execute(params) {
    const app = App.info()
    let search = params.path ?? app.path.cwd
-    search = path.isAbsolute(search) ? search : path.resolve(app.path.cwd, search)
+    search = path.isAbsolute(search)
+      ? search
+      : path.resolve(app.path.cwd, search)
    const limit = 100
    const files = []
    let truncated = false
    for (const file of await Ripgrep.files({
      cwd: search,
-      glob: [params.pattern],
+      glob: params.pattern,
    })) {
      if (files.length >= limit) {
        truncated = true
@ -50,15 +53,17 @@ export const GlobTool = Tool.define("glob", {
      output.push(...files.map((f) => f.path))
      if (truncated) {
        output.push("")
-        output.push("(Results are truncated. Consider using a more specific path or pattern.)")
+        output.push(
+          "(Results are truncated. Consider using a more specific path or pattern.)",
+        )
      }
    }
    return {
-      title: path.relative(app.path.root, search),
      metadata: {
        count: files.length,
        truncated,
+        title: path.relative(app.path.root, search),
      },
      output: output.join("\n"),
    }

Some files were not shown because too many files have changed in this diff.