diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index 0fdf26392f62..75b255ac4dbc 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -387,6 +387,18 @@ export namespace Provider {
         },
       }
     },
+    maple: async () => {
+      // Maple AI uses a local proxy (maple-proxy) that handles TEE attestation
+      // and encryption. The proxy runs on localhost:8080 by default.
+      // Users need to run maple-proxy or use the Maple desktop app.
+      // See: https://blog.trymaple.ai/maple-proxy-documentation/
+      return {
+        autoload: false,
+        options: {
+          baseURL: "http://localhost:8080/v1",
+        },
+      }
+    },
   }
 
   export const Model = z
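Note on the loader: `autoload: false` keeps Maple dormant until it is explicitly configured or `MAPLE_API_KEY` is set, and the loader only seeds a default `baseURL`. If maple-proxy is listening on a non-default port, the expectation is that the ordinary per-provider options in `opencode.json` override this default. A minimal sketch, written the way the test fixtures below seed their config (inside a tmpdir `init` callback); the port is illustrative, and the merge behavior of a user-supplied `options.baseURL` over the loader default is an assumption, not something this diff asserts:

// Hypothetical fixture: point the maple provider at a proxy on port 9090.
// Assumes options from opencode.json are merged over the loader's defaults.
await Bun.write(
  path.join(dir, "opencode.json"),
  JSON.stringify({
    $schema: "https://opencode.ai/config.json",
    provider: {
      maple: {
        options: {
          baseURL: "http://localhost:9090/v1", // illustrative, non-default port
        },
      },
    },
  }),
)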
@@ -585,6 +597,250 @@ export namespace Provider {
     }
   }
 
+  // Add Maple AI provider - uses maple-proxy for TEE attestation
+  // Maple AI provides private, encrypted LLM access via Trusted Execution Environments
+  // See: https://blog.trymaple.ai/maple-proxy-documentation/
+  database["maple"] = {
+    id: "maple",
+    name: "Maple AI",
+    source: "custom",
+    env: ["MAPLE_API_KEY"],
+    options: {},
+    models: {
+      "llama-3.3-70b": {
+        id: "llama-3.3-70b",
+        providerID: "maple",
+        name: "Llama 3.3 70B",
+        family: "llama",
+        api: {
+          id: "llama-3.3-70b",
+          url: "http://localhost:8080/v1",
+          npm: "@ai-sdk/openai-compatible",
+        },
+        status: "active",
+        headers: {},
+        options: {},
+        cost: {
+          input: 4,
+          output: 4,
+          cache: { read: 0, write: 0 },
+        },
+        limit: {
+          context: 128000,
+          output: 8192,
+        },
+        capabilities: {
+          temperature: true,
+          reasoning: false,
+          attachment: false,
+          toolcall: true,
+          input: { text: true, audio: false, image: false, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: false,
+        },
+        release_date: "2024-12-01",
+      },
+      "deepseek-r1-0528": {
+        id: "deepseek-r1-0528",
+        providerID: "maple",
+        name: "DeepSeek R1",
+        family: "deepseek",
+        api: {
+          id: "deepseek-r1-0528",
+          url: "http://localhost:8080/v1",
+          npm: "@ai-sdk/openai-compatible",
+        },
+        status: "active",
+        headers: {},
+        options: {},
+        cost: {
+          input: 4,
+          output: 4,
+          cache: { read: 0, write: 0 },
+        },
+        limit: {
+          context: 128000,
+          output: 8192,
+        },
+        capabilities: {
+          temperature: true,
+          reasoning: true,
+          attachment: false,
+          toolcall: true,
+          input: { text: true, audio: false, image: false, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: false,
+        },
+        release_date: "2025-05-28",
+      },
+      "gpt-oss-120b": {
+        id: "gpt-oss-120b",
+        providerID: "maple",
+        name: "GPT-OSS 120B",
+        family: "gpt-oss",
+        api: {
+          id: "gpt-oss-120b",
+          url: "http://localhost:8080/v1",
+          npm: "@ai-sdk/openai-compatible",
+        },
+        status: "active",
+        headers: {},
+        options: {},
+        cost: {
+          input: 4,
+          output: 4,
+          cache: { read: 0, write: 0 },
+        },
+        limit: {
+          context: 128000,
+          output: 8192,
+        },
+        capabilities: {
+          temperature: true,
+          reasoning: false,
+          attachment: false,
+          toolcall: true,
+          input: { text: true, audio: false, image: false, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: false,
+        },
+        release_date: "2025-01-01",
+      },
+      "qwen3-coder-480b": {
+        id: "qwen3-coder-480b",
+        providerID: "maple",
+        name: "Qwen3 Coder 480B",
+        family: "qwen3",
+        api: {
+          id: "qwen3-coder-480b",
+          url: "http://localhost:8080/v1",
+          npm: "@ai-sdk/openai-compatible",
+        },
+        status: "active",
+        headers: {},
+        options: {},
+        cost: {
+          input: 4,
+          output: 4,
+          cache: { read: 0, write: 0 },
+        },
+        limit: {
+          context: 200000,
+          output: 65536,
+        },
+        capabilities: {
+          temperature: true,
+          reasoning: false,
+          attachment: false,
+          toolcall: true,
+          input: { text: true, audio: false, image: false, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: false,
+        },
+        release_date: "2025-07-22",
+      },
+      "qwen2-5-72b": {
+        id: "qwen2-5-72b",
+        providerID: "maple",
+        name: "Qwen 2.5 72B",
+        family: "qwen2.5",
+        api: {
+          id: "qwen2-5-72b",
+          url: "http://localhost:8080/v1",
+          npm: "@ai-sdk/openai-compatible",
+        },
+        status: "active",
+        headers: {},
+        options: {},
+        cost: {
+          input: 4,
+          output: 4,
+          cache: { read: 0, write: 0 },
+        },
+        limit: {
+          context: 128000,
+          output: 8192,
+        },
+        capabilities: {
+          temperature: true,
+          reasoning: false,
+          attachment: false,
+          toolcall: true,
+          input: { text: true, audio: false, image: false, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: false,
+        },
+        release_date: "2024-09-01",
+      },
+      "mistral-small-3-1-24b": {
+        id: "mistral-small-3-1-24b",
+        providerID: "maple",
+        name: "Mistral Small 3.1 24B",
+        family: "mistral",
+        api: {
+          id: "mistral-small-3-1-24b",
+          url: "http://localhost:8080/v1",
+          npm: "@ai-sdk/openai-compatible",
+        },
+        status: "active",
+        headers: {},
+        options: {},
+        cost: {
+          input: 4,
+          output: 4,
+          cache: { read: 0, write: 0 },
+        },
+        limit: {
+          context: 128000,
+          output: 8192,
+        },
+        capabilities: {
+          temperature: true,
+          reasoning: false,
+          attachment: true,
+          toolcall: true,
+          input: { text: true, audio: false, image: true, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: false,
+        },
+        release_date: "2025-01-01",
+      },
+      "gemma-3-27b-it": {
+        id: "leon-se/gemma-3-27b-it-fp8-dynamic",
+        providerID: "maple",
+        name: "Gemma 3 27B (Image Analysis)",
+        family: "gemma",
+        api: {
+          id: "leon-se/gemma-3-27b-it-fp8-dynamic",
+          url: "http://localhost:8080/v1",
+          npm: "@ai-sdk/openai-compatible",
+        },
+        status: "active",
+        headers: {},
+        options: {},
+        cost: {
+          input: 10,
+          output: 10,
+          cache: { read: 0, write: 0 },
+        },
+        limit: {
+          context: 128000,
+          output: 8192,
+        },
+        capabilities: {
+          temperature: true,
+          reasoning: false,
+          attachment: true,
+          toolcall: true,
+          input: { text: true, audio: false, image: true, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: false,
+        },
+        release_date: "2025-01-01",
+      },
+    },
+  }
+
   function mergeProvider(providerID: string, provider: Partial<Info>) {
     const existing = providers[providerID]
     if (existing) {
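Since every model's `api` entry points `@ai-sdk/openai-compatible` at the local proxy, the same endpoint is reachable outside opencode with any OpenAI-compatible client. A minimal sketch using the AI SDK directly, assuming maple-proxy is running on its default port and `MAPLE_API_KEY` is exported; the prompt and model choice are illustrative:

import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
import { generateText } from "ai"

// Talk to maple-proxy the same way the provider entries above do:
// an OpenAI-compatible endpoint on localhost:8080.
const maple = createOpenAICompatible({
  name: "maple",
  baseURL: "http://localhost:8080/v1",
  apiKey: process.env.MAPLE_API_KEY,
})

const { text } = await generateText({
  model: maple("llama-3.3-70b"),
  prompt: "Summarize what a TEE guarantees in one sentence.",
})
console.log(text)

The proxy terminates the attestation and encryption layer, so the client side stays a plain OpenAI-style HTTP call.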
diff --git a/packages/opencode/test/provider/provider.test.ts b/packages/opencode/test/provider/provider.test.ts
index c6c6924f01f4..ec478d9c5130 100644
--- a/packages/opencode/test/provider/provider.test.ts
+++ b/packages/opencode/test/provider/provider.test.ts
@@ -1807,3 +1807,149 @@ test("custom model inherits api.url from models.dev provider", async () => {
     },
   })
 })
+
+test("maple provider is loaded with MAPLE_API_KEY", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({
+          $schema: "https://opencode.ai/config.json",
+        }),
+      )
+    },
+  })
+  await Instance.provide({
+    directory: tmp.path,
+    init: async () => {
+      Env.set("MAPLE_API_KEY", "test-maple-api-key")
+    },
+    fn: async () => {
+      const providers = await Provider.list()
+      expect(providers["maple"]).toBeDefined()
+      expect(providers["maple"].name).toBe("Maple AI")
+      expect(providers["maple"].options.baseURL).toBe("http://localhost:8080/v1")
+    },
+  })
+})
+
+test("maple provider has expected models", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({
+          $schema: "https://opencode.ai/config.json",
+        }),
+      )
+    },
+  })
+  await Instance.provide({
+    directory: tmp.path,
+    init: async () => {
+      Env.set("MAPLE_API_KEY", "test-maple-api-key")
+    },
+    fn: async () => {
+      const providers = await Provider.list()
+      expect(providers["maple"]).toBeDefined()
+
+      // Check that expected models exist
+      const models = Object.keys(providers["maple"].models)
+      expect(models).toContain("llama-3.3-70b")
+      expect(models).toContain("deepseek-r1-0528")
+      expect(models).toContain("gpt-oss-120b")
+      expect(models).toContain("qwen3-coder-480b")
+      expect(models).toContain("qwen2-5-72b")
+      expect(models).toContain("mistral-small-3-1-24b")
+      expect(models).toContain("gemma-3-27b-it")
+    },
+  })
+})
+
+test("maple model uses openai-compatible npm package", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({
+          $schema: "https://opencode.ai/config.json",
+        }),
+      )
+    },
+  })
+  await Instance.provide({
+    directory: tmp.path,
+    init: async () => {
+      Env.set("MAPLE_API_KEY", "test-maple-api-key")
+    },
+    fn: async () => {
+      const providers = await Provider.list()
+      const model = providers["maple"].models["llama-3.3-70b"]
+      expect(model.api.npm).toBe("@ai-sdk/openai-compatible")
+      expect(model.api.url).toBe("http://localhost:8080/v1")
+    },
+  })
+})
+
+test("maple deepseek model has reasoning capability", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({
+          $schema: "https://opencode.ai/config.json",
+        }),
+      )
+    },
+  })
+  await Instance.provide({
+    directory: tmp.path,
+    init: async () => {
+      Env.set("MAPLE_API_KEY", "test-maple-api-key")
+    },
+    fn: async () => {
+      const providers = await Provider.list()
+      const deepseek = providers["maple"].models["deepseek-r1-0528"]
+      expect(deepseek.capabilities.reasoning).toBe(true)
+
+      // Other models should not have reasoning
+      const llama = providers["maple"].models["llama-3.3-70b"]
+      expect(llama.capabilities.reasoning).toBe(false)
+    },
+  })
+})
+
+test("maple image models have attachment capability", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({
+          $schema: "https://opencode.ai/config.json",
+        }),
+      )
+    },
+  })
+  await Instance.provide({
+    directory: tmp.path,
+    init: async () => {
+      Env.set("MAPLE_API_KEY", "test-maple-api-key")
+    },
+    fn: async () => {
+      const providers = await Provider.list()
+
+      // Gemma and Mistral models support image input
+      const gemma = providers["maple"].models["gemma-3-27b-it"]
+      expect(gemma.capabilities.attachment).toBe(true)
+      expect(gemma.capabilities.input.image).toBe(true)
+
+      const mistral = providers["maple"].models["mistral-small-3-1-24b"]
+      expect(mistral.capabilities.attachment).toBe(true)
+      expect(mistral.capabilities.input.image).toBe(true)
+
+      // Text-only models should not have attachment
+      const llama = providers["maple"].models["llama-3.3-70b"]
+      expect(llama.capabilities.attachment).toBe(false)
+    },
+  })
+})
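One behavior the loader comments imply but the suite above does not pin down: with `autoload: false`, the provider should stay hidden when `MAPLE_API_KEY` is absent. A sketch of such a negative test in the same harness; whether `Provider.list()` omits a non-autoloaded, unconfigured provider entirely (rather than returning it in a disabled state) is an assumption here:

test("maple provider is not loaded without MAPLE_API_KEY", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
        }),
      )
    },
  })
  await Instance.provide({
    directory: tmp.path,
    // No Env.set here: the key must be absent for this case, so the
    // environment has to be clean of MAPLE_API_KEY from earlier tests.
    fn: async () => {
      const providers = await Provider.list()
      expect(providers["maple"]).toBeUndefined()
    },
  })
})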