/**
 * Alfred Agent Harness — Provider Abstraction
 *
 * Multi-provider support: Anthropic, OpenAI-compat (Groq, xAI, etc.), local Ollama.
 * Reads API keys from vault (tmpfs) at runtime — never hardcoded.
 */

import Anthropic from '@anthropic-ai/sdk';
import { readFileSync } from 'fs';
/**
 * Resolve an API key for a provider by name.
 *
 * Probes the tmpfs vault first, then the user vault directory, and finally
 * falls back to the `<NAME>_API_KEY` environment variable.
 *
 * @param {string} name - Provider key name (e.g. 'anthropic').
 * @returns {string|null} The trimmed key, or null when none is found.
 */
function loadKeyFromVault(name) {
  const candidates = [
    `/run/user/1004/keys/${name}.key`,
    `${process.env.HOME}/.vault/keys/${name}.key`,
  ];

  for (const candidate of candidates) {
    try {
      return readFileSync(candidate, 'utf8').trim();
    } catch {
      // Vault file absent or unreadable — best-effort; try the next location.
    }
  }

  // Last resort: environment variable. An empty string counts as missing.
  return process.env[`${name.toUpperCase()}_API_KEY`] || null;
}
/**
 * Anthropic Claude provider.
 *
 * Key resolution order: opts.apiKey → vault → ANTHROPIC_API_KEY env var.
 *
 * @param {object} [opts]
 * @param {string} [opts.apiKey] - Explicit key; overrides vault/env lookup.
 * @param {string} [opts.model]  - Model id; overrides ANTHROPIC_MODEL env var.
 * @returns {{name: string, model: string, query: Function}} Normalized provider.
 * @throws {Error} When no API key can be resolved.
 */
export function createAnthropicProvider(opts = {}) {
  const apiKey = opts.apiKey || loadKeyFromVault('anthropic') || process.env.ANTHROPIC_API_KEY;
  if (!apiKey) throw new Error('No Anthropic API key found. Set ANTHROPIC_API_KEY or save to /run/user/1004/keys/anthropic.key');

  const client = new Anthropic({ apiKey });
  const model = opts.model || process.env.ANTHROPIC_MODEL || 'claude-sonnet-4-6';

  return {
    name: 'anthropic',
    model,

    /**
     * Send one turn to the Messages API.
     *
     * @param {object} args
     * @param {string|string[]} args.systemPrompt - System prompt; arrays are joined with blank lines.
     * @param {object[]} args.messages - Conversation messages in Anthropic format.
     * @param {object[]} [args.tools] - Harness tool specs ({name, description, inputSchema}).
     * @param {number} [args.maxTokens=8192]
     * @returns {Promise<{stopReason: string, content: object[], usage: object, model: string}>}
     */
    async query({ systemPrompt, messages, tools = [], maxTokens = 8192 }) {
      // FIX: default `tools = []` — previously a missing tools array threw
      // TypeError here; the OpenAI-compat provider already treats tools as optional.
      const toolDefs = tools.map(t => ({
        name: t.name,
        description: t.description,
        input_schema: t.inputSchema,
      }));

      const response = await client.messages.create({
        model,
        max_tokens: maxTokens,
        system: Array.isArray(systemPrompt) ? systemPrompt.join('\n\n') : systemPrompt,
        messages,
        // The API rejects an empty tools array; omit the field instead.
        tools: toolDefs.length > 0 ? toolDefs : undefined,
      });

      return {
        stopReason: response.stop_reason,
        content: response.content,
        usage: response.usage,
        model: response.model,
      };
    },
  };
}
/**
 * OpenAI-compatible provider (Groq, xAI, local Ollama, etc.).
 *
 * Key resolution order: opts.apiKey → vault (by opts.name). A missing key is
 * allowed — local endpoints such as Ollama do not require one.
 *
 * @param {object} [opts]
 * @param {string} [opts.name]    - Provider label, also used for vault lookup (default 'openai').
 * @param {string} [opts.apiKey]  - Explicit key; overrides vault lookup.
 * @param {string} [opts.baseURL] - API base URL (default https://api.openai.com/v1).
 * @param {string} [opts.model]   - Model id (default 'gpt-4o').
 * @returns {{name: string, model: string, query: Function}} Normalized provider.
 */
export function createOpenAICompatProvider(opts = {}) {
  const apiKey = opts.apiKey || loadKeyFromVault(opts.name || 'openai');
  const baseURL = opts.baseURL || 'https://api.openai.com/v1';
  const model = opts.model || 'gpt-4o';

  return {
    name: opts.name || 'openai',
    model,

    /**
     * Send one turn to the /chat/completions endpoint and normalize the reply
     * to the harness's Anthropic-shaped content-block format.
     *
     * @param {object} args
     * @param {string|string[]} args.systemPrompt - Arrays are joined with blank lines.
     * @param {object[]} args.messages - Conversation messages; non-string content is JSON-stringified.
     * @param {object[]} [args.tools] - Harness tool specs ({name, description, inputSchema}).
     * @param {number} [args.maxTokens=4096]
     * @returns {Promise<{stopReason: string, content: object[], usage: object, model: string}>}
     * @throws {Error} On a non-2xx HTTP response.
     */
    async query({ systemPrompt, messages, tools, maxTokens = 4096 }) {
      const body = {
        model,
        max_tokens: maxTokens,
        messages: [
          { role: 'system', content: Array.isArray(systemPrompt) ? systemPrompt.join('\n\n') : systemPrompt },
          ...messages.map(m => ({
            role: m.role,
            content: typeof m.content === 'string' ? m.content : JSON.stringify(m.content),
          })),
        ],
      };

      if (tools?.length > 0) {
        body.tools = tools.map(t => ({
          type: 'function',
          function: { name: t.name, description: t.description, parameters: t.inputSchema },
        }));
      }

      // FIX: only send Authorization when a key exists. Previously a keyless
      // (local Ollama) setup sent the literal header `Bearer null`.
      const headers = { 'Content-Type': 'application/json' };
      if (apiKey) headers.Authorization = `Bearer ${apiKey}`;

      const res = await fetch(`${baseURL}/chat/completions`, {
        method: 'POST',
        headers,
        body: JSON.stringify(body),
      });

      if (!res.ok) throw new Error(`${opts.name || 'OpenAI'} API error: ${res.status} ${await res.text()}`);
      const data = await res.json();
      const choice = data.choices?.[0];

      // Convert OpenAI format to our normalized format
      const content = [];
      if (choice?.message?.content) {
        content.push({ type: 'text', text: choice.message.content });
      }
      if (choice?.message?.tool_calls) {
        for (const tc of choice.message.tool_calls) {
          content.push({
            type: 'tool_use',
            id: tc.id,
            name: tc.function.name,
            // FIX: some OpenAI-compat servers emit '' for no-argument calls;
            // JSON.parse('') throws, so treat empty arguments as {}.
            input: tc.function.arguments ? JSON.parse(tc.function.arguments) : {},
          });
        }
      }

      return {
        stopReason: choice?.finish_reason === 'tool_calls' ? 'tool_use' : choice?.finish_reason || 'end_turn',
        content,
        usage: data.usage,
        model: data.model,
      };
    },
  };
}