/**
 * Agent Compiler — compiles new AI agents from a technical specification via LLM.
 *
 * Automatically determines: model, role, system prompt, tools, and LLM parameters.
 */
import { invokeLLM } from "./_core/llm";
import { getDb } from "./db";
import { agents } from "../drizzle/schema";
import { TOOL_REGISTRY } from "./tools";

export interface CompileAgentRequest {
|
||
/** Техническое задание — описание что должен делать агент */
|
||
specification: string;
|
||
/** Имя агента (если не указано — LLM выберет) */
|
||
name?: string;
|
||
/** Провайдер LLM (ollama, openai, anthropic) */
|
||
preferredProvider?: string;
|
||
/** Предпочитаемая модель */
|
||
preferredModel?: string;
|
||
/** ID пользователя-владельца */
|
||
userId: number;
|
||
}
|
||
|
||
export interface CompiledAgentConfig {
|
||
name: string;
|
||
description: string;
|
||
role: string;
|
||
model: string;
|
||
provider: string;
|
||
temperature: number;
|
||
maxTokens: number;
|
||
topP: number;
|
||
frequencyPenalty: number;
|
||
presencePenalty: number;
|
||
systemPrompt: string;
|
||
allowedTools: string[];
|
||
allowedDomains: string[];
|
||
maxRequestsPerHour: number;
|
||
tags: string[];
|
||
reasoning: string; // Объяснение почему такие параметры
|
||
}
|
||
|
||
export interface CompileAgentResult {
|
||
success: boolean;
|
||
config?: CompiledAgentConfig;
|
||
agentId?: number;
|
||
error?: string;
|
||
}
|
||
|
||
/**
|
||
* Компилирует конфигурацию агента по ТЗ через LLM
|
||
*/
|
||
export async function compileAgentConfig(request: CompileAgentRequest): Promise<CompileAgentResult> {
|
||
// Get available tools for context
|
||
const availableTools = TOOL_REGISTRY.map(t => ({
|
||
id: t.id,
|
||
name: t.name,
|
||
description: t.description,
|
||
category: t.category,
|
||
dangerous: t.dangerous,
|
||
}));
|
||
|
||
const systemPrompt = `You are an expert AI agent architect. Your task is to analyze a technical specification (ТЗ) and generate the optimal configuration for an AI agent.
|
||
|
||
Available tools that can be assigned to the agent:
|
||
${JSON.stringify(availableTools, null, 2)}
|
||
|
||
Available providers and models:
|
||
- ollama: llama3.2, llama3.1, mistral, codellama, deepseek-coder, phi3
|
||
- openai: gpt-4o, gpt-4o-mini, gpt-3.5-turbo
|
||
- anthropic: claude-3-5-sonnet, claude-3-haiku
|
||
|
||
Guidelines for configuration:
|
||
- temperature: 0.1-0.3 for precise/analytical tasks, 0.5-0.7 for balanced, 0.8-1.0 for creative
|
||
- maxTokens: 512-1024 for simple tasks, 2048-4096 for complex, 8192 for very long outputs
|
||
- topP: 0.9-1.0 for most tasks, lower for more focused outputs
|
||
- systemPrompt: detailed, specific, includes examples if helpful
|
||
- allowedTools: only tools the agent actually needs
|
||
- allowedDomains: specific domains if web access needed, empty array if not needed
|
||
- role: one of "developer", "researcher", "analyst", "writer", "executor", "monitor", "coordinator"
|
||
|
||
Return ONLY valid JSON with this exact structure (no markdown, no extra text):
|
||
{
|
||
"name": "Agent Name",
|
||
"description": "Brief description of what this agent does",
|
||
"role": "developer|researcher|analyst|writer|executor|monitor|coordinator",
|
||
"model": "model-name",
|
||
"provider": "ollama|openai|anthropic",
|
||
"temperature": 0.7,
|
||
"maxTokens": 2048,
|
||
"topP": 1.0,
|
||
"frequencyPenalty": 0.0,
|
||
"presencePenalty": 0.0,
|
||
"systemPrompt": "Detailed system prompt for the agent...",
|
||
"allowedTools": ["tool_id1", "tool_id2"],
|
||
"allowedDomains": ["example.com"],
|
||
"maxRequestsPerHour": 100,
|
||
"tags": ["tag1", "tag2"],
|
||
"reasoning": "Explanation of why these parameters were chosen"
|
||
}`;
|
||
|
||
const userPrompt = `Technical Specification (ТЗ):
|
||
${request.specification}
|
||
|
||
${request.name ? `Preferred name: ${request.name}` : ""}
|
||
${request.preferredProvider ? `Preferred provider: ${request.preferredProvider}` : ""}
|
||
${request.preferredModel ? `Preferred model: ${request.preferredModel}` : ""}
|
||
|
||
Analyze this specification and generate the optimal agent configuration. Be specific and detailed in the systemPrompt.`;
|
||
|
||
try {
|
||
const response = await invokeLLM({
|
||
messages: [
|
||
{ role: "system", content: systemPrompt },
|
||
{ role: "user", content: userPrompt },
|
||
],
|
||
response_format: {
|
||
type: "json_schema",
|
||
json_schema: {
|
||
name: "agent_config",
|
||
strict: true,
|
||
schema: {
|
||
type: "object",
|
||
properties: {
|
||
name: { type: "string" },
|
||
description: { type: "string" },
|
||
role: { type: "string" },
|
||
model: { type: "string" },
|
||
provider: { type: "string" },
|
||
temperature: { type: "number" },
|
||
maxTokens: { type: "integer" },
|
||
topP: { type: "number" },
|
||
frequencyPenalty: { type: "number" },
|
||
presencePenalty: { type: "number" },
|
||
systemPrompt: { type: "string" },
|
||
allowedTools: { type: "array", items: { type: "string" } },
|
||
allowedDomains: { type: "array", items: { type: "string" } },
|
||
maxRequestsPerHour: { type: "integer" },
|
||
tags: { type: "array", items: { type: "string" } },
|
||
reasoning: { type: "string" },
|
||
},
|
||
required: [
|
||
"name", "description", "role", "model", "provider",
|
||
"temperature", "maxTokens", "topP", "frequencyPenalty", "presencePenalty",
|
||
"systemPrompt", "allowedTools", "allowedDomains", "maxRequestsPerHour",
|
||
"tags", "reasoning"
|
||
],
|
||
additionalProperties: false,
|
||
},
|
||
},
|
||
},
|
||
});
|
||
|
||
const content = response.choices[0].message.content;
|
||
const config = typeof content === "string" ? JSON.parse(content) : content;
|
||
|
||
// Validate that allowedTools exist in registry
|
||
const validToolIds = TOOL_REGISTRY.map(t => t.id);
|
||
config.allowedTools = config.allowedTools.filter((id: string) => validToolIds.includes(id));
|
||
|
||
return { success: true, config };
|
||
} catch (error: any) {
|
||
return {
|
||
success: false,
|
||
error: `Failed to compile agent: ${error.message}`,
|
||
};
|
||
}
|
||
}
|
||
|
||
/**
|
||
* Деплоит скомпилированного агента в БД
|
||
*/
|
||
export async function deployCompiledAgent(
|
||
config: CompiledAgentConfig,
|
||
userId: number
|
||
): Promise<{ success: boolean; agentId?: number; error?: string }> {
|
||
const db = await getDb();
|
||
if (!db) return { success: false, error: "Database not available" };
|
||
|
||
try {
|
||
const [result] = await db.insert(agents).values({
|
||
userId,
|
||
name: config.name,
|
||
description: config.description,
|
||
role: config.role,
|
||
model: config.model,
|
||
provider: config.provider,
|
||
temperature: String(config.temperature),
|
||
maxTokens: config.maxTokens,
|
||
topP: String(config.topP),
|
||
frequencyPenalty: String(config.frequencyPenalty),
|
||
presencePenalty: String(config.presencePenalty),
|
||
systemPrompt: config.systemPrompt,
|
||
allowedTools: config.allowedTools,
|
||
allowedDomains: config.allowedDomains,
|
||
maxRequestsPerHour: config.maxRequestsPerHour,
|
||
tags: config.tags,
|
||
metadata: { compiledFromSpec: true, reasoning: config.reasoning },
|
||
isActive: true,
|
||
isPublic: false,
|
||
});
|
||
|
||
return { success: true, agentId: (result as any).insertId };
|
||
} catch (error: any) {
|
||
return { success: false, error: error.message };
|
||
}
|
||
}
|
||
|
||
/**
|
||
* Полный цикл: компиляция + деплой
|
||
*/
|
||
export async function compileAndDeployAgent(
|
||
request: CompileAgentRequest
|
||
): Promise<CompileAgentResult> {
|
||
const compileResult = await compileAgentConfig(request);
|
||
if (!compileResult.success || !compileResult.config) {
|
||
return compileResult;
|
||
}
|
||
|
||
const deployResult = await deployCompiledAgent(compileResult.config, request.userId);
|
||
if (!deployResult.success) {
|
||
return { success: false, error: deployResult.error };
|
||
}
|
||
|
||
return {
|
||
success: true,
|
||
config: compileResult.config,
|
||
agentId: deployResult.agentId,
|
||
};
|
||
}
|