From 294426ced93a478d23483ae108c1d90b64914ab6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9C=A8=E7=82=8E?= <635735027@qq.com>
Date: Thu, 16 Apr 2026 22:13:11 +0800
Subject: [PATCH] feat: add llm-client.js with prompt builder and JSON extractor
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

🤖 Generated with [Qoder](https://qoder.com)
---
 frontend/scene-generator/llm-client.js | 149 +++++++++++++++++++++++++
 tests/scene_generator_llm_test.js      |  45 ++++++++
 2 files changed, 194 insertions(+)
 create mode 100644 frontend/scene-generator/llm-client.js
 create mode 100644 tests/scene_generator_llm_test.js

diff --git a/frontend/scene-generator/llm-client.js b/frontend/scene-generator/llm-client.js
new file mode 100644
index 0000000..c87dcd3
--- /dev/null
+++ b/frontend/scene-generator/llm-client.js
@@ -0,0 +1,149 @@
+const http = require("http");
+const https = require("https");
+
+const SYSTEM_PROMPT = `你是一个场景信息提取助手。根据场景目录的内容,提取 scene-id 和 scene-name。
+
+scene-id 规则:
+- 使用英文短横线连接,如 tq-lineloss-report
+- 全小写,有业务含义
+
+scene-name 规则:
+- 使用中文,简短描述性名称
+- 如 "台区线损报表"、"知乎热榜导出"
+
+请以 JSON 格式返回:{"sceneId": "...", "sceneName": "..."}`;
+
+/**
+ * Build the user prompt describing a scene directory for the LLM.
+ *
+ * @param {string} sourceDir - Path of the scene directory (echoed into the prompt).
+ * @param {object} dirContents - Collected contents: optional keys `tree`,
+ *   `scene.toml`, `SKILL.toml`, `SKILL.md`, and `scripts` (file name -> source).
+ * @returns {string} The assembled prompt text.
+ */
+function buildAnalyzePrompt(sourceDir, dirContents) {
+  const parts = [];
+
+  parts.push(`=== 目录结构 ===`);
+  parts.push(dirContents.tree || "(empty)");
+
+  if (dirContents["scene.toml"]) {
+    parts.push(`\n=== scene.toml ===`);
+    parts.push(dirContents["scene.toml"]);
+  }
+
+  if (dirContents["SKILL.toml"]) {
+    parts.push(`\n=== SKILL.toml ===`);
+    parts.push(dirContents["SKILL.toml"]);
+  }
+
+  if (dirContents["SKILL.md"]) {
+    parts.push(`\n=== SKILL.md ===`);
+    parts.push(dirContents["SKILL.md"]);
+  }
+
+  if (dirContents.scripts && Object.keys(dirContents.scripts).length > 0) {
+    parts.push(`\n=== 脚本文件 ===`);
+    for (const [name, content] of Object.entries(dirContents.scripts)) {
+      parts.push(`\n--- ${name} ---`);
+      // Cap each script at 2000 chars to keep the prompt within budget.
+      parts.push(content.substring(0, 2000));
+    }
+  }
+
+  return `以下是场景目录 "${sourceDir}" 的内容:\n\n${parts.join("\n")}\n\n请以 JSON 格式返回:{"sceneId": "...", "sceneName": "..."}`;
+}
+
+/**
+ * Extract the JSON payload from an LLM reply that may wrap it in a
+ * markdown code fence or surrounding prose.
+ *
+ * @param {string} text - Raw model output.
+ * @returns {object} Parsed JSON value.
+ * @throws {SyntaxError} If no parseable JSON is found.
+ */
+function extractJsonFromResponse(text) {
+  const codeBlockMatch = text.match(/```(?:json)?\s*\n([\s\S]*?)\n```/);
+  if (codeBlockMatch) return JSON.parse(codeBlockMatch[1]);
+
+  const jsonMatch = text.match(
+    /\{[\s\S]*"sceneId"[\s\S]*"sceneName"[\s\S]*\}/
+  );
+  if (jsonMatch) return JSON.parse(jsonMatch[0]);
+
+  return JSON.parse(text);
+}
+
+/**
+ * Ask an OpenAI-compatible chat endpoint to derive sceneId/sceneName
+ * for a scene directory.
+ *
+ * @param {string} sourceDir - Path of the scene directory.
+ * @param {object} dirContents - See buildAnalyzePrompt.
+ * @param {{apiKey: string, baseUrl: string, model: string}} config - Endpoint settings.
+ * @returns {Promise<{sceneId: string, sceneName: string}>} Rejects on HTTP,
+ *   timeout (30s), or parse/validation failures.
+ */
+function analyzeScene(sourceDir, dirContents, { apiKey, baseUrl, model }) {
+  const userPrompt = buildAnalyzePrompt(sourceDir, dirContents);
+
+  const requestBody = JSON.stringify({
+    model,
+    messages: [
+      { role: "system", content: SYSTEM_PROMPT },
+      { role: "user", content: userPrompt },
+    ],
+    temperature: 0.1,
+    max_tokens: 256,
+  });
+
+  return new Promise((resolve, reject) => {
+    const url = new URL(baseUrl.replace(/\/v1\/?$/, "") + "/v1/chat/completions");
+    // Pick the transport matching the URL scheme; http.request cannot speak TLS.
+    const transport = url.protocol === "https:" ? https : http;
+    const options = {
+      hostname: url.hostname,
+      port: url.port || (url.protocol === "https:" ? 443 : 80),
+      path: url.pathname,
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`,
+        "Content-Length": Buffer.byteLength(requestBody),
+      },
+    };
+
+    const req = transport.request(options, (res) => {
+      let data = "";
+      res.on("data", (chunk) => (data += chunk));
+      res.on("end", () => {
+        if (res.statusCode !== 200) {
+          return reject(new Error(`LLM API error ${res.statusCode}: ${data}`));
+        }
+
+        try {
+          const parsed = JSON.parse(data);
+          const content = parsed.choices?.[0]?.message?.content;
+          if (!content) return reject(new Error("LLM returned empty response"));
+          const result = extractJsonFromResponse(content);
+          if (!result.sceneId || !result.sceneName) {
+            return reject(
+              new Error(`LLM response missing sceneId/sceneName: ${content}`)
+            );
+          }
+          resolve(result);
+        } catch (err) {
+          reject(new Error(`Failed to parse LLM response: ${err.message}`));
+        }
+      });
+    });
+
+    req.on("error", reject);
+    req.setTimeout(30000, () => {
+      req.destroy(new Error("LLM API request timed out"));
+    });
+
+    req.write(requestBody);
+    req.end();
+  });
+}
+
+module.exports = { buildAnalyzePrompt, extractJsonFromResponse, analyzeScene };
diff --git a/tests/scene_generator_llm_test.js b/tests/scene_generator_llm_test.js
new file mode 100644
index 0000000..b5238fa
--- /dev/null
+++ b/tests/scene_generator_llm_test.js
@@ -0,0 +1,45 @@
+const assert = require("assert");
+const {
+  buildAnalyzePrompt,
+  extractJsonFromResponse,
+} = require("../frontend/scene-generator/llm-client");
+
+function testBuildAnalyzePromptIncludesFileContents() {
+  const dirContents = {
+    "scene.toml": '[scene]\nid = "test-scene"',
+    scripts: { "collect_test.js": "async function main() {}" },
+    tree: "├── scene.toml\n└── collect_test.js",
+  };
+
+  const prompt = buildAnalyzePrompt("D:/test/scenario", dirContents);
+
+  assert.ok(prompt.includes("scene.toml"), "should include scene.toml");
+  assert.ok(prompt.includes("collect_test.js"), "should include script name");
+  assert.ok(prompt.includes("D:/test/scenario"), "should include sourceDir");
+  console.log("PASS: testBuildAnalyzePromptIncludesFileContents");
+}
+
+function testExtractJsonFromResponse() {
+  const withMarkdown =
+    '```json\n{"sceneId": "test", "sceneName": "测试"}\n```';
+  const plain = '{"sceneId": "test", "sceneName": "测试"}';
+  const withPrefix =
+    'Here is the result:\n{"sceneId": "test", "sceneName": "测试"}';
+
+  assert.deepStrictEqual(extractJsonFromResponse(withMarkdown), {
+    sceneId: "test",
+    sceneName: "测试",
+  });
+  assert.deepStrictEqual(extractJsonFromResponse(plain), {
+    sceneId: "test",
+    sceneName: "测试",
+  });
+  assert.deepStrictEqual(extractJsonFromResponse(withPrefix), {
+    sceneId: "test",
+    sceneName: "测试",
+  });
+  console.log("PASS: testExtractJsonFromResponse");
+}
+
+testBuildAnalyzePromptIncludesFileContents();
+testExtractJsonFromResponse();