feat: add llm-client.js with prompt builder and JSON extractor
🤖 Generated with [Qoder](https://qoder.com)
This commit is contained in:
121
frontend/scene-generator/llm-client.js
Normal file
121
frontend/scene-generator/llm-client.js
Normal file
@@ -0,0 +1,121 @@
|
||||
const http = require("http");
const https = require("https");
|
||||
|
||||
const SYSTEM_PROMPT = `你是一个场景信息提取助手。根据场景目录的内容,提取 scene-id 和 scene-name。
|
||||
|
||||
scene-id 规则:
|
||||
- 使用英文短横线连接,如 tq-lineloss-report
|
||||
- 全小写,有业务含义
|
||||
|
||||
scene-name 规则:
|
||||
- 使用中文,简短描述性名称
|
||||
- 如 "台区线损报表"、"知乎热榜导出"
|
||||
|
||||
请以 JSON 格式返回:{"sceneId": "...", "sceneName": "..."}`;
|
||||
|
||||
function buildAnalyzePrompt(sourceDir, dirContents) {
|
||||
const parts = [];
|
||||
|
||||
parts.push(`=== 目录结构 ===`);
|
||||
parts.push(dirContents.tree || "(empty)");
|
||||
|
||||
if (dirContents["scene.toml"]) {
|
||||
parts.push(`\n=== scene.toml ===`);
|
||||
parts.push(dirContents["scene.toml"]);
|
||||
}
|
||||
|
||||
if (dirContents["SKILL.toml"]) {
|
||||
parts.push(`\n=== SKILL.toml ===`);
|
||||
parts.push(dirContents["SKILL.toml"]);
|
||||
}
|
||||
|
||||
if (dirContents["SKILL.md"]) {
|
||||
parts.push(`\n=== SKILL.md ===`);
|
||||
parts.push(dirContents["SKILL.md"]);
|
||||
}
|
||||
|
||||
if (dirContents.scripts && Object.keys(dirContents.scripts).length > 0) {
|
||||
parts.push(`\n=== 脚本文件 ===`);
|
||||
for (const [name, content] of Object.entries(dirContents.scripts)) {
|
||||
parts.push(`\n--- ${name} ---`);
|
||||
parts.push(content.substring(0, 2000));
|
||||
}
|
||||
}
|
||||
|
||||
return `以下是场景目录 "${sourceDir}" 的内容:\n\n${parts.join("\n")}\n\n请以 JSON 格式返回:{"sceneId": "...", "sceneName": "..."}`;
|
||||
}
|
||||
|
||||
function extractJsonFromResponse(text) {
|
||||
const codeBlockMatch = text.match(/```(?:json)?\s*\n([\s\S]*?)\n```/);
|
||||
if (codeBlockMatch) return JSON.parse(codeBlockMatch[1]);
|
||||
|
||||
const jsonMatch = text.match(
|
||||
/\{[\s\S]*"sceneId"[\s\S]*"sceneName"[\s\S]*\}/
|
||||
);
|
||||
if (jsonMatch) return JSON.parse(jsonMatch[0]);
|
||||
|
||||
return JSON.parse(text);
|
||||
}
|
||||
|
||||
function analyzeScene(sourceDir, dirContents, { apiKey, baseUrl, model }) {
|
||||
const userPrompt = buildAnalyzePrompt(sourceDir, dirContents);
|
||||
|
||||
const requestBody = JSON.stringify({
|
||||
model,
|
||||
messages: [
|
||||
{ role: "system", content: SYSTEM_PROMPT },
|
||||
{ role: "user", content: userPrompt },
|
||||
],
|
||||
temperature: 0.1,
|
||||
max_tokens: 256,
|
||||
});
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const url = new URL(baseUrl.replace(/\/v1\/?$/, "") + "/v1/chat/completions");
|
||||
const options = {
|
||||
hostname: url.hostname,
|
||||
port: url.port || (url.protocol === "https:" ? 443 : 80),
|
||||
path: url.pathname,
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
"Content-Length": Buffer.byteLength(requestBody),
|
||||
},
|
||||
};
|
||||
|
||||
const req = http.request(options, (res) => {
|
||||
let data = "";
|
||||
res.on("data", (chunk) => (data += chunk));
|
||||
res.on("end", () => {
|
||||
if (res.statusCode !== 200) {
|
||||
return reject(new Error(`LLM API error ${res.statusCode}: ${data}`));
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(data);
|
||||
const content = parsed.choices?.[0]?.message?.content;
|
||||
if (!content) return reject(new Error("LLM returned empty response"));
|
||||
const result = extractJsonFromResponse(content);
|
||||
if (!result.sceneId || !result.sceneName) {
|
||||
return reject(
|
||||
new Error(`LLM response missing sceneId/sceneName: ${content}`)
|
||||
);
|
||||
}
|
||||
resolve(result);
|
||||
} catch (err) {
|
||||
reject(new Error(`Failed to parse LLM response: ${err.message}`));
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
req.on("error", reject);
|
||||
req.setTimeout(30000, () => {
|
||||
req.destroy(new Error("LLM API request timed out"));
|
||||
});
|
||||
|
||||
req.write(requestBody);
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = { buildAnalyzePrompt, extractJsonFromResponse, analyzeScene };
|
||||
45
tests/scene_generator_llm_test.js
Normal file
45
tests/scene_generator_llm_test.js
Normal file
@@ -0,0 +1,45 @@
|
||||
const assert = require("assert");
|
||||
const {
|
||||
buildAnalyzePrompt,
|
||||
extractJsonFromResponse,
|
||||
} = require("../frontend/scene-generator/llm-client");
|
||||
|
||||
function testBuildAnalyzePromptIncludesFileContents() {
|
||||
const dirContents = {
|
||||
"scene.toml": '[scene]\nid = "test-scene"',
|
||||
scripts: { "collect_test.js": "async function main() {}" },
|
||||
tree: "├── scene.toml\n└── collect_test.js",
|
||||
};
|
||||
|
||||
const prompt = buildAnalyzePrompt("D:/test/scenario", dirContents);
|
||||
|
||||
assert.ok(prompt.includes("scene.toml"), "should include scene.toml");
|
||||
assert.ok(prompt.includes("collect_test.js"), "should include script name");
|
||||
assert.ok(prompt.includes("D:/test/scenario"), "should include sourceDir");
|
||||
console.log("PASS: testBuildAnalyzePromptIncludesFileContents");
|
||||
}
|
||||
|
||||
function testExtractJsonFromResponse() {
|
||||
const withMarkdown =
|
||||
'```json\n{"sceneId": "test", "sceneName": "测试"}\n```';
|
||||
const plain = '{"sceneId": "test", "sceneName": "测试"}';
|
||||
const withPrefix =
|
||||
'Here is the result:\n{"sceneId": "test", "sceneName": "测试"}';
|
||||
|
||||
assert.deepStrictEqual(extractJsonFromResponse(withMarkdown), {
|
||||
sceneId: "test",
|
||||
sceneName: "测试",
|
||||
});
|
||||
assert.deepStrictEqual(extractJsonFromResponse(plain), {
|
||||
sceneId: "test",
|
||||
sceneName: "测试",
|
||||
});
|
||||
assert.deepStrictEqual(extractJsonFromResponse(withPrefix), {
|
||||
sceneId: "test",
|
||||
sceneName: "测试",
|
||||
});
|
||||
console.log("PASS: testExtractJsonFromResponse");
|
||||
}
|
||||
|
||||
testBuildAnalyzePromptIncludesFileContents();
|
||||
testExtractJsonFromResponse();
|
||||
Reference in New Issue
Block a user