Compare commits
8 Commits
96c3bf1dee
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a8a470481d | ||
|
|
45b60e37f7 | ||
|
|
72b79feca9 | ||
|
|
dd7805d341 | ||
|
|
311cc1fee6 | ||
|
|
7443b9da7f | ||
|
|
34035cdc9c | ||
|
|
4becf81066 |
334
Cargo.lock
generated
334
Cargo.lock
generated
@@ -26,18 +26,6 @@ dependencies = [
|
|||||||
"generic-array",
|
"generic-array",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "ahash"
|
|
||||||
version = "0.8.12"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
|
|
||||||
dependencies = [
|
|
||||||
"cfg-if",
|
|
||||||
"once_cell",
|
|
||||||
"version_check",
|
|
||||||
"zerocopy",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "aho-corasick"
|
name = "aho-corasick"
|
||||||
version = "1.1.4"
|
version = "1.1.4"
|
||||||
@@ -47,12 +35,6 @@ dependencies = [
|
|||||||
"memchr",
|
"memchr",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "allocator-api2"
|
|
||||||
version = "0.2.21"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "android_system_properties"
|
name = "android_system_properties"
|
||||||
version = "0.1.5"
|
version = "0.1.5"
|
||||||
@@ -118,15 +100,6 @@ version = "1.0.102"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
|
checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "ar_archive_writer"
|
|
||||||
version = "0.5.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "7eb93bbb63b9c227414f6eb3a0adfddca591a8ce1e9b60661bb08969b87e340b"
|
|
||||||
dependencies = [
|
|
||||||
"object",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-channel"
|
name = "async-channel"
|
||||||
version = "1.9.0"
|
version = "1.9.0"
|
||||||
@@ -220,9 +193,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "aws-lc-sys"
|
name = "aws-lc-sys"
|
||||||
version = "0.39.0"
|
version = "0.39.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a"
|
checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cc",
|
"cc",
|
||||||
"cmake",
|
"cmake",
|
||||||
@@ -328,6 +301,12 @@ version = "1.25.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec"
|
checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "byteorder"
|
||||||
|
version = "1.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "byteorder-lite"
|
name = "byteorder-lite"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -342,9 +321,9 @@ checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cc"
|
name = "cc"
|
||||||
version = "1.2.57"
|
version = "1.2.59"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
|
checksum = "b7a4d3ec6524d28a329fc53654bbadc9bdd7b0431f5d65f1a56ffb28a1ee5283"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"find-msvc-tools",
|
"find-msvc-tools",
|
||||||
"jobserver",
|
"jobserver",
|
||||||
@@ -421,16 +400,6 @@ dependencies = [
|
|||||||
"phf",
|
"phf",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "chumsky"
|
|
||||||
version = "0.9.3"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "8eebd66744a15ded14960ab4ccdbfb51ad3b81f51f3f04a80adac98c985396c9"
|
|
||||||
dependencies = [
|
|
||||||
"hashbrown 0.14.5",
|
|
||||||
"stacker",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cipher"
|
name = "cipher"
|
||||||
version = "0.4.4"
|
version = "0.4.4"
|
||||||
@@ -493,9 +462,9 @@ checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cmake"
|
name = "cmake"
|
||||||
version = "0.1.57"
|
version = "0.1.58"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d"
|
checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cc",
|
"cc",
|
||||||
]
|
]
|
||||||
@@ -772,9 +741,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "fastrand"
|
name = "fastrand"
|
||||||
version = "2.3.0"
|
version = "2.4.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
|
checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "fdeflate"
|
name = "fdeflate"
|
||||||
@@ -1012,16 +981,6 @@ dependencies = [
|
|||||||
"tracing",
|
"tracing",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "hashbrown"
|
|
||||||
version = "0.14.5"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
|
|
||||||
dependencies = [
|
|
||||||
"ahash",
|
|
||||||
"allocator-api2",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "hashbrown"
|
name = "hashbrown"
|
||||||
version = "0.15.5"
|
version = "0.15.5"
|
||||||
@@ -1039,10 +998,11 @@ checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "hashify"
|
name = "hashify"
|
||||||
version = "0.2.7"
|
version = "0.2.9"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "149e3ea90eb5a26ad354cfe3cb7f7401b9329032d0235f2687d03a35f30e5d4c"
|
checksum = "dd1246c0e5493286aeb2dde35b1f4eb9c4ce00e628641210a5e553fc001a1f26"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"indexmap",
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn",
|
||||||
@@ -1136,9 +1096,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "hyper"
|
name = "hyper"
|
||||||
version = "1.8.1"
|
version = "1.9.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11"
|
checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"atomic-waker",
|
"atomic-waker",
|
||||||
"bytes",
|
"bytes",
|
||||||
@@ -1151,7 +1111,6 @@ dependencies = [
|
|||||||
"httpdate",
|
"httpdate",
|
||||||
"itoa",
|
"itoa",
|
||||||
"pin-project-lite",
|
"pin-project-lite",
|
||||||
"pin-utils",
|
|
||||||
"smallvec",
|
"smallvec",
|
||||||
"tokio",
|
"tokio",
|
||||||
"want",
|
"want",
|
||||||
@@ -1223,12 +1182,13 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "icu_collections"
|
name = "icu_collections"
|
||||||
version = "2.1.1"
|
version = "2.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
|
checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"displaydoc",
|
"displaydoc",
|
||||||
"potential_utf",
|
"potential_utf",
|
||||||
|
"utf8_iter",
|
||||||
"yoke",
|
"yoke",
|
||||||
"zerofrom",
|
"zerofrom",
|
||||||
"zerovec",
|
"zerovec",
|
||||||
@@ -1236,9 +1196,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "icu_locale_core"
|
name = "icu_locale_core"
|
||||||
version = "2.1.1"
|
version = "2.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
|
checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"displaydoc",
|
"displaydoc",
|
||||||
"litemap",
|
"litemap",
|
||||||
@@ -1249,9 +1209,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "icu_normalizer"
|
name = "icu_normalizer"
|
||||||
version = "2.1.1"
|
version = "2.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
|
checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"icu_collections",
|
"icu_collections",
|
||||||
"icu_normalizer_data",
|
"icu_normalizer_data",
|
||||||
@@ -1263,15 +1223,15 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "icu_normalizer_data"
|
name = "icu_normalizer_data"
|
||||||
version = "2.1.1"
|
version = "2.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"
|
checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "icu_properties"
|
name = "icu_properties"
|
||||||
version = "2.1.2"
|
version = "2.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec"
|
checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"icu_collections",
|
"icu_collections",
|
||||||
"icu_locale_core",
|
"icu_locale_core",
|
||||||
@@ -1283,15 +1243,15 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "icu_properties_data"
|
name = "icu_properties_data"
|
||||||
version = "2.1.2"
|
version = "2.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af"
|
checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "icu_provider"
|
name = "icu_provider"
|
||||||
version = "2.1.1"
|
version = "2.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
|
checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"displaydoc",
|
"displaydoc",
|
||||||
"icu_locale_core",
|
"icu_locale_core",
|
||||||
@@ -1355,9 +1315,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "indexmap"
|
name = "indexmap"
|
||||||
version = "2.13.0"
|
version = "2.13.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
|
checksum = "45a8a2b9cb3e0b0c1803dbb0758ffac5de2f425b23c28f518faabd9d805342ff"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"equivalent",
|
"equivalent",
|
||||||
"hashbrown 0.16.1",
|
"hashbrown 0.16.1",
|
||||||
@@ -1395,9 +1355,9 @@ checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "iri-string"
|
name = "iri-string"
|
||||||
version = "0.7.11"
|
version = "0.7.12"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "d8e7418f59cc01c88316161279a7f665217ae316b388e58a0d10e29f54f1e5eb"
|
checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"memchr",
|
"memchr",
|
||||||
"serde",
|
"serde",
|
||||||
@@ -1427,10 +1387,12 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "js-sys"
|
name = "js-sys"
|
||||||
version = "0.3.91"
|
version = "0.3.94"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
|
checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"futures-util",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
]
|
]
|
||||||
@@ -1449,12 +1411,11 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "lettre"
|
name = "lettre"
|
||||||
version = "0.11.19"
|
version = "0.11.21"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9e13e10e8818f8b2a60f52cb127041d388b89f3a96a62be9ceaffa22262fef7f"
|
checksum = "dabda5859ee7c06b995b9d1165aa52c39110e079ef609db97178d86aeb051fa7"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"base64",
|
"base64",
|
||||||
"chumsky",
|
|
||||||
"email-encoding",
|
"email-encoding",
|
||||||
"email_address",
|
"email_address",
|
||||||
"fastrand",
|
"fastrand",
|
||||||
@@ -1473,9 +1434,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "libc"
|
name = "libc"
|
||||||
version = "0.2.183"
|
version = "0.2.184"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
|
checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "libloading"
|
name = "libloading"
|
||||||
@@ -1518,9 +1479,9 @@ checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "litemap"
|
name = "litemap"
|
||||||
version = "0.8.1"
|
version = "0.8.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
|
checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "lock_api"
|
name = "lock_api"
|
||||||
@@ -1607,9 +1568,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "mio"
|
name = "mio"
|
||||||
version = "1.1.1"
|
version = "1.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
|
checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
"wasi",
|
"wasi",
|
||||||
@@ -1669,15 +1630,6 @@ dependencies = [
|
|||||||
"autocfg",
|
"autocfg",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "object"
|
|
||||||
version = "0.37.3"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe"
|
|
||||||
dependencies = [
|
|
||||||
"memchr",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "once_cell"
|
name = "once_cell"
|
||||||
version = "1.21.4"
|
version = "1.21.4"
|
||||||
@@ -1831,9 +1783,9 @@ checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "potential_utf"
|
name = "potential_utf"
|
||||||
version = "0.1.4"
|
version = "0.1.5"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
|
checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"zerovec",
|
"zerovec",
|
||||||
]
|
]
|
||||||
@@ -1866,16 +1818,6 @@ dependencies = [
|
|||||||
"unicode-ident",
|
"unicode-ident",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "psm"
|
|
||||||
version = "0.1.30"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "3852766467df634d74f0b2d7819bf8dc483a0eb2e3b0f50f756f9cfe8b0d18d8"
|
|
||||||
dependencies = [
|
|
||||||
"ar_archive_writer",
|
|
||||||
"cc",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "pxfm"
|
name = "pxfm"
|
||||||
version = "0.1.28"
|
version = "0.1.28"
|
||||||
@@ -2204,9 +2146,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rustc-hash"
|
name = "rustc-hash"
|
||||||
version = "2.1.1"
|
version = "2.1.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
|
checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rustix"
|
name = "rustix"
|
||||||
@@ -2328,9 +2270,9 @@ checksum = "b12e76d157a900eb52e81bc6e9f3069344290341720e9178cde2407113ac8d89"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "semver"
|
name = "semver"
|
||||||
version = "1.0.27"
|
version = "1.0.28"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
|
checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde"
|
name = "serde"
|
||||||
@@ -2399,9 +2341,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde_spanned"
|
name = "serde_spanned"
|
||||||
version = "1.1.0"
|
version = "1.1.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "876ac351060d4f882bb1032b6369eb0aef79ad9df1ea8bc404874d8cc3d0cd98"
|
checksum = "6662b5879511e06e8999a8a235d848113e942c9124f211511b16466ee2995f26"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"serde_core",
|
"serde_core",
|
||||||
]
|
]
|
||||||
@@ -2420,7 +2362,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sgclaw"
|
name = "sgclaw"
|
||||||
version = "0.1.0"
|
version = "0.1.0-2026.4.9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"anyhow",
|
"anyhow",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
@@ -2437,6 +2379,7 @@ dependencies = [
|
|||||||
"tokio",
|
"tokio",
|
||||||
"uuid",
|
"uuid",
|
||||||
"zeroclawlabs",
|
"zeroclawlabs",
|
||||||
|
"zip 0.6.6",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2503,9 +2446,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "simd-adler32"
|
name = "simd-adler32"
|
||||||
version = "0.3.8"
|
version = "0.3.9"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2"
|
checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "siphasher"
|
name = "siphasher"
|
||||||
@@ -2541,20 +2484,6 @@ version = "1.2.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
|
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "stacker"
|
|
||||||
version = "0.1.23"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "08d74a23609d509411d10e2176dc2a4346e3b4aea2e7b1869f19fdedbc71c013"
|
|
||||||
dependencies = [
|
|
||||||
"cc",
|
|
||||||
"cfg-if",
|
|
||||||
"libc",
|
|
||||||
"psm",
|
|
||||||
"windows-sys 0.52.0",
|
|
||||||
"windows-sys 0.59.0",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "stop-token"
|
name = "stop-token"
|
||||||
version = "0.7.0"
|
version = "0.7.0"
|
||||||
@@ -2685,9 +2614,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tinystr"
|
name = "tinystr"
|
||||||
version = "0.8.2"
|
version = "0.8.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
|
checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"displaydoc",
|
"displaydoc",
|
||||||
"zerovec",
|
"zerovec",
|
||||||
@@ -2710,9 +2639,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tokio"
|
name = "tokio"
|
||||||
version = "1.50.0"
|
version = "1.51.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d"
|
checksum = "f66bf9585cda4b724d3e78ab34b73fb2bbaba9011b9bfdf69dc836382ea13b8c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bytes",
|
"bytes",
|
||||||
"libc",
|
"libc",
|
||||||
@@ -2726,9 +2655,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tokio-macros"
|
name = "tokio-macros"
|
||||||
version = "2.6.1"
|
version = "2.7.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c"
|
checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
@@ -2812,9 +2741,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml"
|
name = "toml"
|
||||||
version = "1.1.0+spec-1.1.0"
|
version = "1.1.2+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "f8195ca05e4eb728f4ba94f3e3291661320af739c4e43779cbdfae82ab239fcc"
|
checksum = "81f3d15e84cbcd896376e6730314d59fb5a87f31e4b038454184435cd57defee"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"indexmap",
|
"indexmap",
|
||||||
"serde_core",
|
"serde_core",
|
||||||
@@ -2822,32 +2751,32 @@ dependencies = [
|
|||||||
"toml_datetime",
|
"toml_datetime",
|
||||||
"toml_parser",
|
"toml_parser",
|
||||||
"toml_writer",
|
"toml_writer",
|
||||||
"winnow 1.0.0",
|
"winnow 1.0.1",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml_datetime"
|
name = "toml_datetime"
|
||||||
version = "1.1.0+spec-1.1.0"
|
version = "1.1.1+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "97251a7c317e03ad83774a8752a7e81fb6067740609f75ea2b585b569a59198f"
|
checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"serde_core",
|
"serde_core",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml_parser"
|
name = "toml_parser"
|
||||||
version = "1.1.0+spec-1.1.0"
|
version = "1.1.2+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2334f11ee363607eb04df9b8fc8a13ca1715a72ba8662a26ac285c98aabb4011"
|
checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"winnow 1.0.0",
|
"winnow 1.0.1",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml_writer"
|
name = "toml_writer"
|
||||||
version = "1.1.0+spec-1.1.0"
|
version = "1.1.1+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "d282ade6016312faf3e41e57ebbba0c073e4056dab1232ab1cb624199648f8ed"
|
checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tower"
|
name = "tower"
|
||||||
@@ -3068,9 +2997,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "uuid"
|
name = "uuid"
|
||||||
version = "1.22.0"
|
version = "1.23.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37"
|
checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"getrandom 0.4.2",
|
"getrandom 0.4.2",
|
||||||
"js-sys",
|
"js-sys",
|
||||||
@@ -3134,9 +3063,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen"
|
name = "wasm-bindgen"
|
||||||
version = "0.2.114"
|
version = "0.2.117"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
|
checksum = "0551fc1bb415591e3372d0bc4780db7e587d84e2a7e79da121051c5c4b89d0b0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
@@ -3147,23 +3076,19 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-futures"
|
name = "wasm-bindgen-futures"
|
||||||
version = "0.4.64"
|
version = "0.4.67"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8"
|
checksum = "03623de6905b7206edd0a75f69f747f134b7f0a2323392d664448bf2d3c5d87e"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
|
||||||
"futures-util",
|
|
||||||
"js-sys",
|
"js-sys",
|
||||||
"once_cell",
|
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
"web-sys",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-macro"
|
name = "wasm-bindgen-macro"
|
||||||
version = "0.2.114"
|
version = "0.2.117"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
|
checksum = "7fbdf9a35adf44786aecd5ff89b4563a90325f9da0923236f6104e603c7e86be"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"quote",
|
"quote",
|
||||||
"wasm-bindgen-macro-support",
|
"wasm-bindgen-macro-support",
|
||||||
@@ -3171,9 +3096,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-macro-support"
|
name = "wasm-bindgen-macro-support"
|
||||||
version = "0.2.114"
|
version = "0.2.117"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
|
checksum = "dca9693ef2bab6d4e6707234500350d8dad079eb508dca05530c85dc3a529ff2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bumpalo",
|
"bumpalo",
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
@@ -3184,9 +3109,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-shared"
|
name = "wasm-bindgen-shared"
|
||||||
version = "0.2.114"
|
version = "0.2.117"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
|
checksum = "39129a682a6d2d841b6c429d0c51e5cb0ed1a03829d8b3d1e69a011e62cb3d3b"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"unicode-ident",
|
"unicode-ident",
|
||||||
]
|
]
|
||||||
@@ -3240,9 +3165,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "web-sys"
|
name = "web-sys"
|
||||||
version = "0.3.91"
|
version = "0.3.94"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9"
|
checksum = "cd70027e39b12f0849461e08ffc50b9cd7688d942c1c8e3c7b22273236b4dd0a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js-sys",
|
"js-sys",
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
@@ -3362,15 +3287,6 @@ dependencies = [
|
|||||||
"windows-targets 0.52.6",
|
"windows-targets 0.52.6",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "windows-sys"
|
|
||||||
version = "0.59.0"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
|
|
||||||
dependencies = [
|
|
||||||
"windows-targets 0.52.6",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows-sys"
|
name = "windows-sys"
|
||||||
version = "0.60.2"
|
version = "0.60.2"
|
||||||
@@ -3529,9 +3445,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "winnow"
|
name = "winnow"
|
||||||
version = "1.0.0"
|
version = "1.0.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8"
|
checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wit-bindgen"
|
name = "wit-bindgen"
|
||||||
@@ -3623,9 +3539,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "writeable"
|
name = "writeable"
|
||||||
version = "0.6.2"
|
version = "0.6.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
|
checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "xattr"
|
name = "xattr"
|
||||||
@@ -3639,9 +3555,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "yoke"
|
name = "yoke"
|
||||||
version = "0.8.1"
|
version = "0.8.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
|
checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"stable_deref_trait",
|
"stable_deref_trait",
|
||||||
"yoke-derive",
|
"yoke-derive",
|
||||||
@@ -3650,9 +3566,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "yoke-derive"
|
name = "yoke-derive"
|
||||||
version = "0.8.1"
|
version = "0.8.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
|
checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
@@ -3729,23 +3645,23 @@ dependencies = [
|
|||||||
"uuid",
|
"uuid",
|
||||||
"webpki-roots 1.0.6",
|
"webpki-roots 1.0.6",
|
||||||
"which",
|
"which",
|
||||||
"zip",
|
"zip 8.5.1",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zerocopy"
|
name = "zerocopy"
|
||||||
version = "0.8.47"
|
version = "0.8.48"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87"
|
checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"zerocopy-derive",
|
"zerocopy-derive",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zerocopy-derive"
|
name = "zerocopy-derive"
|
||||||
version = "0.8.47"
|
version = "0.8.48"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89"
|
checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
@@ -3754,18 +3670,18 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zerofrom"
|
name = "zerofrom"
|
||||||
version = "0.1.6"
|
version = "0.1.7"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5"
|
checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"zerofrom-derive",
|
"zerofrom-derive",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zerofrom-derive"
|
name = "zerofrom-derive"
|
||||||
version = "0.1.6"
|
version = "0.1.7"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
|
checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
@@ -3781,9 +3697,9 @@ checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zerotrie"
|
name = "zerotrie"
|
||||||
version = "0.2.3"
|
version = "0.2.4"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
|
checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"displaydoc",
|
"displaydoc",
|
||||||
"yoke",
|
"yoke",
|
||||||
@@ -3792,9 +3708,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zerovec"
|
name = "zerovec"
|
||||||
version = "0.11.5"
|
version = "0.11.6"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
|
checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"yoke",
|
"yoke",
|
||||||
"zerofrom",
|
"zerofrom",
|
||||||
@@ -3803,9 +3719,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zerovec-derive"
|
name = "zerovec-derive"
|
||||||
version = "0.11.2"
|
version = "0.11.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
|
checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
@@ -3814,9 +3730,21 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zip"
|
name = "zip"
|
||||||
version = "8.4.0"
|
version = "0.6.6"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7756d0206d058333667493c4014f545f4b9603c4330ccd6d9b3f86dcab59f7d9"
|
checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261"
|
||||||
|
dependencies = [
|
||||||
|
"byteorder",
|
||||||
|
"crc32fast",
|
||||||
|
"crossbeam-utils",
|
||||||
|
"flate2",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "zip"
|
||||||
|
version = "8.5.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "dcab981e19633ebcf0b001ddd37dd802996098bc1864f90b7c5d970ce76c1d59"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crc32fast",
|
"crc32fast",
|
||||||
"flate2",
|
"flate2",
|
||||||
@@ -3839,9 +3767,9 @@ checksum = "cb8a0807f7c01457d0379ba880ba6322660448ddebc890ce29bb64da71fb40f9"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "zune-jpeg"
|
name = "zune-jpeg"
|
||||||
version = "0.5.14"
|
version = "0.5.15"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "0b7a1c0af6e5d8d1363f4994b7a091ccf963d8b694f7da5b0b9cceb82da2c0a6"
|
checksum = "27bc9d5b815bc103f142aa054f561d9187d191692ec7c2d1e2b4737f8dbd7296"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"zune-core",
|
"zune-core",
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "sgclaw"
|
name = "sgclaw"
|
||||||
version = "0.1.0"
|
version = "0.1.0-2026.4.9"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
@@ -18,4 +18,5 @@ sha2 = "0.10"
|
|||||||
thiserror = "1"
|
thiserror = "1"
|
||||||
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "macros"] }
|
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "macros"] }
|
||||||
uuid = { version = "1", features = ["v4"] }
|
uuid = { version = "1", features = ["v4"] }
|
||||||
|
zip = { version = "0.6.6", default-features = false, features = ["deflate"] }
|
||||||
zeroclaw = { package = "zeroclawlabs", path = "third_party/zeroclaw", default-features = false }
|
zeroclaw = { package = "zeroclawlabs", path = "third_party/zeroclaw", default-features = false }
|
||||||
|
|||||||
@@ -0,0 +1,281 @@
|
|||||||
|
# Config-Owned Direct Skill Contract Implementation Plan
|
||||||
|
|
||||||
|
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||||
|
|
||||||
|
**Goal:** Validate the `directSubmitSkill` control surface early and prevent malformed direct-skill configs from entering the submit routing path, without changing the current happy-path direct execution behavior.
|
||||||
|
|
||||||
|
**Architecture:** Keep the existing direct-submit runtime and submit-task seam intact for valid configs. Move `directSubmitSkill` format validation into the normal `SgClawSettings` load path so malformed config fails before routing begins, while leaving valid-but-unresolvable `skill.tool` targets as direct runtime errors in the current direct path.
|
||||||
|
|
||||||
|
**Tech Stack:** Rust 2021, `serde` config parsing, current `BrowserMessage::SubmitTask` path, current direct skill runtime, Rust integration tests.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Context
|
||||||
|
|
||||||
|
- Follow @superpowers:test-driven-development for the Rust code changes in this plan.
|
||||||
|
- Follow @superpowers:verification-before-completion before claiming any task is done.
|
||||||
|
- Do **not** create a git worktree unless the user explicitly asks. This project prefers staying in the current checkout.
|
||||||
|
- Keep scope tight: this plan does **not** add per-skill dispatch metadata, docs changes, intent classification, or LLM routing changes.
|
||||||
|
|
||||||
|
## File Map
|
||||||
|
|
||||||
|
### Existing files to modify
|
||||||
|
|
||||||
|
- Modify: `src/config/settings.rs`
|
||||||
|
- validate `directSubmitSkill` during config normalization
|
||||||
|
- keep the stored field as `Option<String>` so the current direct runtime API stays stable
|
||||||
|
- Modify: `tests/compat_config_test.rs`
|
||||||
|
- add a failing config-load regression for malformed `directSubmitSkill`
|
||||||
|
- Modify: `tests/agent_runtime_test.rs`
|
||||||
|
- add a failing submit-path regression proving malformed config is rejected before direct routing begins
|
||||||
|
|
||||||
|
### Existing files to read but not broaden
|
||||||
|
|
||||||
|
- Reuse without redesign: `src/agent/mod.rs`
|
||||||
|
- Reuse without redesign: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- Reuse without redesign: `docs/superpowers/specs/2026-04-09-config-owned-direct-skill-dispatch-design.md`
|
||||||
|
|
||||||
|
### No new files expected
|
||||||
|
|
||||||
|
This slice should fit in the existing config and tests surfaces only.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 1: Validate `directSubmitSkill` Before Submit Routing
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `tests/compat_config_test.rs`
|
||||||
|
- Modify: `tests/agent_runtime_test.rs`
|
||||||
|
- Modify: `src/config/settings.rs`
|
||||||
|
- Read only: `src/agent/mod.rs`
|
||||||
|
- Read only: `src/compat/direct_skill_runtime.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write the failing config test for malformed `directSubmitSkill`**
|
||||||
|
|
||||||
|
Add this focused test to `tests/compat_config_test.rs`:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn sgclaw_settings_reject_invalid_direct_submit_skill_format() {
|
||||||
|
let root = std::env::temp_dir().join(format!(
|
||||||
|
"sgclaw-invalid-direct-submit-skill-{}",
|
||||||
|
Uuid::new_v4()
|
||||||
|
));
|
||||||
|
fs::create_dir_all(&root).unwrap();
|
||||||
|
let config_path = root.join("sgclaw_config.json");
|
||||||
|
|
||||||
|
fs::write(
|
||||||
|
&config_path,
|
||||||
|
r#"{
|
||||||
|
"providers": [],
|
||||||
|
"skillsDir": "skill_lib",
|
||||||
|
"directSubmitSkill": "fault-details-report"
|
||||||
|
}"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let err = SgClawSettings::load(Some(config_path.as_path()))
|
||||||
|
.expect_err("expected invalid directSubmitSkill format");
|
||||||
|
let message = err.to_string();
|
||||||
|
|
||||||
|
assert!(message.contains("directSubmitSkill"));
|
||||||
|
assert!(message.contains("skill.tool"));
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the focused config test and verify it fails**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test sgclaw_settings_reject_invalid_direct_submit_skill_format -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the current config loader accepts the malformed string instead of rejecting it early.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Write the failing agent regression for malformed config**
|
||||||
|
|
||||||
|
Add this focused test to `tests/agent_runtime_test.rs`:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn submit_task_rejects_invalid_direct_submit_skill_config_before_routing() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let workspace_root = std::env::temp_dir().join(format!(
|
||||||
|
"sgclaw-invalid-direct-submit-workspace-{}",
|
||||||
|
Uuid::new_v4()
|
||||||
|
));
|
||||||
|
fs::create_dir_all(&workspace_root).unwrap();
|
||||||
|
let config_path = workspace_root.join("sgclaw_config.json");
|
||||||
|
fs::write(
|
||||||
|
&config_path,
|
||||||
|
serde_json::json!({
|
||||||
|
"providers": [],
|
||||||
|
"skillsDir": skill_root,
|
||||||
|
"directSubmitSkill": "fault-details-report"
|
||||||
|
})
|
||||||
|
.to_string(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let runtime_context = AgentRuntimeContext::new(Some(config_path), workspace_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_fault_details_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
assert!(matches!(
|
||||||
|
sent.last(),
|
||||||
|
Some(AgentMessage::TaskComplete { success, summary })
|
||||||
|
if !success && summary.contains("skill.tool")
|
||||||
|
));
|
||||||
|
assert!(direct_submit_mode_logs(&sent).is_empty());
|
||||||
|
assert!(!sent.iter().any(|message| matches!(message, AgentMessage::Command { .. })));
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 4: Run the focused agent test and verify it fails**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test submit_task_rejects_invalid_direct_submit_skill_config_before_routing -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the malformed config currently loads, enters the direct-submit branch, and emits `direct_skill_primary` before failing later.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Implement the minimal config validation**
|
||||||
|
|
||||||
|
In `src/config/settings.rs`, add a small helper that validates the normalized `directSubmitSkill` string during `SgClawSettings::new(...)`.
|
||||||
|
|
||||||
|
Recommended implementation shape:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
fn normalize_direct_submit_skill(raw: Option<String>) -> Result<Option<String>, ConfigError> {
|
||||||
|
let value = normalize_optional_value(raw);
|
||||||
|
let Some(value) = value.as_deref() else {
|
||||||
|
return Ok(None);
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some((skill_name, tool_name)) = value.split_once('.') else {
|
||||||
|
return Err(ConfigError::InvalidValue(
|
||||||
|
"directSubmitSkill",
|
||||||
|
format!("must use skill.tool format, got {value}"),
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
if skill_name.trim().is_empty() || tool_name.trim().is_empty() {
|
||||||
|
return Err(ConfigError::InvalidValue(
|
||||||
|
"directSubmitSkill",
|
||||||
|
format!("must use skill.tool format, got {value}"),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Some(value.to_string()))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Then use it here:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let direct_submit_skill = normalize_direct_submit_skill(direct_submit_skill)?;
|
||||||
|
```
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- do not change the public field type from `Option<String>`
|
||||||
|
- do not move parsing responsibility into `src/agent/mod.rs`
|
||||||
|
- do not redesign `src/compat/direct_skill_runtime.rs`
|
||||||
|
- keep valid-but-unresolvable `skill.tool` targets as runtime errors in the direct path
|
||||||
|
|
||||||
|
- [ ] **Step 6: Re-run the two focused tests and verify they pass**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test sgclaw_settings_reject_invalid_direct_submit_skill_format -- --nocapture
|
||||||
|
cargo test --test agent_runtime_test submit_task_rejects_invalid_direct_submit_skill_config_before_routing -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 7: Re-run the broader regression suites**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test -- --nocapture
|
||||||
|
cargo test --test agent_runtime_test -- --nocapture
|
||||||
|
cargo test --test browser_script_skill_tool_test -- --nocapture
|
||||||
|
cargo build --bin sgclaw
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS, including:
|
||||||
|
- the direct-submit happy path
|
||||||
|
- the existing no-LLM fallback behavior when `directSubmitSkill` is absent
|
||||||
|
- unchanged browser-script helper semantics
|
||||||
|
- clean binary build
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Verification Checklist
|
||||||
|
|
||||||
|
### Config validation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: malformed `directSubmitSkill` is rejected early, while the existing direct-only config shape still loads.
|
||||||
|
|
||||||
|
### Submit-path behavior
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected:
|
||||||
|
- malformed `directSubmitSkill` never reaches direct routing
|
||||||
|
- valid configured direct skill still succeeds without LLM config
|
||||||
|
- no direct skill configured still returns the existing no-LLM message
|
||||||
|
|
||||||
|
### Browser-script helper safety
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test browser_script_skill_tool_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: current browser-script execution semantics remain unchanged.
|
||||||
|
|
||||||
|
### Build
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo build --bin sgclaw
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: the main binary compiles cleanly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes For The Engineer
|
||||||
|
|
||||||
|
- The paired spec is `docs/superpowers/specs/2026-04-09-config-owned-direct-skill-dispatch-design.md`.
|
||||||
|
- Do **not** add sgClaw-specific dispatch metadata under `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging` in this slice.
|
||||||
|
- Do **not** turn this into a per-skill registry task yet. This plan only hardens the current config-owned bootstrap contract.
|
||||||
|
- Keep the current direct target example as `fault-details-report.collect_fault_details`; avoid hard-coding that name into new generic APIs.
|
||||||
|
- If you discover a need for broader policy routing (`direct_browser` / `llm_agent` by skill), stop and write a new spec/plan instead of expanding this one.
|
||||||
@@ -0,0 +1,520 @@
|
|||||||
|
# Direct Skill Invocation Without LLM Implementation Plan
|
||||||
|
|
||||||
|
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||||
|
|
||||||
|
**Goal:** Let the current pipe submit-task flow accept natural-language input but directly invoke one fixed staged browser skill without calling any model, while reserving a clean switch back to LLM-based routing later.
|
||||||
|
|
||||||
|
**Architecture:** Keep the existing `BrowserMessage::SubmitTask` entrypoint and add one narrow pre-routing seam before the current compat/LLM chain. When a new config field points to a fixed direct-submit skill, sgClaw loads that skill package from the configured external skills root, finds the target `browser_script` tool, executes it through the existing browser-script wrapper, and returns the result directly. When the field is absent, the current behavior stays unchanged. This preserves a future path where each skill can later declare `direct_browser` or `llm_agent` dispatch without rewriting the submit pipeline again.
|
||||||
|
|
||||||
|
**Tech Stack:** Rust 2021, existing `BrowserPipeTool`, current submit-task agent entrypoint, current browser-script skill executor, current sgClaw JSON config loader, `zeroclaw` skill manifest loader.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Recommended First Skill
|
||||||
|
|
||||||
|
Use `fault-details-report.collect_fault_details` from:
|
||||||
|
- `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/scenes/fault-details-report/scene.json`
|
||||||
|
- `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/SKILL.toml`
|
||||||
|
- `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.js`
|
||||||
|
|
||||||
|
Why this one first:
|
||||||
|
- it is clearly a report/export skill
|
||||||
|
- it exposes exactly one browser-script tool: `collect_fault_details`
|
||||||
|
- it has the smallest contract surface (`period` only)
|
||||||
|
- its current JS is deterministic and simple, so the first slice can focus on plumbing instead of browser scraping complexity
|
||||||
|
|
||||||
|
## Scope Guardrails
|
||||||
|
|
||||||
|
- Do **not** redesign the existing submit-task protocol.
|
||||||
|
- Do **not** remove or rewrite the current LLM/compat path; leave it as the fallback/default path.
|
||||||
|
- Do **not** introduce generic NL intent routing in this slice; this is one fixed direct skill only.
|
||||||
|
- Do **not** modify `third_party/zeroclaw` skill manifest schema in phase 1.
|
||||||
|
- Do **not** add Excel export wiring in the first slice unless a test explicitly requires it.
|
||||||
|
- Do **not** invent a new browser-script execution model; reuse the existing wrapper semantics.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## File Map
|
||||||
|
|
||||||
|
### Existing files to modify
|
||||||
|
|
||||||
|
- Modify: `src/config/settings.rs`
|
||||||
|
- add a minimal config field for one direct-submit skill name
|
||||||
|
- Modify: `src/agent/mod.rs`
|
||||||
|
- add a narrow pre-routing branch before the current compat/LLM path
|
||||||
|
- Modify: `src/compat/browser_script_skill_tool.rs`
|
||||||
|
- expose the smallest reusable helper for direct browser-script execution
|
||||||
|
- Modify: `src/compat/mod.rs` or the nearest module export surface
|
||||||
|
- export the new narrow direct-skill runtime module if needed
|
||||||
|
- Modify: `tests/compat_config_test.rs`
|
||||||
|
- add config coverage for the new direct-submit field
|
||||||
|
- Modify: `tests/browser_script_skill_tool_test.rs`
|
||||||
|
- add coverage for the reusable direct-execution helper
|
||||||
|
- Modify: `tests/agent_runtime_test.rs`
|
||||||
|
- prove submit-task can bypass the model and directly invoke the fixed skill
|
||||||
|
|
||||||
|
### New files to create
|
||||||
|
|
||||||
|
- Create: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- small runtime for loading one configured skill, resolving one configured tool, deriving minimal args, and executing it directly
|
||||||
|
|
||||||
|
### Files to reuse without changing behavior
|
||||||
|
|
||||||
|
- Reuse: `src/compat/runtime.rs`
|
||||||
|
- Reuse: `src/compat/orchestration.rs`
|
||||||
|
- Reuse: `src/compat/config_adapter.rs`
|
||||||
|
- Reuse: `third_party/zeroclaw/src/skills/mod.rs`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 1: Add A Minimal Direct-Submit Skill Config Field
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `src/config/settings.rs`
|
||||||
|
- Modify: `tests/compat_config_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write the failing config test for the new field**
|
||||||
|
|
||||||
|
In `tests/compat_config_test.rs`, add a focused config-load test proving the browser config file can declare one fixed direct-submit skill.
|
||||||
|
|
||||||
|
Test shape:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn sgclaw_settings_load_direct_submit_skill_from_browser_config() {
|
||||||
|
let root = std::env::temp_dir().join(format!("sgclaw-direct-skill-{}", uuid::Uuid::new_v4()));
|
||||||
|
std::fs::create_dir_all(&root).unwrap();
|
||||||
|
let config_path = root.join("sgclaw_config.json");
|
||||||
|
|
||||||
|
std::fs::write(
|
||||||
|
&config_path,
|
||||||
|
r#"{
|
||||||
|
"apiKey": "sk-runtime",
|
||||||
|
"baseUrl": "https://api.deepseek.com",
|
||||||
|
"model": "deepseek-chat",
|
||||||
|
"skillsDir": "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging",
|
||||||
|
"directSubmitSkill": "fault-details-report.collect_fault_details"
|
||||||
|
}"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let settings = sgclaw::config::SgClawSettings::load(Some(config_path.as_path()))
|
||||||
|
.unwrap()
|
||||||
|
.expect("expected sgclaw settings from config file");
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
settings.direct_submit_skill.as_deref(),
|
||||||
|
Some("fault-details-report.collect_fault_details")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the focused config test and verify it fails**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test sgclaw_settings_load_direct_submit_skill_from_browser_config -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the config field does not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Implement the minimal config field**
|
||||||
|
|
||||||
|
In `src/config/settings.rs`, add:
|
||||||
|
- `direct_submit_skill: Option<String>` to `SgClawSettings`
|
||||||
|
- `direct_submit_skill: Option<String>` to `RawSgClawSettings`
|
||||||
|
- field normalization in `SgClawSettings::new(...)`
|
||||||
|
|
||||||
|
Recommended JSON key shape:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[serde(rename = "directSubmitSkill", alias = "direct_submit_skill", default)]
|
||||||
|
direct_submit_skill: Option<String>,
|
||||||
|
```
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- trim empty values to `None`
|
||||||
|
- keep `DeepSeekSettings` unchanged for this slice unless a compile error proves it must mirror the field
|
||||||
|
- do not alter unrelated config semantics
|
||||||
|
|
||||||
|
- [ ] **Step 4: Re-run the focused config test**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test sgclaw_settings_load_direct_submit_skill_from_browser_config -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Re-run the broader config file tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 6: Commit Task 1**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/config/settings.rs tests/compat_config_test.rs
|
||||||
|
git commit -m "feat: add direct submit skill config"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 2: Extract A Reusable Browser-Script Direct Execution Helper
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `src/compat/browser_script_skill_tool.rs`
|
||||||
|
- Modify: `tests/browser_script_skill_tool_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write the first failing helper test**
|
||||||
|
|
||||||
|
In `tests/browser_script_skill_tool_test.rs`, add a focused test proving direct code can execute a packaged browser script without constructing a full `Tool` object first.
|
||||||
|
|
||||||
|
Test shape:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[tokio::test]
|
||||||
|
async fn execute_browser_script_tool_runs_packaged_script_with_expected_domain() {
|
||||||
|
// build temp skill script
|
||||||
|
// call the helper directly
|
||||||
|
// assert Action::Eval was sent with wrapped args and normalized domain
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Required assertions:
|
||||||
|
- the helper reads the packaged JS file
|
||||||
|
- it wraps args with `const args = ...`
|
||||||
|
- it normalizes URL-like `expected_domain`
|
||||||
|
- it returns the serialized payload string on success
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the helper test and verify it fails**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test browser_script_skill_tool_test execute_browser_script_tool_runs_packaged_script_with_expected_domain -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the helper does not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Add the second failing helper test for required-domain validation**
|
||||||
|
|
||||||
|
Add a focused failure-path test proving the helper rejects missing or invalid `expected_domain` before any browser command is sent.
|
||||||
|
|
||||||
|
- [ ] **Step 4: Run the validation test and verify it fails**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test browser_script_skill_tool_test execute_browser_script_tool_rejects_missing_expected_domain -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the helper does not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Implement the minimal reusable helper**
|
||||||
|
|
||||||
|
In `src/compat/browser_script_skill_tool.rs`, extract the smallest reusable function, for example:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub async fn execute_browser_script_tool<T: Transport + 'static>(
|
||||||
|
tool: &SkillTool,
|
||||||
|
skill_root: &Path,
|
||||||
|
browser_tool: BrowserPipeTool<T>,
|
||||||
|
args: Value,
|
||||||
|
) -> anyhow::Result<ToolResult>
|
||||||
|
```
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- reuse the current path validation, script loading, wrapping, `Action::Eval`, and payload formatting logic already used by `BrowserScriptSkillTool::execute`
|
||||||
|
- do not change outward behavior of `BrowserScriptSkillTool`
|
||||||
|
- keep the helper narrow and browser-script-only
|
||||||
|
|
||||||
|
- [ ] **Step 6: Refactor `BrowserScriptSkillTool::execute` to call the helper**
|
||||||
|
|
||||||
|
Keep existing behavior and tests green while removing duplicate execution logic.
|
||||||
|
|
||||||
|
- [ ] **Step 7: Re-run the browser-script tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test browser_script_skill_tool_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 8: Commit Task 2**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/compat/browser_script_skill_tool.rs tests/browser_script_skill_tool_test.rs
|
||||||
|
git commit -m "refactor: extract direct browser script execution helper"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 3: Add A Narrow Direct Skill Runtime For One Fixed Skill
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Create: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- Modify: `src/compat/mod.rs` or nearest module export point
|
||||||
|
- Reuse: `src/compat/config_adapter.rs`
|
||||||
|
- Reuse: `third_party/zeroclaw/src/skills/mod.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write the first failing direct-runtime test**
|
||||||
|
|
||||||
|
Add a focused test in `tests/agent_runtime_test.rs` or a new narrow compat test proving code can resolve the configured external skills root, load `fault-details-report`, find `collect_fault_details`, and execute it directly.
|
||||||
|
|
||||||
|
Recommended shape:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn direct_skill_runtime_executes_fault_details_report_without_provider() {
|
||||||
|
// config points at skill_staging root
|
||||||
|
// direct_submit_skill points at fault-details-report.collect_fault_details
|
||||||
|
// browser response returns report-artifact payload
|
||||||
|
// assert no provider/http path is touched
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the focused direct-runtime test and verify it fails**
|
||||||
|
|
||||||
|
Run the narrowest test command for the new test.
|
||||||
|
|
||||||
|
Expected: FAIL because the direct runtime does not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Implement `src/compat/direct_skill_runtime.rs`**
|
||||||
|
|
||||||
|
Add a narrow runtime with responsibilities only to:
|
||||||
|
- resolve the configured skills dir with `resolve_skills_dir_from_sgclaw_settings(...)`
|
||||||
|
- load skills from that directory with `load_skills_from_directory(...)`
|
||||||
|
- parse the configured tool name into `skill_name` + `tool_name`
|
||||||
|
- find the matching skill and matching tool
|
||||||
|
- verify `tool.kind == "browser_script"`
|
||||||
|
- derive the minimal argument object
|
||||||
|
- call the new browser-script helper
|
||||||
|
- return the output string or a clear `PipeError`
|
||||||
|
|
||||||
|
Do **not** add generic routing, scenes, or model fallback here.
|
||||||
|
|
||||||
|
- [ ] **Step 4: Keep argument derivation intentionally minimal**
|
||||||
|
|
||||||
|
For the first slice, derive only:
|
||||||
|
- `expected_domain` from `page_url` when present, otherwise fail with a clear message
|
||||||
|
- `period` from the instruction using a narrow deterministic pattern such as `YYYY-MM`
|
||||||
|
|
||||||
|
If the period cannot be derived, return a concise error telling the user to provide it explicitly. Do not guess.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Re-run the focused direct-runtime test**
|
||||||
|
|
||||||
|
Run the same test command again.
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 6: Commit Task 3**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/compat/direct_skill_runtime.rs src/compat/mod.rs tests/agent_runtime_test.rs
|
||||||
|
git commit -m "feat: add fixed direct skill runtime"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 4: Insert The Pre-Routing Seam In Submit-Task Entry
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `src/agent/mod.rs`
|
||||||
|
- Modify: `tests/agent_runtime_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write the first failing submit-path bypass test**
|
||||||
|
|
||||||
|
In `tests/agent_runtime_test.rs`, add a focused regression proving that when `directSubmitSkill` is configured, `BrowserMessage::SubmitTask` can succeed without any model/provider being configured.
|
||||||
|
|
||||||
|
Test shape:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn submit_task_uses_direct_skill_mode_without_llm_configuration() {
|
||||||
|
// config contains skillsDir + directSubmitSkill, but no reachable provider
|
||||||
|
// natural-language instruction includes period and page_url
|
||||||
|
// expect TaskComplete success from direct skill result
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Required assertions:
|
||||||
|
- task succeeds even if provider would be unavailable
|
||||||
|
- output contains the report artifact payload
|
||||||
|
- no summary like `未配置大语言模型`
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the bypass test and verify it fails**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test submit_task_uses_direct_skill_mode_without_llm_configuration -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because submit-task still goes into the current LLM-oriented path.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Add the second failing priority test**
|
||||||
|
|
||||||
|
Add one focused test proving the direct-submit branch runs before the existing compat/LLM branch.
|
||||||
|
|
||||||
|
The easiest assertion is that the mode log becomes something new like:
|
||||||
|
- `direct_skill_primary`
|
||||||
|
|
||||||
|
and the normal mode logs do not appear for that turn.
|
||||||
|
|
||||||
|
- [ ] **Step 4: Run the priority test and verify it fails**
|
||||||
|
|
||||||
|
Run the narrow test command for the new test.
|
||||||
|
|
||||||
|
Expected: FAIL because the mode does not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Add the narrow pre-routing branch in `src/agent/mod.rs`**
|
||||||
|
|
||||||
|
In `handle_browser_message_with_context(...)`, after config load/logging and before the existing `should_use_primary_orchestration(...)` / `compat::runtime` path:
|
||||||
|
- check `settings.direct_submit_skill`
|
||||||
|
- if present, emit mode log `direct_skill_primary`
|
||||||
|
- call the new direct runtime
|
||||||
|
- send `TaskComplete` and return immediately
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- if `direct_submit_skill` is absent, keep existing behavior byte-for-byte where possible
|
||||||
|
- do not modify `src/compat/runtime.rs` or `src/compat/orchestration.rs` for this slice
|
||||||
|
- do not silently fall through to LLM when direct execution fails; return the direct error clearly so the first slice is debuggable
|
||||||
|
|
||||||
|
- [ ] **Step 6: Re-run the focused submit-path tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test submit_task_uses_direct_skill_mode_without_llm_configuration -- --nocapture
|
||||||
|
cargo test --test agent_runtime_test direct_skill_mode_logs_direct_skill_primary -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 7: Re-run existing no-LLM submit regression coverage**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS, including existing cases where no direct skill is configured and the old no-LLM failure still applies.
|
||||||
|
|
||||||
|
- [ ] **Step 8: Commit Task 4**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/agent/mod.rs tests/agent_runtime_test.rs
|
||||||
|
git commit -m "feat: route submit tasks through fixed direct skill mode"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 5: Lock The Future Migration Seam Without Implementing LLM Dispatch Yet
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify only if needed: `src/config/settings.rs`
|
||||||
|
- Modify only if needed: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- Reuse: documentation and plan files only, unless the code requires one tiny naming fix
|
||||||
|
|
||||||
|
- [ ] **Step 1: Keep the config naming compatible with future per-skill dispatch**
|
||||||
|
|
||||||
|
Document and preserve this future meaning in code naming:
|
||||||
|
- current field: one fixed direct skill for submit-task bootstrap
|
||||||
|
- future model: each skill can declare dispatch mode such as `direct_browser` or `llm_agent`
|
||||||
|
|
||||||
|
Prefer neutral names in helper code like:
|
||||||
|
- `direct skill mode`
|
||||||
|
- `direct submit skill`
|
||||||
|
|
||||||
|
Avoid hard-coding `fault_details` into generic APIs.
|
||||||
|
|
||||||
|
- [ ] **Step 2: Add one small negative test for fallback behavior**
|
||||||
|
|
||||||
|
Add a focused test proving that when `directSubmitSkill` is not configured, submit-task still behaves exactly as before and can still return the existing no-LLM message.
|
||||||
|
|
||||||
|
If an existing test already proves this, keep it and do not add another.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Re-run the focused end-to-end verification set**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test -- --nocapture
|
||||||
|
cargo test --test browser_script_skill_tool_test -- --nocapture
|
||||||
|
cargo test --test agent_runtime_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 4: Build the main binary**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo build --bin sgclaw
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Commit Task 5**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/config/settings.rs src/compat/direct_skill_runtime.rs src/compat/browser_script_skill_tool.rs src/agent/mod.rs tests/compat_config_test.rs tests/browser_script_skill_tool_test.rs tests/agent_runtime_test.rs
|
||||||
|
git commit -m "test: verify fixed direct skill submit path"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Verification Checklist
|
||||||
|
|
||||||
|
### Config loading
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_config_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: `directSubmitSkill` loads correctly and existing config behavior remains intact.
|
||||||
|
|
||||||
|
### Browser-script helper
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test browser_script_skill_tool_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: direct helper preserves the existing browser-script execution semantics.
|
||||||
|
|
||||||
|
### Submit-path bypass
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: configured direct skill bypasses the model path, while unconfigured submit-task behavior stays unchanged.
|
||||||
|
|
||||||
|
### Build
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo build --bin sgclaw
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: the binary compiles cleanly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes For The Engineer
|
||||||
|
|
||||||
|
- The key to keeping this slice small is to avoid changing `src/compat/runtime.rs` and `src/compat/orchestration.rs`; they remain the future LLM path.
|
||||||
|
- `fault-details-report.collect_fault_details` is only the bootstrap skill. The plumbing must stay generic enough that the configured tool name can later point to another staged browser skill.
|
||||||
|
- Phase 1 should not add per-skill dispatch metadata to the external skill manifests yet. Keep that decision in sgClaw config first; move it into skill metadata only after the direct path is proven useful.
|
||||||
|
- Once the intranet model is ready, the clean next step is to add a dispatch policy layer that chooses between `direct_browser` and `llm_agent` before the current compat path is entered, reusing this same pre-routing seam.
|
||||||
@@ -0,0 +1,672 @@
|
|||||||
|
# Fault Details Full Skill Alignment Implementation Plan
|
||||||
|
|
||||||
|
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||||
|
|
||||||
|
**Goal:** Upgrade `fault-details-report.collect_fault_details` into a real staged browser skill that matches the original fault-details workflow, and make `claw-new` interpret the returned artifact status correctly in the direct-submit path.
|
||||||
|
|
||||||
|
**Architecture:** Keep routing and direct-skill selection in `claw-new`, but move all fault-details collection, normalization, classification, summary, export, and report-log behavior into the staged skill under `skill_staging`. Implement the staged skill as a true browser-eval entrypoint that remains valid in page context, while exposing testable pure helpers through an environment-safe export guard for `node:test`; then add a narrow Rust artifact interpreter in `src/compat/direct_skill_runtime.rs` so `ok` / `partial` / `empty` map to successful task completion while `blocked` / `error` map to failed completion.
|
||||||
|
|
||||||
|
**Tech Stack:** Rust 2021, `serde_json`, existing `BrowserPipeTool` / `browser_script` runtime, `node:test`, staged skill fixtures, Cargo integration tests.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Context
|
||||||
|
|
||||||
|
- Follow @superpowers:test-driven-development for every behavior change.
|
||||||
|
- Follow @superpowers:verification-before-completion before claiming each task is done.
|
||||||
|
- Do **not** create a git worktree unless the user explicitly asks. This repo preference is already established.
|
||||||
|
- Keep scope tight. Do **not** add a new browser protocol, new dispatch metadata, new UI opener behavior, or Rust-side fault classification logic.
|
||||||
|
- Keep the current direct path bootstrap requirement intact: the user instruction must still include an explicit `YYYY-MM`, but the staged skill must treat the page-selected range as the source of truth for collection once execution begins.
|
||||||
|
- Preserve parity with the original package’s real behavior: port the original classification table, `qxxcjl`-based reason heuristics, canonical detail mapping, summary aggregation rules, localhost export call, and report-log call into the staged skill rather than implementing a fixture-only subset.
|
||||||
|
|
||||||
|
## File Map
|
||||||
|
|
||||||
|
### Existing files to modify in `claw-new`
|
||||||
|
|
||||||
|
- Modify: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- add narrow structured artifact parsing and status-to-summary mapping
|
||||||
|
- keep direct-skill routing/config ownership unchanged
|
||||||
|
- Modify: `tests/agent_runtime_test.rs`
|
||||||
|
- add direct-submit regressions for `ok`, `partial`, `empty`, `blocked`, and `error`
|
||||||
|
- Modify: `tests/browser_script_skill_tool_test.rs`
|
||||||
|
- add browser-script execution-shape regression for browser-eval return payloads used by fault-details
|
||||||
|
|
||||||
|
### Existing files to modify in `skill_staging`
|
||||||
|
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.js`
|
||||||
|
- replace empty shell with browser-eval entrypoint plus parity helpers
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js`
|
||||||
|
- deterministic fixture coverage for normalization, classification, summary, artifact contract, export/logging degradation, and entrypoint shape helpers
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/SKILL.toml`
|
||||||
|
- align tool description with real collection/export/report-log behavior
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/SKILL.md`
|
||||||
|
- align written contract with actual runtime behavior and artifact fields
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/references/collection-flow.md`
|
||||||
|
- align flow with page-range/query/export/report-log sequence
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/references/data-quality.md`
|
||||||
|
- make canonical columns, original classification tables, reason heuristics, summary rules, and partial semantics explicit
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/scenes/fault-details-report/scene.json`
|
||||||
|
- keep scene output/state contract aligned with real staged artifact behavior
|
||||||
|
|
||||||
|
### Existing files to read but not redesign
|
||||||
|
|
||||||
|
- Read only: `docs/superpowers/specs/2026-04-10-fault-details-full-skill-alignment-design.md`
|
||||||
|
- Read only: `src/agent/mod.rs`
|
||||||
|
- Read only: `src/compat/browser_script_skill_tool.rs`
|
||||||
|
- Read only: `D:/desk/智能体资料/大四区报告监测项/故障明细/index.html`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 1: Add staged-skill red tests for normalization, summary, and artifact-contract semantics
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js`
|
||||||
|
- Read only: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.js`
|
||||||
|
- Read only: `D:/desk/智能体资料/大四区报告监测项/故障明细/index.html`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write the failing staged-skill test file**
|
||||||
|
|
||||||
|
Add `collect_fault_details.test.js` using `node:test` and `assert/strict`. Cover these behaviors with fixed fixtures:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const test = require('node:test');
|
||||||
|
const assert = require('node:assert/strict');
|
||||||
|
|
||||||
|
const {
|
||||||
|
DETAIL_COLUMNS,
|
||||||
|
SUMMARY_COLUMNS,
|
||||||
|
normalizeDetailRow,
|
||||||
|
deriveSummaryRows,
|
||||||
|
determineArtifactStatus,
|
||||||
|
buildFaultDetailsArtifact,
|
||||||
|
buildBrowserEntrypointResult
|
||||||
|
} = require('./collect_fault_details.js');
|
||||||
|
|
||||||
|
test('normalizeDetailRow maps canonical detail fields from raw repair rows', () => {
|
||||||
|
const row = normalizeDetailRow({
|
||||||
|
qxdbh: 'QX-1',
|
||||||
|
bxsj: '2026-03-09 08:00:00',
|
||||||
|
cityName: '国网兰州供电公司',
|
||||||
|
maintOrgName: '城关供电服务班',
|
||||||
|
maintGroupName: '抢修一班',
|
||||||
|
bdzMc: '110kV东岗变',
|
||||||
|
xlmc10: '10kV东岗线',
|
||||||
|
byqmc: '东岗1号变',
|
||||||
|
yjflMc: '电网故障',
|
||||||
|
ejflMc: '线路故障',
|
||||||
|
sjflMc: '低压线路',
|
||||||
|
qxxcjl: '现场检查:低压线路断线,已处理完成',
|
||||||
|
gzms: '客户报修停电'
|
||||||
|
}, {
|
||||||
|
companyName: '国网兰州供电公司'
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(row.slsj, '2026-03-09 08:00:00');
|
||||||
|
assert.equal(row.gssgs, '甘肃省电力公司');
|
||||||
|
assert.equal(row.gddw, '城关供电服务班');
|
||||||
|
assert.equal(row.gds, '抢修一班');
|
||||||
|
assert.equal(row.clzt, '处理完成');
|
||||||
|
assert.equal(row.bdz, '110kV东岗变');
|
||||||
|
assert.equal(row.line, '10kV东岗线');
|
||||||
|
assert.equal(row.pb, '东岗1号变');
|
||||||
|
});
|
||||||
|
|
||||||
|
test('deriveSummaryRows groups normalized rows by gds and computes counters', () => {
|
||||||
|
const rows = [
|
||||||
|
{ gds: '抢修一班', gddw: '城关供电服务班', sgs: '国网兰州供电公司', sxfl1: '无效', sxfl2: '无效', gzsb: '' },
|
||||||
|
{ gds: '抢修一班', gddw: '城关供电服务班', sgs: '国网兰州供电公司', sxfl1: '有效', sxfl2: '用户侧', gzsb: '表后线' },
|
||||||
|
{ gds: '抢修一班', gddw: '城关供电服务班', sgs: '国网兰州供电公司', sxfl1: '有效', sxfl2: '电网侧', dwcFl: '低压故障', gzsb: '低压线路' }
|
||||||
|
];
|
||||||
|
|
||||||
|
const summaryRows = deriveSummaryRows(rows, { companyName: '国网兰州供电公司' });
|
||||||
|
assert.equal(summaryRows.length, 1);
|
||||||
|
assert.equal(summaryRows[0].className, '抢修一班');
|
||||||
|
assert.equal(summaryRows[0].allCount, 3);
|
||||||
|
assert.equal(summaryRows[0].wxCount, 1);
|
||||||
|
assert.equal(summaryRows[0].khcCount, 0);
|
||||||
|
assert.equal(summaryRows[0].dyGzCount, 1);
|
||||||
|
assert.equal(summaryRows[0].dyxlCount, 1);
|
||||||
|
assert.equal(summaryRows[0].bhxCount, 1);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('determineArtifactStatus follows blocked > error > partial > empty > ok precedence', () => {
|
||||||
|
assert.equal(determineArtifactStatus({ blockedReason: 'missing_session', fatalError: null, partialReasons: [], detailRows: [{}] }), 'blocked');
|
||||||
|
assert.equal(determineArtifactStatus({ blockedReason: null, fatalError: 'parse_failed', partialReasons: [], detailRows: [{}] }), 'error');
|
||||||
|
assert.equal(determineArtifactStatus({ blockedReason: null, fatalError: null, partialReasons: ['export_failed'], detailRows: [{}] }), 'partial');
|
||||||
|
assert.equal(determineArtifactStatus({ blockedReason: null, fatalError: null, partialReasons: [], detailRows: [] }), 'empty');
|
||||||
|
assert.equal(determineArtifactStatus({ blockedReason: null, fatalError: null, partialReasons: [], detailRows: [{}] }), 'ok');
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildFaultDetailsArtifact keeps canonical fields, selected range, counts, and downstream results', () => {
|
||||||
|
const artifact = buildFaultDetailsArtifact({
|
||||||
|
period: '2026-03',
|
||||||
|
selectedRange: { start: '2026-03-08 16:00:00', end: '2026-03-09 16:00:00' },
|
||||||
|
detailRows: [{ qxdbh: 'QX-1' }],
|
||||||
|
summaryRows: [{ index: 1 }],
|
||||||
|
partialReasons: ['report_log_failed'],
|
||||||
|
downstream: {
|
||||||
|
export: { attempted: true, success: true, path: 'http://localhost/export.xlsx' },
|
||||||
|
report_log: { attempted: true, success: false, error: '500' }
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(artifact.type, 'report-artifact');
|
||||||
|
assert.equal(artifact.status, 'partial');
|
||||||
|
assert.deepEqual(artifact.selected_range, { start: '2026-03-08 16:00:00', end: '2026-03-09 16:00:00' });
|
||||||
|
assert.equal(artifact.counts.detail_rows, 1);
|
||||||
|
assert.equal(artifact.counts.summary_rows, 1);
|
||||||
|
assert.deepEqual(artifact.partial_reasons, ['report_log_failed']);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildFaultDetailsArtifact keeps required top-level fields for blocked artifact', () => {
|
||||||
|
const artifact = buildFaultDetailsArtifact({
|
||||||
|
period: '2026-03',
|
||||||
|
blockedReason: 'selected_range_unavailable',
|
||||||
|
partialReasons: ['selected_range_unavailable']
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(artifact.type, 'report-artifact');
|
||||||
|
assert.equal(artifact.report_name, 'fault-details-report');
|
||||||
|
assert.equal(artifact.period, '2026-03');
|
||||||
|
assert.equal(artifact.status, 'blocked');
|
||||||
|
assert.deepEqual(artifact.partial_reasons, ['selected_range_unavailable']);
|
||||||
|
assert.equal('downstream' in artifact, false);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildFaultDetailsArtifact keeps known selected range and counts on late error', () => {
|
||||||
|
const artifact = buildFaultDetailsArtifact({
|
||||||
|
period: '2026-03',
|
||||||
|
selectedRange: { start: '2026-03-08 16:00:00', end: '2026-03-09 16:00:00' },
|
||||||
|
detailRows: [],
|
||||||
|
summaryRows: [],
|
||||||
|
fatalError: 'summary_failed',
|
||||||
|
partialReasons: ['summary_failed']
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(artifact.status, 'error');
|
||||||
|
assert.deepEqual(artifact.selected_range, { start: '2026-03-08 16:00:00', end: '2026-03-09 16:00:00' });
|
||||||
|
assert.equal(artifact.counts.detail_rows, 0);
|
||||||
|
assert.equal(artifact.counts.summary_rows, 0);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildBrowserEntrypointResult returns blocked artifact when selected range is unavailable', async () => {
|
||||||
|
const artifact = await buildBrowserEntrypointResult({
|
||||||
|
period: '2026-03'
|
||||||
|
}, {
|
||||||
|
readSelectedRange: async () => null
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(artifact.status, 'blocked');
|
||||||
|
assert.ok(artifact.partial_reasons.includes('selected_range_unavailable'));
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the staged-skill test file and verify it fails**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because `collect_fault_details.js` does not export these helpers yet and still only returns an empty shell.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 2: Implement staged-skill parity helpers and a valid browser entrypoint
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.js`
|
||||||
|
- Test: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Implement the helper exports and browser entrypoint shape needed to satisfy the red tests**
|
||||||
|
|
||||||
|
Refactor `collect_fault_details.js` so the file remains a valid browser-eval script in page context while still supporting `node:test` through an environment-safe export guard.
|
||||||
|
|
||||||
|
Required implementation pieces:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const DETAIL_COLUMNS = [/* existing canonical columns */];
|
||||||
|
const SUMMARY_COLUMNS = [/* existing summary columns */];
|
||||||
|
|
||||||
|
function normalizeDetailRow(raw, context) {
|
||||||
|
// map qxdbh/gssgs/sgs/gddw/gds/slsj/clzt/bdz/line/pb
|
||||||
|
// derive sxfl1/sxfl2/sxfl3/gzsb/gzyy from the original package rules
|
||||||
|
}
|
||||||
|
|
||||||
|
function deriveSummaryRows(detailRows, context) {
|
||||||
|
// group by gds and compute all original package counters
|
||||||
|
}
|
||||||
|
|
||||||
|
function determineArtifactStatus({ blockedReason, fatalError, partialReasons, detailRows }) {
|
||||||
|
// blocked > error > partial > empty > ok
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildFaultDetailsArtifact({
|
||||||
|
period,
|
||||||
|
selectedRange,
|
||||||
|
detailRows,
|
||||||
|
summaryRows,
|
||||||
|
partialReasons,
|
||||||
|
blockedReason,
|
||||||
|
fatalError,
|
||||||
|
downstream
|
||||||
|
}) {
|
||||||
|
// return report-artifact with columns, sections, counts, status, partial_reasons, downstream
|
||||||
|
}
|
||||||
|
|
||||||
|
async function buildBrowserEntrypointResult(input, deps = defaultBrowserDeps()) {
|
||||||
|
// read selected range from page
|
||||||
|
// collect raw rows from page query
|
||||||
|
// normalize rows
|
||||||
|
// derive summary
|
||||||
|
// attempt export + report log
|
||||||
|
// return final artifact
|
||||||
|
}
|
||||||
|
|
||||||
|
if (typeof module !== 'undefined' && module.exports) {
|
||||||
|
module.exports = {
|
||||||
|
DETAIL_COLUMNS,
|
||||||
|
SUMMARY_COLUMNS,
|
||||||
|
normalizeDetailRow,
|
||||||
|
deriveSummaryRows,
|
||||||
|
determineArtifactStatus,
|
||||||
|
buildFaultDetailsArtifact,
|
||||||
|
buildBrowserEntrypointResult
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return await buildBrowserEntrypointResult(args);
|
||||||
|
```
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- keep `DETAIL_COLUMNS` and `SUMMARY_COLUMNS` canonical and stable
|
||||||
|
- keep helper functions self-contained in this file unless a separate pure helper file becomes necessary for runtime validity
|
||||||
|
- keep the browser entrypoint compatible with current `eval` wrapper
|
||||||
|
- keep browser runtime free of unguarded Node-only assumptions
|
||||||
|
- do **not** invent a new protocol or callback surface
|
||||||
|
|
||||||
|
- [ ] **Step 2: Re-run the staged-skill test file and verify it now reaches deeper failures or passes the initial helper coverage**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: either PASS for the Task 1 cases, or FAIL only on the still-missing full parity/export/history specifics added in Task 3.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 3: Add red tests for full classification parity, downstream partials, and empty-result export semantics
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js`
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.js`
|
||||||
|
- Read only: `D:/desk/智能体资料/大四区报告监测项/故障明细/index.html`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Extend the staged-skill tests with failing parity and downstream cases**
|
||||||
|
|
||||||
|
Add focused failing tests such as:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
test('normalizeDetailRow derives gzyy from qxxcjl text heuristics', () => {
|
||||||
|
const row = normalizeDetailRow({
|
||||||
|
qxxcjl: '现场检查:客户表后线烧损,已恢复送电',
|
||||||
|
ejflMc: '客户侧故障',
|
||||||
|
sjflMc: '表后线'
|
||||||
|
}, { companyName: '国网兰州供电公司' });
|
||||||
|
|
||||||
|
assert.equal(row.gzsb, '表后线');
|
||||||
|
assert.equal(row.gzyy, '表后线烧损');
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildBrowserEntrypointResult returns partial when export fails after detail collection succeeds', async () => {
|
||||||
|
const artifact = await buildBrowserEntrypointResult({ period: '2026-03' }, {
|
||||||
|
readSelectedRange: async () => ({ start: '2026-03-08 16:00:00', end: '2026-03-09 16:00:00' }),
|
||||||
|
queryFaultRows: async () => [{ qxdbh: 'QX-1', bxsj: '2026-03-09 08:00:00', maintGroupName: '抢修一班' }],
|
||||||
|
readCompanyContext: () => ({ companyName: '国网兰州供电公司' }),
|
||||||
|
exportWorkbook: async () => {
|
||||||
|
throw new Error('export_failed');
|
||||||
|
},
|
||||||
|
writeReportLog: async () => ({ success: true })
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(artifact.status, 'partial');
|
||||||
|
assert.ok(artifact.partial_reasons.includes('export_failed'));
|
||||||
|
assert.equal(artifact.counts.detail_rows, 1);
|
||||||
|
assert.equal(artifact.downstream.export.attempted, true);
|
||||||
|
assert.equal(artifact.downstream.export.success, false);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildBrowserEntrypointResult returns error when normalized detail rows cannot be produced', async () => {
|
||||||
|
const artifact = await buildBrowserEntrypointResult({ period: '2026-03' }, {
|
||||||
|
readSelectedRange: async () => ({ start: '2026-03-08 16:00:00', end: '2026-03-09 16:00:00' }),
|
||||||
|
queryFaultRows: async () => [{ qxdbh: '', bxsj: '' }],
|
||||||
|
readCompanyContext: () => ({ companyName: '国网兰州供电公司' })
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(artifact.status, 'error');
|
||||||
|
assert.ok(artifact.partial_reasons.includes('detail_normalization_failed'));
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildBrowserEntrypointResult keeps canonical rows empty for empty result and omits downstream before attempts', async () => {
|
||||||
|
const artifact = await buildBrowserEntrypointResult({ period: '2026-03' }, {
|
||||||
|
readSelectedRange: async () => ({ start: '2026-03-08 16:00:00', end: '2026-03-09 16:00:00' }),
|
||||||
|
queryFaultRows: async () => [],
|
||||||
|
readCompanyContext: () => ({ companyName: '国网兰州供电公司' })
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(artifact.status, 'empty');
|
||||||
|
assert.deepEqual(artifact.rows, []);
|
||||||
|
assert.equal('downstream' in artifact, false);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
Also add fixture cases derived from the original package’s full classification table and summary counters so the staged skill is forced toward parity, not a subset implementation.
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the staged-skill test file and verify it fails on the new cases**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL on missing full classification parity or downstream partial/error behavior.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Implement the full business logic needed to satisfy the new tests**
|
||||||
|
|
||||||
|
In `collect_fault_details.js`:
|
||||||
|
- port the original classification table and `qxxcjl` text heuristics for `sxfl1`, `sxfl2`, `sxfl3`, `gzsb`, `gzyy`
|
||||||
|
- port the original summary derivation rules and counters completely
|
||||||
|
- add required-field validation so structurally unusable normalized rows escalate to `error`
|
||||||
|
- add downstream `exportWorkbook` and `writeReportLog` stages that record `{attempted, success, path, error}`
|
||||||
|
- keep collection success distinct from downstream failures so export/logging failures become `partial`, not full failure
|
||||||
|
- keep placeholder rows, if needed for downstream empty-export payloads, downstream-only and never in canonical returned `rows`
|
||||||
|
- include both `period` and `selected_range` in the artifact
|
||||||
|
- omit `downstream` when export/report-log have not been attempted yet
|
||||||
|
|
||||||
|
- [ ] **Step 4: Re-run the staged-skill test file and verify it passes**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 4: Align staged-skill metadata and reference docs with the implemented behavior
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/SKILL.toml`
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/SKILL.md`
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/references/collection-flow.md`
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/references/data-quality.md`
|
||||||
|
- Modify: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/scenes/fault-details-report/scene.json`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Update the staged metadata/docs to match the implemented runtime contract**
|
||||||
|
|
||||||
|
Required changes:
|
||||||
|
- `SKILL.toml`: description must say the tool collects rows, derives summary, attempts localhost export, and records report history
|
||||||
|
- `SKILL.md`: artifact example must include `selected_range`, `counts`, `status`, `partial_reasons`, and `downstream`
|
||||||
|
- `references/collection-flow.md`: sequence must explicitly include page-selected range -> raw query -> normalization -> summary -> export -> report-log
|
||||||
|
- `references/data-quality.md`: document the original classification tables, `qxxcjl` heuristics, summary rules, partial/error escalation rules, and empty-result semantics explicitly enough to match the implemented helpers
|
||||||
|
- `scene.json`: keep inputs/outputs/status semantics aligned with the richer artifact; do not add routing policy there
|
||||||
|
|
||||||
|
- [ ] **Step 2: Read the updated staged docs and verify they match the implemented JS behavior**
|
||||||
|
|
||||||
|
Read and confirm:
|
||||||
|
- descriptions no longer claim “artifact shell” behavior
|
||||||
|
- docs do not move routing ownership out of `claw-new`
|
||||||
|
- docs do not promise auto-opening/downloading behavior in this slice
|
||||||
|
- docs reflect blocked/error field-presence rules and downstream-attempt semantics
|
||||||
|
|
||||||
|
Expected: staged metadata/docs accurately reflect the implemented collector.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 5: Add Rust red tests for artifact-status interpretation in the direct-submit runtime
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `tests/agent_runtime_test.rs`
|
||||||
|
- Modify: `tests/browser_script_skill_tool_test.rs`
|
||||||
|
- Modify: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- Read only: `src/compat/browser_script_skill_tool.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Add failing direct-submit runtime tests for structured artifact statuses**
|
||||||
|
|
||||||
|
Extend `tests/agent_runtime_test.rs` with focused regressions that use the existing temp skill-root harness but return real `report-artifact` payloads:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn submit_task_treats_partial_report_artifact_as_success_with_warning_summary() {
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let runtime_context = direct_submit_runtime_context(&skill_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![success_browser_response(
|
||||||
|
1,
|
||||||
|
serde_json::json!({
|
||||||
|
"text": {
|
||||||
|
"type": "report-artifact",
|
||||||
|
"report_name": "fault-details-report",
|
||||||
|
"period": "2026-03",
|
||||||
|
"selected_range": { "start": "2026-03-08 16:00:00", "end": "2026-03-09 16:00:00" },
|
||||||
|
"columns": ["qxdbh"],
|
||||||
|
"rows": [{ "qxdbh": "QX-1" }],
|
||||||
|
"sections": [{ "name": "summary-sheet", "columns": ["index"], "rows": [{ "index": 1 }] }],
|
||||||
|
"counts": { "detail_rows": 1, "summary_rows": 1 },
|
||||||
|
"status": "partial",
|
||||||
|
"partial_reasons": ["report_log_failed"],
|
||||||
|
"downstream": {
|
||||||
|
"export": { "attempted": true, "success": true, "path": "http://localhost/export.xlsx" },
|
||||||
|
"report_log": { "attempted": true, "success": false, "error": "500" }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
)]));
|
||||||
|
// ... invoke handle_browser_message_with_context(...)
|
||||||
|
// assert TaskComplete.success == true
|
||||||
|
// assert summary contains partial/report_log_failed/detail_rows=1
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_treats_empty_report_artifact_as_success() { /* status=empty => success=true */ }
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_treats_blocked_report_artifact_as_failure() { /* status=blocked => success=false */ }
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_treats_error_report_artifact_as_failure() { /* status=error => success=false */ }
|
||||||
|
```
|
||||||
|
|
||||||
|
Also add one focused helper regression to `tests/browser_script_skill_tool_test.rs` that proves the browser-script helper can return a structured object payload used by the fault-details path without flattening required fields away.
|
||||||
|
|
||||||
|
Suggested test name:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[tokio::test]
|
||||||
|
async fn execute_browser_script_tool_preserves_structured_report_artifact_payload() { /* ... */ }
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the focused Rust tests and verify they fail**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test submit_task_treats_partial_report_artifact_as_success_with_warning_summary -- --nocapture
|
||||||
|
cargo test --test browser_script_skill_tool_test execute_browser_script_tool_preserves_structured_report_artifact_payload -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: the new `agent_runtime_test` case fails because `execute_direct_submit_skill` still returns raw JSON text and `src/agent/mod.rs` still marks all direct-submit results as success when no Rust-side interpretation exists.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 6: Implement narrow Rust artifact interpretation without moving business rules into Rust
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- Modify: `tests/agent_runtime_test.rs`
|
||||||
|
- Modify: `tests/browser_script_skill_tool_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Implement a narrow structured-artifact interpreter in `src/compat/direct_skill_runtime.rs`**
|
||||||
|
|
||||||
|
Add a small internal result type and parser, for example:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
struct DirectSubmitOutcome {
|
||||||
|
success: bool,
|
||||||
|
summary: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn interpret_direct_submit_output(output: &str) -> DirectSubmitOutcome {
|
||||||
|
// parse JSON if possible
|
||||||
|
// if type == "report-artifact", read status/counts/partial_reasons/downstream
|
||||||
|
// map ok/partial/empty => success=true
|
||||||
|
// map blocked/error => success=false
|
||||||
|
// build concise summary with report_name, period, detail_rows, summary_rows, status, partial reasons
|
||||||
|
// fall back to raw output text when payload is not a recognized artifact
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Then change the public entrypoint shape from `Result<String, PipeError>` to a narrow result carrying `success` and `summary`, or add a second helper that `src/agent/mod.rs` can use without changing routing ownership.
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- do **not** reimplement fault normalization/classification/summary in Rust
|
||||||
|
- do **not** add fault-specific branching in `src/agent/mod.rs`
|
||||||
|
- keep unrecognized non-artifact outputs working as before
|
||||||
|
- keep explicit `YYYY-MM` derivation and configured `skill.tool` resolution unchanged
|
||||||
|
|
||||||
|
- [ ] **Step 2: Update the submit-path caller to use the interpreted success flag**
|
||||||
|
|
||||||
|
Adjust the direct-submit branch so `TaskComplete.success` comes from the artifact interpretation instead of blindly treating every `Ok(summary)` as success.
|
||||||
|
|
||||||
|
Implementation target:
|
||||||
|
- keep the direct path in `src/agent/mod.rs`
|
||||||
|
- keep error handling narrow
|
||||||
|
- if needed, return a dedicated direct-submit outcome from `execute_direct_submit_skill`
|
||||||
|
|
||||||
|
- [ ] **Step 3: Re-run the focused Rust tests and verify they pass**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test submit_task_treats_partial_report_artifact_as_success_with_warning_summary -- --nocapture
|
||||||
|
cargo test --test agent_runtime_test submit_task_treats_empty_report_artifact_as_success -- --nocapture
|
||||||
|
cargo test --test agent_runtime_test submit_task_treats_blocked_report_artifact_as_failure -- --nocapture
|
||||||
|
cargo test --test agent_runtime_test submit_task_treats_error_report_artifact_as_failure -- --nocapture
|
||||||
|
cargo test --test browser_script_skill_tool_test execute_browser_script_tool_preserves_structured_report_artifact_payload -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 7: Run the full verification sweep for the staged skill and direct runtime
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Verify only
|
||||||
|
|
||||||
|
- [ ] **Step 1: Run the staged-skill deterministic test file**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the relevant Rust regression suites**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test browser_script_skill_tool_test -- --nocapture
|
||||||
|
cargo test --test agent_runtime_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Run the broader compatibility coverage and build**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_runtime_test -- --nocapture
|
||||||
|
cargo test --test compat_config_test -- --nocapture
|
||||||
|
cargo build --bin sgclaw
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 4: Manually verify the requirements against the approved spec**
|
||||||
|
|
||||||
|
Checklist:
|
||||||
|
- staged skill now reads page-selected range instead of inventing a month window after entry
|
||||||
|
- staged skill returns canonical detail rows and summary rows
|
||||||
|
- staged skill ports the original classification table, `qxxcjl` heuristics, and summary counters with parity coverage
|
||||||
|
- staged skill records downstream export/report-log outcome
|
||||||
|
- staged skill distinguishes `ok` / `partial` / `empty` / `blocked` / `error`
|
||||||
|
- `blocked` / `error` artifacts keep the required top-level fields, and preserve known `selected_range` / `counts` when failure happens late enough
|
||||||
|
- `downstream` is omitted when export/report-log were not attempted and included with attempted/success flags once they were attempted
|
||||||
|
- empty-result canonical `rows` stay empty even if downstream export uses a placeholder transport row
|
||||||
|
- `claw-new` maps `ok` / `partial` / `empty` to success and `blocked` / `error` to failure
|
||||||
|
- no new routing metadata was added to `SKILL.toml` or `scene.json`
|
||||||
|
- no new browser protocol or opener/UI behavior was introduced
|
||||||
|
|
||||||
|
Expected: all checklist items satisfied before calling the work complete.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Verification Checklist
|
||||||
|
|
||||||
|
### Staged skill behavior
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: deterministic fixture coverage passes for normalization, full classification parity, summary derivation, artifact shape, empty semantics, and downstream partial semantics.
|
||||||
|
|
||||||
|
### Direct-submit runtime mapping
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test agent_runtime_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected:
|
||||||
|
- valid artifact `ok` / `partial` / `empty` completes successfully
|
||||||
|
- valid artifact `blocked` / `error` completes as failure
|
||||||
|
- existing invalid config regression still passes
|
||||||
|
- existing direct-submit happy path still passes
|
||||||
|
|
||||||
|
### Browser-script helper safety
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test browser_script_skill_tool_test -- --nocapture
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: current browser-script execution semantics remain intact while returning structured artifact payloads.
|
||||||
|
|
||||||
|
### Compatibility/build
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo test --test compat_runtime_test -- --nocapture
|
||||||
|
cargo test --test compat_config_test -- --nocapture
|
||||||
|
cargo build --bin sgclaw
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: no regressions in compat execution/config loading; main binary builds cleanly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes For The Engineer
|
||||||
|
|
||||||
|
- The paired spec is `docs/superpowers/specs/2026-04-10-fault-details-full-skill-alignment-design.md`.
|
||||||
|
- Keep all fault business transforms in `skill_staging`, not in Rust.
|
||||||
|
- Keep direct routing config-owned via `skillsDir` + `directSubmitSkill`.
|
||||||
|
- Do **not** broaden this slice into LLM routing, generic dispatch policy, new browser opcodes, or export auto-open behavior.
|
||||||
|
- If the original package reveals extra classification rules that are needed for parity, add them only inside `collect_fault_details.js` and its staged references/tests, not in `claw-new`.
|
||||||
@@ -0,0 +1,808 @@
|
|||||||
|
# TQ Lineloss Deterministic Skill Implementation Plan
|
||||||
|
|
||||||
|
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||||
|
|
||||||
|
**Goal:** Add a staged `tq-lineloss-report.collect_lineloss` browser-script skill plus a `。。。`-suffixed deterministic submit path in `claw-new` that extracts and normalizes company/month/week parameters without invoking an LLM, executes through the existing pipe browser-script seam, and does not regress Zhihu hotlist behavior.
|
||||||
|
|
||||||
|
**Architecture:** Keep the new behavior behind a narrow deterministic branch that activates only when the raw instruction ends with the exact suffix `。。。`. `claw-new` owns deterministic trigger detection, explicit scene matching, semantic extraction, canonical normalization, prompt-or-execute control flow, and artifact interpretation; the staged skill owns page inspection, source/API collection, row normalization, export/report-log behavior, and final artifact generation. Reuse the existing `browser_script` execution seam already used by the direct browser path so the backend can later swap from pipe to ws without changing the deterministic contract.
|
||||||
|
|
||||||
|
**Tech Stack:** Rust 2021, Cargo tests, existing `BrowserPipeTool` / `execute_browser_script_tool` seam, staged skill packaging under `claw/claw/skills/skill_staging`, browser-side JavaScript, deterministic string parsing and normalization.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Context
|
||||||
|
|
||||||
|
- Follow @superpowers:test-driven-development for every behavior change.
|
||||||
|
- Follow @superpowers:verification-before-completion before claiming each task is done.
|
||||||
|
- Do **not** create a git worktree unless the user explicitly asks.
|
||||||
|
- Keep the new behavior as a narrow branch; do **not** redesign the whole runtime into a general registry engine in this slice.
|
||||||
|
- Preserve `src/runtime/engine.rs:147-159` and `src/runtime/engine.rs:265-286` behavior unless a failing regression test proves a change is required.
|
||||||
|
- Do **not** add ws runtime requirements on `main`; keep ws-readiness isolated to backend-neutral contracts only.
|
||||||
|
- Never fall back to page defaults for missing company, mode, or period in deterministic mode.
|
||||||
|
- If a deterministic request does not match the lineloss whitelist scene, return a deterministic mismatch prompt instead of falling through to ordinary orchestration.
|
||||||
|
|
||||||
|
## File Map
|
||||||
|
|
||||||
|
### New or modified files in `claw-new`
|
||||||
|
|
||||||
|
- Create: `src/compat/deterministic_submit.rs`
|
||||||
|
- suffix detection, deterministic scene match, prompt-or-execute decision
|
||||||
|
- Create: `src/compat/tq_lineloss/mod.rs`
|
||||||
|
- public normalization and artifact helpers
|
||||||
|
- Create: `src/compat/tq_lineloss/contracts.rs`
|
||||||
|
- canonical request/result data structures and status semantics
|
||||||
|
- Create: `src/compat/tq_lineloss/org_resolver.rs`
|
||||||
|
- alias generation, canonical label/code resolution, ambiguity handling
|
||||||
|
- Create: `src/compat/tq_lineloss/period_resolver.rs`
|
||||||
|
- month/week extraction, contradiction detection, canonical payload building
|
||||||
|
- Create: `src/compat/tq_lineloss/org_units.rs`
|
||||||
|
- checked-in canonical unit dictionary derived from the real source tree data
|
||||||
|
- Modify: `src/compat/mod.rs`
|
||||||
|
- export the deterministic and lineloss modules
|
||||||
|
- Modify: `src/agent/mod.rs`
|
||||||
|
- insert the deterministic branch before ordinary LLM interpretation, but only when the exact suffix is present
|
||||||
|
- Modify only if code duplication would otherwise occur: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- extract narrow shared browser-script execution helpers without changing current configured direct-submit behavior
|
||||||
|
- Read but avoid changing unless tests force it: `src/runtime/engine.rs`
|
||||||
|
- existing Zhihu hotlist routing/prompt logic must remain intact
|
||||||
|
|
||||||
|
### New staged skill package in `claw`
|
||||||
|
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/SKILL.md`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/SKILL.toml`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/references/collection-flow.md`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/references/data-quality.md`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/assets/scene-snapshot/index.html`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.js`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.test.js`
|
||||||
|
- Create if staging conventions require it: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/scenes/tq-lineloss-report/scene.json`
|
||||||
|
|
||||||
|
### Tests
|
||||||
|
|
||||||
|
- Create: `tests/deterministic_submit_test.rs`
|
||||||
|
- Modify: `tests/compat_runtime_test.rs`
|
||||||
|
- Modify only if end-to-end submit coverage requires it: `tests/runtime_task_flow_test.rs`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Locked contracts
|
||||||
|
|
||||||
|
### Deterministic trigger contract
|
||||||
|
|
||||||
|
- Trigger only when the raw instruction ends with the exact suffix `。。。`.
|
||||||
|
- No suffix: current behavior unchanged.
|
||||||
|
- Suffix + unsupported scene: explicit deterministic mismatch prompt.
|
||||||
|
- Suffix is not permission for arbitrary browser actions; only fixed deterministic scenes are allowed.
|
||||||
|
- Negative cases must stay non-deterministic or mismatched exactly as designed:
|
||||||
|
- ASCII `...` is not the trigger
|
||||||
|
- `。。。。` is not the trigger
|
||||||
|
- `。。。` appearing in the middle of the instruction is not the trigger
|
||||||
|
- any trailing whitespace after `。。。` is not the trigger in this slice
|
||||||
|
|
||||||
|
### Canonical org contract
|
||||||
|
|
||||||
|
The resolver must output both display and backend values:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct ResolvedOrg {
|
||||||
|
pub label: String,
|
||||||
|
pub code: String,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Required supported inputs include:
|
||||||
|
- `兰州公司`
|
||||||
|
- `天水公司`
|
||||||
|
- `国网兰州供电公司`
|
||||||
|
- `城关供电分公司`
|
||||||
|
- `榆中县供电公司`
|
||||||
|
- normalized shorthand such as `榆中县公司`
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- derive aliases from the real unit tree data
|
||||||
|
- require uniqueness before execution
|
||||||
|
- ambiguous aliases prompt and stop
|
||||||
|
- missing company prompts and stop
|
||||||
|
|
||||||
|
### Canonical period contract
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub enum PeriodMode {
|
||||||
|
Month,
|
||||||
|
Week,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ResolvedPeriod {
|
||||||
|
pub mode: PeriodMode,
|
||||||
|
pub mode_code: String,
|
||||||
|
pub value: String,
|
||||||
|
pub payload: serde_json::Value,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Required supported inputs include:
|
||||||
|
- `月累计 2026-03`
|
||||||
|
- `月累计 2026年3月`
|
||||||
|
- `周累计 2026年第12周`
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- month and week intent are mutually exclusive
|
||||||
|
- missing mode prompts and stop
|
||||||
|
- missing period prompts and stop
|
||||||
|
- bare `第12周` is incomplete in this slice and must prompt for year instead of guessing
|
||||||
|
- derive the real backend `period_mode_code` values and request payload field names from the source page/API contract before implementation; do not ship placeholder enum echoes such as `month`/`week` unless the source materials prove those are the real backend codes
|
||||||
|
- never use page-selected defaults in deterministic mode
|
||||||
|
|
||||||
|
### Artifact contract
|
||||||
|
|
||||||
|
Lock the field names now so `claw-new` can interpret status without re-embedding business logic:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "report-artifact",
|
||||||
|
"report_name": "tq-lineloss-report",
|
||||||
|
"status": "ok",
|
||||||
|
"org": {
|
||||||
|
"label": "国网兰州供电公司",
|
||||||
|
"code": "008df5db70319f73e0508e0ac23e0c3c"
|
||||||
|
},
|
||||||
|
"period": {
|
||||||
|
"mode": "month",
|
||||||
|
"mode_code": "<real-backend-mode-code>",
|
||||||
|
"value": "2026-03",
|
||||||
|
"payload": {
|
||||||
|
"<real-backend-field>": "<real-backend-value>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"columns": [],
|
||||||
|
"rows": [],
|
||||||
|
"counts": {
|
||||||
|
"rows": 0
|
||||||
|
},
|
||||||
|
"export": {
|
||||||
|
"attempted": false,
|
||||||
|
"status": "skipped",
|
||||||
|
"message": null
|
||||||
|
},
|
||||||
|
"reasons": []
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Status mapping in `claw-new`:
|
||||||
|
- `ok` -> task success
|
||||||
|
- `partial` -> task success with partial summary
|
||||||
|
- `blocked` -> task failure
|
||||||
|
- `error` -> task failure
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 1: Scaffold the staged skill package and written contract
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/SKILL.md`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/SKILL.toml`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/references/collection-flow.md`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/references/data-quality.md`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/assets/scene-snapshot/index.html`
|
||||||
|
- Create if required: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/scenes/tq-lineloss-report/scene.json`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write the failing package contract files**
|
||||||
|
|
||||||
|
Create the package using `fault-details-report` as the structure reference. Lock one tool only:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[tools]]
|
||||||
|
name = "collect_lineloss"
|
||||||
|
kind = "browser_script"
|
||||||
|
description = "Collect 台区线损月/周累计线损率 rows using normalized company and period parameters and return a structured report artifact."
|
||||||
|
```
|
||||||
|
|
||||||
|
Declare required args in `SKILL.toml`:
|
||||||
|
- `expected_domain`
|
||||||
|
- `org_label`
|
||||||
|
- `org_code`
|
||||||
|
- `period_mode`
|
||||||
|
- `period_mode_code`
|
||||||
|
- `period_value`
|
||||||
|
- `period_payload`
|
||||||
|
|
||||||
|
- [ ] **Step 2: Write `SKILL.md` before implementation**
|
||||||
|
|
||||||
|
Document:
|
||||||
|
- when to use / when not to use
|
||||||
|
- required normalized args only
|
||||||
|
- blocked/error semantics
|
||||||
|
- exact returned artifact fields
|
||||||
|
- no raw natural-language values passed to backend requests
|
||||||
|
|
||||||
|
- [ ] **Step 3: Write the reference docs**
|
||||||
|
|
||||||
|
`references/collection-flow.md` must describe:
|
||||||
|
- relevant page state
|
||||||
|
- month request mapping
|
||||||
|
- week request mapping
|
||||||
|
- export/report-log flow if retained
|
||||||
|
|
||||||
|
`references/data-quality.md` must define:
|
||||||
|
- canonical output columns
|
||||||
|
- required field coverage
|
||||||
|
- status semantics
|
||||||
|
- partial/error rules
|
||||||
|
- org/period normalization assumptions
|
||||||
|
|
||||||
|
- [ ] **Step 4: Add scene metadata if the current staging registry needs it**
|
||||||
|
|
||||||
|
Keep it narrow: one scene, one tool, one artifact type.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Add an automated staged-skill load/resolve check**
|
||||||
|
|
||||||
|
Add `tests/deterministic_submit_test.rs` coverage that loads the staged skills root used by runtime tests, resolves `tq-lineloss-report.collect_lineloss`, and asserts the tool is discoverable with the required args:
|
||||||
|
- `expected_domain`
|
||||||
|
- `org_label`
|
||||||
|
- `org_code`
|
||||||
|
- `period_mode`
|
||||||
|
- `period_mode_code`
|
||||||
|
- `period_value`
|
||||||
|
- `period_payload`
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test deterministic_submit_discovers_tq_lineloss_skill_contract -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL before the package is fully wired, PASS once the staged skill contract is discoverable and complete.
|
||||||
|
|
||||||
|
- [ ] **Step 6: Verify structural parity with `fault-details-report`**
|
||||||
|
|
||||||
|
Run a manual file-layout diff and confirm there are no placeholder descriptions or missing required docs.
|
||||||
|
|
||||||
|
- [ ] **Step 7: Commit**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report" "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/scenes/tq-lineloss-report/scene.json"
|
||||||
|
git commit -m "feat: scaffold tq lineloss staged skill contract"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 2: Add browser-side JS red tests and implement the staged collector
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.js`
|
||||||
|
- Create: `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.test.js`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write the failing JS tests first**
|
||||||
|
|
||||||
|
Cover deterministic pure helpers for:
|
||||||
|
- missing normalized args -> blocked/error contract
|
||||||
|
- month request shape uses `org_code` + canonical month payload
|
||||||
|
- week request shape uses `org_code` + canonical week payload
|
||||||
|
- artifact field names and counts
|
||||||
|
- partial/error status shaping
|
||||||
|
- no raw user-entered org text leakage into request fields
|
||||||
|
|
||||||
|
Example test skeleton:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const test = require('node:test');
|
||||||
|
const assert = require('node:assert/strict');
|
||||||
|
|
||||||
|
const {
|
||||||
|
validateArgs,
|
||||||
|
buildMonthRequest,
|
||||||
|
buildWeekRequest,
|
||||||
|
normalizeRows,
|
||||||
|
buildArtifact
|
||||||
|
} = require('./collect_lineloss.js');
|
||||||
|
|
||||||
|
test('buildMonthRequest uses canonical org code and month payload', () => {
|
||||||
|
const request = buildMonthRequest({
|
||||||
|
org_code: 'ORG-1',
|
||||||
|
period_payload: { year: 2026, month: 3 }
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(request.orgCode, 'ORG-1');
|
||||||
|
assert.equal(request.year, 2026);
|
||||||
|
assert.equal(request.month, 3);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildArtifact locks field names and partial semantics', () => {
|
||||||
|
const artifact = buildArtifact({
|
||||||
|
org_label: '国网兰州供电公司',
|
||||||
|
org_code: 'ORG-1',
|
||||||
|
period_mode: 'month',
|
||||||
|
period_mode_code: 'month',
|
||||||
|
period_value: '2026-03',
|
||||||
|
period_payload: { year: 2026, month: 3 },
|
||||||
|
rows: [{ id: 1 }],
|
||||||
|
status: 'partial',
|
||||||
|
reasons: ['export_failed']
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(artifact.report_name, 'tq-lineloss-report');
|
||||||
|
assert.equal(artifact.org.code, 'ORG-1');
|
||||||
|
assert.equal(artifact.period.value, '2026-03');
|
||||||
|
assert.deepEqual(artifact.reasons, ['export_failed']);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the JS test file to confirm failure**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
node --test "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the script/helpers do not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Write the minimal browser-side implementation**
|
||||||
|
|
||||||
|
Required structure:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function validateArgs(args) { /* require normalized canonical args */ }
|
||||||
|
function buildMonthRequest(args) { /* build month request from canonical values */ }
|
||||||
|
function buildWeekRequest(args) { /* build week request from canonical values */ }
|
||||||
|
function normalizeRows(rawRows) { /* canonical columns only */ }
|
||||||
|
function buildArtifact(input) { /* locked artifact shape */ }
|
||||||
|
|
||||||
|
return (async () => {
|
||||||
|
const args = __SKILL_ARGS__;
|
||||||
|
validateArgs(args);
|
||||||
|
// validate page context
|
||||||
|
// collect from page/API
|
||||||
|
// normalize rows
|
||||||
|
// optionally attempt export/report-log if the real business flow requires it
|
||||||
|
return buildArtifact(result);
|
||||||
|
})();
|
||||||
|
```
|
||||||
|
|
||||||
|
Keep test exports behind an environment-safe guard so the file still works as browser-eval code.
|
||||||
|
|
||||||
|
- [ ] **Step 4: Re-run the JS tests until they pass**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
node --test "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Commit**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.js" "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.test.js"
|
||||||
|
git commit -m "feat: add tq lineloss browser collection script"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 3: Add deterministic suffix detection and explicit scene routing
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Create: `src/compat/deterministic_submit.rs`
|
||||||
|
- Modify: `src/compat/mod.rs`
|
||||||
|
- Modify: `src/agent/mod.rs`
|
||||||
|
- Create: `tests/deterministic_submit_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write failing routing tests**
|
||||||
|
|
||||||
|
Add Rust tests for:
|
||||||
|
- exact raw `。。。` suffix enables deterministic mode
|
||||||
|
- no suffix leaves current routing untouched
|
||||||
|
- suffix + unsupported deterministic request returns supported-scene prompt
|
||||||
|
- when page URL/title context is available and does not match the lineloss scene, deterministic routing returns a mismatch/block prompt instead of proceeding
|
||||||
|
- Zhihu hotlist request without suffix keeps the current route
|
||||||
|
- ASCII `...` does not trigger deterministic mode
|
||||||
|
- `。。。。` does not trigger deterministic mode
|
||||||
|
- `。。。` in the middle of the instruction does not trigger deterministic mode
|
||||||
|
- trailing whitespace after `。。。` does not trigger deterministic mode in this slice
|
||||||
|
|
||||||
|
Suggested tests:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_requires_exact_suffix() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_nonmatch_returns_supported_scene_message() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_rejects_page_context_mismatch() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn zhihu_hotlist_request_without_suffix_keeps_existing_route() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_rejects_non_exact_suffix_variants() {}
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the targeted routing tests and confirm failure**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test deterministic_submit_requires_exact_suffix -- --exact
|
||||||
|
cargo test deterministic_submit_nonmatch_returns_supported_scene_message -- --exact
|
||||||
|
cargo test zhihu_hotlist_request_without_suffix_keeps_existing_route -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the deterministic routing seam does not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Implement the narrow deterministic routing module**
|
||||||
|
|
||||||
|
Recommended public shape:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub enum DeterministicSubmitDecision {
|
||||||
|
NotDeterministic,
|
||||||
|
Prompt { summary: String },
|
||||||
|
Execute(DeterministicExecutionPlan),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
`src/agent/mod.rs` should:
|
||||||
|
1. detect deterministic suffix
|
||||||
|
2. if not deterministic, continue current flow untouched
|
||||||
|
3. if prompt, return `TaskComplete`
|
||||||
|
4. if execute, pass the plan into the browser-script execution seam
|
||||||
|
|
||||||
|
- [ ] **Step 4: Re-run the routing tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test deterministic_submit_requires_exact_suffix -- --exact
|
||||||
|
cargo test deterministic_submit_nonmatch_returns_supported_scene_message -- --exact
|
||||||
|
cargo test zhihu_hotlist_request_without_suffix_keeps_existing_route -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Commit**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/compat/deterministic_submit.rs src/compat/mod.rs src/agent/mod.rs tests/deterministic_submit_test.rs
|
||||||
|
git commit -m "feat: add deterministic submit routing seam"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 4: Implement company/unit normalization from real source data
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Create: `src/compat/tq_lineloss/mod.rs`
|
||||||
|
- Create: `src/compat/tq_lineloss/contracts.rs`
|
||||||
|
- Create: `src/compat/tq_lineloss/org_resolver.rs`
|
||||||
|
- Create: `src/compat/tq_lineloss/org_units.rs`
|
||||||
|
- Modify: `tests/deterministic_submit_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write failing org resolver tests**
|
||||||
|
|
||||||
|
Cover:
|
||||||
|
- `兰州公司` -> canonical `国网兰州供电公司` + correct code
|
||||||
|
- `天水公司` -> canonical `国网天水供电公司` + correct code
|
||||||
|
- `城关供电分公司` -> lower-level direct match
|
||||||
|
- `榆中县公司` -> normalized county alias match
|
||||||
|
- ambiguous alias prompts instead of guessing
|
||||||
|
- missing company prompts instead of executing
|
||||||
|
|
||||||
|
Example skeleton:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn lineloss_org_resolver_matches_city_alias() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_org_resolver_matches_county_alias() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_org_resolver_prompts_on_ambiguity() {}
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the org tests and confirm failure**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test lineloss_org_resolver_matches_city_alias -- --exact
|
||||||
|
cargo test lineloss_org_resolver_matches_county_alias -- --exact
|
||||||
|
cargo test lineloss_org_resolver_prompts_on_ambiguity -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the resolver and checked-in unit dictionary do not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Check in the canonical unit dictionary and implement alias resolution**
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- derive data from the real source materials, not guessed literals
|
||||||
|
- keep canonical `label` and `code`
|
||||||
|
- generate normalized aliases from formal names
|
||||||
|
- support both city-company and district/county/sub-company levels
|
||||||
|
- require uniqueness before execution
|
||||||
|
|
||||||
|
- [ ] **Step 4: Implement explicit prompt messages**
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- `已命中台区线损报表技能,但缺少供电单位,请补充如“兰州公司”或“城关供电分公司”。`
|
||||||
|
- `已命中台区线损报表技能,但供电单位存在歧义,请补充更完整名称。`
|
||||||
|
|
||||||
|
- [ ] **Step 5: Re-run the org tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test lineloss_org_resolver_matches_city_alias -- --exact
|
||||||
|
cargo test lineloss_org_resolver_matches_county_alias -- --exact
|
||||||
|
cargo test lineloss_org_resolver_prompts_on_ambiguity -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 6: Commit**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/compat/tq_lineloss/mod.rs src/compat/tq_lineloss/contracts.rs src/compat/tq_lineloss/org_resolver.rs src/compat/tq_lineloss/org_units.rs tests/deterministic_submit_test.rs
|
||||||
|
git commit -m "feat: add tq lineloss org normalization"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 5: Implement period extraction and canonical payload building
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Create: `src/compat/tq_lineloss/period_resolver.rs`
|
||||||
|
- Modify: `src/compat/tq_lineloss/mod.rs`
|
||||||
|
- Modify: `tests/deterministic_submit_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write failing period resolver tests**
|
||||||
|
|
||||||
|
Cover:
|
||||||
|
- `月累计 2026-03`
|
||||||
|
- `月累计 2026年3月`
|
||||||
|
- `周累计 2026年第12周`
|
||||||
|
- contradictory month/week expressions prompt
|
||||||
|
- missing mode prompts
|
||||||
|
- missing period prompts
|
||||||
|
- bare `第12周` prompts for year in this slice
|
||||||
|
- real backend month/week mode codes and request payload field names are derived from source materials instead of placeholder values
|
||||||
|
|
||||||
|
Example skeleton:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_parses_month_text() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_parses_week_text() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_prompts_for_missing_year_on_week() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_rejects_contradictory_mode() {}
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the period tests and confirm failure**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test lineloss_period_resolver_parses_month_text -- --exact
|
||||||
|
cargo test lineloss_period_resolver_parses_week_text -- --exact
|
||||||
|
cargo test lineloss_period_resolver_prompts_for_missing_year_on_week -- --exact
|
||||||
|
cargo test lineloss_period_resolver_rejects_contradictory_mode -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the period resolver does not exist yet.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Implement the minimal resolver**
|
||||||
|
|
||||||
|
Output contract:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct ResolvedPeriod {
|
||||||
|
pub mode: PeriodMode,
|
||||||
|
pub mode_code: String,
|
||||||
|
pub value: String,
|
||||||
|
pub payload: serde_json::Value,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- no page-default fallback
|
||||||
|
- no implicit current-year assumptions
|
||||||
|
- no mixed month/week execution
|
||||||
|
|
||||||
|
- [ ] **Step 4: Re-run the period tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test lineloss_period_resolver_parses_month_text -- --exact
|
||||||
|
cargo test lineloss_period_resolver_parses_week_text -- --exact
|
||||||
|
cargo test lineloss_period_resolver_prompts_for_missing_year_on_week -- --exact
|
||||||
|
cargo test lineloss_period_resolver_rejects_contradictory_mode -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Commit**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/compat/tq_lineloss/period_resolver.rs src/compat/tq_lineloss/mod.rs tests/deterministic_submit_test.rs
|
||||||
|
git commit -m "feat: add tq lineloss period normalization"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 6: Wire deterministic execution through the existing browser-script seam
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `src/compat/deterministic_submit.rs`
|
||||||
|
- Modify: `src/agent/mod.rs`
|
||||||
|
- Modify if needed: `src/compat/direct_skill_runtime.rs`
|
||||||
|
- Modify: `tests/deterministic_submit_test.rs`
|
||||||
|
- Modify: `tests/compat_runtime_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Write failing execution tests**
|
||||||
|
|
||||||
|
Cover:
|
||||||
|
- successful deterministic lineloss request builds canonical tool args
|
||||||
|
- missing company/mode/period returns prompt without browser execution
|
||||||
|
- `partial` artifact maps to successful partial summary
|
||||||
|
- `blocked` and `error` artifacts map to failed completion
|
||||||
|
|
||||||
|
Example skeleton:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn deterministic_lineloss_execution_passes_canonical_args() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_lineloss_missing_company_does_not_invoke_browser() {}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_lineloss_partial_artifact_maps_to_partial_summary() {}
|
||||||
|
```
|
||||||
|
|
||||||
|
- [ ] **Step 2: Run the execution tests and confirm failure**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test deterministic_lineloss_execution_passes_canonical_args -- --exact
|
||||||
|
cargo test deterministic_lineloss_missing_company_does_not_invoke_browser -- --exact
|
||||||
|
cargo test deterministic_lineloss_partial_artifact_maps_to_partial_summary -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: FAIL because the deterministic execution plan is not wired yet.
|
||||||
|
|
||||||
|
- [ ] **Step 3: Implement execution via the existing `browser_script` seam**
|
||||||
|
|
||||||
|
Build tool args only from normalized values:
|
||||||
|
- `expected_domain`
|
||||||
|
- `org_label`
|
||||||
|
- `org_code`
|
||||||
|
- `period_mode`
|
||||||
|
- `period_mode_code`
|
||||||
|
- `period_value`
|
||||||
|
- `period_payload`
|
||||||
|
|
||||||
|
Resolve the tool explicitly to:
|
||||||
|
- `tq-lineloss-report.collect_lineloss`
|
||||||
|
|
||||||
|
Do not introduce a new browser opcode family or second browser protocol.
|
||||||
|
|
||||||
|
- [ ] **Step 4: Implement central artifact interpretation**
|
||||||
|
|
||||||
|
Recommended helper:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
fn summarize_lineloss_artifact(artifact: &serde_json::Value) -> (bool, String)
|
||||||
|
```
|
||||||
|
|
||||||
|
Summary must include canonical org/period and row counts, and surface blocked/partial/error reasons.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Re-run the execution tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test deterministic_lineloss_execution_passes_canonical_args -- --exact
|
||||||
|
cargo test deterministic_lineloss_missing_company_does_not_invoke_browser -- --exact
|
||||||
|
cargo test deterministic_lineloss_partial_artifact_maps_to_partial_summary -- --exact
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 6: Commit**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add src/compat/deterministic_submit.rs src/agent/mod.rs src/compat/direct_skill_runtime.rs tests/deterministic_submit_test.rs tests/compat_runtime_test.rs
|
||||||
|
git commit -m "feat: execute deterministic tq lineloss skill through browser script seam"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Task 7: Add Zhihu regression coverage and run the full verification set
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- Modify: `tests/compat_runtime_test.rs`
|
||||||
|
- Modify only if required: `tests/runtime_task_flow_test.rs`
|
||||||
|
- Reuse: `tests/deterministic_submit_test.rs`
|
||||||
|
|
||||||
|
- [ ] **Step 1: Add focused Zhihu regression tests**
|
||||||
|
|
||||||
|
Required assertions:
|
||||||
|
- ordinary Zhihu hotlist requests without `。。。` still use the current path
|
||||||
|
- existing export/presentation requests still preserve their current behavior
|
||||||
|
- deterministic suffix does not silently route unmatched requests into Zhihu logic
|
||||||
|
- an existing non-lineloss direct `browser_script` path outside the new scene still behaves unchanged
|
||||||
|
|
||||||
|
- [ ] **Step 2: Add end-to-end deterministic submit coverage**
|
||||||
|
|
||||||
|
Required assertions:
|
||||||
|
- suffix detection
|
||||||
|
- scene match
|
||||||
|
- page-context mismatch prompt/block behavior when URL/title contradict the lineloss scene
|
||||||
|
- missing/ambiguous prompts
|
||||||
|
- canonical args passed to the browser-script tool
|
||||||
|
- returned summary shows canonical org and period
|
||||||
|
- execution stays on the existing pipe-backed browser-script seam with no ws-only dependency introduced on `main`
|
||||||
|
|
||||||
|
- [ ] **Step 3: Run the focused Rust tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test --test deterministic_submit_test
|
||||||
|
cargo test --test compat_runtime_test
|
||||||
|
cargo test --test runtime_task_flow_test
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 4: Run the whole Rust suite**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
cargo test
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 5: Re-run the staged skill JS tests**
|
||||||
|
|
||||||
|
Run:
|
||||||
|
```bash
|
||||||
|
node --test "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/tq-lineloss-report/scripts/collect_lineloss.test.js"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected: PASS.
|
||||||
|
|
||||||
|
- [ ] **Step 6: Commit**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add tests/deterministic_submit_test.rs tests/compat_runtime_test.rs tests/runtime_task_flow_test.rs
|
||||||
|
git commit -m "test: cover deterministic tq lineloss routing and zhihu regression"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Final verification checklist
|
||||||
|
|
||||||
|
- [ ] `。。。` is the only deterministic trigger.
|
||||||
|
- [ ] Non-`。。。` requests preserve current routing.
|
||||||
|
- [ ] Deterministic routing blocks or returns a mismatch prompt before execution when the page URL/title contradicts the lineloss scene.
|
||||||
|
- [ ] Zhihu hotlist behavior is unchanged.
|
||||||
|
- [ ] Existing non-lineloss direct `browser_script` behavior is unchanged.
|
||||||
|
- [ ] Deterministic non-match returns an explicit supported-scene message.
|
||||||
|
- [ ] Missing company prompts.
|
||||||
|
- [ ] Ambiguous company prompts.
|
||||||
|
- [ ] Missing mode prompts.
|
||||||
|
- [ ] Missing period prompts.
|
||||||
|
- [ ] Bare `第12周` prompts for year.
|
||||||
|
- [ ] Canonical org code is passed to the staged skill.
|
||||||
|
- [ ] Canonical period mode code and payload are passed to the staged skill.
|
||||||
|
- [ ] The staged skill returns the locked artifact shape.
|
||||||
|
- [ ] Execution uses the existing `browser_script` seam only.
|
||||||
|
- [ ] No ws-specific runtime dependency is added on `main`.
|
||||||
|
|
||||||
|
## Implementation notes
|
||||||
|
|
||||||
|
- Prefer extracting a tiny shared execution helper from `src/compat/direct_skill_runtime.rs` if needed instead of duplicating tool lookup or browser-script invocation code.
|
||||||
|
- Keep deterministic whitelist configuration in one place, but do not expand this slice into a full general scene-registry redesign.
|
||||||
|
- If a failing test suggests changing Zhihu behavior, fix the deterministic branch or test harness instead of weakening the existing Zhihu path.
|
||||||
|
- The checked-in unit dictionary is part of the deterministic contract; treat updates to that data as explicit behavior changes and cover them with tests.
|
||||||
@@ -0,0 +1,125 @@
|
|||||||
|
# Config-Owned Direct Skill Dispatch Design
|
||||||
|
|
||||||
|
**Goal:** Preserve the current minimal submit flow where sgClaw accepts natural-language input, directly invokes one configured staged browser skill without calling an LLM, and keeps dispatch ownership in sgClaw configuration rather than external skill metadata.
|
||||||
|
|
||||||
|
**Status:** Approved design direction for the next slice. The current minimal direct-submit path already works; this document records the ownership boundary that future dispatch-policy work should follow.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Decision Summary
|
||||||
|
|
||||||
|
1. Keep direct-skill selection in sgClaw configuration.
|
||||||
|
2. Continue using `skillsDir` plus `directSubmitSkill` as the only control surface for the no-LLM direct path.
|
||||||
|
3. Do not add sgClaw-specific dispatch fields to files under `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging` in this slice.
|
||||||
|
4. Keep the currently bound skill as `fault-details-report.collect_fault_details`.
|
||||||
|
5. When dispatch expands beyond one fixed skill, add the next policy layer on the sgClaw side first, not in `scene.json` or `SKILL.toml`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Current Minimal Flow
|
||||||
|
|
||||||
|
The intended user experience stays unchanged:
|
||||||
|
- the user types natural language into the input box
|
||||||
|
- sgClaw receives `BrowserMessage::SubmitTask`
|
||||||
|
- sgClaw loads runtime config
|
||||||
|
- if `directSubmitSkill` is configured, sgClaw bypasses LLM routing and directly resolves the configured staged skill from `skillsDir`
|
||||||
|
- sgClaw executes the target `browser_script` tool through the browser runtime and returns the result
|
||||||
|
- if `directSubmitSkill` is absent, sgClaw falls back to the existing orchestration / compat behavior
|
||||||
|
|
||||||
|
This keeps the first slice small while preserving a clear seam for future expansion.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Ownership Boundary
|
||||||
|
|
||||||
|
### sgClaw configuration owns dispatch choice
|
||||||
|
|
||||||
|
sgClaw configuration is responsible for deciding whether submit-task should bypass the LLM path and which direct skill should run.
|
||||||
|
|
||||||
|
For the current slice, that means:
|
||||||
|
- `skillsDir` tells sgClaw where to load staged skills from
|
||||||
|
- `directSubmitSkill` tells sgClaw which `skill.tool` should be used for the direct path
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"skillsDir": "D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging",
|
||||||
|
"directSubmitSkill": "fault-details-report.collect_fault_details"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### skill_staging owns skill identity and execution assets
|
||||||
|
|
||||||
|
Files under `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging` remain responsible for describing the skill package, tool identity, and browser-script implementation.
|
||||||
|
|
||||||
|
For the current bound skill:
|
||||||
|
- `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/scenes/fault-details-report/scene.json`
|
||||||
|
- `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/SKILL.toml`
|
||||||
|
- `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/fault-details-report/scripts/collect_fault_details.js`
|
||||||
|
|
||||||
|
These files already provide enough information for sgClaw to locate the package and run the tool. This slice does not add a new dispatch field inside them.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Why This Boundary Is Recommended
|
||||||
|
|
||||||
|
### One source of truth for routing
|
||||||
|
|
||||||
|
If sgClaw configuration owns the direct-skill decision, the operator can switch the direct skill by changing config only. There is no need to edit code and no need to mutate external skill assets just to change routing.
|
||||||
|
|
||||||
|
### Avoid freezing external manifest semantics too early
|
||||||
|
|
||||||
|
`skill_staging` is an external skill asset set. Adding sgClaw-specific dispatch metadata now would couple the staged-skill format to one integration strategy before the policy model is stable.
|
||||||
|
|
||||||
|
### Preserve a clean migration path
|
||||||
|
|
||||||
|
The current minimal path is intentionally narrow: one fixed configured direct skill, no LLM dispatch, no per-skill policy registry yet. Keeping dispatch control in sgClaw makes it easier to add a broader policy layer later without rewriting the staged-skill package format first.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Explicit Non-Goals
|
||||||
|
|
||||||
|
This design does not do the following:
|
||||||
|
- redesign the submit-task protocol
|
||||||
|
- move dispatch control into `scene.json` or `SKILL.toml`
|
||||||
|
- require every staged skill to declare `direct_browser` or `llm_agent` right now
|
||||||
|
- expand the current direct path into generic natural-language intent classification
|
||||||
|
- change the browser-script execution model
|
||||||
|
- change the current fallback orchestration / compat execution semantics when `directSubmitSkill` is not configured
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Current Skill Contract
|
||||||
|
|
||||||
|
The current direct path remains intentionally deterministic.
|
||||||
|
|
||||||
|
For `fault-details-report.collect_fault_details`, sgClaw derives only the minimum required arguments:
|
||||||
|
- `expected_domain` from the current `page_url`
|
||||||
|
- `period` from an explicit `YYYY-MM` token in the user's natural-language input
|
||||||
|
|
||||||
|
That means the UX still looks like natural-language submission, but the runtime does not ask an LLM to infer intent or invent missing parameters. If the period is missing, sgClaw should return a clear error instead of guessing.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Future Dispatch Policy Direction
|
||||||
|
|
||||||
|
When more than one staged skill needs routing control, the next layer should still begin on the sgClaw side.
|
||||||
|
|
||||||
|
Recommended direction:
|
||||||
|
- keep `directSubmitSkill` as the current bootstrap switch for the minimal fixed-skill path
|
||||||
|
- introduce an sgClaw-owned registry or config mapping that can later express `skill.tool -> direct_browser | llm_agent`
|
||||||
|
- keep external skill manifests unchanged until the policy surface proves stable in real use
|
||||||
|
|
||||||
|
Only after the routing model is stable should we consider whether external skill metadata needs a default dispatch hint.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Resulting Design Rule
|
||||||
|
|
||||||
|
For this project, the direct-skill decision remains config-owned:
|
||||||
|
- sgClaw config decides whether submit-task bypasses the LLM path
|
||||||
|
- staged skill metadata identifies what the skill is and how its browser tool runs
|
||||||
|
- future per-skill dispatch policy should be added in sgClaw first, not in `skill_staging`
|
||||||
|
|
||||||
|
This is the approved baseline for the next dispatch-policy slice.
|
||||||
@@ -0,0 +1,495 @@
|
|||||||
|
# Fault Details Full Skill Alignment Design
|
||||||
|
|
||||||
|
**Goal:** Upgrade `fault-details-report.collect_fault_details` from an empty artifact shell into a real staged business skill that matches the original fault-details package's collection, normalization, summary, export, and report-history behavior, while keeping direct-skill routing config-owned in `claw-new`.
|
||||||
|
|
||||||
|
**Status:** Approved design direction for the next remediation slice.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Decision Summary
|
||||||
|
|
||||||
|
1. Keep direct-skill selection in `claw-new` via `skillsDir` + `directSubmitSkill`; do not move dispatch ownership into `skill_staging` manifests.
|
||||||
|
2. Put the fault-details business logic in `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging`, not in `claw-new`.
|
||||||
|
3. Align the staged skill with the original package's real behavior: query raw rows, normalize detail columns, derive summary rows, call localhost export, and write report history.
|
||||||
|
4. Keep the current browser-execution seam narrow: use the existing `browser_script` / browser-eval path, not a new browser protocol or new opcodes.
|
||||||
|
5. Add a narrow artifact interpreter in `claw-new` so structured fault results map cleanly to `TaskComplete.success` and a readable completion summary.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Why This Slice Exists
|
||||||
|
|
||||||
|
The staged skill's written contract and its current implementation do not match.
|
||||||
|
|
||||||
|
### What the original package actually does
|
||||||
|
|
||||||
|
The original package under `D:/desk/智能体资料/大四区报告监测项/故障明细` does all of the following:
|
||||||
|
|
||||||
|
- reads the selected date range from the page UI
|
||||||
|
- queries the D4 repair-order data source
|
||||||
|
- filters and normalizes raw rows into the canonical detail export schema
|
||||||
|
- derives grouped summary rows by `gds`
|
||||||
|
- calls `http://localhost:13313/SurfaceServices/personalBread/export/faultDetailsExportXLSXS`
|
||||||
|
- auto-opens/downloads the generated file
|
||||||
|
- writes report history through `http://localhost:13313/ReportServices/Api/setReportLog`
|
||||||
|
|
||||||
|
### What the staged skill currently does
|
||||||
|
|
||||||
|
The current staged `collect_fault_details.js` only returns an empty `report-artifact` shell with empty `rows` and empty summary `sections`.
|
||||||
|
|
||||||
|
It also still uses a Node-style export shape instead of the browser-eval entrypoint shape that the current `browser_script` runtime expects. In practice, this means the staged script is not yet aligned with the real runtime contract even before business behavior is considered.
|
||||||
|
|
||||||
|
This slice closes that gap by making the staged skill actually perform the work the original package performs, but through the current sgClaw direct-skill runtime.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Design Rules
|
||||||
|
|
||||||
|
### 1. `claw-new` owns routing, not business transforms
|
||||||
|
|
||||||
|
`claw-new` stays responsible for:
|
||||||
|
|
||||||
|
- loading config
|
||||||
|
- deciding whether submit-task takes the direct-skill path
|
||||||
|
- resolving the configured staged skill
|
||||||
|
- executing the staged browser-script tool
|
||||||
|
- turning the returned artifact into `TaskComplete.success` + human-readable summary
|
||||||
|
|
||||||
|
`claw-new` must **not** become the place where the original fault classification table, detail-row field mapping, or summary aggregation rules are reimplemented.
|
||||||
|
|
||||||
|
### 2. `skill_staging` owns fault-details business behavior
|
||||||
|
|
||||||
|
The staged skill package owns:
|
||||||
|
|
||||||
|
- query orchestration inside the browser page context
|
||||||
|
- raw-row extraction
|
||||||
|
- canonical detail-row normalization
|
||||||
|
- classification and derived fields
|
||||||
|
- summary-sheet derivation
|
||||||
|
- localhost export request
|
||||||
|
- localhost report-log request
|
||||||
|
- structured result payload
|
||||||
|
|
||||||
|
### 3. Keep the current browser seam narrow
|
||||||
|
|
||||||
|
Do not introduce a new browser bridge, callback protocol, or skill-specific browser opcode for this slice.
|
||||||
|
|
||||||
|
The implementation should continue using the current `browser_script` execution seam already wired through `claw-new/src/compat/browser_script_skill_tool.rs` and `claw-new/src/compat/direct_skill_runtime.rs`.
|
||||||
|
|
||||||
|
### 4. Match business behavior, not the original shell verbatim
|
||||||
|
|
||||||
|
The original package is a local HTML/Vue shell that uses `BrowserAction(...)`, timers, and hidden-browser choreography. That shell does **not** need to be recreated inside `claw-new`.
|
||||||
|
|
||||||
|
What must be preserved is the business outcome:
|
||||||
|
|
||||||
|
- same canonical detail columns
|
||||||
|
- same key field mappings
|
||||||
|
- same classification rules
|
||||||
|
- same summary metrics
|
||||||
|
- same downstream export/history behavior
|
||||||
|
- same distinction between empty, partial, blocked, and failed work
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Ownership Boundary and Landing Zones
|
||||||
|
|
||||||
|
### Staged skill changes
|
||||||
|
|
||||||
|
These changes land in `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging`.
|
||||||
|
|
||||||
|
Primary files:
|
||||||
|
|
||||||
|
- `skills/fault-details-report/scripts/collect_fault_details.js`
|
||||||
|
- becomes the real browser-eval entrypoint
|
||||||
|
- must directly `return` the final structured artifact from the wrapped browser script
|
||||||
|
- may contain internal helper functions, but should remain self-contained for the current runtime
|
||||||
|
- `skills/fault-details-report/SKILL.toml`
|
||||||
|
- keep `browser_script`
|
||||||
|
- tighten the tool description so it matches the real behavior
|
||||||
|
- do not turn `SKILL.toml` into the source of truth for classification rules or routing policy
|
||||||
|
- `skills/fault-details-report/SKILL.md`
|
||||||
|
- align the written contract with the implemented runtime behavior
|
||||||
|
- `skills/fault-details-report/references/collection-flow.md`
|
||||||
|
- align the staged flow with the implemented query/export/history sequence
|
||||||
|
- `skills/fault-details-report/references/data-quality.md`
|
||||||
|
- stay authoritative for canonical columns, required fields, classification tables, `qxxcjl`-based reason heuristics, summary rules, and partial semantics
|
||||||
|
- `scenes/fault-details-report/scene.json`
|
||||||
|
- keep the scene contract aligned with the actual output and state semantics
|
||||||
|
- do not move classification or routing policy into scene metadata
|
||||||
|
|
||||||
|
### Caller/runtime changes
|
||||||
|
|
||||||
|
These changes land in `D:/data/ideaSpace/rust/sgClaw/claw-new`.
|
||||||
|
|
||||||
|
Primary files:
|
||||||
|
|
||||||
|
- `src/compat/direct_skill_runtime.rs`
|
||||||
|
- keep configured direct-skill execution here
|
||||||
|
- add narrow structured-artifact interpretation after the browser-script returns
|
||||||
|
- `src/agent/mod.rs`
|
||||||
|
- keep the current direct-submit routing seam here
|
||||||
|
- do not add fault-specific business logic here
|
||||||
|
- `src/compat/browser_script_skill_tool.rs`
|
||||||
|
- keep the browser-script contract strict: browser-eval entrypoint, no Node-only assumptions
|
||||||
|
- `tests/agent_runtime_test.rs`
|
||||||
|
- direct-submit path and result-surface regressions
|
||||||
|
- `tests/browser_script_skill_tool_test.rs`
|
||||||
|
- browser-script execution-shape regressions
|
||||||
|
|
||||||
|
If a new helper is needed in `claw-new`, it should be a narrow artifact-format/parser helper, not a new business-rules module.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Target Runtime Flow
|
||||||
|
|
||||||
|
### Step 1: Submit-task stays config-owned
|
||||||
|
|
||||||
|
The user still types natural language into the current sgClaw input.
|
||||||
|
|
||||||
|
`claw-new`:
|
||||||
|
|
||||||
|
- receives `BrowserMessage::SubmitTask`
|
||||||
|
- loads `SgClawSettings`
|
||||||
|
- sees `directSubmitSkill = "fault-details-report.collect_fault_details"`
|
||||||
|
- bypasses LLM routing exactly as it does now
|
||||||
|
- resolves the staged skill from `skillsDir`
|
||||||
|
|
||||||
|
This preserves the already approved config-owned routing boundary.
|
||||||
|
|
||||||
|
### Step 2: Browser-script tool executes as a true browser entrypoint
|
||||||
|
|
||||||
|
`collect_fault_details.js` must be shaped for the current runtime:
|
||||||
|
|
||||||
|
- the script runs inside the current browser page context through `eval`
|
||||||
|
- it must not rely on `module.exports`
|
||||||
|
- it must directly `return collectFaultDetails(args)` from the wrapped script body
|
||||||
|
|
||||||
|
This is required because the current sgClaw browser-script runtime reads one script file and wraps it in a browser-side IIFE.
|
||||||
|
|
||||||
|
### Step 3: The skill reads the page-selected time range
|
||||||
|
|
||||||
|
The source-of-truth query window should come from the current page state, matching the original package behavior.
|
||||||
|
|
||||||
|
Design rule:
|
||||||
|
|
||||||
|
- read the selected start and end time from the business page controls or page state
|
||||||
|
- include that exact selected range in the returned artifact
|
||||||
|
- keep `period` as a bootstrap label from `claw-new`, not as a license to silently guess a different business range
|
||||||
|
|
||||||
|
Compatibility rule with the current direct-submit seam:
|
||||||
|
|
||||||
|
- the current `claw-new` direct path still requires an explicit `YYYY-MM` token in the user's instruction to enter the configured direct-skill flow
|
||||||
|
- that requirement remains in place for this slice
|
||||||
|
- once inside the skill, the browser page's selected start/end range is the source of truth for collection
|
||||||
|
- the returned artifact should include both the user-visible `period` label and the exact selected page range so mismatches are observable instead of hidden
|
||||||
|
|
||||||
|
If the page-selected range cannot be read reliably, the skill should return `blocked` instead of inventing a month-wide query window from `period` alone.
|
||||||
|
|
||||||
|
### Step 4: The skill collects raw rows and normalizes detail fields
|
||||||
|
|
||||||
|
The staged skill must reproduce the original package's detail normalization logic inside the browser-executed script.
|
||||||
|
|
||||||
|
That includes preserving the canonical detail schema from the original `excleIni[0].cols`, including the key transforms already present in the original package, such as:
|
||||||
|
|
||||||
|
- `slsj = bxsj`
|
||||||
|
- `gssgs = "甘肃省电力公司"`
|
||||||
|
- `sgs` derived from the current company/city context
|
||||||
|
- `gddw = maintOrgName`
|
||||||
|
- `gds = maintGroupName`
|
||||||
|
- `clzt = "处理完成"`
|
||||||
|
- `bdz = bdzMc`
|
||||||
|
- `line = xlmc10`
|
||||||
|
- `pb = byqmc`
|
||||||
|
|
||||||
|
The staged skill must also port the original classification/derivation logic that fills:
|
||||||
|
|
||||||
|
- `sxfl1`
|
||||||
|
- `sxfl2`
|
||||||
|
- `sxfl3`
|
||||||
|
- `gzsb`
|
||||||
|
- `gzyy`
|
||||||
|
|
||||||
|
That includes the original matching table and the `qxxcjl`-based text extraction heuristics that derive the fault reason.
|
||||||
|
|
||||||
|
### Step 5: The skill derives summary rows from normalized detail rows
|
||||||
|
|
||||||
|
The staged skill must derive the summary sheet from grouped detail rows, keyed around the same business totals the original package computes.
|
||||||
|
|
||||||
|
At minimum that includes:
|
||||||
|
|
||||||
|
- `index`
|
||||||
|
- `gsName`
|
||||||
|
- `fwDept`
|
||||||
|
- `className`
|
||||||
|
- `allCount`
|
||||||
|
- `wxCount`
|
||||||
|
- `khcCount`
|
||||||
|
- `sbdSbCount`
|
||||||
|
- `gyGzCount`
|
||||||
|
- `dyGzCount`
|
||||||
|
- `tqdzCount`
|
||||||
|
- `tqbxCount`
|
||||||
|
- `dyxlCount`
|
||||||
|
- `bqxCount`
|
||||||
|
- `jllCount`
|
||||||
|
- `bhxCount`
|
||||||
|
- `qftdCount`
|
||||||
|
|
||||||
|
The summary derivation must stay in the staged skill so the same package can later be routed by an LLM without moving business logic back into `claw-new`.
|
||||||
|
|
||||||
|
### Step 6: The skill performs downstream export and report logging
|
||||||
|
|
||||||
|
After detail rows and summary rows are available, the staged skill should reproduce the original package's downstream behavior:
|
||||||
|
|
||||||
|
- build the export payload for `faultDetailsExportXLSXS`
|
||||||
|
- call the localhost export endpoint
|
||||||
|
- capture the returned export path/URL
|
||||||
|
- write report history via `setReportLog`
|
||||||
|
|
||||||
|
Important boundary:
|
||||||
|
|
||||||
|
- export/report-log are downstream side effects
|
||||||
|
- they do not redefine whether collection itself succeeded
|
||||||
|
- if collection succeeds but export/logging fails, the result is `partial`, not a full collection failure
|
||||||
|
- auto-opening/downloading the exported file is out of scope for this slice; this slice records the export path/result in the artifact but does not add new opener/UI behavior in `claw-new`
|
||||||
|
|
||||||
|
### Step 7: The skill returns one structured artifact
|
||||||
|
|
||||||
|
The staged skill should return one self-describing JSON artifact containing:
|
||||||
|
|
||||||
|
- business identity (`type`, `report_name`)
|
||||||
|
- selected period label
|
||||||
|
- exact selected start/end range
|
||||||
|
- canonical detail columns + normalized rows
|
||||||
|
- summary section columns + rows
|
||||||
|
- counts
|
||||||
|
- business status
|
||||||
|
- partial reasons if any
|
||||||
|
- downstream export outcome
|
||||||
|
- downstream report-log outcome
|
||||||
|
|
||||||
|
### Step 8: `claw-new` interprets the artifact, not the business rules
|
||||||
|
|
||||||
|
After the browser-script returns, `claw-new` should parse the JSON artifact and map it into final submit-task behavior.
|
||||||
|
|
||||||
|
Recommended mapping:
|
||||||
|
|
||||||
|
- `status = ok` -> `TaskComplete.success = true`
|
||||||
|
- `status = partial` -> `TaskComplete.success = true`, with warnings in summary
|
||||||
|
- `status = empty` -> `TaskComplete.success = true`, clearly reported as empty-result
|
||||||
|
- `status = blocked` -> `TaskComplete.success = false`
|
||||||
|
- `status = error` -> `TaskComplete.success = false`
|
||||||
|
|
||||||
|
This keeps business classification in the staged skill while preventing false-positive success in the direct path.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Artifact Contract
|
||||||
|
|
||||||
|
The returned payload should stay `type = "report-artifact"`, but it must become rich enough to describe the real run.
|
||||||
|
|
||||||
|
Recommended contract:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "report-artifact",
|
||||||
|
"report_name": "fault-details-report",
|
||||||
|
"period": "2026-03",
|
||||||
|
"selected_range": {
|
||||||
|
"start": "2026-03-08 16:00:00",
|
||||||
|
"end": "2026-03-09 16:00:00"
|
||||||
|
},
|
||||||
|
"columns": ["qxdbh", "gssgs", "sgs", "gddw", "gds", "slsj", "yjflMc", "ejflMc", "sjflMc", "gzms", "yhbh", "yhmc", "lxr", "gzdd", "lxdh", "bxsj", "gdsj", "clzt", "qxxcjl", "bdz", "line", "pb", "sxfl1", "sxfl2", "sxfl3", "gzsb", "gzyy", "bz"],
|
||||||
|
"rows": [],
|
||||||
|
"sections": [
|
||||||
|
{
|
||||||
|
"name": "summary-sheet",
|
||||||
|
"columns": ["index", "gsName", "fwDept", "className", "allCount", "wxCount", "khcCount", "sbdSbCount", "gyGzCount", "dyGzCount", "tqdzCount", "tqbxCount", "dyxlCount", "bqxCount", "jllCount", "bhxCount", "qftdCount"],
|
||||||
|
"rows": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"counts": {
|
||||||
|
"detail_rows": 0,
|
||||||
|
"summary_rows": 0
|
||||||
|
},
|
||||||
|
"status": "ok",
|
||||||
|
"partial_reasons": [],
|
||||||
|
"downstream": {
|
||||||
|
"export": {
|
||||||
|
"attempted": true,
|
||||||
|
"success": true,
|
||||||
|
"path": "http://localhost:13313/.../fault-details.xlsx"
|
||||||
|
},
|
||||||
|
"report_log": {
|
||||||
|
"attempted": true,
|
||||||
|
"success": true,
|
||||||
|
"report_name": "国网XX故障报修明细表(03月09日)",
|
||||||
|
"path": "http://localhost:13313/.../fault-details.xlsx"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Contract notes
|
||||||
|
|
||||||
|
- `rows` is the canonical returned detail table, not the export-service transport payload.
|
||||||
|
- If the export service still requires a placeholder row for an empty spreadsheet, that placeholder should be synthesized only for the downstream export call, not as the canonical returned `rows` contract.
|
||||||
|
- `counts` should be computed from the canonical returned tables.
|
||||||
|
- `selected_range`, `columns`, `sections`, `counts`, `status`, and `partial_reasons` should always be present for `ok`, `partial`, and `empty`.
|
||||||
|
- For `blocked` and `error`, the artifact should still include `type`, `report_name`, `period`, `status`, and `partial_reasons`; `selected_range`, `columns`, `sections`, and `counts` should be included whenever they were already known before the failure point.
|
||||||
|
- `downstream` should be omitted only when export/report-log were not attempted yet; otherwise include it with `attempted` / `success` flags and any available path or failure detail.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Handling and Status Semantics
|
||||||
|
|
||||||
|
### `ok`
|
||||||
|
|
||||||
|
Use `ok` when all of the following are true:
|
||||||
|
|
||||||
|
- raw collection succeeded
|
||||||
|
- required detail-field normalization succeeded
|
||||||
|
- summary derivation succeeded
|
||||||
|
- export succeeded
|
||||||
|
- report-log write succeeded
|
||||||
|
|
||||||
|
### `partial`
|
||||||
|
|
||||||
|
Use `partial` when detail collection succeeded but at least one downstream stage degraded, including:
|
||||||
|
|
||||||
|
- one or more required fields could not be normalized, but the row set still remains exportable and summary derivation can proceed with explicit gaps recorded
|
||||||
|
- summary derivation was incomplete, but the detail table is still available
|
||||||
|
- export failed after rows were available
|
||||||
|
- report-log write failed after rows/export were available
|
||||||
|
|
||||||
|
Escalation rule:
|
||||||
|
|
||||||
|
- if the raw query succeeds but required fields are missing so broadly that the canonical detail table cannot be produced at all, use `error`, not `partial`
|
||||||
|
- if summary derivation cannot even start because the normalized detail rows are structurally unusable, use `error`, not `partial`
|
||||||
|
|
||||||
|
`partial_reasons` must name the degraded stage instead of hiding it.
|
||||||
|
|
||||||
|
### `empty`
|
||||||
|
|
||||||
|
Use `empty` when:
|
||||||
|
|
||||||
|
- the query succeeds for the selected range
|
||||||
|
- zero real detail rows match
|
||||||
|
|
||||||
|
This is not a failure.
|
||||||
|
|
||||||
|
If the business flow still wants an empty export file or placeholder export payload, that happens downstream without changing the semantic meaning of the result.
|
||||||
|
|
||||||
|
### `blocked`
|
||||||
|
|
||||||
|
Use `blocked` when the page/session preconditions are not met, for example:
|
||||||
|
|
||||||
|
- expected page/session is not available
|
||||||
|
- required page controls cannot be read
|
||||||
|
- login/session state is missing or expired
|
||||||
|
- required browser-visible APIs are unavailable in the current page context
|
||||||
|
|
||||||
|
### `error`
|
||||||
|
|
||||||
|
Use `error` when the run starts but fails due to operational or parsing problems, for example:
|
||||||
|
|
||||||
|
- request failure
|
||||||
|
- page script failure
|
||||||
|
- raw response parse failure
|
||||||
|
- malformed export response
|
||||||
|
|
||||||
|
### `claw-new` completion mapping
|
||||||
|
|
||||||
|
`claw-new` should convert structured status into final submit completion behavior:
|
||||||
|
|
||||||
|
- `ok` / `partial` / `empty`: return a success completion with a concise human summary
|
||||||
|
- `blocked` / `error`: return a failed completion with a concise human summary
|
||||||
|
|
||||||
|
This avoids the current risk where a structured error-like payload could still be surfaced as a nominal success string.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Testing and Acceptance Strategy
|
||||||
|
|
||||||
|
### Skill-side deterministic coverage
|
||||||
|
|
||||||
|
Add deterministic coverage around the staged skill's business logic in `skill_staging` for:
|
||||||
|
|
||||||
|
- canonical detail field mapping
|
||||||
|
- classification table parity
|
||||||
|
- `gzyy` extraction heuristics
|
||||||
|
- summary aggregation parity
|
||||||
|
- empty-result handling
|
||||||
|
- partial-result generation when downstream export/logging fails
|
||||||
|
- browser-script entrypoint shape (`return ...`, not `module.exports`)
|
||||||
|
|
||||||
|
The classification/summary tests should use fixed raw-row fixtures so the business rules are validated without a live browser session.
|
||||||
|
|
||||||
|
### `claw-new` runtime regressions
|
||||||
|
|
||||||
|
Add Rust coverage in `claw-new` for:
|
||||||
|
|
||||||
|
- direct-submit success with a populated `report-artifact`
|
||||||
|
- `partial` artifact mapping to `TaskComplete.success = true`
|
||||||
|
- `empty` artifact mapping to `TaskComplete.success = true`
|
||||||
|
- `blocked` / `error` artifact mapping to `TaskComplete.success = false`
|
||||||
|
- browser-script helper behavior for a real browser-eval return payload
|
||||||
|
|
||||||
|
### Manual acceptance
|
||||||
|
|
||||||
|
The live manual acceptance bar for this slice should be:
|
||||||
|
|
||||||
|
1. Configure `skillsDir` to the staged skill root and `directSubmitSkill` to `fault-details-report.collect_fault_details`.
|
||||||
|
2. Attach sgClaw to the real target browser page/session.
|
||||||
|
3. Submit a natural-language fault-details request without LLM routing.
|
||||||
|
4. Verify the staged skill:
|
||||||
|
- reads the selected page range
|
||||||
|
- queries real fault rows
|
||||||
|
- produces populated detail rows
|
||||||
|
- produces populated summary rows
|
||||||
|
- exports the workbook through localhost
|
||||||
|
- writes report history
|
||||||
|
5. Verify the final sgClaw completion message reports the correct status, counts, and downstream file/log outcome.
|
||||||
|
|
||||||
|
### Acceptance matrix
|
||||||
|
|
||||||
|
At minimum, acceptance should cover:
|
||||||
|
|
||||||
|
- normal populated result
|
||||||
|
- empty result with no matching rows
|
||||||
|
- partial result where export or report-log fails after collection
|
||||||
|
- blocked result where page/session preconditions are missing
|
||||||
|
- error result where parsing/query execution fails
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Explicit Non-Goals
|
||||||
|
|
||||||
|
This slice does **not**:
|
||||||
|
|
||||||
|
- move routing ownership out of `claw-new`
|
||||||
|
- require LLM routing to be available first
|
||||||
|
- add per-skill dispatch metadata to external manifests for routing policy
|
||||||
|
- introduce a new browser protocol or browser opcode
|
||||||
|
- recreate the original Vue shell inside `claw-new`
|
||||||
|
- move fault classification logic into Rust
|
||||||
|
- redesign the submit-task protocol beyond better interpretation of the returned artifact
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Resulting Design Rule
|
||||||
|
|
||||||
|
For the fault-details path:
|
||||||
|
|
||||||
|
- `claw-new` decides whether to invoke the fixed staged skill
|
||||||
|
- the staged skill performs the real fault business workflow
|
||||||
|
- the staged skill returns a structured artifact that describes collection + downstream outcomes
|
||||||
|
- `claw-new` interprets that artifact for submit-task success/failure and summary output
|
||||||
|
|
||||||
|
That keeps routing config-owned, keeps business logic with the staged skill, and makes `fault-details-report.collect_fault_details` ready for both the current no-LLM path and a later LLM-routed path.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Document Landing Zones
|
||||||
|
|
||||||
|
- Approved spec: `docs/superpowers/specs/2026-04-10-fault-details-full-skill-alignment-design.md`
|
||||||
|
- Follow-up implementation plan: `docs/superpowers/plans/2026-04-10-fault-details-full-skill-alignment-plan.md`
|
||||||
@@ -0,0 +1,618 @@
|
|||||||
|
# TQ Line-Loss Deterministic Skill Design
|
||||||
|
|
||||||
|
**Goal:** Add a staged business skill for `台区线损大数据-月_周累计线损率统计分析` and a deterministic natural-language routing path in `claw-new` that can bypass the LLM when the instruction ends with `。。。`, while preserving the existing Zhihu hotlist behavior and keeping the execution seam pipe-first but ws-ready.
|
||||||
|
|
||||||
|
**Status:** Approved design direction for implementation planning.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Decision Summary
|
||||||
|
|
||||||
|
1. Add a new staged skill package `tq-lineloss-report` under `D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills/`, following the same packaging discipline as `fault-details-report`.
|
||||||
|
2. In `claw-new`, add a deterministic submit path triggered only when the instruction ends with the three-Chinese-dot suffix `。。。`.
|
||||||
|
3. In deterministic mode, route only through a fixed whitelist of staged skills; for this slice the new target is `tq-lineloss-report.collect_lineloss`.
|
||||||
|
4. Deterministic mode must extract business parameters from natural language without using an LLM: company/unit, month-vs-week mode, and period text.
|
||||||
|
5. Parsed natural-language parameters are not the final backend parameters. They must be normalized into the canonical codes required by the source page / source APIs (for example company code and period mode code).
|
||||||
|
6. If required parameters are missing or ambiguous, the runtime must stop and ask the user to provide them explicitly. It must **not** silently fall back to page defaults in this slice.
|
||||||
|
7. Skill execution must reuse the existing browser-script → pipe injection seam already proven by the Zhihu hotlist path. Do not create a second browser execution protocol.
|
||||||
|
8. The design must not regress or weaken the existing Zhihu hotlist direct path, browser-script path, export path, or current routing behavior.
|
||||||
|
9. The main branch implementation remains pipe-only, but all new deterministic-routing and skill contracts must stay backend-neutral so the execution backend can later be swapped to ws on the ws branch.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Non-Negotiable Boundaries
|
||||||
|
|
||||||
|
### 1. Do not break the existing Zhihu hotlist flow
|
||||||
|
|
||||||
|
This is the top safety boundary for the slice.
|
||||||
|
|
||||||
|
The new deterministic routing for `tq-lineloss-report` must not break, narrow, or silently change:
|
||||||
|
|
||||||
|
- current Zhihu hotlist routing
|
||||||
|
- current Zhihu direct browser-script execution
|
||||||
|
- current Zhihu export behavior
|
||||||
|
- current browser-script skill loading/execution
|
||||||
|
- existing direct-submit configuration behavior
|
||||||
|
|
||||||
|
Design implication:
|
||||||
|
|
||||||
|
- The new deterministic path must be added as a narrow, explicit branch.
|
||||||
|
- Existing Zhihu logic must keep its current trigger semantics and current execution seam.
|
||||||
|
- Verification for this slice must include targeted Zhihu regression coverage before implementation is considered complete.
|
||||||
|
|
||||||
|
### 2. Current main branch is pipe-only
|
||||||
|
|
||||||
|
The implementation landing on `main` must execute browser-script skills through the current pipe-backed browser execution seam.
|
||||||
|
|
||||||
|
Do not introduce ws as an active runtime requirement for this slice.
|
||||||
|
|
||||||
|
### 3. Future ws migration must stay cheap
|
||||||
|
|
||||||
|
Although `main` remains pipe-only, the new work must leave a clean extension seam so that after this slice is merged into `ws`, the browser backend can be switched without redesigning:
|
||||||
|
|
||||||
|
- the staged skill package
|
||||||
|
- the deterministic trigger contract
|
||||||
|
- the parameter extraction contract
|
||||||
|
- the parameter normalization contract
|
||||||
|
- the returned artifact contract
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Why This Slice Exists
|
||||||
|
|
||||||
|
The user wants a staged business skill for `台区线损大数据-月_周累计线损率统计分析` that behaves like a deterministic business operation, not a free-form LLM task.
|
||||||
|
|
||||||
|
The desired operator experience is:
|
||||||
|
|
||||||
|
- ordinary instructions continue to use the current normal routing / LLM path
|
||||||
|
- an instruction ending in `。。。` switches to deterministic business execution
|
||||||
|
- deterministic execution targets a fixed staged skill
|
||||||
|
- business parameters are extracted from the instruction
|
||||||
|
- those parameters are normalized to the real coded values the source page/API needs
|
||||||
|
- the staged browser-script is injected into the third-party browser through the existing pipe seam
|
||||||
|
|
||||||
|
This provides an inner-network-safe path that can work without a model today, while reserving an upgrade path for future semantic fallback.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Terminology
|
||||||
|
|
||||||
|
### Deterministic mode
|
||||||
|
|
||||||
|
A submit-task mode enabled only when the instruction ends with `。。。`.
|
||||||
|
|
||||||
|
### Natural-language business parameters
|
||||||
|
|
||||||
|
Values expressed by the user in text, such as:
|
||||||
|
|
||||||
|
- `兰州公司`
|
||||||
|
- `天水公司`
|
||||||
|
- `月累计`
|
||||||
|
- `周累计`
|
||||||
|
- `2026-03`
|
||||||
|
- `2026年第12周`
|
||||||
|
|
||||||
|
These are intermediate semantic values, not final backend parameters.
|
||||||
|
|
||||||
|
### Canonical execution parameters
|
||||||
|
|
||||||
|
The normalized values required by the source page / source API, such as:
|
||||||
|
|
||||||
|
- canonical company label
|
||||||
|
- canonical company code
|
||||||
|
- period mode code (month/week)
|
||||||
|
- canonical request period payload
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Ownership Boundary and Landing Zones
|
||||||
|
|
||||||
|
### Staged skill changes
|
||||||
|
|
||||||
|
These land in:
|
||||||
|
|
||||||
|
`D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging`
|
||||||
|
|
||||||
|
Primary landing zone:
|
||||||
|
|
||||||
|
- `skills/tq-lineloss-report/`
|
||||||
|
|
||||||
|
Target package structure:
|
||||||
|
|
||||||
|
- `SKILL.md`
|
||||||
|
- `SKILL.toml`
|
||||||
|
- `references/collection-flow.md`
|
||||||
|
- `references/data-quality.md`
|
||||||
|
- `assets/scene-snapshot/index.html`
|
||||||
|
- `scripts/collect_lineloss.js`
|
||||||
|
- `scripts/collect_lineloss.test.js`
|
||||||
|
|
||||||
|
Potential aligned scene metadata (if included in this slice):
|
||||||
|
|
||||||
|
- `scenes/tq-lineloss-report/scene.json`
|
||||||
|
- optional scene registry updates if the current staging conventions require it
|
||||||
|
|
||||||
|
### Caller/runtime changes
|
||||||
|
|
||||||
|
These land in:
|
||||||
|
|
||||||
|
`D:/data/ideaSpace/rust/sgClaw/claw-new`
|
||||||
|
|
||||||
|
Likely ownership areas:
|
||||||
|
|
||||||
|
- deterministic instruction detection and deterministic skill matching
|
||||||
|
- parameter extraction and normalization
|
||||||
|
- deterministic skill dispatch to the existing browser-script seam
|
||||||
|
- narrow result interpretation for the returned artifact
|
||||||
|
- focused regression tests
|
||||||
|
|
||||||
|
Design rule:
|
||||||
|
|
||||||
|
`claw-new` owns routing, extraction, normalization, and dispatch.
|
||||||
|
|
||||||
|
`claw-new` must **not** absorb the line-loss business logic itself.
|
||||||
|
|
||||||
|
The staged skill package owns:
|
||||||
|
|
||||||
|
- page inspection
|
||||||
|
- page-side state reading
|
||||||
|
- page/API data collection
|
||||||
|
- row normalization
|
||||||
|
- export/report-log behavior
|
||||||
|
- final artifact generation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Target Runtime Flow
|
||||||
|
|
||||||
|
### Step 1: Submit-task enters deterministic mode only on `。。。`
|
||||||
|
|
||||||
|
When the user instruction does **not** end in `。。。`:
|
||||||
|
|
||||||
|
- keep the current runtime behavior unchanged
|
||||||
|
- preserve existing Zhihu hotlist behavior exactly
|
||||||
|
- preserve existing direct-submit and compat/LLM flows
|
||||||
|
|
||||||
|
When the instruction **does** end in `。。。`:
|
||||||
|
|
||||||
|
- enter deterministic mode
|
||||||
|
- do not run the ordinary LLM interpretation branch for this request
|
||||||
|
- evaluate only the deterministic skill whitelist
|
||||||
|
|
||||||
|
### Step 2: Deterministic whitelist match
|
||||||
|
|
||||||
|
The runtime should match the instruction against deterministic business scenes.
|
||||||
|
|
||||||
|
For this slice the new required deterministic scene is:
|
||||||
|
|
||||||
|
- `tq-lineloss-report.collect_lineloss`
|
||||||
|
|
||||||
|
The matching layer should remain narrow and explicit. It should not become a general scene-registry runtime in this slice.
|
||||||
|
|
||||||
|
Matching should use a deterministic combination of:
|
||||||
|
|
||||||
|
- instruction keywords
|
||||||
|
- optional page URL/title constraints when available
|
||||||
|
|
||||||
|
The runtime must not accidentally steal instructions that should still go down the Zhihu path.
|
||||||
|
|
||||||
|
### Step 3: Extract semantic business parameters from natural language
|
||||||
|
|
||||||
|
After `tq-lineloss-report` is matched, the runtime extracts semantic business parameters from the instruction.
|
||||||
|
|
||||||
|
Required semantic categories:
|
||||||
|
|
||||||
|
- company/unit expression
|
||||||
|
- period mode (`month` vs `week`)
|
||||||
|
- period text/value
|
||||||
|
|
||||||
|
Examples of accepted user-facing expressions include:
|
||||||
|
|
||||||
|
- `兰州公司`
|
||||||
|
- `天水公司`
|
||||||
|
- `国网兰州供电公司`
|
||||||
|
- `城关供电分公司`
|
||||||
|
- `2026-03`
|
||||||
|
- `2026年3月`
|
||||||
|
- `2026年第12周`
|
||||||
|
- `第12周`
|
||||||
|
- `月累计`
|
||||||
|
- `周累计`
|
||||||
|
|
||||||
|
### Step 4: Normalize semantic values into canonical coded values
|
||||||
|
|
||||||
|
This is a required separate design step.
|
||||||
|
|
||||||
|
The runtime must not pass raw natural-language company text directly to the business request layer.
|
||||||
|
|
||||||
|
Instead it must normalize semantic values into canonical execution parameters, including:
|
||||||
|
|
||||||
|
- `org_label` — canonical unit label
|
||||||
|
- `org_code` — the actual code/value required by the business page/API
|
||||||
|
- `period_mode` — canonical mode (`month` or `week`)
|
||||||
|
- `period_mode_code` — the page/API code (for example the `timeChage`-style encoded mode; verify the exact field spelling against the source page, since it may be a legacy misspelling preserved by the source system)
|
||||||
|
- canonical time payload required by the source APIs/page state
|
||||||
|
|
||||||
|
This normalization should be derived from the actual source materials, including page-side dictionaries such as the existing unit tree data.
|
||||||
|
|
||||||
|
### Step 5: Missing and ambiguous parameters must stop execution
|
||||||
|
|
||||||
|
This slice must not silently infer missing parameters from page defaults.
|
||||||
|
|
||||||
|
If a required parameter is missing, execution must stop with an explicit prompt to the user.
|
||||||
|
|
||||||
|
If a parameter is ambiguous, execution must stop with an explicit ambiguity prompt.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- no company matched
|
||||||
|
- no month/week mode matched
|
||||||
|
- no period value matched when required
|
||||||
|
- a short company alias matches multiple canonical units
|
||||||
|
- both monthly and weekly intent appear in the same instruction
|
||||||
|
|
||||||
|
This is preferable to silently using the wrong company code or the wrong query period.
|
||||||
|
|
||||||
|
### Step 6: Execute the staged skill through the existing pipe seam
|
||||||
|
|
||||||
|
If and only if parameters are present and successfully normalized:
|
||||||
|
|
||||||
|
- resolve `tq-lineloss-report.collect_lineloss`
|
||||||
|
- build the args object
|
||||||
|
- execute it through the current `browser_script` runtime
|
||||||
|
- inject the script into the browser through the existing pipe-backed browser tool seam
|
||||||
|
|
||||||
|
This slice must reuse the execution pattern already proven by the current browser-script/direct-skill infrastructure and the current Zhihu hotlist path.
|
||||||
|
|
||||||
|
Do not introduce a second browser protocol, new browser opcode family, or parallel execution harness.
|
||||||
|
|
||||||
|
### Step 7: Skill JS performs page-side work and returns one artifact
|
||||||
|
|
||||||
|
The staged script owns the actual line-loss business behavior:
|
||||||
|
|
||||||
|
- reading page-side state when needed
|
||||||
|
- validating the page context
|
||||||
|
- using normalized codes/parameters from args
|
||||||
|
- building source API requests
|
||||||
|
- collecting/normalizing rows
|
||||||
|
- export/report logging behavior if required by the final business contract
|
||||||
|
- returning a structured artifact
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Deterministic Trigger Contract
|
||||||
|
|
||||||
|
### Trigger rule
|
||||||
|
|
||||||
|
Deterministic mode is activated only when the raw instruction ends with the exact three-Chinese-dot suffix:
|
||||||
|
|
||||||
|
- `。。。`
|
||||||
|
|
||||||
|
This suffix is a user-controlled explicit mode switch.
|
||||||
|
|
||||||
|
### Why the suffix exists
|
||||||
|
|
||||||
|
It lets the user force business-deterministic behavior without relying on a model, while preserving the normal LLM path for ordinary requests.
|
||||||
|
|
||||||
|
### Scope rule
|
||||||
|
|
||||||
|
The suffix is not a free pass to run arbitrary browser actions.
|
||||||
|
|
||||||
|
It only selects among the deterministic skill whitelist.
|
||||||
|
|
||||||
|
If no deterministic scene matches, the runtime should return a deterministic-mode mismatch error that explains the currently supported deterministic scenes, rather than silently dropping into another behavior.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Company / Unit Matching Contract
|
||||||
|
|
||||||
|
### Accepted input style
|
||||||
|
|
||||||
|
The user does **not** need to type the exact full canonical label.
|
||||||
|
|
||||||
|
The runtime should support business shorthand such as:
|
||||||
|
|
||||||
|
- `兰州公司`
|
||||||
|
- `天水公司`
|
||||||
|
- `白银公司`
|
||||||
|
- `城关供电分公司`
|
||||||
|
- `榆中县供电公司`
|
||||||
|
|
||||||
|
### Matching approach
|
||||||
|
|
||||||
|
Do not use regex alone as the primary company-resolution mechanism.
|
||||||
|
|
||||||
|
Use a three-stage resolution strategy:
|
||||||
|
|
||||||
|
1. text normalization
|
||||||
|
2. alias/candidate generation from canonical unit names
|
||||||
|
3. uniqueness resolution against the real unit dictionary
|
||||||
|
|
||||||
|
### Normalization examples
|
||||||
|
|
||||||
|
Canonical names such as:
|
||||||
|
|
||||||
|
- `国网兰州供电公司`
|
||||||
|
- `国网天水供电公司`
|
||||||
|
- `国网榆中县供电公司`
|
||||||
|
|
||||||
|
should be matchable from business shorthand forms such as:
|
||||||
|
|
||||||
|
- `兰州公司`
|
||||||
|
- `天水公司`
|
||||||
|
- `榆中县公司`
|
||||||
|
- `榆中供电公司`
|
||||||
|
|
||||||
|
### Data source for canonical mapping
|
||||||
|
|
||||||
|
The company/unit resolver should derive canonical mappings from the real source materials used by the business page, such as the current unit tree dictionary embedded in the source page resources.
|
||||||
|
|
||||||
|
Design implication:
|
||||||
|
|
||||||
|
- the resolver should produce the real `value`/code required downstream
|
||||||
|
- the resolver should also keep the canonical label for display/auditability
|
||||||
|
|
||||||
|
### Ambiguity rule
|
||||||
|
|
||||||
|
If a short alias resolves to more than one valid unit, execution must stop and ask the user to be more specific.
|
||||||
|
|
||||||
|
Do not auto-guess.
|
||||||
|
|
||||||
|
### Supported granularity
|
||||||
|
|
||||||
|
The first implementation must support both:
|
||||||
|
|
||||||
|
- city-company level
|
||||||
|
- district/county/sub-company level
|
||||||
|
|
||||||
|
This includes forms like:
|
||||||
|
|
||||||
|
- `兰州公司`
|
||||||
|
- `天水公司`
|
||||||
|
- `城关供电分公司`
|
||||||
|
- `榆中县供电公司`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Period Extraction and Normalization Contract
|
||||||
|
|
||||||
|
### Required period dimensions
|
||||||
|
|
||||||
|
The runtime must identify:
|
||||||
|
|
||||||
|
- mode: `month` or `week`
|
||||||
|
- actual requested period value in a canonical form
|
||||||
|
|
||||||
|
### Accepted user-facing patterns
|
||||||
|
|
||||||
|
At minimum the design should account for patterns such as:
|
||||||
|
|
||||||
|
- `月累计`
|
||||||
|
- `周累计`
|
||||||
|
- `2026-03`
|
||||||
|
- `2026年3月`
|
||||||
|
- `2026年第12周`
|
||||||
|
- `第12周`
|
||||||
|
|
||||||
|
### Normalization output
|
||||||
|
|
||||||
|
The resolver should produce:
|
||||||
|
|
||||||
|
- a canonical mode enum/string
|
||||||
|
- a mode code required by the page/API
|
||||||
|
- a canonical period payload consumable by the script/business request layer
|
||||||
|
|
||||||
|
### Ambiguity rule
|
||||||
|
|
||||||
|
If both month and week intent appear, stop and ask the user to clarify.
|
||||||
|
|
||||||
|
### Missing-period rule
|
||||||
|
|
||||||
|
If the selected line-loss query requires a time period and the instruction does not provide enough information to construct one, stop and ask the user to provide it.
|
||||||
|
|
||||||
|
Do not default to the page-selected period in this slice.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Parameter Prompting Contract
|
||||||
|
|
||||||
|
When deterministic mode matches `tq-lineloss-report` but one or more required parameters are missing or ambiguous, the runtime should return a user-facing prompt rather than executing.
|
||||||
|
|
||||||
|
Expected prompting cases include:
|
||||||
|
|
||||||
|
- missing company/unit
|
||||||
|
- missing month/week mode
|
||||||
|
- missing period value
|
||||||
|
- ambiguous company alias
|
||||||
|
- contradictory period expressions
|
||||||
|
|
||||||
|
The prompt should be specific enough to let the user correct only the missing field(s).
|
||||||
|
|
||||||
|
Example style:
|
||||||
|
|
||||||
|
- `已命中台区线损报表技能,但缺少供电单位,请补充如“兰州公司”或“城关供电分公司”。`
|
||||||
|
- `已命中台区线损报表技能,但未识别到月/周类型,请补充“月累计”或“周累计”。`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Skill Package Contract
|
||||||
|
|
||||||
|
### SKILL.toml
|
||||||
|
|
||||||
|
The new skill package must declare a single deterministic collection entrypoint:
|
||||||
|
|
||||||
|
- tool name: `collect_lineloss`
|
||||||
|
- kind: `browser_script`
|
||||||
|
|
||||||
|
The tool description must reflect the real staged behavior, not a placeholder shell.
|
||||||
|
|
||||||
|
### SKILL.md
|
||||||
|
|
||||||
|
The written contract should cover:
|
||||||
|
|
||||||
|
- when to use the skill
|
||||||
|
- when not to use it
|
||||||
|
- collection workflow
|
||||||
|
- runtime contract
|
||||||
|
- explicit missing/partial/error semantics
|
||||||
|
- returned artifact contract
|
||||||
|
|
||||||
|
### references/collection-flow.md
|
||||||
|
|
||||||
|
Must explain:
|
||||||
|
|
||||||
|
- the source page state used by the skill
|
||||||
|
- how company and period parameters map to business requests
|
||||||
|
- which page/API calls are used for month vs week
|
||||||
|
- export/report-log sequencing if retained in the business flow
|
||||||
|
|
||||||
|
### references/data-quality.md
|
||||||
|
|
||||||
|
Must define:
|
||||||
|
|
||||||
|
- canonical output columns
|
||||||
|
- required field coverage
|
||||||
|
- status semantics
|
||||||
|
- partial/error conditions
|
||||||
|
- company/period normalization assumptions that the script relies on
|
||||||
|
|
||||||
|
### scripts/collect_lineloss.js
|
||||||
|
|
||||||
|
This is the real browser-side entrypoint. It should:
|
||||||
|
|
||||||
|
- accept normalized args
|
||||||
|
- validate page context
|
||||||
|
- execute deterministic page/API data collection
|
||||||
|
- normalize rows
|
||||||
|
- perform downstream export/report-history behavior if required
|
||||||
|
- directly return the final artifact from the browser-script runtime entrypoint shape
|
||||||
|
|
||||||
|
### scripts/collect_lineloss.test.js
|
||||||
|
|
||||||
|
Must cover the business transforms that can be tested off-browser, especially:
|
||||||
|
|
||||||
|
- company normalization assumptions consumed by the script
|
||||||
|
- monthly vs weekly request-shape logic
|
||||||
|
- status semantics
|
||||||
|
- artifact shaping
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Returned Artifact Contract
|
||||||
|
|
||||||
|
The final line-loss skill should return one structured artifact object rather than free-form prose.
|
||||||
|
|
||||||
|
At minimum it should expose:
|
||||||
|
|
||||||
|
- artifact type
|
||||||
|
- report name
|
||||||
|
- canonical company label/code used for the query
|
||||||
|
- period mode and canonical period value used for the query
|
||||||
|
- columns
|
||||||
|
- rows
|
||||||
|
- status
|
||||||
|
- counts
|
||||||
|
- downstream export/report-log status when applicable
|
||||||
|
- clear reasons for blocked/partial/error states
|
||||||
|
|
||||||
|
The exact field names may be finalized during implementation planning, but the contract must be structured enough for `claw-new` to interpret success vs partial vs blocked vs error without re-embedding business logic.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Pipe-First / Ws-Ready Execution Seam
|
||||||
|
|
||||||
|
### Current requirement
|
||||||
|
|
||||||
|
The first implementation on `main` must use the existing pipe-backed browser execution path.
|
||||||
|
|
||||||
|
### Future requirement
|
||||||
|
|
||||||
|
The design must allow later ws adoption without redesigning the skill or routing contract.
|
||||||
|
|
||||||
|
### Practical design rule
|
||||||
|
|
||||||
|
Keep these backend-neutral:
|
||||||
|
|
||||||
|
- deterministic trigger contract
|
||||||
|
- skill matching contract
|
||||||
|
- parameter extraction contract
|
||||||
|
- parameter normalization contract
|
||||||
|
- tool args contract
|
||||||
|
- artifact contract
|
||||||
|
|
||||||
|
Keep backend-specific code isolated to the execution seam only.
|
||||||
|
|
||||||
|
That way the later ws migration can replace the browser backend beneath the same deterministic skill contract.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Caller/Runtime Design Rules
|
||||||
|
|
||||||
|
### 1. Keep new business logic out of broad orchestration
|
||||||
|
|
||||||
|
Do not thread line-loss-specific business behavior through the general orchestration/runtime path.
|
||||||
|
|
||||||
|
### 2. Add a narrow deterministic-routing seam
|
||||||
|
|
||||||
|
This slice should add a narrow deterministic branch around submit-task routing, rather than rewriting the whole runtime decision tree.
|
||||||
|
|
||||||
|
### 3. Separate extraction from normalization
|
||||||
|
|
||||||
|
Do not mix “what the user typed” with “what the backend needs”.
|
||||||
|
|
||||||
|
There must be a distinct normalization step.
|
||||||
|
|
||||||
|
### 4. Keep the direct-skill browser seam narrow
|
||||||
|
|
||||||
|
Reuse the current `browser_script` execution seam instead of inventing a new browser bridge.
|
||||||
|
|
||||||
|
### 5. Preserve Zhihu behavior by design, not by hope
|
||||||
|
|
||||||
|
The design should assume new deterministic routing can accidentally steal or alter existing Zhihu behavior unless explicitly guarded against.
|
||||||
|
|
||||||
|
This is why focused Zhihu regression coverage is mandatory.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Verification Requirements for the Future Implementation Plan
|
||||||
|
|
||||||
|
Implementation planning must include explicit verification for:
|
||||||
|
|
||||||
|
1. deterministic suffix detection
|
||||||
|
2. deterministic lineloss scene matching
|
||||||
|
3. company alias normalization to canonical code
|
||||||
|
4. support for both company-level and district/county/sub-company-level units
|
||||||
|
5. month/week extraction and normalization
|
||||||
|
6. missing-parameter prompt behavior
|
||||||
|
7. ambiguous-company prompt behavior
|
||||||
|
8. pipe-backed browser-script execution for the new skill
|
||||||
|
9. no regression to the existing Zhihu hotlist path
|
||||||
|
10. preserved direct-skill/browser-script behavior outside the new line-loss scene
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Out of Scope for This Slice
|
||||||
|
|
||||||
|
- enabling ws execution on `main`
|
||||||
|
- replacing the current Zhihu routing model
|
||||||
|
- general scene-registry runtime architecture redesign
|
||||||
|
- full free-form semantic understanding of arbitrary business language
|
||||||
|
- typo-tolerant fuzzy NLP beyond deterministic business-safe matching
|
||||||
|
- making page defaults the hidden source of truth when the user omitted parameters
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Planning Notes
|
||||||
|
|
||||||
|
The implementation plan should likely split into distinct work items for:
|
||||||
|
|
||||||
|
1. staged skill package creation and business contract definition
|
||||||
|
2. deterministic trigger + scene match in `claw-new`
|
||||||
|
3. company/unit normalization and ambiguity handling
|
||||||
|
4. period extraction/normalization and ambiguity handling
|
||||||
|
5. pipe-backed direct execution integration
|
||||||
|
6. returned artifact interpretation
|
||||||
|
7. Zhihu regression verification
|
||||||
|
8. ws-readiness seam verification
|
||||||
|
|
||||||
|
The plan should explicitly keep the “do not break Zhihu hotlist” boundary visible in every execution and verification stage.
|
||||||
637
resources/zhihu-hotlist-echarts.html
Normal file
637
resources/zhihu-hotlist-echarts.html
Normal file
@@ -0,0 +1,637 @@
|
|||||||
|
<!doctype html>
|
||||||
|
<html lang="zh-CN">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||||
|
<title>知乎热榜图表驾驶舱</title>
|
||||||
|
<script src="https://cdn.jsdelivr.net/npm/echarts@5/dist/echarts.min.js"></script>
|
||||||
|
<style>
|
||||||
|
:root {
|
||||||
|
--bg: #06111f;
|
||||||
|
--bg-2: #0a1f37;
|
||||||
|
--panel: rgba(8, 25, 42, 0.88);
|
||||||
|
--panel-strong: rgba(10, 32, 55, 0.95);
|
||||||
|
--line: rgba(101, 187, 255, 0.18);
|
||||||
|
--line-strong: rgba(236, 186, 81, 0.26);
|
||||||
|
--text: #eef6ff;
|
||||||
|
--muted: #8ea6c2;
|
||||||
|
--accent: #62d0ff;
|
||||||
|
--accent-2: #ecba51;
|
||||||
|
--accent-3: #6df0c2;
|
||||||
|
--danger: #ff8b7e;
|
||||||
|
--shadow: 0 20px 48px rgba(0, 0, 0, 0.34);
|
||||||
|
--font-heading: "DIN Alternate", "Bahnschrift", "Microsoft YaHei UI", sans-serif;
|
||||||
|
--font-body: "Segoe UI Variable Text", "Microsoft YaHei", "PingFang SC", sans-serif;
|
||||||
|
}
|
||||||
|
|
||||||
|
* {
|
||||||
|
box-sizing: border-box;
|
||||||
|
}
|
||||||
|
|
||||||
|
html,
|
||||||
|
body {
|
||||||
|
margin: 0;
|
||||||
|
min-height: 100%;
|
||||||
|
background:
|
||||||
|
radial-gradient(circle at 16% 10%, rgba(98, 208, 255, 0.18), transparent 22%),
|
||||||
|
radial-gradient(circle at 86% 12%, rgba(236, 186, 81, 0.14), transparent 18%),
|
||||||
|
linear-gradient(145deg, var(--bg) 0%, var(--bg-2) 42%, #030910 100%);
|
||||||
|
color: var(--text);
|
||||||
|
font-family: var(--font-body);
|
||||||
|
}
|
||||||
|
|
||||||
|
body::before {
|
||||||
|
content: "";
|
||||||
|
position: fixed;
|
||||||
|
inset: 0;
|
||||||
|
pointer-events: none;
|
||||||
|
background-image:
|
||||||
|
linear-gradient(rgba(101, 187, 255, 0.05) 1px, transparent 1px),
|
||||||
|
linear-gradient(90deg, rgba(101, 187, 255, 0.05) 1px, transparent 1px);
|
||||||
|
background-size: 44px 44px;
|
||||||
|
mask-image: radial-gradient(circle at center, black 34%, rgba(0, 0, 0, 0.22) 88%, transparent 100%);
|
||||||
|
}
|
||||||
|
|
||||||
|
.page {
|
||||||
|
min-height: 100vh;
|
||||||
|
padding: 18px;
|
||||||
|
display: grid;
|
||||||
|
grid-template-rows: auto auto 1fr auto;
|
||||||
|
gap: 14px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.panel {
|
||||||
|
position: relative;
|
||||||
|
overflow: hidden;
|
||||||
|
background:
|
||||||
|
linear-gradient(180deg, rgba(255, 255, 255, 0.035), rgba(255, 255, 255, 0.01)),
|
||||||
|
linear-gradient(145deg, rgba(9, 30, 51, 0.97), rgba(6, 20, 34, 0.92));
|
||||||
|
border: 1px solid var(--line);
|
||||||
|
border-radius: 22px;
|
||||||
|
box-shadow: var(--shadow);
|
||||||
|
}
|
||||||
|
|
||||||
|
.panel::before {
|
||||||
|
content: "";
|
||||||
|
position: absolute;
|
||||||
|
left: 18px;
|
||||||
|
right: 18px;
|
||||||
|
top: 0;
|
||||||
|
height: 2px;
|
||||||
|
background: linear-gradient(90deg, transparent, var(--accent), var(--accent-2), transparent);
|
||||||
|
opacity: 0.95;
|
||||||
|
}
|
||||||
|
|
||||||
|
.hero {
|
||||||
|
padding: 18px 24px;
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: minmax(0, 1fr) 360px;
|
||||||
|
gap: 16px;
|
||||||
|
align-items: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
.eyebrow {
|
||||||
|
color: var(--accent);
|
||||||
|
letter-spacing: 2px;
|
||||||
|
text-transform: uppercase;
|
||||||
|
font-size: 12px;
|
||||||
|
margin-bottom: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
h1 {
|
||||||
|
margin: 0;
|
||||||
|
font-family: var(--font-heading);
|
||||||
|
font-size: 38px;
|
||||||
|
line-height: 1.08;
|
||||||
|
letter-spacing: 1px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#snapshot-meta {
|
||||||
|
margin: 10px 0 0;
|
||||||
|
color: var(--muted);
|
||||||
|
font-size: 14px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.hero-notes {
|
||||||
|
display: grid;
|
||||||
|
gap: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.note-card {
|
||||||
|
padding: 14px 16px;
|
||||||
|
border-radius: 16px;
|
||||||
|
background: linear-gradient(135deg, rgba(98, 208, 255, 0.08), rgba(236, 186, 81, 0.08));
|
||||||
|
border: 1px solid rgba(255, 255, 255, 0.05);
|
||||||
|
}
|
||||||
|
|
||||||
|
.note-card strong {
|
||||||
|
display: block;
|
||||||
|
margin-bottom: 6px;
|
||||||
|
font-size: 14px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.note-card span {
|
||||||
|
color: var(--muted);
|
||||||
|
font-size: 12px;
|
||||||
|
line-height: 1.5;
|
||||||
|
}
|
||||||
|
|
||||||
|
.metrics {
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: repeat(4, 1fr);
|
||||||
|
gap: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.metric {
|
||||||
|
padding: 18px 18px 16px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.metric-label {
|
||||||
|
color: var(--muted);
|
||||||
|
font-size: 12px;
|
||||||
|
letter-spacing: 1px;
|
||||||
|
text-transform: uppercase;
|
||||||
|
}
|
||||||
|
|
||||||
|
.metric-value {
|
||||||
|
margin-top: 10px;
|
||||||
|
font-family: var(--font-heading);
|
||||||
|
font-size: 34px;
|
||||||
|
color: var(--text);
|
||||||
|
}
|
||||||
|
|
||||||
|
.metric-sub {
|
||||||
|
margin-top: 8px;
|
||||||
|
color: var(--accent);
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.charts {
|
||||||
|
min-height: 0;
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: 1.2fr 1fr 0.95fr;
|
||||||
|
grid-template-rows: 360px 320px;
|
||||||
|
gap: 14px;
|
||||||
|
grid-template-areas:
|
||||||
|
"bar top pie"
|
||||||
|
"bubble table table";
|
||||||
|
}
|
||||||
|
|
||||||
|
.chart-panel {
|
||||||
|
padding: 14px 16px 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.bar-panel { grid-area: bar; }
|
||||||
|
.top-panel { grid-area: top; }
|
||||||
|
.pie-panel { grid-area: pie; }
|
||||||
|
.bubble-panel { grid-area: bubble; }
|
||||||
|
.table-panel { grid-area: table; padding: 14px 16px 10px; }
|
||||||
|
|
||||||
|
.section-head {
|
||||||
|
display: flex;
|
||||||
|
align-items: end;
|
||||||
|
justify-content: space-between;
|
||||||
|
gap: 12px;
|
||||||
|
margin-bottom: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.section-head h2 {
|
||||||
|
margin: 0;
|
||||||
|
font-size: 22px;
|
||||||
|
font-family: var(--font-heading);
|
||||||
|
letter-spacing: 1px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.section-head span {
|
||||||
|
color: var(--muted);
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.chart {
|
||||||
|
width: 100%;
|
||||||
|
height: calc(100% - 42px);
|
||||||
|
}
|
||||||
|
|
||||||
|
.table-wrap {
|
||||||
|
height: calc(100% - 42px);
|
||||||
|
overflow: auto;
|
||||||
|
padding-right: 4px;
|
||||||
|
}
|
||||||
|
|
||||||
|
table {
|
||||||
|
width: 100%;
|
||||||
|
border-collapse: collapse;
|
||||||
|
}
|
||||||
|
|
||||||
|
thead th {
|
||||||
|
position: sticky;
|
||||||
|
top: 0;
|
||||||
|
z-index: 1;
|
||||||
|
background: rgba(6, 19, 32, 0.96);
|
||||||
|
padding: 10px 8px;
|
||||||
|
text-align: left;
|
||||||
|
font-size: 12px;
|
||||||
|
color: var(--muted);
|
||||||
|
letter-spacing: 1px;
|
||||||
|
text-transform: uppercase;
|
||||||
|
border-bottom: 1px solid var(--line-strong);
|
||||||
|
}
|
||||||
|
|
||||||
|
tbody td {
|
||||||
|
padding: 11px 8px;
|
||||||
|
border-bottom: 1px solid rgba(255, 255, 255, 0.05);
|
||||||
|
font-size: 13px;
|
||||||
|
vertical-align: top;
|
||||||
|
}
|
||||||
|
|
||||||
|
tbody tr:nth-child(odd) {
|
||||||
|
background: rgba(255, 255, 255, 0.016);
|
||||||
|
}
|
||||||
|
|
||||||
|
.rank {
|
||||||
|
font-family: var(--font-heading);
|
||||||
|
color: var(--accent-2);
|
||||||
|
white-space: nowrap;
|
||||||
|
}
|
||||||
|
|
||||||
|
.heat {
|
||||||
|
color: var(--accent-3);
|
||||||
|
font-family: var(--font-heading);
|
||||||
|
white-space: nowrap;
|
||||||
|
}
|
||||||
|
|
||||||
|
.tag {
|
||||||
|
display: inline-flex;
|
||||||
|
align-items: center;
|
||||||
|
padding: 4px 10px;
|
||||||
|
border-radius: 999px;
|
||||||
|
background: rgba(98, 208, 255, 0.12);
|
||||||
|
color: var(--accent);
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.footer {
|
||||||
|
padding: 10px 16px;
|
||||||
|
color: var(--muted);
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (max-width: 1440px) {
|
||||||
|
.hero {
|
||||||
|
grid-template-columns: 1fr;
|
||||||
|
}
|
||||||
|
|
||||||
|
.metrics {
|
||||||
|
grid-template-columns: repeat(2, 1fr);
|
||||||
|
}
|
||||||
|
|
||||||
|
.charts {
|
||||||
|
grid-template-columns: 1fr;
|
||||||
|
grid-template-rows: 320px 320px 320px 320px 420px;
|
||||||
|
grid-template-areas:
|
||||||
|
"bar"
|
||||||
|
"top"
|
||||||
|
"pie"
|
||||||
|
"bubble"
|
||||||
|
"table";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (max-width: 760px) {
|
||||||
|
.page {
|
||||||
|
padding: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
h1 {
|
||||||
|
font-size: 28px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.metrics {
|
||||||
|
grid-template-columns: 1fr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div class="page">
|
||||||
|
<section class="panel hero">
|
||||||
|
<div>
|
||||||
|
<div class="eyebrow">Zhihu Hotlist Visual Command Center</div>
|
||||||
|
<h1>知乎热榜图表驾驶舱</h1>
|
||||||
|
<p id="snapshot-meta">由 sgClaw screen_html_export 生成的本地静态展示页</p>
|
||||||
|
</div>
|
||||||
|
<div class="hero-notes">
|
||||||
|
<div class="note-card">
|
||||||
|
<strong>图表表达</strong>
|
||||||
|
<span>同一份热榜数据同时映射为分类热度、头部热点、结构占比和热度散点,适合现场讲解图表能力。</span>
|
||||||
|
</div>
|
||||||
|
<div class="note-card">
|
||||||
|
<strong>演示建议</strong>
|
||||||
|
<span id="lead-summary">优先讲解榜首热点、分类分布与热度层级,再向下展开全量榜单细节。</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section class="metrics">
|
||||||
|
<article class="panel metric">
|
||||||
|
<div class="metric-label">热榜条目数</div>
|
||||||
|
<div id="metric-total" class="metric-value">0</div>
|
||||||
|
<div class="metric-sub">Tracked items</div>
|
||||||
|
</article>
|
||||||
|
<article class="panel metric">
|
||||||
|
<div class="metric-label">主题分类数</div>
|
||||||
|
<div id="metric-categories" class="metric-value">0</div>
|
||||||
|
<div class="metric-sub">Topic groups</div>
|
||||||
|
</article>
|
||||||
|
<article class="panel metric">
|
||||||
|
<div class="metric-label">累计热度</div>
|
||||||
|
<div id="metric-heat" class="metric-value">0</div>
|
||||||
|
<div class="metric-sub">Total heat</div>
|
||||||
|
</article>
|
||||||
|
<article class="panel metric">
|
||||||
|
<div class="metric-label">头部峰值</div>
|
||||||
|
<div id="metric-peak" class="metric-value">0</div>
|
||||||
|
<div class="metric-sub">Peak topic heat</div>
|
||||||
|
</article>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section class="charts">
|
||||||
|
<section class="panel chart-panel bar-panel">
|
||||||
|
<div class="section-head">
|
||||||
|
<h2>分类总热度</h2>
|
||||||
|
<span>横向对比</span>
|
||||||
|
</div>
|
||||||
|
<div id="bar-chart" class="chart"></div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section class="panel chart-panel top-panel">
|
||||||
|
<div class="section-head">
|
||||||
|
<h2>Top10 热点</h2>
|
||||||
|
<span>柱状排行</span>
|
||||||
|
</div>
|
||||||
|
<div id="top-chart" class="chart"></div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section class="panel chart-panel pie-panel">
|
||||||
|
<div class="section-head">
|
||||||
|
<h2>分类占比</h2>
|
||||||
|
<span>环形结构</span>
|
||||||
|
</div>
|
||||||
|
<div id="pie-chart" class="chart"></div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section class="panel chart-panel bubble-panel">
|
||||||
|
<div class="section-head">
|
||||||
|
<h2>热度分层</h2>
|
||||||
|
<span>散点气泡</span>
|
||||||
|
</div>
|
||||||
|
<div id="bubble-chart" class="chart"></div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section class="panel table-panel">
|
||||||
|
<div class="section-head">
|
||||||
|
<h2>热榜明细</h2>
|
||||||
|
<span id="table-note">按原始顺序保留</span>
|
||||||
|
</div>
|
||||||
|
<div class="table-wrap">
|
||||||
|
<table>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>排名</th>
|
||||||
|
<th>标题</th>
|
||||||
|
<th>分类</th>
|
||||||
|
<th>热度</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="table-body"></tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section class="panel footer">
|
||||||
|
本页由 `screen_html_export` 生成,适合在系统浏览器中直接打开进行展示。
|
||||||
|
</section>
|
||||||
|
</div>
|
||||||
|
<script>
|
||||||
|
const defaultPayload = {
|
||||||
|
"snapshot_id": "template-snapshot",
|
||||||
|
"generated_at_ms": 0,
|
||||||
|
"categories": [],
|
||||||
|
"table": []
|
||||||
|
}
|
||||||
|
|
||||||
|
const themeMeta = {
|
||||||
|
title: "知乎热榜图表驾驶舱",
|
||||||
|
renderer: "screen_html_export"
|
||||||
|
};
|
||||||
|
|
||||||
|
const chartColors = ["#62d0ff", "#ecba51", "#6df0c2", "#7f8cff", "#ff8b7e", "#9fcbff", "#58a6ff"];
|
||||||
|
const charts = {};
|
||||||
|
|
||||||
|
function formatNumber(value) {
|
||||||
|
return new Intl.NumberFormat("zh-CN").format(Number(value || 0));
|
||||||
|
}
|
||||||
|
|
||||||
|
function getTotalHeat(categories) {
|
||||||
|
return (categories || []).reduce((sum, item) => sum + Number(item.total_heat || 0), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
function getPeakHeat(table) {
|
||||||
|
return (table || []).reduce((max, row) => Math.max(max, Number(row.heat_value || 0)), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildLeadSummary(table, categories) {
|
||||||
|
const top = (table || [])[0];
|
||||||
|
const category = (categories || []).slice().sort((a, b) => (b.total_heat || 0) - (a.total_heat || 0))[0];
|
||||||
|
const parts = [];
|
||||||
|
if (top) {
|
||||||
|
parts.push(`榜首是“${top.title}”`);
|
||||||
|
}
|
||||||
|
if (category) {
|
||||||
|
parts.push(`主导分类为“${category.category_label}”`);
|
||||||
|
}
|
||||||
|
parts.push(`共覆盖 ${(table || []).length} 条热点`);
|
||||||
|
return parts.join(",");
|
||||||
|
}
|
||||||
|
|
||||||
|
function ensureCharts() {
|
||||||
|
if (!window.echarts) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
charts.bar = charts.bar || echarts.init(document.getElementById("bar-chart"));
|
||||||
|
charts.top = charts.top || echarts.init(document.getElementById("top-chart"));
|
||||||
|
charts.pie = charts.pie || echarts.init(document.getElementById("pie-chart"));
|
||||||
|
charts.bubble = charts.bubble || echarts.init(document.getElementById("bubble-chart"));
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderBarChart(categories) {
|
||||||
|
const sorted = (categories || []).slice().sort((a, b) => Number(a.total_heat || 0) - Number(b.total_heat || 0));
|
||||||
|
charts.bar.setOption({
|
||||||
|
animationDuration: 700,
|
||||||
|
grid: {left: 90, right: 18, top: 10, bottom: 20},
|
||||||
|
xAxis: {
|
||||||
|
type: "value",
|
||||||
|
axisLabel: {color: "#8ea6c2"},
|
||||||
|
splitLine: {lineStyle: {color: "rgba(255,255,255,0.06)"}}
|
||||||
|
},
|
||||||
|
yAxis: {
|
||||||
|
type: "category",
|
||||||
|
data: sorted.map((item) => item.category_label),
|
||||||
|
axisLabel: {color: "#eef6ff"},
|
||||||
|
axisLine: {lineStyle: {color: "rgba(255,255,255,0.1)"}}
|
||||||
|
},
|
||||||
|
tooltip: {trigger: "axis", axisPointer: {type: "shadow"}},
|
||||||
|
series: [{
|
||||||
|
type: "bar",
|
||||||
|
data: sorted.map((item, index) => ({
|
||||||
|
value: Number(item.total_heat || 0),
|
||||||
|
itemStyle: {color: chartColors[index % chartColors.length], borderRadius: [0, 8, 8, 0]}
|
||||||
|
})),
|
||||||
|
label: {show: true, position: "right", color: "#dfeeff"}
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderTopChart(table) {
|
||||||
|
const top = (table || []).slice(0, 10);
|
||||||
|
charts.top.setOption({
|
||||||
|
animationDuration: 700,
|
||||||
|
grid: {left: 42, right: 12, top: 26, bottom: 46},
|
||||||
|
tooltip: {trigger: "axis", axisPointer: {type: "shadow"}},
|
||||||
|
xAxis: {
|
||||||
|
type: "category",
|
||||||
|
data: top.map((row) => `#${row.rank}`),
|
||||||
|
axisLabel: {color: "#8ea6c2"},
|
||||||
|
axisLine: {lineStyle: {color: "rgba(255,255,255,0.1)"}}
|
||||||
|
},
|
||||||
|
yAxis: {
|
||||||
|
type: "value",
|
||||||
|
axisLabel: {color: "#8ea6c2"},
|
||||||
|
splitLine: {lineStyle: {color: "rgba(255,255,255,0.06)"}}
|
||||||
|
},
|
||||||
|
series: [{
|
||||||
|
type: "bar",
|
||||||
|
data: top.map((row, index) => ({
|
||||||
|
value: Number(row.heat_value || 0),
|
||||||
|
itemStyle: {color: chartColors[index % chartColors.length], borderRadius: [8, 8, 0, 0]}
|
||||||
|
})),
|
||||||
|
label: {show: true, position: "top", color: "#eef6ff", formatter: ({dataIndex}) => top[dataIndex].heat_text}
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderPieChart(categories) {
|
||||||
|
charts.pie.setOption({
|
||||||
|
animationDuration: 700,
|
||||||
|
color: chartColors,
|
||||||
|
tooltip: {trigger: "item"},
|
||||||
|
legend: {
|
||||||
|
bottom: 2,
|
||||||
|
textStyle: {color: "#8ea6c2", fontSize: 11},
|
||||||
|
itemWidth: 12,
|
||||||
|
itemHeight: 8
|
||||||
|
},
|
||||||
|
series: [{
|
||||||
|
type: "pie",
|
||||||
|
radius: ["44%", "72%"],
|
||||||
|
center: ["50%", "44%"],
|
||||||
|
itemStyle: {borderColor: "#081a2c", borderWidth: 2},
|
||||||
|
label: {
|
||||||
|
color: "#eef6ff",
|
||||||
|
formatter: "{b}\n{d}%"
|
||||||
|
},
|
||||||
|
data: (categories || []).map((item) => ({
|
||||||
|
name: item.category_label,
|
||||||
|
value: Number(item.total_heat || 0)
|
||||||
|
}))
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderBubbleChart(table) {
|
||||||
|
const top = (table || []).slice(0, 12);
|
||||||
|
charts.bubble.setOption({
|
||||||
|
animationDuration: 700,
|
||||||
|
color: chartColors,
|
||||||
|
grid: {left: 44, right: 18, top: 16, bottom: 36},
|
||||||
|
xAxis: {
|
||||||
|
type: "value",
|
||||||
|
name: "排名",
|
||||||
|
inverse: true,
|
||||||
|
min: 0,
|
||||||
|
max: Math.max(...top.map((row) => Number(row.rank || 0)), 10) + 1,
|
||||||
|
nameTextStyle: {color: "#8ea6c2"},
|
||||||
|
axisLabel: {color: "#8ea6c2"},
|
||||||
|
splitLine: {lineStyle: {color: "rgba(255,255,255,0.06)"}}
|
||||||
|
},
|
||||||
|
yAxis: {
|
||||||
|
type: "value",
|
||||||
|
name: "热度值",
|
||||||
|
nameTextStyle: {color: "#8ea6c2"},
|
||||||
|
axisLabel: {color: "#8ea6c2"},
|
||||||
|
splitLine: {lineStyle: {color: "rgba(255,255,255,0.06)"}}
|
||||||
|
},
|
||||||
|
tooltip: {
|
||||||
|
formatter: (params) => {
|
||||||
|
const row = params.data.raw;
|
||||||
|
return `${row.title}<br/>排名 #${row.rank}<br/>热度 ${row.heat_text}<br/>分类 ${row.category_label}`;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
series: [{
|
||||||
|
type: "scatter",
|
||||||
|
symbolSize: (value) => Math.max(18, Math.min(56, value[2] / 80000)),
|
||||||
|
data: top.map((row, index) => ({
|
||||||
|
value: [Number(row.rank || 0), Number(row.heat_value || 0), Number(row.heat_value || 0)],
|
||||||
|
raw: row,
|
||||||
|
itemStyle: {color: chartColors[index % chartColors.length], opacity: 0.82}
|
||||||
|
}))
|
||||||
|
}]
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderTable(table) {
|
||||||
|
document.getElementById("table-body").innerHTML = (table || []).map((row) => `
|
||||||
|
<tr>
|
||||||
|
<td class="rank">#${row.rank}</td>
|
||||||
|
<td>${row.title}</td>
|
||||||
|
<td><span class="tag">${row.category_label}</span></td>
|
||||||
|
<td class="heat">${row.heat_text}</td>
|
||||||
|
</tr>
|
||||||
|
`).join("");
|
||||||
|
}
|
||||||
|
|
||||||
|
function render(payload) {
|
||||||
|
const data = payload || defaultPayload;
|
||||||
|
const categories = data.categories || [];
|
||||||
|
const table = data.table || [];
|
||||||
|
|
||||||
|
document.title = themeMeta.title;
|
||||||
|
document.getElementById("snapshot-meta").textContent =
|
||||||
|
`${data.snapshot_id} | 生成时间 ${new Date(data.generated_at_ms || 0).toLocaleString()}`;
|
||||||
|
document.getElementById("metric-total").textContent = formatNumber(table.length);
|
||||||
|
document.getElementById("metric-categories").textContent = formatNumber(categories.length);
|
||||||
|
document.getElementById("metric-heat").textContent = formatNumber(getTotalHeat(categories));
|
||||||
|
document.getElementById("metric-peak").textContent = formatNumber(getPeakHeat(table));
|
||||||
|
document.getElementById("lead-summary").textContent = buildLeadSummary(table, categories);
|
||||||
|
document.getElementById("table-note").textContent =
|
||||||
|
table.length > 0 ? `当前展示 ${table.length} 条热点` : "暂无热榜数据";
|
||||||
|
|
||||||
|
renderTable(table);
|
||||||
|
ensureCharts();
|
||||||
|
if (window.echarts) {
|
||||||
|
renderBarChart(categories);
|
||||||
|
renderTopChart(table);
|
||||||
|
renderPieChart(categories);
|
||||||
|
renderBubbleChart(table);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
window.addEventListener("resize", () => {
|
||||||
|
Object.values(charts).forEach((chart) => chart && chart.resize());
|
||||||
|
});
|
||||||
|
|
||||||
|
render(defaultPayload);
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
@@ -164,8 +164,9 @@ pub fn handle_browser_message_with_context<T: Transport + 'static>(
|
|||||||
page_url,
|
page_url,
|
||||||
page_title,
|
page_title,
|
||||||
} => {
|
} => {
|
||||||
let instruction = instruction.trim().to_string();
|
let raw_instruction = instruction;
|
||||||
if instruction.is_empty() {
|
let trimmed_instruction = raw_instruction.trim().to_string();
|
||||||
|
if trimmed_instruction.is_empty() {
|
||||||
return transport.send(&AgentMessage::TaskComplete {
|
return transport.send(&AgentMessage::TaskComplete {
|
||||||
success: false,
|
success: false,
|
||||||
summary: "请输入任务内容。".to_string(),
|
summary: "请输入任务内容。".to_string(),
|
||||||
@@ -179,6 +180,25 @@ pub fn handle_browser_message_with_context<T: Transport + 'static>(
|
|||||||
page_url: (!page_url.trim().is_empty()).then_some(page_url),
|
page_url: (!page_url.trim().is_empty()).then_some(page_url),
|
||||||
page_title: (!page_title.trim().is_empty()).then_some(page_title),
|
page_title: (!page_title.trim().is_empty()).then_some(page_title),
|
||||||
};
|
};
|
||||||
|
let mut instruction = trimmed_instruction;
|
||||||
|
let mut deterministic_plan = None;
|
||||||
|
match crate::compat::deterministic_submit::decide_deterministic_submit(
|
||||||
|
&raw_instruction,
|
||||||
|
task_context.page_url.as_deref(),
|
||||||
|
task_context.page_title.as_deref(),
|
||||||
|
) {
|
||||||
|
crate::compat::deterministic_submit::DeterministicSubmitDecision::NotDeterministic => {}
|
||||||
|
crate::compat::deterministic_submit::DeterministicSubmitDecision::Prompt { summary } => {
|
||||||
|
return transport.send(&AgentMessage::TaskComplete {
|
||||||
|
success: false,
|
||||||
|
summary,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
crate::compat::deterministic_submit::DeterministicSubmitDecision::Execute(plan) => {
|
||||||
|
instruction = plan.instruction.clone();
|
||||||
|
deterministic_plan = Some(plan);
|
||||||
|
}
|
||||||
|
}
|
||||||
let _ = transport.send(&AgentMessage::LogEntry {
|
let _ = transport.send(&AgentMessage::LogEntry {
|
||||||
level: "info".to_string(),
|
level: "info".to_string(),
|
||||||
message: runtime_version_log_message(),
|
message: runtime_version_log_message(),
|
||||||
@@ -219,6 +239,25 @@ pub fn handle_browser_message_with_context<T: Transport + 'static>(
|
|||||||
settings.runtime_profile, settings.skills_prompt_mode
|
settings.runtime_profile, settings.skills_prompt_mode
|
||||||
),
|
),
|
||||||
});
|
});
|
||||||
|
if let Some(plan) = deterministic_plan.as_ref() {
|
||||||
|
let _ = send_mode_log(transport, "direct_skill_primary");
|
||||||
|
let completion = match crate::compat::deterministic_submit::execute_deterministic_submit(
|
||||||
|
browser_tool.clone(),
|
||||||
|
plan,
|
||||||
|
&context.workspace_root,
|
||||||
|
&settings,
|
||||||
|
) {
|
||||||
|
Ok(outcome) => AgentMessage::TaskComplete {
|
||||||
|
success: outcome.success,
|
||||||
|
summary: outcome.summary,
|
||||||
|
},
|
||||||
|
Err(err) => AgentMessage::TaskComplete {
|
||||||
|
success: false,
|
||||||
|
summary: err.to_string(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
return transport.send(&completion);
|
||||||
|
}
|
||||||
if crate::compat::orchestration::should_use_primary_orchestration(
|
if crate::compat::orchestration::should_use_primary_orchestration(
|
||||||
&instruction,
|
&instruction,
|
||||||
task_context.page_url.as_deref(),
|
task_context.page_url.as_deref(),
|
||||||
@@ -247,6 +286,31 @@ pub fn handle_browser_message_with_context<T: Transport + 'static>(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if settings
|
||||||
|
.direct_submit_skill
|
||||||
|
.as_deref()
|
||||||
|
.map(str::trim)
|
||||||
|
.is_some_and(|value| !value.is_empty())
|
||||||
|
{
|
||||||
|
let _ = send_mode_log(transport, "direct_skill_primary");
|
||||||
|
let completion = match crate::compat::direct_skill_runtime::execute_direct_submit_skill(
|
||||||
|
browser_tool.clone(),
|
||||||
|
&instruction,
|
||||||
|
&task_context,
|
||||||
|
&context.workspace_root,
|
||||||
|
&settings,
|
||||||
|
) {
|
||||||
|
Ok(outcome) => AgentMessage::TaskComplete {
|
||||||
|
success: outcome.success,
|
||||||
|
summary: outcome.summary,
|
||||||
|
},
|
||||||
|
Err(err) => AgentMessage::TaskComplete {
|
||||||
|
success: false,
|
||||||
|
summary: err.to_string(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
return transport.send(&completion);
|
||||||
|
}
|
||||||
let _ = send_mode_log(transport, "compat_llm_primary");
|
let _ = send_mode_log(transport, "compat_llm_primary");
|
||||||
match crate::compat::runtime::execute_task_with_sgclaw_settings(
|
match crate::compat::runtime::execute_task_with_sgclaw_settings(
|
||||||
transport,
|
transport,
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ use crate::pipe::{Action, BrowserPipeTool, Transport};
|
|||||||
pub struct BrowserScriptSkillTool<T: Transport> {
|
pub struct BrowserScriptSkillTool<T: Transport> {
|
||||||
tool_name: String,
|
tool_name: String,
|
||||||
tool_description: String,
|
tool_description: String,
|
||||||
|
skill_root: PathBuf,
|
||||||
script_path: PathBuf,
|
script_path: PathBuf,
|
||||||
args: HashMap<String, String>,
|
args: HashMap<String, String>,
|
||||||
browser_tool: BrowserPipeTool<T>,
|
browser_tool: BrowserPipeTool<T>,
|
||||||
@@ -25,27 +26,13 @@ impl<T: Transport> BrowserScriptSkillTool<T> {
|
|||||||
skill_root: &Path,
|
skill_root: &Path,
|
||||||
browser_tool: BrowserPipeTool<T>,
|
browser_tool: BrowserPipeTool<T>,
|
||||||
) -> anyhow::Result<Self> {
|
) -> anyhow::Result<Self> {
|
||||||
let script_path = skill_root.join(&tool.command);
|
let script_path = resolve_browser_script_path(skill_root, &tool.command)?;
|
||||||
let canonical_skill_root = skill_root
|
|
||||||
.canonicalize()
|
|
||||||
.unwrap_or_else(|_| skill_root.to_path_buf());
|
|
||||||
let canonical_script_path = script_path.canonicalize().map_err(|err| {
|
|
||||||
anyhow::anyhow!(
|
|
||||||
"failed to resolve browser script {}: {err}",
|
|
||||||
script_path.display()
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
if !canonical_script_path.starts_with(&canonical_skill_root) {
|
|
||||||
anyhow::bail!(
|
|
||||||
"browser script path escapes skill root: {}",
|
|
||||||
canonical_script_path.display()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
tool_name: format!("{}.{}", skill_name, tool.name),
|
tool_name: format!("{}.{}", skill_name, tool.name),
|
||||||
tool_description: tool.description.clone(),
|
tool_description: tool.description.clone(),
|
||||||
script_path: canonical_script_path,
|
skill_root: skill_root.to_path_buf(),
|
||||||
|
script_path,
|
||||||
args: tool.args.clone(),
|
args: tool.args.clone(),
|
||||||
browser_tool,
|
browser_tool,
|
||||||
})
|
})
|
||||||
@@ -97,82 +84,101 @@ impl<T: Transport + 'static> Tool for BrowserScriptSkillTool<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn execute(&self, args: Value) -> anyhow::Result<ToolResult> {
|
async fn execute(&self, args: Value) -> anyhow::Result<ToolResult> {
|
||||||
let mut args = match args {
|
let tool = SkillTool {
|
||||||
Value::Object(args) => args,
|
name: self.tool_name.clone(),
|
||||||
other => {
|
description: self.tool_description.clone(),
|
||||||
return Ok(failed_tool_result(format!(
|
kind: "browser_script".to_string(),
|
||||||
"expected object arguments, got {other}"
|
command: self.script_path.to_string_lossy().into_owned(),
|
||||||
)))
|
args: self.args.clone(),
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let raw_expected_domain = match args.remove("expected_domain") {
|
execute_browser_script_tool(&tool, &self.skill_root, self.browser_tool.clone(), args).await
|
||||||
Some(Value::String(value)) if !value.trim().is_empty() => value,
|
}
|
||||||
Some(other) => {
|
}
|
||||||
return Ok(failed_tool_result(format!(
|
|
||||||
"expected_domain must be a non-empty string, got {other}"
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
return Ok(failed_tool_result(
|
|
||||||
"missing required field expected_domain".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let expected_domain = match normalize_domain_like(&raw_expected_domain) {
|
|
||||||
Some(value) => value,
|
|
||||||
None => {
|
|
||||||
return Ok(failed_tool_result(format!(
|
|
||||||
"expected_domain must resolve to a hostname, got {raw_expected_domain:?}"
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
for required_arg in self.args.keys() {
|
pub async fn execute_browser_script_tool<T: Transport + 'static>(
|
||||||
if !args.contains_key(required_arg) {
|
tool: &SkillTool,
|
||||||
return Ok(failed_tool_result(format!(
|
skill_root: &Path,
|
||||||
"missing required field {required_arg}"
|
browser_tool: BrowserPipeTool<T>,
|
||||||
)));
|
args: Value,
|
||||||
}
|
) -> anyhow::Result<ToolResult> {
|
||||||
|
if tool.kind != "browser_script" {
|
||||||
|
return Ok(failed_tool_result(format!(
|
||||||
|
"browser script tool kind must be browser_script, got {}",
|
||||||
|
tool.kind
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let script_path = resolve_browser_script_path(skill_root, &tool.command)?;
|
||||||
|
let mut args = match args {
|
||||||
|
Value::Object(args) => args,
|
||||||
|
other => return Ok(failed_tool_result(format!("expected object arguments, got {other}"))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let raw_expected_domain = match args.remove("expected_domain") {
|
||||||
|
Some(Value::String(value)) if !value.trim().is_empty() => value,
|
||||||
|
Some(other) => {
|
||||||
|
return Ok(failed_tool_result(format!(
|
||||||
|
"expected_domain must be a non-empty string, got {other}"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
None => {
|
||||||
|
return Ok(failed_tool_result(
|
||||||
|
"missing required field expected_domain".to_string(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let expected_domain = match normalize_domain_like(&raw_expected_domain) {
|
||||||
|
Some(value) => value,
|
||||||
|
None => {
|
||||||
|
return Ok(failed_tool_result(format!(
|
||||||
|
"expected_domain must resolve to a hostname, got {raw_expected_domain:?}"
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let script_body = match fs::read_to_string(&self.script_path) {
|
for required_arg in tool.args.keys() {
|
||||||
Ok(value) => value,
|
if !args.contains_key(required_arg) {
|
||||||
Err(err) => {
|
return Ok(failed_tool_result(format!(
|
||||||
return Ok(failed_tool_result(format!(
|
"missing required field {required_arg}"
|
||||||
"failed to read browser script {}: {err}",
|
|
||||||
self.script_path.display()
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let wrapped_script = wrap_browser_script(&script_body, &Value::Object(args.clone()));
|
|
||||||
let result = match self.browser_tool.invoke(
|
|
||||||
Action::Eval,
|
|
||||||
json!({ "script": wrapped_script }),
|
|
||||||
&expected_domain,
|
|
||||||
) {
|
|
||||||
Ok(result) => result,
|
|
||||||
Err(err) => return Ok(failed_tool_result(err.to_string())),
|
|
||||||
};
|
|
||||||
|
|
||||||
if !result.success {
|
|
||||||
return Ok(failed_tool_result(format_browser_script_error(
|
|
||||||
&result.data,
|
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
let payload = result
|
|
||||||
.data
|
|
||||||
.get("text")
|
|
||||||
.cloned()
|
|
||||||
.unwrap_or_else(|| result.data.clone());
|
|
||||||
Ok(ToolResult {
|
|
||||||
success: true,
|
|
||||||
output: stringify_tool_payload(&payload)?,
|
|
||||||
error: None,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let script_body = match fs::read_to_string(&script_path) {
|
||||||
|
Ok(value) => value,
|
||||||
|
Err(err) => {
|
||||||
|
return Ok(failed_tool_result(format!(
|
||||||
|
"failed to read browser script {}: {err}",
|
||||||
|
script_path.display()
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let wrapped_script = wrap_browser_script(&script_body, &Value::Object(args.clone()));
|
||||||
|
let result = match browser_tool.invoke(
|
||||||
|
Action::Eval,
|
||||||
|
json!({ "script": wrapped_script }),
|
||||||
|
&expected_domain,
|
||||||
|
) {
|
||||||
|
Ok(result) => result,
|
||||||
|
Err(err) => return Ok(failed_tool_result(err.to_string())),
|
||||||
|
};
|
||||||
|
|
||||||
|
if !result.success {
|
||||||
|
return Ok(failed_tool_result(format_browser_script_error(&result.data)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let payload = result
|
||||||
|
.data
|
||||||
|
.get("text")
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_else(|| result.data.clone());
|
||||||
|
Ok(ToolResult {
|
||||||
|
success: true,
|
||||||
|
output: stringify_tool_payload(&payload)?,
|
||||||
|
error: None,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn build_browser_script_skill_tools<T: Transport + 'static>(
|
pub fn build_browser_script_skill_tools<T: Transport + 'static>(
|
||||||
@@ -213,6 +219,32 @@ fn wrap_browser_script(script_body: &str, args: &Value) -> String {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn resolve_browser_script_path(skill_root: &Path, command: &str) -> anyhow::Result<PathBuf> {
|
||||||
|
let script_path = PathBuf::from(command);
|
||||||
|
let script_path = if script_path.is_absolute() {
|
||||||
|
script_path
|
||||||
|
} else {
|
||||||
|
skill_root.join(script_path)
|
||||||
|
};
|
||||||
|
let canonical_skill_root = skill_root
|
||||||
|
.canonicalize()
|
||||||
|
.unwrap_or_else(|_| skill_root.to_path_buf());
|
||||||
|
let canonical_script_path = script_path.canonicalize().map_err(|err| {
|
||||||
|
anyhow::anyhow!(
|
||||||
|
"failed to resolve browser script {}: {err}",
|
||||||
|
script_path.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
if !canonical_script_path.starts_with(&canonical_skill_root) {
|
||||||
|
anyhow::bail!(
|
||||||
|
"browser script path escapes skill root: {}",
|
||||||
|
canonical_script_path.display()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(canonical_script_path)
|
||||||
|
}
|
||||||
|
|
||||||
fn stringify_tool_payload(payload: &Value) -> anyhow::Result<String> {
|
fn stringify_tool_payload(payload: &Value) -> anyhow::Result<String> {
|
||||||
Ok(match payload {
|
Ok(match payload {
|
||||||
Value::String(value) => value.clone(),
|
Value::String(value) => value.clone(),
|
||||||
|
|||||||
272
src/compat/deterministic_submit.rs
Normal file
272
src/compat/deterministic_submit.rs
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
use std::path::Path;
|
||||||
|
|
||||||
|
use serde_json::{Map, Value};
|
||||||
|
|
||||||
|
use crate::compat::direct_skill_runtime::DirectSubmitOutcome;
|
||||||
|
use crate::config::SgClawSettings;
|
||||||
|
use crate::pipe::{BrowserPipeTool, PipeError, Transport};
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub struct DeterministicExecutionPlan {
|
||||||
|
pub instruction: String,
|
||||||
|
pub tool_name: String,
|
||||||
|
pub expected_domain: String,
|
||||||
|
pub org_label: String,
|
||||||
|
pub org_code: String,
|
||||||
|
pub period_mode: String,
|
||||||
|
pub period_mode_code: String,
|
||||||
|
pub period_value: String,
|
||||||
|
pub period_payload: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub enum DeterministicSubmitDecision {
|
||||||
|
NotDeterministic,
|
||||||
|
Prompt { summary: String },
|
||||||
|
Execute(DeterministicExecutionPlan),
|
||||||
|
}
|
||||||
|
|
||||||
|
const DETERMINISTIC_SUFFIX: &str = "。。。";
|
||||||
|
const LINELLOSS_HOST: &str = "20.76.57.61";
|
||||||
|
const LINELLOSS_TOOL: &str = "tq-lineloss-report.collect_lineloss";
|
||||||
|
|
||||||
|
pub fn decide_deterministic_submit(
|
||||||
|
raw_instruction: &str,
|
||||||
|
page_url: Option<&str>,
|
||||||
|
page_title: Option<&str>,
|
||||||
|
) -> DeterministicSubmitDecision {
|
||||||
|
let Some(instruction) = strip_exact_deterministic_suffix(raw_instruction) else {
|
||||||
|
return DeterministicSubmitDecision::NotDeterministic;
|
||||||
|
};
|
||||||
|
|
||||||
|
let normalized_instruction = instruction.trim();
|
||||||
|
if normalized_instruction.is_empty() {
|
||||||
|
return unsupported_scene_prompt();
|
||||||
|
}
|
||||||
|
|
||||||
|
if !matches_lineloss_scene(normalized_instruction) {
|
||||||
|
return unsupported_scene_prompt();
|
||||||
|
}
|
||||||
|
|
||||||
|
let resolved_org = match crate::compat::tq_lineloss::org_resolver::resolve_org_from_instruction(
|
||||||
|
normalized_instruction,
|
||||||
|
) {
|
||||||
|
Ok(Some(resolved_org)) => resolved_org,
|
||||||
|
Ok(None) => {
|
||||||
|
return DeterministicSubmitDecision::Prompt {
|
||||||
|
summary: crate::compat::tq_lineloss::contracts::missing_company_prompt(),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
Err(summary) => {
|
||||||
|
return DeterministicSubmitDecision::Prompt { summary };
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let resolved_period = match crate::compat::tq_lineloss::period_resolver::resolve_period(
|
||||||
|
normalized_instruction,
|
||||||
|
) {
|
||||||
|
Ok(resolved_period) => resolved_period,
|
||||||
|
Err(summary) => {
|
||||||
|
return DeterministicSubmitDecision::Prompt { summary };
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if page_context_conflicts_with_lineloss(page_url, page_title) {
|
||||||
|
return DeterministicSubmitDecision::Prompt {
|
||||||
|
summary:
|
||||||
|
"已命中台区线损报表技能,但当前页面与台区线损场景不匹配,请切换到线损页面后重试。"
|
||||||
|
.to_string(),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
DeterministicSubmitDecision::Execute(DeterministicExecutionPlan {
|
||||||
|
instruction: normalized_instruction.to_string(),
|
||||||
|
tool_name: LINELLOSS_TOOL.to_string(),
|
||||||
|
expected_domain: LINELLOSS_HOST.to_string(),
|
||||||
|
org_label: resolved_org.label,
|
||||||
|
org_code: resolved_org.code,
|
||||||
|
period_mode: period_mode_name(&resolved_period.mode).to_string(),
|
||||||
|
period_mode_code: resolved_period.mode_code,
|
||||||
|
period_value: resolved_period.value,
|
||||||
|
period_payload: serde_json::to_string(&resolved_period.payload)
|
||||||
|
.unwrap_or_else(|_| "{}".to_string()),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Executes a validated deterministic line-loss submission plan.
///
/// Flattens `plan` into the string arguments expected by the configured
/// browser-script skill, runs that skill, and condenses its raw output into a
/// `DirectSubmitOutcome` via `summarize_lineloss_output`.
pub fn execute_deterministic_submit<T: Transport + 'static>(
    browser_tool: BrowserPipeTool<T>,
    plan: &DeterministicExecutionPlan,
    workspace_root: &Path,
    settings: &SgClawSettings,
) -> Result<DirectSubmitOutcome, PipeError> {
    // Every plan field is forwarded as a string argument; `period_payload`
    // already carries a serialized JSON document the script parses itself.
    let mut args = Map::new();
    args.insert(
        "expected_domain".to_string(),
        Value::String(plan.expected_domain.clone()),
    );
    args.insert(
        "org_label".to_string(),
        Value::String(plan.org_label.clone()),
    );
    args.insert(
        "org_code".to_string(),
        Value::String(plan.org_code.clone()),
    );
    args.insert(
        "period_mode".to_string(),
        Value::String(plan.period_mode.clone()),
    );
    args.insert(
        "period_mode_code".to_string(),
        Value::String(plan.period_mode_code.clone()),
    );
    args.insert(
        "period_value".to_string(),
        Value::String(plan.period_value.clone()),
    );
    args.insert(
        "period_payload".to_string(),
        Value::String(plan.period_payload.clone()),
    );

    let output = crate::compat::direct_skill_runtime::execute_browser_script_skill_raw_output(
        browser_tool,
        &plan.tool_name,
        workspace_root,
        settings,
        args,
    )?;

    Ok(summarize_lineloss_output(&output))
}
|
||||||
|
|
||||||
|
fn summarize_lineloss_output(output: &str) -> DirectSubmitOutcome {
|
||||||
|
let Some(payload) = serde_json::from_str::<Value>(output).ok() else {
|
||||||
|
return DirectSubmitOutcome {
|
||||||
|
success: true,
|
||||||
|
summary: output.to_string(),
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
let artifact = payload
|
||||||
|
.as_object()
|
||||||
|
.and_then(|object| object.get("text"))
|
||||||
|
.unwrap_or(&payload);
|
||||||
|
|
||||||
|
summarize_lineloss_artifact(artifact)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Condenses a line-loss report artifact (a JSON value) into a one-line outcome.
///
/// Non-object values and objects whose `type` is not `"report-artifact"` are
/// passed through verbatim and treated as success; only a recognized report
/// artifact can mark the run failed (via its `status` field).
fn summarize_lineloss_artifact(artifact: &Value) -> DirectSubmitOutcome {
    let Some(artifact) = artifact.as_object() else {
        return DirectSubmitOutcome {
            success: true,
            summary: artifact.to_string(),
        };
    };

    if artifact.get("type").and_then(Value::as_str) != Some("report-artifact") {
        return DirectSubmitOutcome {
            success: true,
            summary: Value::Object(artifact.clone()).to_string(),
        };
    }

    // A missing status defaults to "ok"; "partial" and "empty" still count as
    // success so degraded-but-usable results are surfaced, not retried.
    let status = artifact
        .get("status")
        .and_then(Value::as_str)
        .unwrap_or("ok");
    let success = matches!(status, "ok" | "partial" | "empty");
    let report_name = artifact
        .get("report_name")
        .and_then(Value::as_str)
        .unwrap_or("tq-lineloss-report");
    let org_label = artifact
        .get("org")
        .and_then(Value::as_object)
        .and_then(|org| org.get("label"))
        .and_then(Value::as_str)
        .unwrap_or("");
    let period_value = artifact
        .get("period")
        .and_then(Value::as_object)
        .and_then(|period| period.get("value"))
        .and_then(Value::as_str)
        .unwrap_or("");
    // Prefer the precomputed counts.rows; fall back to counting the raw rows
    // array when the script did not emit counts.
    let rows = artifact
        .get("counts")
        .and_then(Value::as_object)
        .and_then(|counts| counts.get("rows"))
        .and_then(Value::as_u64)
        .map(|value| value as usize)
        .or_else(|| artifact.get("rows").and_then(Value::as_array).map(Vec::len))
        .unwrap_or(0);
    // Keep only non-blank string reasons.
    let reasons = artifact
        .get("reasons")
        .and_then(Value::as_array)
        .map(|reasons| {
            reasons
                .iter()
                .filter_map(Value::as_str)
                .filter(|value| !value.trim().is_empty())
                .collect::<Vec<_>>()
        })
        .unwrap_or_default();

    // Assemble "name [org] [period] status=... rows=... [reasons=...]".
    let mut parts = vec![report_name.to_string()];
    if !org_label.is_empty() {
        parts.push(org_label.to_string());
    }
    if !period_value.is_empty() {
        parts.push(period_value.to_string());
    }
    parts.push(format!("status={status}"));
    parts.push(format!("rows={rows}"));
    if !reasons.is_empty() {
        parts.push(format!("reasons={}", reasons.join(",")));
    }

    DirectSubmitOutcome {
        success,
        summary: parts.join(" "),
    }
}
|
||||||
|
|
||||||
|
fn strip_exact_deterministic_suffix(raw_instruction: &str) -> Option<&str> {
|
||||||
|
let without_suffix = raw_instruction.strip_suffix(DETERMINISTIC_SUFFIX)?;
|
||||||
|
if without_suffix.ends_with('。') {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Some(without_suffix)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true when the instruction mentions any line-loss scene keyword
/// (line loss itself, or the monthly/weekly cumulative wording).
fn matches_lineloss_scene(instruction: &str) -> bool {
    ["线损", "月累计", "周累计"]
        .iter()
        .any(|keyword| instruction.contains(*keyword))
}
|
||||||
|
|
||||||
|
fn page_context_conflicts_with_lineloss(page_url: Option<&str>, page_title: Option<&str>) -> bool {
|
||||||
|
let url = page_url.unwrap_or_default().to_ascii_lowercase();
|
||||||
|
let title = page_title.unwrap_or_default();
|
||||||
|
let has_context = !url.is_empty() || !title.is_empty();
|
||||||
|
if !has_context {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
let url_matches = url.contains(LINELLOSS_HOST) || url.contains("lineloss");
|
||||||
|
let title_matches = title.contains("线损");
|
||||||
|
|
||||||
|
!(url_matches || title_matches)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Maps a period mode to the lowercase string name the skill script expects.
fn period_mode_name(mode: &crate::compat::tq_lineloss::contracts::PeriodMode) -> &'static str {
    use crate::compat::tq_lineloss::contracts::PeriodMode;

    match mode {
        PeriodMode::Month => "month",
        PeriodMode::Week => "week",
    }
}
|
||||||
|
|
||||||
|
/// Prompt returned when the instruction does not match any scene the
/// deterministic-submit path supports (currently only the station-area
/// line-loss monthly/weekly cumulative report).
fn unsupported_scene_prompt() -> DeterministicSubmitDecision {
    DeterministicSubmitDecision::Prompt {
        summary: "确定性提交当前只支持台区线损月/周累计线损率报表场景,请补充台区线损请求。"
            .to_string(),
    }
}
|
||||||
371
src/compat/direct_skill_runtime.rs
Normal file
371
src/compat/direct_skill_runtime.rs
Normal file
@@ -0,0 +1,371 @@
|
|||||||
|
use std::path::Path;
|
||||||
|
|
||||||
|
use reqwest::Url;
|
||||||
|
use serde_json::{Map, Value};
|
||||||
|
use zeroclaw::skills::{load_skills_from_directory, SkillTool};
|
||||||
|
|
||||||
|
use crate::compat::browser_script_skill_tool::execute_browser_script_tool;
|
||||||
|
use crate::compat::config_adapter::resolve_skills_dir_from_sgclaw_settings;
|
||||||
|
use crate::compat::runtime::CompatTaskContext;
|
||||||
|
use crate::config::SgClawSettings;
|
||||||
|
use crate::pipe::{BrowserPipeTool, PipeError, Transport};
|
||||||
|
|
||||||
|
/// Result of running a direct-submit browser skill: whether the run counts as
/// successful plus a compact, human-readable summary line.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DirectSubmitOutcome {
    /// True when the skill output indicates a usable result.
    pub success: bool,
    /// One-line description assembled from the skill's report artifact (or the
    /// raw output when no artifact was recognized).
    pub summary: String,
}
|
||||||
|
|
||||||
|
/// Runs the configured direct-submit browser skill for `instruction`.
///
/// Derives `expected_domain` from the task's current page URL and an explicit
/// `YYYY-MM` period from the instruction text, then invokes the skill named by
/// `settings.direct_submit_skill` (in `skill.tool` form) and interprets its
/// raw output.
///
/// Returns `PipeError::Protocol` when the skill is not configured, the page
/// URL is missing or unparseable, or no explicit period is present.
pub fn execute_direct_submit_skill<T: Transport + 'static>(
    browser_tool: BrowserPipeTool<T>,
    instruction: &str,
    task_context: &CompatTaskContext,
    workspace_root: &Path,
    settings: &SgClawSettings,
) -> Result<DirectSubmitOutcome, PipeError> {
    // Treat a blank / whitespace-only setting the same as an absent one.
    let configured_tool = settings
        .direct_submit_skill
        .as_deref()
        .map(str::trim)
        .filter(|value| !value.is_empty())
        .ok_or_else(|| PipeError::Protocol("direct submit skill is not configured".to_string()))?;
    let expected_domain = derive_expected_domain(task_context)?;
    let period = derive_period(instruction)?;

    let mut args = Map::new();
    args.insert("expected_domain".to_string(), Value::String(expected_domain));
    args.insert("period".to_string(), Value::String(period));

    let output = execute_browser_script_skill_raw_output(
        browser_tool,
        configured_tool,
        workspace_root,
        settings,
        args,
    )?;

    Ok(interpret_direct_submit_output(&output))
}
|
||||||
|
|
||||||
|
/// Resolves a `skill.tool` reference against the configured skills directory
/// and runs the matching browser-script tool, returning its raw output.
///
/// Errors when the reference is malformed, the skill or tool cannot be found,
/// or the skill has no resolvable on-disk location.
pub fn execute_browser_script_skill_raw_output<T: Transport + 'static>(
    browser_tool: BrowserPipeTool<T>,
    configured_tool: &str,
    workspace_root: &Path,
    settings: &SgClawSettings,
    args: Map<String, Value>,
) -> Result<String, PipeError> {
    let (skill_name, tool_name) = parse_configured_tool_name(configured_tool)?;
    let skills_dir = resolve_skills_dir_from_sgclaw_settings(workspace_root, settings);
    let skills = load_skills_from_directory(&skills_dir, true);
    let skill = skills
        .iter()
        .find(|skill| skill.name == skill_name)
        .ok_or_else(|| {
            PipeError::Protocol(format!(
                "direct submit skill {skill_name} was not found in {}",
                skills_dir.display()
            ))
        })?;
    let tool = skill
        .tools
        .iter()
        .find(|tool| tool.name == tool_name)
        .ok_or_else(|| {
            PipeError::Protocol(format!(
                "direct submit tool {configured_tool} was not found"
            ))
        })?;

    // The script path inside the skill is resolved relative to the directory
    // that holds the skill manifest.
    let skill_root = skill
        .location
        .as_deref()
        .and_then(Path::parent)
        .ok_or_else(|| {
            PipeError::Protocol(format!(
                "direct submit skill {skill_name} is missing a resolvable location"
            ))
        })?;

    execute_browser_script_tool_output(browser_tool, configured_tool, tool, skill_root, args)
}
|
||||||
|
|
||||||
|
/// Executes a single browser-script skill tool and returns its output string.
///
/// Validates the tool kind, strips any statically-declared `expected_domain`
/// argument from the tool definition (the caller supplies the authoritative
/// value inside `args`), and drives the async executor to completion.
fn execute_browser_script_tool_output<T: Transport + 'static>(
    browser_tool: BrowserPipeTool<T>,
    configured_tool: &str,
    tool: &SkillTool,
    skill_root: &Path,
    args: Map<String, Value>,
) -> Result<String, PipeError> {
    if tool.kind != "browser_script" {
        return Err(PipeError::Protocol(format!(
            "direct submit tool {configured_tool} must be browser_script, got {}",
            tool.kind
        )));
    }

    // Clone so the runtime `expected_domain` from `args` cannot be shadowed by
    // a static value baked into the tool definition.
    let mut tool = tool.clone();
    tool.args.remove("expected_domain");

    // NOTE(review): a fresh tokio runtime is built per invocation; fine for
    // occasional skill runs, worth revisiting if this path becomes hot.
    let runtime = tokio::runtime::Runtime::new()
        .map_err(|err| PipeError::Protocol(format!("failed to create tokio runtime: {err}")))?;
    let result = runtime
        .block_on(execute_browser_script_tool(
            &tool,
            skill_root,
            browser_tool,
            Value::Object(args),
        ))
        .map_err(|err| PipeError::Protocol(err.to_string()))?;

    if result.success {
        Ok(result.output)
    } else {
        Err(PipeError::Protocol(
            result
                .error
                .unwrap_or_else(|| "direct submit skill execution failed".to_string()),
        ))
    }
}
|
||||||
|
|
||||||
|
fn interpret_direct_submit_output(output: &str) -> DirectSubmitOutcome {
|
||||||
|
let Some(payload) = serde_json::from_str::<Value>(output).ok() else {
|
||||||
|
return DirectSubmitOutcome {
|
||||||
|
success: true,
|
||||||
|
summary: output.to_string(),
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some(artifact) = payload.as_object() else {
|
||||||
|
return DirectSubmitOutcome {
|
||||||
|
success: true,
|
||||||
|
summary: output.to_string(),
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
if artifact.get("type").and_then(Value::as_str) != Some("report-artifact") {
|
||||||
|
return DirectSubmitOutcome {
|
||||||
|
success: true,
|
||||||
|
summary: output.to_string(),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
let status = artifact
|
||||||
|
.get("status")
|
||||||
|
.and_then(Value::as_str)
|
||||||
|
.unwrap_or("ok");
|
||||||
|
let success = matches!(status, "ok" | "partial" | "empty");
|
||||||
|
let report_name = artifact
|
||||||
|
.get("report_name")
|
||||||
|
.and_then(Value::as_str)
|
||||||
|
.unwrap_or("report-artifact");
|
||||||
|
let period = artifact
|
||||||
|
.get("period")
|
||||||
|
.and_then(Value::as_str)
|
||||||
|
.unwrap_or("");
|
||||||
|
let detail_rows = count_rows(artifact.get("counts"), artifact.get("rows"), "detail_rows");
|
||||||
|
let summary_rows = count_summary_rows(artifact.get("counts"), artifact.get("sections"));
|
||||||
|
let partial_reasons = artifact
|
||||||
|
.get("partial_reasons")
|
||||||
|
.and_then(Value::as_array)
|
||||||
|
.map(|reasons| {
|
||||||
|
reasons
|
||||||
|
.iter()
|
||||||
|
.filter_map(Value::as_str)
|
||||||
|
.filter(|value| !value.trim().is_empty())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
})
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
let mut parts = vec![report_name.to_string()];
|
||||||
|
if !period.trim().is_empty() {
|
||||||
|
parts.push(period.to_string());
|
||||||
|
}
|
||||||
|
parts.push(format!("status={status}"));
|
||||||
|
parts.push(format!("detail_rows={detail_rows}"));
|
||||||
|
parts.push(format!("summary_rows={summary_rows}"));
|
||||||
|
if !partial_reasons.is_empty() {
|
||||||
|
parts.push(format!("partial_reasons={}", partial_reasons.join(",")));
|
||||||
|
}
|
||||||
|
|
||||||
|
DirectSubmitOutcome {
|
||||||
|
success,
|
||||||
|
summary: parts.join(" "),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn count_rows(counts: Option<&Value>, rows: Option<&Value>, key: &str) -> usize {
|
||||||
|
counts
|
||||||
|
.and_then(Value::as_object)
|
||||||
|
.and_then(|counts| counts.get(key))
|
||||||
|
.and_then(Value::as_u64)
|
||||||
|
.map(|count| count as usize)
|
||||||
|
.or_else(|| rows.and_then(Value::as_array).map(Vec::len))
|
||||||
|
.unwrap_or(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn count_summary_rows(counts: Option<&Value>, sections: Option<&Value>) -> usize {
|
||||||
|
counts
|
||||||
|
.and_then(Value::as_object)
|
||||||
|
.and_then(|counts| counts.get("summary_rows"))
|
||||||
|
.and_then(Value::as_u64)
|
||||||
|
.map(|count| count as usize)
|
||||||
|
.or_else(|| {
|
||||||
|
sections
|
||||||
|
.and_then(Value::as_array)
|
||||||
|
.and_then(|sections| {
|
||||||
|
sections.iter().find_map(|section| {
|
||||||
|
section
|
||||||
|
.as_object()
|
||||||
|
.and_then(|section| section.get("rows"))
|
||||||
|
.and_then(Value::as_array)
|
||||||
|
.map(Vec::len)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.unwrap_or(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_configured_tool_name(configured_tool: &str) -> Result<(&str, &str), PipeError> {
|
||||||
|
let (skill_name, tool_name) = configured_tool.split_once('.').ok_or_else(|| {
|
||||||
|
PipeError::Protocol(format!(
|
||||||
|
"direct submit skill must use skill.tool format, got {configured_tool}"
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let skill_name = skill_name.trim();
|
||||||
|
let tool_name = tool_name.trim();
|
||||||
|
if skill_name.is_empty() || tool_name.is_empty() {
|
||||||
|
return Err(PipeError::Protocol(format!(
|
||||||
|
"direct submit skill must use skill.tool format, got {configured_tool}"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
Ok((skill_name, tool_name))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Derives the lowercase host name from the task's current page URL.
///
/// Returns `PipeError::Protocol` when `page_url` is absent/blank or cannot be
/// parsed into a URL carrying a host component.
fn derive_expected_domain(task_context: &CompatTaskContext) -> Result<String, PipeError> {
    // Treat a blank URL the same as a missing one.
    let page_url = task_context
        .page_url
        .as_deref()
        .map(str::trim)
        .filter(|value| !value.is_empty())
        .ok_or_else(|| {
            PipeError::Protocol(
                "direct submit skill requires page_url so expected_domain can be derived"
                    .to_string(),
            )
        })?;

    Url::parse(page_url)
        .ok()
        .and_then(|url| url.host_str().map(|host| host.to_ascii_lowercase()))
        .ok_or_else(|| {
            PipeError::Protocol(format!(
                "direct submit skill could not derive expected_domain from page_url {page_url:?}"
            ))
        })
}
|
||||||
|
|
||||||
|
fn derive_period(instruction: &str) -> Result<String, PipeError> {
|
||||||
|
let chars = instruction.chars().collect::<Vec<_>>();
|
||||||
|
if chars.len() < 7 {
|
||||||
|
return Err(PipeError::Protocol(
|
||||||
|
"direct submit skill requires an explicit YYYY-MM period in the instruction"
|
||||||
|
.to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
for start in 0..=chars.len() - 7 {
|
||||||
|
let candidate = chars[start..start + 7].iter().collect::<String>();
|
||||||
|
if is_year_month(&candidate) {
|
||||||
|
return Ok(candidate);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(PipeError::Protocol(
|
||||||
|
"direct submit skill requires an explicit YYYY-MM period in the instruction"
|
||||||
|
.to_string(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true when `candidate` is exactly `YYYY-MM` with month 01..=12.
fn is_year_month(candidate: &str) -> bool {
    let bytes = candidate.as_bytes();
    if bytes.len() != 7 || bytes[4] != b'-' {
        return false;
    }
    let digits_ok =
        bytes[..4].iter().all(u8::is_ascii_digit) && bytes[5..].iter().all(u8::is_ascii_digit);
    if !digits_ok {
        return false;
    }
    let month = u32::from(bytes[5] - b'0') * 10 + u32::from(bytes[6] - b'0');
    (1..=12).contains(&month)
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::{
        count_rows, count_summary_rows, derive_period, interpret_direct_submit_output,
        is_year_month, parse_configured_tool_name,
    };
    use serde_json::json;

    /// The configured tool reference must contain both halves of `skill.tool`.
    #[test]
    fn parse_configured_tool_name_requires_skill_and_tool() {
        assert_eq!(
            parse_configured_tool_name("fault-details-report.collect_fault_details")
                .unwrap(),
            ("fault-details-report", "collect_fault_details")
        );
        assert!(parse_configured_tool_name("fault-details-report").is_err());
    }

    /// Periods must appear literally as `YYYY-MM`; prose month names are rejected.
    #[test]
    fn derive_period_requires_explicit_year_month() {
        assert_eq!(derive_period("收集 2026-03 故障明细").unwrap(), "2026-03");
        assert!(derive_period("收集三月故障明细").is_err());
    }

    /// The month component must fall in 01..=12.
    #[test]
    fn year_month_validation_rejects_invalid_month() {
        assert!(is_year_month("2026-12"));
        assert!(!is_year_month("2026-00"));
        assert!(!is_year_month("2026-13"));
    }

    /// "partial" still counts as success; "blocked" does not.
    #[test]
    fn interpret_direct_submit_output_maps_report_artifact_statuses() {
        let partial = interpret_direct_submit_output(
            &json!({
                "type": "report-artifact",
                "report_name": "fault-details-report",
                "period": "2026-03",
                "counts": { "detail_rows": 1, "summary_rows": 1 },
                "status": "partial",
                "partial_reasons": ["report_log_failed"]
            })
            .to_string(),
        );
        assert!(partial.success);
        assert!(partial.summary.contains("status=partial"));
        assert!(partial.summary.contains("report_log_failed"));

        let blocked = interpret_direct_submit_output(
            &json!({
                "type": "report-artifact",
                "report_name": "fault-details-report",
                "status": "blocked",
                "partial_reasons": ["selected_range_unavailable"]
            })
            .to_string(),
        );
        assert!(!blocked.success);
        assert!(blocked.summary.contains("status=blocked"));
    }

    /// When explicit counts are absent, the helpers count the raw payload rows.
    #[test]
    fn row_count_helpers_fall_back_to_payload_shapes() {
        assert_eq!(
            count_rows(None, Some(&json!([{ "qxdbh": "QX-1" }, { "qxdbh": "QX-2" }])), "detail_rows"),
            2
        );
        assert_eq!(
            count_summary_rows(None, Some(&json!([{ "name": "summary-sheet", "rows": [{ "index": 1 }] }]))),
            1
        );
    }
}
|
||||||
@@ -2,10 +2,13 @@ pub mod browser_script_skill_tool;
|
|||||||
pub mod browser_tool_adapter;
|
pub mod browser_tool_adapter;
|
||||||
pub mod config_adapter;
|
pub mod config_adapter;
|
||||||
pub mod cron_adapter;
|
pub mod cron_adapter;
|
||||||
|
pub mod deterministic_submit;
|
||||||
|
pub mod direct_skill_runtime;
|
||||||
pub mod event_bridge;
|
pub mod event_bridge;
|
||||||
pub mod memory_adapter;
|
pub mod memory_adapter;
|
||||||
pub mod openxml_office_tool;
|
pub mod openxml_office_tool;
|
||||||
pub mod orchestration;
|
pub mod orchestration;
|
||||||
pub mod runtime;
|
pub mod runtime;
|
||||||
pub mod screen_html_export_tool;
|
pub mod screen_html_export_tool;
|
||||||
|
pub mod tq_lineloss;
|
||||||
pub mod workflow_executor;
|
pub mod workflow_executor;
|
||||||
|
|||||||
@@ -4,10 +4,13 @@ use serde_json::{json, Value};
|
|||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use std::collections::BTreeSet;
|
use std::collections::BTreeSet;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
|
use std::io::{Read, Write};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::process::Command;
|
use std::process::Command;
|
||||||
use std::time::{SystemTime, UNIX_EPOCH};
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
use zeroclaw::tools::{Tool, ToolResult};
|
use zeroclaw::tools::{Tool, ToolResult};
|
||||||
|
use zip::write::FileOptions;
|
||||||
|
use zip::{CompressionMethod, ZipWriter};
|
||||||
|
|
||||||
const OPENXML_OFFICE_TOOL_NAME: &str = "openxml_office";
|
const OPENXML_OFFICE_TOOL_NAME: &str = "openxml_office";
|
||||||
const DEFAULT_SHEET_NAME: &str = "知乎热榜";
|
const DEFAULT_SHEET_NAME: &str = "知乎热榜";
|
||||||
@@ -280,13 +283,8 @@ fn run_openxml_cli(request_path: &Path) -> anyhow::Result<Value> {
|
|||||||
.parent()
|
.parent()
|
||||||
.map(|path| path.join("openxml_cli").join("Cargo.toml"))
|
.map(|path| path.join("openxml_cli").join("Cargo.toml"))
|
||||||
.ok_or_else(|| anyhow::anyhow!("failed to resolve openxml_cli manifest path"))?;
|
.ok_or_else(|| anyhow::anyhow!("failed to resolve openxml_cli manifest path"))?;
|
||||||
let binary_path = manifest_path
|
let output = if let Some(binary_path) = resolve_openxml_cli_binary(&manifest_path) {
|
||||||
.parent()
|
Command::new(binary_path)
|
||||||
.map(|path| path.join("target").join("debug").join("openxml-cli"))
|
|
||||||
.ok_or_else(|| anyhow::anyhow!("failed to resolve openxml_cli binary path"))?;
|
|
||||||
|
|
||||||
let output = if binary_path.exists() {
|
|
||||||
Command::new(&binary_path)
|
|
||||||
.args([
|
.args([
|
||||||
"template",
|
"template",
|
||||||
"render",
|
"render",
|
||||||
@@ -325,6 +323,34 @@ fn run_openxml_cli(request_path: &Path) -> anyhow::Result<Value> {
|
|||||||
Ok(serde_json::from_str(&stdout)?)
|
Ok(serde_json::from_str(&stdout)?)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn resolve_openxml_cli_binary(manifest_path: &Path) -> Option<PathBuf> {
|
||||||
|
let cli_dir = manifest_path.parent()?;
|
||||||
|
openxml_cli_candidate_paths(cli_dir)
|
||||||
|
.into_iter()
|
||||||
|
.find(|path| path.exists())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn openxml_cli_candidate_paths(cli_dir: &Path) -> Vec<PathBuf> {
|
||||||
|
let mut paths = Vec::new();
|
||||||
|
for profile in ["release", "debug"] {
|
||||||
|
paths.push(
|
||||||
|
cli_dir
|
||||||
|
.join("target")
|
||||||
|
.join(profile)
|
||||||
|
.join(openxml_cli_binary_name()),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
paths
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Platform-specific file name of the openxml-cli executable
/// (Windows builds carry the `.exe` suffix).
fn openxml_cli_binary_name() -> &'static str {
    match cfg!(windows) {
        true => "openxml-cli.exe",
        false => "openxml-cli",
    }
}
|
||||||
|
|
||||||
fn value_to_string(value: &Value) -> String {
|
fn value_to_string(value: &Value) -> String {
|
||||||
match value {
|
match value {
|
||||||
Value::String(text) => text.clone(),
|
Value::String(text) => text.clone(),
|
||||||
@@ -363,22 +389,81 @@ fn write_hotlist_template(path: &Path, row_count: usize) -> anyhow::Result<()> {
|
|||||||
fs::remove_file(path)?;
|
fs::remove_file(path)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let zip = Command::new("zip")
|
zip_directory(&build_root, path)?;
|
||||||
.current_dir(&build_root)
|
|
||||||
.args(["-q", "-r", path.to_string_lossy().as_ref(), "."])
|
|
||||||
.output()?;
|
|
||||||
if !zip.status.success() {
|
|
||||||
let stderr = String::from_utf8_lossy(&zip.stderr);
|
|
||||||
return Err(anyhow::anyhow!(format!(
|
|
||||||
"failed to create xlsx template: {}",
|
|
||||||
stderr.trim()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let _ = fs::remove_dir_all(&build_root);
|
let _ = fs::remove_dir_all(&build_root);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::{openxml_cli_binary_name, openxml_cli_candidate_paths, zip_entry_name};
    use std::path::Path;

    /// A release build, when present, is preferred over a debug build.
    #[test]
    fn openxml_cli_candidates_prefer_release_before_debug() {
        let paths = openxml_cli_candidate_paths(Path::new("E:\\coding\\codex\\openxml_cli"));
        assert_eq!(paths.len(), 2);
        assert_eq!(
            paths[0],
            Path::new("E:\\coding\\codex\\openxml_cli")
                .join("target")
                .join("release")
                .join(openxml_cli_binary_name())
        );
        assert_eq!(
            paths[1],
            Path::new("E:\\coding\\codex\\openxml_cli")
                .join("target")
                .join("debug")
                .join(openxml_cli_binary_name())
        );
    }

    /// Zip entry names must always use forward slashes, even on Windows.
    #[test]
    fn zip_entry_name_normalizes_windows_separators() {
        let rel = Path::new("xl\\worksheets\\sheet1.xml");
        assert_eq!(zip_entry_name(rel), "xl/worksheets/sheet1.xml");
    }
}
|
||||||
|
|
||||||
|
/// Zips the contents of `source_root` (recursively) into `zip_path`.
///
/// Entries are stored uncompressed (`CompressionMethod::Stored`); consumers
/// only need a valid container, not a small one.
fn zip_directory(source_root: &Path, zip_path: &Path) -> anyhow::Result<()> {
    let file = fs::File::create(zip_path)?;
    let mut writer = ZipWriter::new(file);
    let options = FileOptions::default().compression_method(CompressionMethod::Stored);
    add_directory_to_zip(&mut writer, source_root, source_root, options)?;
    writer.finish()?;
    Ok(())
}
|
||||||
|
|
||||||
|
/// Recursively adds every file under `current_dir` to `writer`, naming each
/// entry by its path relative to `source_root` (normalized to `/` separators).
///
/// Only files are written — directory entries are not, so empty directories
/// are omitted from the archive.
fn add_directory_to_zip<W: Write + std::io::Seek>(
    writer: &mut ZipWriter<W>,
    source_root: &Path,
    current_dir: &Path,
    options: FileOptions,
) -> anyhow::Result<()> {
    for entry in fs::read_dir(current_dir)? {
        let entry = entry?;
        let path = entry.path();
        if path.is_dir() {
            add_directory_to_zip(writer, source_root, &path, options)?;
            continue;
        }

        let relative_path = path.strip_prefix(source_root)?;
        writer.start_file(zip_entry_name(relative_path), options)?;
        // Reading the whole file into memory is fine: the template parts are
        // small XML documents.
        let mut input = fs::File::open(&path)?;
        let mut buffer = Vec::new();
        input.read_to_end(&mut buffer)?;
        writer.write_all(&buffer)?;
    }
    Ok(())
}
|
||||||
|
|
||||||
|
/// Converts a relative path into a zip entry name, normalizing Windows
/// backslash separators to forward slashes.
fn zip_entry_name(path: &Path) -> String {
    let lossy = path.to_string_lossy();
    lossy
        .chars()
        .map(|ch| if ch == '\\' { '/' } else { ch })
        .collect()
}
|
||||||
|
|
||||||
fn worksheet_xml(row_count: usize) -> String {
|
fn worksheet_xml(row_count: usize) -> String {
|
||||||
let mut rows = Vec::new();
|
let mut rows = Vec::new();
|
||||||
rows.push(
|
rows.push(
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
|
use crate::compat::config_adapter::resolve_skills_dir_from_sgclaw_settings;
|
||||||
use crate::compat::runtime::CompatTaskContext;
|
use crate::compat::runtime::CompatTaskContext;
|
||||||
use crate::config::SgClawSettings;
|
use crate::config::SgClawSettings;
|
||||||
use crate::pipe::{BrowserPipeTool, PipeError, Transport};
|
use crate::pipe::{BrowserPipeTool, PipeError, Transport};
|
||||||
@@ -34,6 +35,7 @@ pub fn execute_task_with_sgclaw_settings<T: Transport + 'static>(
|
|||||||
workspace_root: &Path,
|
workspace_root: &Path,
|
||||||
settings: &SgClawSettings,
|
settings: &SgClawSettings,
|
||||||
) -> Result<String, PipeError> {
|
) -> Result<String, PipeError> {
|
||||||
|
let skills_dir = resolve_skills_dir_from_sgclaw_settings(workspace_root, settings);
|
||||||
let route = crate::compat::workflow_executor::detect_route(
|
let route = crate::compat::workflow_executor::detect_route(
|
||||||
instruction,
|
instruction,
|
||||||
task_context.page_url.as_deref(),
|
task_context.page_url.as_deref(),
|
||||||
@@ -45,6 +47,7 @@ pub fn execute_task_with_sgclaw_settings<T: Transport + 'static>(
|
|||||||
transport,
|
transport,
|
||||||
&browser_tool,
|
&browser_tool,
|
||||||
workspace_root,
|
workspace_root,
|
||||||
|
&skills_dir,
|
||||||
instruction,
|
instruction,
|
||||||
task_context,
|
task_context,
|
||||||
route,
|
route,
|
||||||
@@ -70,6 +73,7 @@ pub fn execute_task_with_sgclaw_settings<T: Transport + 'static>(
|
|||||||
transport,
|
transport,
|
||||||
&browser_tool,
|
&browser_tool,
|
||||||
workspace_root,
|
workspace_root,
|
||||||
|
&skills_dir,
|
||||||
instruction,
|
instruction,
|
||||||
task_context,
|
task_context,
|
||||||
route,
|
route,
|
||||||
@@ -80,6 +84,7 @@ pub fn execute_task_with_sgclaw_settings<T: Transport + 'static>(
|
|||||||
transport,
|
transport,
|
||||||
&browser_tool,
|
&browser_tool,
|
||||||
workspace_root,
|
workspace_root,
|
||||||
|
&skills_dir,
|
||||||
instruction,
|
instruction,
|
||||||
task_context,
|
task_context,
|
||||||
route,
|
route,
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ const SCREEN_HTML_EXPORT_TOOL_NAME: &str = "screen_html_export";
|
|||||||
const DEFAULT_SCREEN_TITLE: &str = "知乎热榜主题分类分析大屏";
|
const DEFAULT_SCREEN_TITLE: &str = "知乎热榜主题分类分析大屏";
|
||||||
const TEMPLATE: &str = include_str!(concat!(
|
const TEMPLATE: &str = include_str!(concat!(
|
||||||
env!("CARGO_MANIFEST_DIR"),
|
env!("CARGO_MANIFEST_DIR"),
|
||||||
"/../skill_lib/skills/zhihu-hotlist-screen/assets/zhihu-hotlist-echarts.html"
|
"/resources/zhihu-hotlist-echarts.html"
|
||||||
));
|
));
|
||||||
const PAYLOAD_START_MARKER: &str = " const defaultPayload = ";
|
const PAYLOAD_START_MARKER: &str = " const defaultPayload = ";
|
||||||
const PAYLOAD_END_MARKER: &str = "\n\n const themeMeta = {";
|
const PAYLOAD_END_MARKER: &str = "\n\n const themeMeta = {";
|
||||||
|
|||||||
50
src/compat/tq_lineloss/contracts.rs
Normal file
50
src/compat/tq_lineloss/contracts.rs
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
/// A power-supply organization resolved from user input.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ResolvedOrg {
    /// Canonical display name of the unit.
    pub label: String,
    /// Backend organization code of the unit.
    pub code: String,
}
|
||||||
|
|
||||||
|
/// Statistics-period granularity supported by the line-loss report.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PeriodMode {
    /// Monthly cumulative line-loss rate.
    Month,
    /// Weekly cumulative line-loss rate.
    Week,
}
|
||||||
|
|
||||||
|
/// A fully resolved statistics period, ready for submission.
#[derive(Debug, Clone, PartialEq)]
pub struct ResolvedPeriod {
    /// Month/week granularity.
    pub mode: PeriodMode,
    /// Backend code for the granularity.
    pub mode_code: String,
    /// Human-readable period value (e.g. "2026-03" for a month).
    pub value: String,
    /// Backend-specific JSON payload describing the period.
    pub payload: Value,
}
|
||||||
|
|
||||||
|
/// Prompt shown when the line-loss skill matched but no power-supply company
/// could be resolved from the instruction.
pub fn missing_company_prompt() -> String {
    String::from(
        "已命中台区线损报表技能,但缺少供电单位,请补充如“兰州公司”或“城关供电分公司”。",
    )
}
|
||||||
|
|
||||||
|
/// Prompt shown when the supplied company name matches more than one unit.
pub fn ambiguous_company_prompt() -> String {
    String::from("已命中台区线损报表技能,但供电单位存在歧义,请补充更完整名称。")
}
|
||||||
|
|
||||||
|
/// Prompt shown when neither a monthly nor a weekly cumulative type was
/// recognized in the instruction.
pub fn missing_period_mode_prompt() -> String {
    String::from(
        "已命中台区线损报表技能,但未识别到月/周类型,请补充“月累计”或“周累计”。",
    )
}
|
||||||
|
|
||||||
|
/// Prompt shown when the statistics period itself is missing.
pub fn missing_period_prompt() -> String {
    String::from(
        "已命中台区线损报表技能,但缺少统计周期,请补充如“2026-03”或“2026年第12周”。",
    )
}
|
||||||
|
|
||||||
|
/// Prompt shown when the instruction mentions both monthly and weekly types.
pub fn contradictory_period_mode_prompt() -> String {
    String::from(
        "已命中台区线损报表技能,但月/周类型存在冲突,请只保留“月累计”或“周累计”之一。",
    )
}
|
||||||
|
|
||||||
|
/// Prompt shown when a weekly cumulative request omits the year.
pub fn missing_week_year_prompt() -> String {
    String::from(
        "已命中台区线损报表技能,但周累计缺少年份,请补充如“2026年第12周”。",
    )
}
|
||||||
4
src/compat/tq_lineloss/mod.rs
Normal file
4
src/compat/tq_lineloss/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
pub mod contracts;
|
||||||
|
pub mod org_resolver;
|
||||||
|
pub mod org_units;
|
||||||
|
pub mod period_resolver;
|
||||||
71
src/compat/tq_lineloss/org_resolver.rs
Normal file
71
src/compat/tq_lineloss/org_resolver.rs
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
use super::contracts::{ambiguous_company_prompt, ResolvedOrg};
|
||||||
|
use super::org_units::{OrgUnit, ORG_UNITS};
|
||||||
|
|
||||||
|
/// Removes every Unicode whitespace character so name comparisons are
/// insensitive to spacing in user input.
fn normalize(value: &str) -> String {
    // Concatenating the non-whitespace runs is equivalent to filtering out
    // each whitespace character.
    value.split_whitespace().collect()
}
|
||||||
|
|
||||||
|
fn candidate_names(unit: &'static OrgUnit) -> impl Iterator<Item = &'static str> {
|
||||||
|
std::iter::once(unit.label).chain(unit.aliases.iter().copied())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_resolved_org(unit: &OrgUnit) -> ResolvedOrg {
|
||||||
|
ResolvedOrg {
|
||||||
|
label: unit.label.to_string(),
|
||||||
|
code: unit.code.to_string(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn resolve_org(input: &str) -> Result<ResolvedOrg, String> {
|
||||||
|
let normalized = normalize(input);
|
||||||
|
if normalized.is_empty() {
|
||||||
|
return Err(super::contracts::missing_company_prompt());
|
||||||
|
}
|
||||||
|
|
||||||
|
let exact_matches: Vec<&OrgUnit> = ORG_UNITS
|
||||||
|
.iter()
|
||||||
|
.filter(|unit| candidate_names(unit).any(|name| normalize(name) == normalized))
|
||||||
|
.collect();
|
||||||
|
if exact_matches.len() == 1 {
|
||||||
|
return Ok(to_resolved_org(exact_matches[0]));
|
||||||
|
}
|
||||||
|
if exact_matches.len() > 1 {
|
||||||
|
return Err(ambiguous_company_prompt());
|
||||||
|
}
|
||||||
|
|
||||||
|
let fuzzy_matches: Vec<&OrgUnit> = ORG_UNITS
|
||||||
|
.iter()
|
||||||
|
.filter(|unit| {
|
||||||
|
candidate_names(unit).any(|name| {
|
||||||
|
let normalized_name = normalize(name);
|
||||||
|
normalized_name.contains(&normalized) || normalized.contains(&normalized_name)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
if fuzzy_matches.len() == 1 {
|
||||||
|
return Ok(to_resolved_org(fuzzy_matches[0]));
|
||||||
|
}
|
||||||
|
if fuzzy_matches.len() > 1 {
|
||||||
|
return Err(ambiguous_company_prompt());
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(super::contracts::missing_company_prompt())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn resolve_org_from_instruction(instruction: &str) -> Result<Option<ResolvedOrg>, String> {
|
||||||
|
let normalized_instruction = normalize(instruction);
|
||||||
|
let direct_matches: Vec<&OrgUnit> = ORG_UNITS
|
||||||
|
.iter()
|
||||||
|
.filter(|unit| {
|
||||||
|
candidate_names(unit).any(|name| normalized_instruction.contains(&normalize(name)))
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
if direct_matches.len() == 1 {
|
||||||
|
return Ok(Some(to_resolved_org(direct_matches[0])));
|
||||||
|
}
|
||||||
|
if direct_matches.len() > 1 {
|
||||||
|
return Err(ambiguous_company_prompt());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
33
src/compat/tq_lineloss/org_units.rs
Normal file
33
src/compat/tq_lineloss/org_units.rs
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
/// A power-supply organisation unit in the static catalogue.
pub(crate) struct OrgUnit {
    // Canonical display name of the unit.
    pub(crate) label: &'static str,
    // Backend organisation code for this unit.
    pub(crate) code: &'static str,
    // Alternative spellings that should resolve to this unit; the canonical
    // label is typically repeated here as well.
    pub(crate) aliases: &'static [&'static str],
}
|
||||||
|
|
||||||
|
/// Static catalogue of known power-supply organisation units.
///
/// NOTE(review): code lengths appear to encode hierarchy depth (5 digits for
/// municipal companies, 7 for district branches, 9 for stations) — confirm
/// against the backend's organisation scheme before relying on it.
pub(crate) const ORG_UNITS: &[OrgUnit] = &[
    OrgUnit {
        label: "国网兰州供电公司",
        code: "62401",
        aliases: &["国网兰州供电公司", "兰州供电公司", "兰州公司"],
    },
    OrgUnit {
        label: "国网天水供电公司",
        code: "62403",
        aliases: &["国网天水供电公司", "天水供电公司", "天水公司"],
    },
    OrgUnit {
        label: "城关供电分公司",
        code: "6240108",
        aliases: &["城关供电分公司", "城关分公司"],
    },
    OrgUnit {
        label: "国网榆中县供电公司",
        code: "6240121",
        aliases: &["国网榆中县供电公司", "榆中县供电公司", "榆中县公司"],
    },
    OrgUnit {
        label: "榆中城关供电所",
        code: "624012108",
        aliases: &["榆中城关供电所"],
    },
];
|
||||||
244
src/compat/tq_lineloss/period_resolver.rs
Normal file
244
src/compat/tq_lineloss/period_resolver.rs
Normal file
@@ -0,0 +1,244 @@
|
|||||||
|
use chrono::{Datelike, Duration, Local, NaiveDate};
|
||||||
|
use serde_json::json;
|
||||||
|
|
||||||
|
use super::contracts::{
|
||||||
|
contradictory_period_mode_prompt, missing_period_mode_prompt, missing_period_prompt,
|
||||||
|
missing_week_year_prompt, PeriodMode, ResolvedPeriod,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub fn resolve_period(input: &str) -> Result<ResolvedPeriod, String> {
|
||||||
|
let has_month = input.contains("月累计");
|
||||||
|
let has_week = input.contains("周累计");
|
||||||
|
|
||||||
|
match (has_month, has_week) {
|
||||||
|
(true, true) => return Err(contradictory_period_mode_prompt()),
|
||||||
|
(false, false) => return Err(missing_period_mode_prompt()),
|
||||||
|
(true, false) => resolve_month_period(input),
|
||||||
|
(false, true) => resolve_week_period(input),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn resolve_month_period(input: &str) -> Result<ResolvedPeriod, String> {
|
||||||
|
if let Some(value) = extract_year_month_dash(input) {
|
||||||
|
return Ok(ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Month,
|
||||||
|
mode_code: "1".to_string(),
|
||||||
|
value: value.clone(),
|
||||||
|
payload: json!({ "fdate": value }),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(value) = extract_year_month_cn(input) {
|
||||||
|
return Ok(ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Month,
|
||||||
|
mode_code: "1".to_string(),
|
||||||
|
value: value.clone(),
|
||||||
|
payload: json!({ "fdate": value }),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
if contains_explicit_month_period_hint(input) {
|
||||||
|
return Err(missing_period_prompt());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(default_month_period())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn resolve_week_period(input: &str) -> Result<ResolvedPeriod, String> {
|
||||||
|
if input.contains('第') && input.contains('周') && !input.contains('年') {
|
||||||
|
return Err(missing_week_year_prompt());
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some((year, week)) = extract_year_week(input) {
|
||||||
|
let Some(week_start) = week_start_date(year, week) else {
|
||||||
|
return Err(missing_period_prompt());
|
||||||
|
};
|
||||||
|
let week_end = week_start + Duration::days(6);
|
||||||
|
|
||||||
|
return Ok(ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Week,
|
||||||
|
mode_code: "2".to_string(),
|
||||||
|
value: format!("{year}-W{week:02}"),
|
||||||
|
payload: json!({
|
||||||
|
"tjzq": "week",
|
||||||
|
"level": "00",
|
||||||
|
"weekSfdate": week_start.format("%Y-%m-%d").to_string(),
|
||||||
|
"weekEfdate": week_end.format("%Y-%m-%d").to_string(),
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
if contains_explicit_week_period_hint(input) {
|
||||||
|
return Err(missing_period_prompt());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(default_week_period())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_month_period() -> ResolvedPeriod {
|
||||||
|
let today = Local::now().date_naive();
|
||||||
|
let (year, month) = if today.month() == 1 {
|
||||||
|
(today.year() - 1, 12)
|
||||||
|
} else {
|
||||||
|
(today.year(), today.month() - 1)
|
||||||
|
};
|
||||||
|
let value = format!("{year}-{month:02}");
|
||||||
|
|
||||||
|
ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Month,
|
||||||
|
mode_code: "1".to_string(),
|
||||||
|
value: value.clone(),
|
||||||
|
payload: json!({ "fdate": value }),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_week_period() -> ResolvedPeriod {
|
||||||
|
let today = Local::now().date_naive();
|
||||||
|
let month_start = today.with_day(1).expect("current month should have day 1");
|
||||||
|
let start = month_start.format("%Y-%m-%d").to_string();
|
||||||
|
let end = today.format("%Y-%m-%d").to_string();
|
||||||
|
|
||||||
|
ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Week,
|
||||||
|
mode_code: "2".to_string(),
|
||||||
|
value: format!("{start}至{end}"),
|
||||||
|
payload: json!({
|
||||||
|
"tjzq": "week",
|
||||||
|
"level": "00",
|
||||||
|
"weekSfdate": start,
|
||||||
|
"weekEfdate": end,
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// True when, after removing the "月累计" keyword itself, the text still
/// contains date-like markers (digits, '年', '月', '-') — i.e. the user
/// apparently tried to spell out a month period.
fn contains_explicit_month_period_hint(input: &str) -> bool {
    let rest = input.replace("月累计", "");
    let has_digit = rest.chars().any(|ch| ch.is_ascii_digit());
    has_digit || ['年', '月', '-'].into_iter().any(|marker| rest.contains(marker))
}
|
||||||
|
|
||||||
|
/// True when, after removing the "周累计" keyword itself, the text still
/// contains week-like markers (digits, '年', '第', '周', '-') — i.e. the user
/// apparently tried to spell out a week period.
fn contains_explicit_week_period_hint(input: &str) -> bool {
    let rest = input.replace("周累计", "");
    let has_digit = rest.chars().any(|ch| ch.is_ascii_digit());
    has_digit || ['年', '第', '周', '-'].into_iter().any(|marker| rest.contains(marker))
}
|
||||||
|
|
||||||
|
fn extract_year_month_dash(input: &str) -> Option<String> {
|
||||||
|
let chars: Vec<char> = input.chars().collect();
|
||||||
|
for window in chars.windows(7) {
|
||||||
|
let candidate: String = window.iter().collect();
|
||||||
|
if is_year_month_dash(&candidate) {
|
||||||
|
return Some(candidate);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check that `candidate` is exactly "YYYY-MM" (ASCII digits, month 01..=12).
fn is_year_month_dash(candidate: &str) -> bool {
    let bytes = candidate.as_bytes();
    if bytes.len() != 7 || bytes[4] != b'-' {
        return false;
    }
    if !bytes[0..4].iter().chain(&bytes[5..7]).all(u8::is_ascii_digit) {
        return false;
    }
    let month = (bytes[5] - b'0') * 10 + (bytes[6] - b'0');
    (1..=12).contains(&month)
}
|
||||||
|
|
||||||
|
/// Extract the first Chinese-style month "YYYY年M月" / "YYYY年MM月" from the
/// input and normalise it to "YYYY-MM". Months outside 1..=12 are skipped.
fn extract_year_month_cn(input: &str) -> Option<String> {
    let chars: Vec<char> = input.chars().collect();
    let len = chars.len();

    for start in 0..len {
        // The shortest match, "YYYY年M月", needs 7 chars from this offset.
        if start + 6 >= len {
            break;
        }
        let year_ok = chars[start..start + 4].iter().all(char::is_ascii_digit);
        if !year_ok || chars[start + 4] != '年' {
            continue;
        }

        // Collect at most two month digits after '年'.
        let mut cursor = start + 5;
        let mut digits = String::new();
        while cursor < len && digits.len() < 2 && chars[cursor].is_ascii_digit() {
            digits.push(chars[cursor]);
            cursor += 1;
        }
        if digits.is_empty() || cursor >= len || chars[cursor] != '月' {
            continue;
        }

        let month: u32 = digits.parse().ok()?;
        if (1..=12).contains(&month) {
            let year: String = chars[start..start + 4].iter().collect();
            return Some(format!("{year}-{month:02}"));
        }
    }
    None
}
|
||||||
|
|
||||||
|
/// Extract the first Chinese-style week "YYYY年第N周" / "YYYY年第NN周" from the
/// input as `(year, week)`. Weeks outside 1..=53 are skipped.
fn extract_year_week(input: &str) -> Option<(i32, u32)> {
    let chars: Vec<char> = input.chars().collect();
    let len = chars.len();

    for start in 0..len {
        // The shortest match, "YYYY年第N周", needs 8 chars from this offset.
        if start + 7 >= len {
            break;
        }
        if !chars[start..start + 4].iter().all(char::is_ascii_digit) {
            continue;
        }
        if chars[start + 4] != '年' || chars[start + 5] != '第' {
            continue;
        }

        // Collect at most two week digits after '第'.
        let mut cursor = start + 6;
        let mut digits = String::new();
        while cursor < len && digits.len() < 2 && chars[cursor].is_ascii_digit() {
            digits.push(chars[cursor]);
            cursor += 1;
        }
        if digits.is_empty() || cursor >= len || chars[cursor] != '周' {
            continue;
        }

        let year: i32 = chars[start..start + 4].iter().collect::<String>().parse().ok()?;
        let week: u32 = digits.parse().ok()?;
        if (1..=53).contains(&week) {
            return Some((year, week));
        }
    }
    None
}
|
||||||
|
|
||||||
|
fn week_start_date(year: i32, week: u32) -> Option<NaiveDate> {
|
||||||
|
let jan4 = NaiveDate::from_ymd_opt(year, 1, 4)?;
|
||||||
|
let iso_week1_monday = jan4 - Duration::days(jan4.weekday().num_days_from_monday() as i64);
|
||||||
|
let candidate = iso_week1_monday + Duration::weeks((week - 1) as i64);
|
||||||
|
let iso = candidate.iso_week();
|
||||||
|
(iso.year() == year && iso.week() == week).then_some(candidate)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::resolve_period;
    use crate::compat::tq_lineloss::contracts::PeriodMode;

    // Explicit "YYYY-MM" month periods flow straight into the payload.
    #[test]
    fn resolves_dash_month() {
        let resolved = resolve_period("月累计 2026-03").unwrap();
        assert_eq!(resolved.mode, PeriodMode::Month);
        assert_eq!(resolved.payload["fdate"], "2026-03");
    }

    // "YYYY年第N周" expands to the ISO week's Monday..Sunday date range.
    #[test]
    fn resolves_week_range() {
        let resolved = resolve_period("周累计 2026年第12周").unwrap();
        assert_eq!(resolved.mode, PeriodMode::Week);
        assert_eq!(resolved.payload["weekSfdate"], "2026-03-16");
        assert_eq!(resolved.payload["weekEfdate"], "2026-03-22");
    }
}
|
||||||
@@ -117,6 +117,7 @@ pub fn execute_route<T: Transport + 'static>(
|
|||||||
transport: &T,
|
transport: &T,
|
||||||
browser_tool: &BrowserPipeTool<T>,
|
browser_tool: &BrowserPipeTool<T>,
|
||||||
workspace_root: &Path,
|
workspace_root: &Path,
|
||||||
|
skills_dir: &Path,
|
||||||
instruction: &str,
|
instruction: &str,
|
||||||
task_context: &CompatTaskContext,
|
task_context: &CompatTaskContext,
|
||||||
route: WorkflowRoute,
|
route: WorkflowRoute,
|
||||||
@@ -124,7 +125,13 @@ pub fn execute_route<T: Transport + 'static>(
|
|||||||
match route {
|
match route {
|
||||||
WorkflowRoute::ZhihuHotlistExportXlsx | WorkflowRoute::ZhihuHotlistScreen => {
|
WorkflowRoute::ZhihuHotlistExportXlsx | WorkflowRoute::ZhihuHotlistScreen => {
|
||||||
let top_n = extract_top_n(instruction);
|
let top_n = extract_top_n(instruction);
|
||||||
let items = collect_hotlist_items(transport, browser_tool, top_n, task_context)?;
|
let items = collect_hotlist_items(
|
||||||
|
transport,
|
||||||
|
browser_tool,
|
||||||
|
skills_dir,
|
||||||
|
top_n,
|
||||||
|
task_context,
|
||||||
|
)?;
|
||||||
if items.is_empty() {
|
if items.is_empty() {
|
||||||
return Err(PipeError::Protocol(
|
return Err(PipeError::Protocol(
|
||||||
"知乎热榜采集失败:未能从页面文本中解析到热榜条目".to_string(),
|
"知乎热榜采集失败:未能从页面文本中解析到热榜条目".to_string(),
|
||||||
@@ -141,13 +148,27 @@ pub fn execute_route<T: Transport + 'static>(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
WorkflowRoute::ZhihuArticleEntry => {
|
WorkflowRoute::ZhihuArticleEntry => {
|
||||||
execute_zhihu_article_entry_route(transport, browser_tool)
|
execute_zhihu_article_entry_route(transport, browser_tool, skills_dir)
|
||||||
}
|
}
|
||||||
WorkflowRoute::ZhihuArticleDraft => {
|
WorkflowRoute::ZhihuArticleDraft => {
|
||||||
execute_zhihu_article_route(transport, browser_tool, instruction, task_context, false)
|
execute_zhihu_article_route(
|
||||||
|
transport,
|
||||||
|
browser_tool,
|
||||||
|
skills_dir,
|
||||||
|
instruction,
|
||||||
|
task_context,
|
||||||
|
false,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
WorkflowRoute::ZhihuArticlePublish => {
|
WorkflowRoute::ZhihuArticlePublish => {
|
||||||
execute_zhihu_article_route(transport, browser_tool, instruction, task_context, true)
|
execute_zhihu_article_route(
|
||||||
|
transport,
|
||||||
|
browser_tool,
|
||||||
|
skills_dir,
|
||||||
|
instruction,
|
||||||
|
task_context,
|
||||||
|
true,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -155,10 +176,13 @@ pub fn execute_route<T: Transport + 'static>(
|
|||||||
fn collect_hotlist_items<T: Transport + 'static>(
|
fn collect_hotlist_items<T: Transport + 'static>(
|
||||||
transport: &T,
|
transport: &T,
|
||||||
browser_tool: &BrowserPipeTool<T>,
|
browser_tool: &BrowserPipeTool<T>,
|
||||||
|
skills_dir: &Path,
|
||||||
top_n: usize,
|
top_n: usize,
|
||||||
task_context: &CompatTaskContext,
|
task_context: &CompatTaskContext,
|
||||||
) -> Result<Vec<HotlistItem>, PipeError> {
|
) -> Result<Vec<HotlistItem>, PipeError> {
|
||||||
if let Some(items) = ensure_hotlist_page_ready(transport, browser_tool, top_n, task_context)? {
|
if let Some(items) =
|
||||||
|
ensure_hotlist_page_ready(transport, browser_tool, skills_dir, top_n, task_context)?
|
||||||
|
{
|
||||||
return Ok(items);
|
return Ok(items);
|
||||||
}
|
}
|
||||||
transport.send(&AgentMessage::LogEntry {
|
transport.send(&AgentMessage::LogEntry {
|
||||||
@@ -167,7 +191,7 @@ fn collect_hotlist_items<T: Transport + 'static>(
|
|||||||
})?;
|
})?;
|
||||||
let response = browser_tool.invoke(
|
let response = browser_tool.invoke(
|
||||||
Action::Eval,
|
Action::Eval,
|
||||||
json!({ "script": load_hotlist_extractor_script(top_n)? }),
|
json!({ "script": load_hotlist_extractor_script(skills_dir, top_n)? }),
|
||||||
ZHIHU_DOMAIN,
|
ZHIHU_DOMAIN,
|
||||||
)?;
|
)?;
|
||||||
if !response.success {
|
if !response.success {
|
||||||
@@ -188,6 +212,7 @@ fn collect_hotlist_items<T: Transport + 'static>(
|
|||||||
fn ensure_hotlist_page_ready<T: Transport + 'static>(
|
fn ensure_hotlist_page_ready<T: Transport + 'static>(
|
||||||
transport: &T,
|
transport: &T,
|
||||||
browser_tool: &BrowserPipeTool<T>,
|
browser_tool: &BrowserPipeTool<T>,
|
||||||
|
skills_dir: &Path,
|
||||||
top_n: usize,
|
top_n: usize,
|
||||||
task_context: &CompatTaskContext,
|
task_context: &CompatTaskContext,
|
||||||
) -> Result<Option<Vec<HotlistItem>>, PipeError> {
|
) -> Result<Option<Vec<HotlistItem>>, PipeError> {
|
||||||
@@ -204,7 +229,7 @@ fn ensure_hotlist_page_ready<T: Transport + 'static>(
|
|||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
if starts_on_hotlist {
|
if starts_on_hotlist {
|
||||||
if let Some(items) = probe_hotlist_extractor(transport, browser_tool, top_n)? {
|
if let Some(items) = probe_hotlist_extractor(transport, browser_tool, skills_dir, top_n)? {
|
||||||
return Ok(Some(items));
|
return Ok(Some(items));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -215,7 +240,7 @@ fn ensure_hotlist_page_ready<T: Transport + 'static>(
|
|||||||
if poll_for_hotlist_readiness(browser_tool)? {
|
if poll_for_hotlist_readiness(browser_tool)? {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
if let Some(items) = probe_hotlist_extractor(transport, browser_tool, top_n)? {
|
if let Some(items) = probe_hotlist_extractor(transport, browser_tool, skills_dir, top_n)? {
|
||||||
return Ok(Some(items));
|
return Ok(Some(items));
|
||||||
}
|
}
|
||||||
last_error = Some(PipeError::Protocol(format!(
|
last_error = Some(PipeError::Protocol(format!(
|
||||||
@@ -230,6 +255,7 @@ fn ensure_hotlist_page_ready<T: Transport + 'static>(
|
|||||||
fn probe_hotlist_extractor<T: Transport + 'static>(
|
fn probe_hotlist_extractor<T: Transport + 'static>(
|
||||||
transport: &T,
|
transport: &T,
|
||||||
browser_tool: &BrowserPipeTool<T>,
|
browser_tool: &BrowserPipeTool<T>,
|
||||||
|
skills_dir: &Path,
|
||||||
top_n: usize,
|
top_n: usize,
|
||||||
) -> Result<Option<Vec<HotlistItem>>, PipeError> {
|
) -> Result<Option<Vec<HotlistItem>>, PipeError> {
|
||||||
transport.send(&AgentMessage::LogEntry {
|
transport.send(&AgentMessage::LogEntry {
|
||||||
@@ -238,7 +264,7 @@ fn probe_hotlist_extractor<T: Transport + 'static>(
|
|||||||
})?;
|
})?;
|
||||||
let response = browser_tool.invoke(
|
let response = browser_tool.invoke(
|
||||||
Action::Eval,
|
Action::Eval,
|
||||||
json!({ "script": load_hotlist_extractor_script(top_n)? }),
|
json!({ "script": load_hotlist_extractor_script(skills_dir, top_n)? }),
|
||||||
ZHIHU_DOMAIN,
|
ZHIHU_DOMAIN,
|
||||||
)?;
|
)?;
|
||||||
if !response.success {
|
if !response.success {
|
||||||
@@ -379,6 +405,7 @@ fn export_screen<T: Transport>(
|
|||||||
fn execute_zhihu_article_route<T: Transport + 'static>(
|
fn execute_zhihu_article_route<T: Transport + 'static>(
|
||||||
transport: &T,
|
transport: &T,
|
||||||
browser_tool: &BrowserPipeTool<T>,
|
browser_tool: &BrowserPipeTool<T>,
|
||||||
|
skills_dir: &Path,
|
||||||
instruction: &str,
|
instruction: &str,
|
||||||
task_context: &CompatTaskContext,
|
task_context: &CompatTaskContext,
|
||||||
publish_mode: bool,
|
publish_mode: bool,
|
||||||
@@ -401,6 +428,7 @@ fn execute_zhihu_article_route<T: Transport + 'static>(
|
|||||||
})?;
|
})?;
|
||||||
let creator_state = execute_browser_skill_script(
|
let creator_state = execute_browser_skill_script(
|
||||||
browser_tool,
|
browser_tool,
|
||||||
|
skills_dir,
|
||||||
"zhihu-navigate",
|
"zhihu-navigate",
|
||||||
"open_creator_entry.js",
|
"open_creator_entry.js",
|
||||||
json!({ "desired_target": "article_editor" }),
|
json!({ "desired_target": "article_editor" }),
|
||||||
@@ -424,6 +452,7 @@ fn execute_zhihu_article_route<T: Transport + 'static>(
|
|||||||
})?;
|
})?;
|
||||||
let editor_state = execute_browser_skill_script(
|
let editor_state = execute_browser_skill_script(
|
||||||
browser_tool,
|
browser_tool,
|
||||||
|
skills_dir,
|
||||||
"zhihu-write",
|
"zhihu-write",
|
||||||
"prepare_article_editor.js",
|
"prepare_article_editor.js",
|
||||||
json!({ "desired_mode": if publish_mode { "publish" } else { "draft" } }),
|
json!({ "desired_mode": if publish_mode { "publish" } else { "draft" } }),
|
||||||
@@ -446,6 +475,7 @@ fn execute_zhihu_article_route<T: Transport + 'static>(
|
|||||||
})?;
|
})?;
|
||||||
let fill_result = execute_browser_skill_script(
|
let fill_result = execute_browser_skill_script(
|
||||||
browser_tool,
|
browser_tool,
|
||||||
|
skills_dir,
|
||||||
"zhihu-write",
|
"zhihu-write",
|
||||||
"fill_article_draft.js",
|
"fill_article_draft.js",
|
||||||
json!({
|
json!({
|
||||||
@@ -482,6 +512,7 @@ fn execute_zhihu_article_route<T: Transport + 'static>(
|
|||||||
fn execute_zhihu_article_entry_route<T: Transport + 'static>(
|
fn execute_zhihu_article_entry_route<T: Transport + 'static>(
|
||||||
transport: &T,
|
transport: &T,
|
||||||
browser_tool: &BrowserPipeTool<T>,
|
browser_tool: &BrowserPipeTool<T>,
|
||||||
|
skills_dir: &Path,
|
||||||
) -> Result<String, PipeError> {
|
) -> Result<String, PipeError> {
|
||||||
navigate_zhihu_page(transport, browser_tool, ZHIHU_CREATOR_URL)?;
|
navigate_zhihu_page(transport, browser_tool, ZHIHU_CREATOR_URL)?;
|
||||||
transport.send(&AgentMessage::LogEntry {
|
transport.send(&AgentMessage::LogEntry {
|
||||||
@@ -490,6 +521,7 @@ fn execute_zhihu_article_entry_route<T: Transport + 'static>(
|
|||||||
})?;
|
})?;
|
||||||
let creator_state = execute_browser_skill_script(
|
let creator_state = execute_browser_skill_script(
|
||||||
browser_tool,
|
browser_tool,
|
||||||
|
skills_dir,
|
||||||
"zhihu-navigate",
|
"zhihu-navigate",
|
||||||
"open_creator_entry.js",
|
"open_creator_entry.js",
|
||||||
json!({ "desired_target": "article_editor" }),
|
json!({ "desired_target": "article_editor" }),
|
||||||
@@ -513,6 +545,7 @@ fn execute_zhihu_article_entry_route<T: Transport + 'static>(
|
|||||||
})?;
|
})?;
|
||||||
let editor_state = execute_browser_skill_script(
|
let editor_state = execute_browser_skill_script(
|
||||||
browser_tool,
|
browser_tool,
|
||||||
|
skills_dir,
|
||||||
"zhihu-write",
|
"zhihu-write",
|
||||||
"prepare_article_editor.js",
|
"prepare_article_editor.js",
|
||||||
json!({ "desired_mode": "draft" }),
|
json!({ "desired_mode": "draft" }),
|
||||||
@@ -532,8 +565,9 @@ fn execute_zhihu_article_entry_route<T: Transport + 'static>(
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_hotlist_extractor_script(top_n: usize) -> Result<String, PipeError> {
|
fn load_hotlist_extractor_script(skills_dir: &Path, top_n: usize) -> Result<String, PipeError> {
|
||||||
load_browser_skill_script(
|
load_browser_skill_script(
|
||||||
|
skills_dir,
|
||||||
"zhihu-hotlist",
|
"zhihu-hotlist",
|
||||||
"extract_hotlist.js",
|
"extract_hotlist.js",
|
||||||
json!({ "top_n": top_n.to_string() }),
|
json!({ "top_n": top_n.to_string() }),
|
||||||
@@ -618,12 +652,14 @@ fn navigate_zhihu_page<T: Transport + 'static>(
|
|||||||
|
|
||||||
fn execute_browser_skill_script<T: Transport + 'static>(
|
fn execute_browser_skill_script<T: Transport + 'static>(
|
||||||
browser_tool: &BrowserPipeTool<T>,
|
browser_tool: &BrowserPipeTool<T>,
|
||||||
|
skills_dir: &Path,
|
||||||
skill_name: &str,
|
skill_name: &str,
|
||||||
script_name: &str,
|
script_name: &str,
|
||||||
args: Value,
|
args: Value,
|
||||||
expected_domain: &str,
|
expected_domain: &str,
|
||||||
) -> Result<Value, PipeError> {
|
) -> Result<Value, PipeError> {
|
||||||
let wrapped_script = load_browser_skill_script(skill_name, script_name, args)?;
|
let wrapped_script =
|
||||||
|
load_browser_skill_script(skills_dir, skill_name, script_name, args)?;
|
||||||
let response = browser_tool.invoke(
|
let response = browser_tool.invoke(
|
||||||
Action::Eval,
|
Action::Eval,
|
||||||
json!({ "script": wrapped_script }),
|
json!({ "script": wrapped_script }),
|
||||||
@@ -977,15 +1013,12 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn load_browser_skill_script(
|
fn load_browser_skill_script(
|
||||||
|
skills_dir: &Path,
|
||||||
skill_name: &str,
|
skill_name: &str,
|
||||||
script_name: &str,
|
script_name: &str,
|
||||||
args: Value,
|
args: Value,
|
||||||
) -> Result<String, PipeError> {
|
) -> Result<String, PipeError> {
|
||||||
let script_path = Path::new(env!("CARGO_MANIFEST_DIR"))
|
let script_path = skills_dir
|
||||||
.parent()
|
|
||||||
.unwrap_or_else(|| Path::new(env!("CARGO_MANIFEST_DIR")))
|
|
||||||
.join("skill_lib")
|
|
||||||
.join("skills")
|
|
||||||
.join(skill_name)
|
.join(skill_name)
|
||||||
.join("scripts")
|
.join("scripts")
|
||||||
.join(script_name);
|
.join(script_name);
|
||||||
|
|||||||
@@ -10,6 +10,10 @@ pub use zeroclaw::config::SkillsPromptInjectionMode as SkillsPromptMode;
|
|||||||
const DEFAULT_DEEPSEEK_BASE_URL: &str = "https://api.deepseek.com";
|
const DEFAULT_DEEPSEEK_BASE_URL: &str = "https://api.deepseek.com";
|
||||||
const DEFAULT_DEEPSEEK_MODEL: &str = "deepseek-chat";
|
const DEFAULT_DEEPSEEK_MODEL: &str = "deepseek-chat";
|
||||||
const DEFAULT_PROVIDER_ID: &str = "deepseek";
|
const DEFAULT_PROVIDER_ID: &str = "deepseek";
|
||||||
|
const DIRECT_SUBMIT_PROVIDER_ID: &str = "direct-submit";
|
||||||
|
const DIRECT_SUBMIT_BASE_URL: &str = "http://127.0.0.1/direct-submit";
|
||||||
|
const DIRECT_SUBMIT_MODEL: &str = "direct-submit-placeholder-model";
|
||||||
|
const DIRECT_SUBMIT_API_KEY: &str = "direct-submit-placeholder-key";
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||||
pub enum PlannerMode {
|
pub enum PlannerMode {
|
||||||
@@ -66,6 +70,19 @@ impl ProviderSettings {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn direct_submit_placeholder() -> Self {
|
||||||
|
Self {
|
||||||
|
id: DIRECT_SUBMIT_PROVIDER_ID.to_string(),
|
||||||
|
provider: DIRECT_SUBMIT_PROVIDER_ID.to_string(),
|
||||||
|
api_key: DIRECT_SUBMIT_API_KEY.to_string(),
|
||||||
|
base_url: Some(DIRECT_SUBMIT_BASE_URL.to_string()),
|
||||||
|
model: DIRECT_SUBMIT_MODEL.to_string(),
|
||||||
|
api_path: None,
|
||||||
|
wire_api: None,
|
||||||
|
requires_openai_auth: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn from_raw(raw: RawProviderSettings) -> Result<Self, ConfigError> {
|
fn from_raw(raw: RawProviderSettings) -> Result<Self, ConfigError> {
|
||||||
let id = raw.id.trim().to_string();
|
let id = raw.id.trim().to_string();
|
||||||
if id.is_empty() {
|
if id.is_empty() {
|
||||||
@@ -125,6 +142,7 @@ pub struct SgClawSettings {
|
|||||||
pub provider_base_url: String,
|
pub provider_base_url: String,
|
||||||
pub provider_model: String,
|
pub provider_model: String,
|
||||||
pub skills_dir: Option<PathBuf>,
|
pub skills_dir: Option<PathBuf>,
|
||||||
|
pub direct_submit_skill: Option<String>,
|
||||||
pub skills_prompt_mode: SkillsPromptMode,
|
pub skills_prompt_mode: SkillsPromptMode,
|
||||||
pub runtime_profile: RuntimeProfile,
|
pub runtime_profile: RuntimeProfile,
|
||||||
pub planner_mode: PlannerMode,
|
pub planner_mode: PlannerMode,
|
||||||
@@ -163,6 +181,7 @@ impl SgClawSettings {
|
|||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
|
None,
|
||||||
Vec::new(),
|
Vec::new(),
|
||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
@@ -198,6 +217,7 @@ impl SgClawSettings {
|
|||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
|
None,
|
||||||
Vec::new(),
|
Vec::new(),
|
||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
@@ -278,6 +298,7 @@ impl SgClawSettings {
|
|||||||
config.base_url,
|
config.base_url,
|
||||||
config.model,
|
config.model,
|
||||||
resolve_configured_skills_dir(config.skills_dir, config_dir),
|
resolve_configured_skills_dir(config.skills_dir, config_dir),
|
||||||
|
config.direct_submit_skill,
|
||||||
skills_prompt_mode,
|
skills_prompt_mode,
|
||||||
runtime_profile,
|
runtime_profile,
|
||||||
planner_mode,
|
planner_mode,
|
||||||
@@ -294,6 +315,7 @@ impl SgClawSettings {
|
|||||||
base_url: String,
|
base_url: String,
|
||||||
model: String,
|
model: String,
|
||||||
skills_dir: Option<PathBuf>,
|
skills_dir: Option<PathBuf>,
|
||||||
|
direct_submit_skill: Option<String>,
|
||||||
skills_prompt_mode: Option<SkillsPromptMode>,
|
skills_prompt_mode: Option<SkillsPromptMode>,
|
||||||
runtime_profile: Option<RuntimeProfile>,
|
runtime_profile: Option<RuntimeProfile>,
|
||||||
planner_mode: Option<PlannerMode>,
|
planner_mode: Option<PlannerMode>,
|
||||||
@@ -302,10 +324,15 @@ impl SgClawSettings {
|
|||||||
browser_backend: Option<BrowserBackend>,
|
browser_backend: Option<BrowserBackend>,
|
||||||
office_backend: Option<OfficeBackend>,
|
office_backend: Option<OfficeBackend>,
|
||||||
) -> Result<Self, ConfigError> {
|
) -> Result<Self, ConfigError> {
|
||||||
|
let direct_submit_skill = normalize_direct_submit_skill(direct_submit_skill)?;
|
||||||
let providers = if providers.is_empty() {
|
let providers = if providers.is_empty() {
|
||||||
vec![ProviderSettings::from_legacy_deepseek(
|
if direct_submit_skill.is_some() {
|
||||||
api_key, base_url, model,
|
vec![ProviderSettings::direct_submit_placeholder()]
|
||||||
)?]
|
} else {
|
||||||
|
vec![ProviderSettings::from_legacy_deepseek(
|
||||||
|
api_key, base_url, model,
|
||||||
|
)?]
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
providers
|
providers
|
||||||
};
|
};
|
||||||
@@ -329,6 +356,7 @@ impl SgClawSettings {
|
|||||||
.unwrap_or_default(),
|
.unwrap_or_default(),
|
||||||
provider_model: active_provider_settings.model.clone(),
|
provider_model: active_provider_settings.model.clone(),
|
||||||
skills_dir,
|
skills_dir,
|
||||||
|
direct_submit_skill,
|
||||||
skills_prompt_mode: skills_prompt_mode.unwrap_or(SkillsPromptMode::Compact),
|
skills_prompt_mode: skills_prompt_mode.unwrap_or(SkillsPromptMode::Compact),
|
||||||
runtime_profile: runtime_profile.unwrap_or(RuntimeProfile::BrowserAttached),
|
runtime_profile: runtime_profile.unwrap_or(RuntimeProfile::BrowserAttached),
|
||||||
planner_mode: planner_mode.unwrap_or(PlannerMode::ZeroclawPlanFirst),
|
planner_mode: planner_mode.unwrap_or(PlannerMode::ZeroclawPlanFirst),
|
||||||
@@ -447,6 +475,29 @@ fn normalize_optional_value(raw: Option<String>) -> Option<String> {
|
|||||||
.filter(|value| !value.is_empty())
|
.filter(|value| !value.is_empty())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn normalize_direct_submit_skill(raw: Option<String>) -> Result<Option<String>, ConfigError> {
|
||||||
|
let value = normalize_optional_value(raw);
|
||||||
|
let Some(value) = value.as_deref() else {
|
||||||
|
return Ok(None);
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some((skill_name, tool_name)) = value.split_once('.') else {
|
||||||
|
return Err(ConfigError::InvalidValue(
|
||||||
|
"directSubmitSkill",
|
||||||
|
format!("must use skill.tool format, got {value}"),
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
if skill_name.trim().is_empty() || tool_name.trim().is_empty() {
|
||||||
|
return Err(ConfigError::InvalidValue(
|
||||||
|
"directSubmitSkill",
|
||||||
|
format!("must use skill.tool format, got {value}"),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Some(value.to_string()))
|
||||||
|
}
|
||||||
|
|
||||||
fn normalize_base_url(raw: String) -> String {
|
fn normalize_base_url(raw: String) -> String {
|
||||||
let trimmed = raw.trim();
|
let trimmed = raw.trim();
|
||||||
if trimmed.is_empty() {
|
if trimmed.is_empty() {
|
||||||
@@ -483,6 +534,8 @@ struct RawSgClawSettings {
|
|||||||
model: String,
|
model: String,
|
||||||
#[serde(rename = "skillsDir", alias = "skills_dir", default)]
|
#[serde(rename = "skillsDir", alias = "skills_dir", default)]
|
||||||
skills_dir: Option<String>,
|
skills_dir: Option<String>,
|
||||||
|
#[serde(rename = "directSubmitSkill", alias = "direct_submit_skill", default)]
|
||||||
|
direct_submit_skill: Option<String>,
|
||||||
#[serde(rename = "skillsPromptMode", alias = "skills_prompt_mode", default)]
|
#[serde(rename = "skillsPromptMode", alias = "skills_prompt_mode", default)]
|
||||||
skills_prompt_mode: Option<String>,
|
skills_prompt_mode: Option<String>,
|
||||||
#[serde(rename = "runtimeProfile", alias = "runtime_profile", default)]
|
#[serde(rename = "runtimeProfile", alias = "runtime_profile", default)]
|
||||||
|
|||||||
@@ -1,11 +1,19 @@
|
|||||||
mod common;
|
mod common;
|
||||||
|
|
||||||
|
use std::fs;
|
||||||
|
use std::path::PathBuf;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
use common::MockTransport;
|
use common::MockTransport;
|
||||||
use sgclaw::agent::handle_browser_message;
|
use sgclaw::agent::{
|
||||||
|
handle_browser_message, handle_browser_message_with_context, AgentRuntimeContext,
|
||||||
|
};
|
||||||
use sgclaw::agent::runtime::{browser_action_tool_definition, execute_task_with_provider};
|
use sgclaw::agent::runtime::{browser_action_tool_definition, execute_task_with_provider};
|
||||||
|
use sgclaw::compat::runtime::CompatTaskContext;
|
||||||
|
use sgclaw::config::SgClawSettings;
|
||||||
use sgclaw::llm::{ChatMessage, LlmError, LlmProvider, ToolDefinition, ToolFunctionCall};
|
use sgclaw::llm::{ChatMessage, LlmError, LlmProvider, ToolDefinition, ToolFunctionCall};
|
||||||
use sgclaw::pipe::{Action, AgentMessage, BrowserMessage, BrowserPipeTool, Timing};
|
use sgclaw::pipe::{Action, AgentMessage, BrowserMessage, BrowserPipeTool, Timing};
|
||||||
use sgclaw::security::MacPolicy;
|
use sgclaw::security::MacPolicy;
|
||||||
@@ -24,20 +32,605 @@ impl LlmProvider for FakeProvider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn test_policy() -> MacPolicy {
|
fn provider_path_test_policy() -> MacPolicy {
|
||||||
|
policy_for_domains(&["www.baidu.com"])
|
||||||
|
}
|
||||||
|
|
||||||
|
fn direct_runtime_test_policy() -> MacPolicy {
|
||||||
|
policy_for_domains(&["95598.sgcc.com.cn"])
|
||||||
|
}
|
||||||
|
|
||||||
|
fn policy_for_domains(domains: &[&str]) -> MacPolicy {
|
||||||
MacPolicy::from_json_str(
|
MacPolicy::from_json_str(
|
||||||
r#"{
|
&serde_json::json!({
|
||||||
"version": "1.0",
|
"version": "1.0",
|
||||||
"domains": { "allowed": ["www.baidu.com"] },
|
"domains": { "allowed": domains },
|
||||||
"pipe_actions": {
|
"pipe_actions": {
|
||||||
"allowed": ["click", "type", "navigate", "getText"],
|
"allowed": ["click", "type", "navigate", "getText", "eval"],
|
||||||
"blocked": []
|
"blocked": []
|
||||||
}
|
}
|
||||||
}"#,
|
})
|
||||||
|
.to_string(),
|
||||||
)
|
)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn build_direct_runtime_skill_root() -> PathBuf {
|
||||||
|
let root = std::env::temp_dir().join(format!(
|
||||||
|
"sgclaw-agent-runtime-skill-root-{}",
|
||||||
|
Uuid::new_v4()
|
||||||
|
));
|
||||||
|
let skill_dir = root.join("fault-details-report");
|
||||||
|
let script_dir = skill_dir.join("scripts");
|
||||||
|
|
||||||
|
fs::create_dir_all(&script_dir).unwrap();
|
||||||
|
fs::write(
|
||||||
|
skill_dir.join("SKILL.toml"),
|
||||||
|
r#"
|
||||||
|
[skill]
|
||||||
|
name = "fault-details-report"
|
||||||
|
description = "Collect 95598 fault detail data via browser eval."
|
||||||
|
version = "0.1.0"
|
||||||
|
|
||||||
|
[[tools]]
|
||||||
|
name = "collect_fault_details"
|
||||||
|
description = "Collect structured fault detail rows for a specific period."
|
||||||
|
kind = "browser_script"
|
||||||
|
command = "scripts/collect_fault_details.js"
|
||||||
|
|
||||||
|
[tools.args]
|
||||||
|
period = "YYYY-MM period to collect."
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
fs::write(
|
||||||
|
script_dir.join("collect_fault_details.js"),
|
||||||
|
r#"
|
||||||
|
return {
|
||||||
|
fault_type: "outage",
|
||||||
|
observed_at: `${args.period}-15 09:00`,
|
||||||
|
affected_scope: "line-7",
|
||||||
|
expected_domain: args.expected_domain,
|
||||||
|
artifact_payload: "report artifact payload"
|
||||||
|
};
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
root
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_direct_submit_config(workspace_root: &std::path::Path, skill_root: &std::path::Path) -> PathBuf {
|
||||||
|
let config_path = workspace_root.join("sgclaw_config.json");
|
||||||
|
fs::write(
|
||||||
|
&config_path,
|
||||||
|
serde_json::json!({
|
||||||
|
"providers": [],
|
||||||
|
"skillsDir": skill_root,
|
||||||
|
"directSubmitSkill": "fault-details-report.collect_fault_details"
|
||||||
|
})
|
||||||
|
.to_string(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
config_path
|
||||||
|
}
|
||||||
|
|
||||||
|
fn direct_submit_runtime_context(skill_root: &std::path::Path) -> AgentRuntimeContext {
|
||||||
|
let workspace_root = std::env::temp_dir().join(format!(
|
||||||
|
"sgclaw-agent-runtime-workspace-{}",
|
||||||
|
Uuid::new_v4()
|
||||||
|
));
|
||||||
|
fs::create_dir_all(&workspace_root).unwrap();
|
||||||
|
let config_path = write_direct_submit_config(&workspace_root, skill_root);
|
||||||
|
AgentRuntimeContext::new(Some(config_path), workspace_root)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn submit_fault_details_message() -> BrowserMessage {
|
||||||
|
BrowserMessage::SubmitTask {
|
||||||
|
instruction: "请采集 2026-03 的故障明细并返回结果".to_string(),
|
||||||
|
conversation_id: String::new(),
|
||||||
|
messages: vec![],
|
||||||
|
page_url: "https://95598.sgcc.com.cn/".to_string(),
|
||||||
|
page_title: "网上国网".to_string(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn submit_zhihu_hotlist_export_message() -> BrowserMessage {
|
||||||
|
BrowserMessage::SubmitTask {
|
||||||
|
instruction: "打开知乎热榜,获取前10条数据,并导出 Excel".to_string(),
|
||||||
|
conversation_id: String::new(),
|
||||||
|
messages: vec![],
|
||||||
|
page_url: String::new(),
|
||||||
|
page_title: String::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn direct_submit_mode_logs(sent: &[AgentMessage]) -> Vec<String> {
|
||||||
|
sent.iter()
|
||||||
|
.filter_map(|message| match message {
|
||||||
|
AgentMessage::LogEntry { level, message } if level == "mode" => Some(message.clone()),
|
||||||
|
_ => None,
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn direct_submit_completion(sent: &[AgentMessage]) -> Option<(bool, String)> {
|
||||||
|
sent.iter().find_map(|message| match message {
|
||||||
|
AgentMessage::TaskComplete { success, summary } => Some((*success, summary.clone())),
|
||||||
|
_ => None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn success_browser_response(seq: u64, data: serde_json::Value) -> BrowserMessage {
|
||||||
|
BrowserMessage::Response {
|
||||||
|
seq,
|
||||||
|
success: true,
|
||||||
|
data,
|
||||||
|
aom_snapshot: vec![],
|
||||||
|
timing: Timing {
|
||||||
|
queue_ms: 1,
|
||||||
|
exec_ms: 10,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn report_artifact_browser_response(
|
||||||
|
seq: u64,
|
||||||
|
status: &str,
|
||||||
|
partial_reasons: &[&str],
|
||||||
|
detail_rows: Vec<serde_json::Value>,
|
||||||
|
summary_rows: Vec<serde_json::Value>,
|
||||||
|
) -> BrowserMessage {
|
||||||
|
success_browser_response(
|
||||||
|
seq,
|
||||||
|
serde_json::json!({
|
||||||
|
"text": {
|
||||||
|
"type": "report-artifact",
|
||||||
|
"report_name": "fault-details-report",
|
||||||
|
"period": "2026-03",
|
||||||
|
"selected_range": {
|
||||||
|
"start": "2026-03-08 16:00:00",
|
||||||
|
"end": "2026-03-09 16:00:00"
|
||||||
|
},
|
||||||
|
"columns": ["qxdbh"],
|
||||||
|
"rows": detail_rows,
|
||||||
|
"sections": [{
|
||||||
|
"name": "summary-sheet",
|
||||||
|
"columns": ["index"],
|
||||||
|
"rows": summary_rows
|
||||||
|
}],
|
||||||
|
"counts": {
|
||||||
|
"detail_rows": detail_rows.len(),
|
||||||
|
"summary_rows": summary_rows.len()
|
||||||
|
},
|
||||||
|
"status": status,
|
||||||
|
"partial_reasons": partial_reasons,
|
||||||
|
"downstream": {
|
||||||
|
"export": {
|
||||||
|
"attempted": true,
|
||||||
|
"success": status != "blocked" && status != "error",
|
||||||
|
"path": "http://localhost/export.xlsx"
|
||||||
|
},
|
||||||
|
"report_log": {
|
||||||
|
"attempted": true,
|
||||||
|
"success": partial_reasons.is_empty(),
|
||||||
|
"error": partial_reasons
|
||||||
|
.first()
|
||||||
|
.copied()
|
||||||
|
.unwrap_or("")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn direct_submit_runtime_executes_fault_details_skill_without_provider_path() {
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![success_browser_response(
|
||||||
|
1,
|
||||||
|
serde_json::json!({
|
||||||
|
"text": {
|
||||||
|
"fault_type": "outage",
|
||||||
|
"observed_at": "2026-03-15 09:00",
|
||||||
|
"affected_scope": "line-7"
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
)]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
let mut settings = SgClawSettings::from_legacy_deepseek_fields(
|
||||||
|
"unused-key".to_string(),
|
||||||
|
"http://127.0.0.1:9".to_string(),
|
||||||
|
"unused-model".to_string(),
|
||||||
|
Some(skill_root.clone()),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
settings.direct_submit_skill = Some("fault-details-report.collect_fault_details".to_string());
|
||||||
|
|
||||||
|
let summary = sgclaw::compat::direct_skill_runtime::execute_direct_submit_skill(
|
||||||
|
browser_tool,
|
||||||
|
"请采集 2026-03 的故障明细并返回结果",
|
||||||
|
&CompatTaskContext {
|
||||||
|
page_url: Some("https://95598.sgcc.com.cn/".to_string()),
|
||||||
|
..CompatTaskContext::default()
|
||||||
|
},
|
||||||
|
PathBuf::from(env!("CARGO_MANIFEST_DIR")).as_path(),
|
||||||
|
&settings,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert!(summary.success);
|
||||||
|
assert!(summary.summary.contains("fault_type"));
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
assert!(sent.iter().all(|message| !matches!(message, AgentMessage::LogEntry { level, message } if level == "info" && message.contains("DeepSeek config loaded"))));
|
||||||
|
assert!(matches!(
|
||||||
|
&sent[0],
|
||||||
|
AgentMessage::Command {
|
||||||
|
seq,
|
||||||
|
action,
|
||||||
|
params,
|
||||||
|
security,
|
||||||
|
} if *seq == 1
|
||||||
|
&& action == &Action::Eval
|
||||||
|
&& security.expected_domain == "95598.sgcc.com.cn"
|
||||||
|
&& params["script"].as_str().is_some_and(|script| script.contains("2026-03"))
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_uses_direct_skill_mode_without_llm_configuration() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let runtime_context = direct_submit_runtime_context(&skill_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![success_browser_response(
|
||||||
|
1,
|
||||||
|
serde_json::json!({
|
||||||
|
"text": {
|
||||||
|
"fault_type": "outage",
|
||||||
|
"observed_at": "2026-03-15 09:00",
|
||||||
|
"affected_scope": "line-7",
|
||||||
|
"artifact_payload": "report artifact payload"
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
)]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_fault_details_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
let completion = direct_submit_completion(&sent).expect("task completion");
|
||||||
|
|
||||||
|
assert!(completion.0, "expected direct submit task to succeed: {sent:?}");
|
||||||
|
assert!(
|
||||||
|
completion.1.contains("report artifact payload"),
|
||||||
|
"expected report artifact payload in summary: {}",
|
||||||
|
completion.1
|
||||||
|
);
|
||||||
|
assert!(
|
||||||
|
!completion.1.contains("未配置大语言模型"),
|
||||||
|
"did not expect missing-llm summary: {}",
|
||||||
|
completion.1
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_rejects_invalid_direct_submit_skill_config_before_routing() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let workspace_root = std::env::temp_dir().join(format!(
|
||||||
|
"sgclaw-invalid-direct-submit-workspace-{}",
|
||||||
|
Uuid::new_v4()
|
||||||
|
));
|
||||||
|
fs::create_dir_all(&workspace_root).unwrap();
|
||||||
|
let config_path = workspace_root.join("sgclaw_config.json");
|
||||||
|
fs::write(
|
||||||
|
&config_path,
|
||||||
|
serde_json::json!({
|
||||||
|
"providers": [],
|
||||||
|
"skillsDir": skill_root,
|
||||||
|
"directSubmitSkill": "fault-details-report"
|
||||||
|
})
|
||||||
|
.to_string(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let runtime_context = AgentRuntimeContext::new(Some(config_path), workspace_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_fault_details_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
assert!(matches!(
|
||||||
|
sent.last(),
|
||||||
|
Some(AgentMessage::TaskComplete { success, summary })
|
||||||
|
if !success && summary.contains("skill.tool")
|
||||||
|
));
|
||||||
|
assert!(direct_submit_mode_logs(&sent).is_empty());
|
||||||
|
assert!(!sent.iter().any(|message| matches!(message, AgentMessage::Command { .. })));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_treats_partial_report_artifact_as_success_with_warning_summary() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let runtime_context = direct_submit_runtime_context(&skill_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![report_artifact_browser_response(
|
||||||
|
1,
|
||||||
|
"partial",
|
||||||
|
&["report_log_failed"],
|
||||||
|
vec![serde_json::json!({ "qxdbh": "QX-1" })],
|
||||||
|
vec![serde_json::json!({ "index": 1 })],
|
||||||
|
)]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_fault_details_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
let completion = direct_submit_completion(&sent).expect("task completion");
|
||||||
|
|
||||||
|
assert!(completion.0, "expected partial artifact to succeed: {sent:?}");
|
||||||
|
assert!(completion.1.contains("fault-details-report"));
|
||||||
|
assert!(completion.1.contains("2026-03"));
|
||||||
|
assert!(completion.1.contains("status=partial"));
|
||||||
|
assert!(completion.1.contains("detail_rows=1"));
|
||||||
|
assert!(completion.1.contains("summary_rows=1"));
|
||||||
|
assert!(completion.1.contains("report_log_failed"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_treats_empty_report_artifact_as_success() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let runtime_context = direct_submit_runtime_context(&skill_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![report_artifact_browser_response(
|
||||||
|
1,
|
||||||
|
"empty",
|
||||||
|
&[],
|
||||||
|
vec![],
|
||||||
|
vec![],
|
||||||
|
)]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_fault_details_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
let completion = direct_submit_completion(&sent).expect("task completion");
|
||||||
|
|
||||||
|
assert!(completion.0, "expected empty artifact to succeed: {sent:?}");
|
||||||
|
assert!(completion.1.contains("status=empty"));
|
||||||
|
assert!(completion.1.contains("detail_rows=0"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_treats_blocked_report_artifact_as_failure() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let runtime_context = direct_submit_runtime_context(&skill_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![report_artifact_browser_response(
|
||||||
|
1,
|
||||||
|
"blocked",
|
||||||
|
&["selected_range_unavailable"],
|
||||||
|
vec![],
|
||||||
|
vec![],
|
||||||
|
)]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_fault_details_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
let completion = direct_submit_completion(&sent).expect("task completion");
|
||||||
|
|
||||||
|
assert!(!completion.0, "expected blocked artifact to fail: {sent:?}");
|
||||||
|
assert!(completion.1.contains("status=blocked"));
|
||||||
|
assert!(completion.1.contains("selected_range_unavailable"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_treats_error_report_artifact_as_failure() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let runtime_context = direct_submit_runtime_context(&skill_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![report_artifact_browser_response(
|
||||||
|
1,
|
||||||
|
"error",
|
||||||
|
&["detail_normalization_failed"],
|
||||||
|
vec![],
|
||||||
|
vec![],
|
||||||
|
)]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_fault_details_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
let completion = direct_submit_completion(&sent).expect("task completion");
|
||||||
|
|
||||||
|
assert!(!completion.0, "expected error artifact to fail: {sent:?}");
|
||||||
|
assert!(completion.1.contains("status=error"));
|
||||||
|
assert!(completion.1.contains("detail_normalization_failed"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn submit_task_routes_zhihu_hotlist_export_before_direct_submit() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let runtime_context = direct_submit_runtime_context(&skill_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
policy_for_domains(&["www.zhihu.com"]),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_zhihu_hotlist_export_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
let mode_logs = direct_submit_mode_logs(&sent);
|
||||||
|
let completion = direct_submit_completion(&sent).expect("task completion");
|
||||||
|
|
||||||
|
assert_eq!(mode_logs, vec!["zeroclaw_process_message_primary".to_string()]);
|
||||||
|
assert!(
|
||||||
|
!completion.0,
|
||||||
|
"expected zhihu export without page context to fail before browser actions: {sent:?}"
|
||||||
|
);
|
||||||
|
assert!(
|
||||||
|
!completion
|
||||||
|
.1
|
||||||
|
.contains("direct submit skill requires page_url so expected_domain can be derived"),
|
||||||
|
"unexpected direct submit fallback: {sent:?}"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn direct_skill_mode_logs_direct_skill_primary() {
|
||||||
|
std::env::remove_var("DEEPSEEK_API_KEY");
|
||||||
|
std::env::remove_var("DEEPSEEK_BASE_URL");
|
||||||
|
std::env::remove_var("DEEPSEEK_MODEL");
|
||||||
|
|
||||||
|
let skill_root = build_direct_runtime_skill_root();
|
||||||
|
let runtime_context = direct_submit_runtime_context(&skill_root);
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![success_browser_response(
|
||||||
|
1,
|
||||||
|
serde_json::json!({
|
||||||
|
"text": {
|
||||||
|
"fault_type": "outage",
|
||||||
|
"observed_at": "2026-03-15 09:00",
|
||||||
|
"affected_scope": "line-7",
|
||||||
|
"artifact_payload": "report artifact payload"
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
)]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
direct_runtime_test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
handle_browser_message_with_context(
|
||||||
|
transport.as_ref(),
|
||||||
|
&browser_tool,
|
||||||
|
&runtime_context,
|
||||||
|
submit_fault_details_message(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
let mode_logs = direct_submit_mode_logs(&sent);
|
||||||
|
|
||||||
|
assert_eq!(mode_logs, vec!["direct_skill_primary".to_string()]);
|
||||||
|
assert!(
|
||||||
|
!mode_logs.iter().any(|mode| mode == "compat_llm_primary"),
|
||||||
|
"unexpected compat mode logs: {mode_logs:?}"
|
||||||
|
);
|
||||||
|
assert!(
|
||||||
|
!mode_logs
|
||||||
|
.iter()
|
||||||
|
.any(|mode| mode == "zeroclaw_process_message_primary"),
|
||||||
|
"unexpected zeroclaw mode logs: {mode_logs:?}"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn browser_action_tool_definition_uses_expected_name() {
|
fn browser_action_tool_definition_uses_expected_name() {
|
||||||
let tool = browser_action_tool_definition();
|
let tool = browser_action_tool_definition();
|
||||||
@@ -73,7 +666,7 @@ fn runtime_executes_provider_tool_calls_and_returns_summary() {
|
|||||||
]));
|
]));
|
||||||
let browser_tool = BrowserPipeTool::new(
|
let browser_tool = BrowserPipeTool::new(
|
||||||
transport.clone(),
|
transport.clone(),
|
||||||
test_policy(),
|
provider_path_test_policy(),
|
||||||
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
)
|
)
|
||||||
.with_response_timeout(Duration::from_secs(1));
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
@@ -148,7 +741,7 @@ fn production_submit_task_does_not_route_into_legacy_runtime_without_llm_config(
|
|||||||
let transport = Arc::new(MockTransport::new(vec![]));
|
let transport = Arc::new(MockTransport::new(vec![]));
|
||||||
let browser_tool = BrowserPipeTool::new(
|
let browser_tool = BrowserPipeTool::new(
|
||||||
transport.clone(),
|
transport.clone(),
|
||||||
test_policy(),
|
provider_path_test_policy(),
|
||||||
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
)
|
)
|
||||||
.with_response_timeout(Duration::from_secs(1));
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|||||||
@@ -9,7 +9,9 @@ use std::time::{SystemTime, UNIX_EPOCH};
|
|||||||
|
|
||||||
use common::MockTransport;
|
use common::MockTransport;
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use sgclaw::compat::browser_script_skill_tool::BrowserScriptSkillTool;
|
use sgclaw::compat::browser_script_skill_tool::{
|
||||||
|
execute_browser_script_tool, BrowserScriptSkillTool,
|
||||||
|
};
|
||||||
use sgclaw::pipe::{Action, AgentMessage, BrowserMessage, BrowserPipeTool, Timing};
|
use sgclaw::pipe::{Action, AgentMessage, BrowserMessage, BrowserPipeTool, Timing};
|
||||||
use sgclaw::security::MacPolicy;
|
use sgclaw::security::MacPolicy;
|
||||||
use zeroclaw::skills::SkillTool;
|
use zeroclaw::skills::SkillTool;
|
||||||
@@ -29,6 +31,174 @@ fn test_policy() -> MacPolicy {
|
|||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn execute_browser_script_tool_runs_packaged_script_with_expected_domain() {
|
||||||
|
let skill_dir = unique_temp_dir("sgclaw-browser-script-helper");
|
||||||
|
let scripts_dir = skill_dir.join("scripts");
|
||||||
|
fs::create_dir_all(&scripts_dir).unwrap();
|
||||||
|
fs::write(
|
||||||
|
scripts_dir.join("extract_hotlist.js"),
|
||||||
|
"return { wrapped_args: args, source: \"packaged script\" };\n",
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![BrowserMessage::Response {
|
||||||
|
seq: 1,
|
||||||
|
success: true,
|
||||||
|
data: json!({
|
||||||
|
"text": {
|
||||||
|
"sheet_name": "知乎热榜",
|
||||||
|
"rows": [[1, "标题", "10条"]]
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
aom_snapshot: vec![],
|
||||||
|
timing: Timing {
|
||||||
|
queue_ms: 1,
|
||||||
|
exec_ms: 5,
|
||||||
|
},
|
||||||
|
}]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
let mut tool_args = HashMap::new();
|
||||||
|
tool_args.insert("top_n".to_string(), "How many rows to extract".to_string());
|
||||||
|
let skill_tool = SkillTool {
|
||||||
|
name: "extract_hotlist".to_string(),
|
||||||
|
description: "Extract structured hotlist rows".to_string(),
|
||||||
|
kind: "browser_script".to_string(),
|
||||||
|
command: "scripts/extract_hotlist.js".to_string(),
|
||||||
|
args: tool_args,
|
||||||
|
};
|
||||||
|
|
||||||
|
let result = execute_browser_script_tool(
|
||||||
|
&skill_tool,
|
||||||
|
&skill_dir,
|
||||||
|
browser_tool,
|
||||||
|
json!({
|
||||||
|
"expected_domain": "https://WWW.ZHIHU.COM/hot?foo=bar",
|
||||||
|
"top_n": "10"
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
assert!(result.success);
|
||||||
|
assert_eq!(
|
||||||
|
serde_json::from_str::<serde_json::Value>(&result.output).unwrap(),
|
||||||
|
json!({
|
||||||
|
"sheet_name": "知乎热榜",
|
||||||
|
"rows": [[1, "标题", "10条"]]
|
||||||
|
})
|
||||||
|
);
|
||||||
|
assert!(matches!(
|
||||||
|
&sent[0],
|
||||||
|
AgentMessage::Command {
|
||||||
|
action,
|
||||||
|
params,
|
||||||
|
security,
|
||||||
|
..
|
||||||
|
} if action == &Action::Eval
|
||||||
|
&& security.expected_domain == "www.zhihu.com"
|
||||||
|
&& params["script"].as_str().unwrap().contains("const args = {\"top_n\":\"10\"};")
|
||||||
|
&& params["script"].as_str().unwrap().contains("source: \"packaged script\"")
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn execute_browser_script_tool_rejects_non_browser_script_tool_kind() {
|
||||||
|
let skill_dir = unique_temp_dir("sgclaw-browser-script-helper-invalid-kind");
|
||||||
|
let scripts_dir = skill_dir.join("scripts");
|
||||||
|
fs::create_dir_all(&scripts_dir).unwrap();
|
||||||
|
fs::write(scripts_dir.join("extract_hotlist.js"), "return 'unused';\n").unwrap();
|
||||||
|
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
let mut tool_args = HashMap::new();
|
||||||
|
tool_args.insert("top_n".to_string(), "How many rows to extract".to_string());
|
||||||
|
let skill_tool = SkillTool {
|
||||||
|
name: "extract_hotlist".to_string(),
|
||||||
|
description: "Extract structured hotlist rows".to_string(),
|
||||||
|
kind: "shell".to_string(),
|
||||||
|
command: "scripts/extract_hotlist.js".to_string(),
|
||||||
|
args: tool_args,
|
||||||
|
};
|
||||||
|
|
||||||
|
let result = execute_browser_script_tool(
|
||||||
|
&skill_tool,
|
||||||
|
&skill_dir,
|
||||||
|
browser_tool,
|
||||||
|
json!({
|
||||||
|
"expected_domain": "www.zhihu.com",
|
||||||
|
"top_n": "10"
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert!(!result.success);
|
||||||
|
assert_eq!(
|
||||||
|
result.error.as_deref(),
|
||||||
|
Some("browser script tool kind must be browser_script, got shell")
|
||||||
|
);
|
||||||
|
assert!(transport.sent_messages().is_empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn execute_browser_script_tool_rejects_missing_expected_domain() {
|
||||||
|
let skill_dir = unique_temp_dir("sgclaw-browser-script-helper-invalid-domain");
|
||||||
|
let scripts_dir = skill_dir.join("scripts");
|
||||||
|
fs::create_dir_all(&scripts_dir).unwrap();
|
||||||
|
fs::write(scripts_dir.join("extract_hotlist.js"), "return 'unused';\n").unwrap();
|
||||||
|
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
let mut tool_args = HashMap::new();
|
||||||
|
tool_args.insert("top_n".to_string(), "How many rows to extract".to_string());
|
||||||
|
let skill_tool = SkillTool {
|
||||||
|
name: "extract_hotlist".to_string(),
|
||||||
|
description: "Extract structured hotlist rows".to_string(),
|
||||||
|
kind: "browser_script".to_string(),
|
||||||
|
command: "scripts/extract_hotlist.js".to_string(),
|
||||||
|
args: tool_args,
|
||||||
|
};
|
||||||
|
|
||||||
|
let result = execute_browser_script_tool(
|
||||||
|
&skill_tool,
|
||||||
|
&skill_dir,
|
||||||
|
browser_tool,
|
||||||
|
json!({
|
||||||
|
"expected_domain": " ",
|
||||||
|
"top_n": "10"
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert!(!result.success);
|
||||||
|
assert_eq!(
|
||||||
|
result.error.as_deref(),
|
||||||
|
Some("expected_domain must be a non-empty string, got \" \"")
|
||||||
|
);
|
||||||
|
assert!(transport.sent_messages().is_empty());
|
||||||
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn browser_script_skill_tool_executes_packaged_script_via_eval() {
|
async fn browser_script_skill_tool_executes_packaged_script_via_eval() {
|
||||||
let skill_dir = unique_temp_dir("sgclaw-browser-script-skill");
|
let skill_dir = unique_temp_dir("sgclaw-browser-script-skill");
|
||||||
@@ -111,6 +281,202 @@ return {
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn browser_script_skill_tool_executes_script_directly_under_skill_root() {
|
||||||
|
let skill_root = unique_temp_dir("sgclaw-browser-script-direct-root");
|
||||||
|
let script_name = "extract_hotlist_direct.js";
|
||||||
|
let script_path = skill_root.join(script_name);
|
||||||
|
fs::write(
|
||||||
|
&script_path,
|
||||||
|
r#"
|
||||||
|
return {
|
||||||
|
sheet_name: "知乎热榜",
|
||||||
|
rows: [[1, "标题", args.top_n]]
|
||||||
|
};
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![BrowserMessage::Response {
|
||||||
|
seq: 1,
|
||||||
|
success: true,
|
||||||
|
data: json!({
|
||||||
|
"text": {
|
||||||
|
"sheet_name": "知乎热榜",
|
||||||
|
"rows": [[1, "标题", "10条"]]
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
aom_snapshot: vec![],
|
||||||
|
timing: Timing {
|
||||||
|
queue_ms: 1,
|
||||||
|
exec_ms: 5,
|
||||||
|
},
|
||||||
|
}]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
let mut args = HashMap::new();
|
||||||
|
args.insert("top_n".to_string(), "How many rows to extract".to_string());
|
||||||
|
let skill_tool = SkillTool {
|
||||||
|
name: "extract_hotlist".to_string(),
|
||||||
|
description: "Extract structured hotlist rows".to_string(),
|
||||||
|
kind: "browser_script".to_string(),
|
||||||
|
command: script_name.to_string(),
|
||||||
|
args,
|
||||||
|
};
|
||||||
|
let tool = BrowserScriptSkillTool::new("zhihu-hotlist", &skill_tool, &skill_root, browser_tool)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let result = tool
|
||||||
|
.execute(json!({
|
||||||
|
"expected_domain": "https://www.zhihu.com/hot",
|
||||||
|
"top_n": "10条"
|
||||||
|
}))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let sent = transport.sent_messages();
|
||||||
|
assert!(result.success);
|
||||||
|
assert_eq!(
|
||||||
|
serde_json::from_str::<serde_json::Value>(&result.output).unwrap(),
|
||||||
|
json!({
|
||||||
|
"sheet_name": "知乎热榜",
|
||||||
|
"rows": [[1, "标题", "10条"]]
|
||||||
|
})
|
||||||
|
);
|
||||||
|
assert!(matches!(
|
||||||
|
&sent[0],
|
||||||
|
AgentMessage::Command {
|
||||||
|
action,
|
||||||
|
params,
|
||||||
|
security,
|
||||||
|
..
|
||||||
|
} if action == &Action::Eval
|
||||||
|
&& security.expected_domain == "www.zhihu.com"
|
||||||
|
&& params["script"].as_str().unwrap().contains("const args = {\"top_n\":\"10条\"};")
|
||||||
|
&& params["script"].as_str().unwrap().contains("rows: [[1, \"标题\", args.top_n]]")
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn execute_browser_script_tool_preserves_structured_report_artifact_payload() {
|
||||||
|
let skill_dir = unique_temp_dir("sgclaw-browser-script-helper-report-artifact");
|
||||||
|
let scripts_dir = skill_dir.join("scripts");
|
||||||
|
fs::create_dir_all(&scripts_dir).unwrap();
|
||||||
|
fs::write(
|
||||||
|
scripts_dir.join("collect_fault_details.js"),
|
||||||
|
r#"
|
||||||
|
return {
|
||||||
|
type: "report-artifact",
|
||||||
|
report_name: "fault-details-report",
|
||||||
|
period: args.period,
|
||||||
|
selected_range: {
|
||||||
|
start: "2026-03-08 16:00:00",
|
||||||
|
end: "2026-03-09 16:00:00"
|
||||||
|
},
|
||||||
|
columns: ["qxdbh"],
|
||||||
|
rows: [{ qxdbh: "QX-1" }],
|
||||||
|
sections: [{ name: "summary-sheet", columns: ["index"], rows: [{ index: 1 }] }],
|
||||||
|
counts: { detail_rows: 1, summary_rows: 1 },
|
||||||
|
status: "partial",
|
||||||
|
partial_reasons: ["report_log_failed"],
|
||||||
|
downstream: {
|
||||||
|
export: { attempted: true, success: true, path: "http://localhost/export.xlsx" },
|
||||||
|
report_log: { attempted: true, success: false, error: "500" }
|
||||||
|
}
|
||||||
|
};
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let transport = Arc::new(MockTransport::new(vec![BrowserMessage::Response {
|
||||||
|
seq: 1,
|
||||||
|
success: true,
|
||||||
|
data: json!({
|
||||||
|
"text": {
|
||||||
|
"type": "report-artifact",
|
||||||
|
"report_name": "fault-details-report",
|
||||||
|
"period": "2026-03",
|
||||||
|
"selected_range": {
|
||||||
|
"start": "2026-03-08 16:00:00",
|
||||||
|
"end": "2026-03-09 16:00:00"
|
||||||
|
},
|
||||||
|
"columns": ["qxdbh"],
|
||||||
|
"rows": [{ "qxdbh": "QX-1" }],
|
||||||
|
"sections": [{ "name": "summary-sheet", "columns": ["index"], "rows": [{ "index": 1 }] }],
|
||||||
|
"counts": { "detail_rows": 1, "summary_rows": 1 },
|
||||||
|
"status": "partial",
|
||||||
|
"partial_reasons": ["report_log_failed"],
|
||||||
|
"downstream": {
|
||||||
|
"export": { "attempted": true, "success": true, "path": "http://localhost/export.xlsx" },
|
||||||
|
"report_log": { "attempted": true, "success": false, "error": "500" }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
aom_snapshot: vec![],
|
||||||
|
timing: Timing {
|
||||||
|
queue_ms: 1,
|
||||||
|
exec_ms: 5,
|
||||||
|
},
|
||||||
|
}]));
|
||||||
|
let browser_tool = BrowserPipeTool::new(
|
||||||
|
transport.clone(),
|
||||||
|
test_policy(),
|
||||||
|
vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||||
|
)
|
||||||
|
.with_response_timeout(Duration::from_secs(1));
|
||||||
|
|
||||||
|
let mut tool_args = HashMap::new();
|
||||||
|
tool_args.insert("period".to_string(), "YYYY-MM period to collect".to_string());
|
||||||
|
let skill_tool = SkillTool {
|
||||||
|
name: "collect_fault_details".to_string(),
|
||||||
|
description: "Collect structured fault details".to_string(),
|
||||||
|
kind: "browser_script".to_string(),
|
||||||
|
command: "scripts/collect_fault_details.js".to_string(),
|
||||||
|
args: tool_args,
|
||||||
|
};
|
||||||
|
|
||||||
|
let result = execute_browser_script_tool(
|
||||||
|
&skill_tool,
|
||||||
|
&skill_dir,
|
||||||
|
browser_tool,
|
||||||
|
json!({
|
||||||
|
"expected_domain": "https://www.zhihu.com/",
|
||||||
|
"period": "2026-03"
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert!(result.success);
|
||||||
|
assert_eq!(
|
||||||
|
serde_json::from_str::<serde_json::Value>(&result.output).unwrap(),
|
||||||
|
json!({
|
||||||
|
"type": "report-artifact",
|
||||||
|
"report_name": "fault-details-report",
|
||||||
|
"period": "2026-03",
|
||||||
|
"selected_range": {
|
||||||
|
"start": "2026-03-08 16:00:00",
|
||||||
|
"end": "2026-03-09 16:00:00"
|
||||||
|
},
|
||||||
|
"columns": ["qxdbh"],
|
||||||
|
"rows": [{ "qxdbh": "QX-1" }],
|
||||||
|
"sections": [{ "name": "summary-sheet", "columns": ["index"], "rows": [{ "index": 1 }] }],
|
||||||
|
"counts": { "detail_rows": 1, "summary_rows": 1 },
|
||||||
|
"status": "partial",
|
||||||
|
"partial_reasons": ["report_log_failed"],
|
||||||
|
"downstream": {
|
||||||
|
"export": { "attempted": true, "success": true, "path": "http://localhost/export.xlsx" },
|
||||||
|
"report_log": { "attempted": true, "success": false, "error": "500" }
|
||||||
|
}
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
fn unique_temp_dir(prefix: &str) -> PathBuf {
|
fn unique_temp_dir(prefix: &str) -> PathBuf {
|
||||||
let nanos = SystemTime::now()
|
let nanos = SystemTime::now()
|
||||||
.duration_since(UNIX_EPOCH)
|
.duration_since(UNIX_EPOCH)
|
||||||
|
|||||||
@@ -161,6 +161,60 @@ fn sgclaw_settings_default_to_compact_skills_and_browser_attached_profile() {
|
|||||||
assert_eq!(settings.skills_prompt_mode, SkillsPromptMode::Compact);
|
assert_eq!(settings.skills_prompt_mode, SkillsPromptMode::Compact);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sgclaw_settings_load_direct_submit_only_config_and_resolve_relative_skills_dir() {
|
||||||
|
let root = std::env::temp_dir().join(format!("sgclaw-direct-submit-only-config-{}", Uuid::new_v4()));
|
||||||
|
fs::create_dir_all(&root).unwrap();
|
||||||
|
let config_path = root.join("sgclaw_config.json");
|
||||||
|
|
||||||
|
fs::write(
|
||||||
|
&config_path,
|
||||||
|
r#"{
|
||||||
|
"providers": [],
|
||||||
|
"skillsDir": "skill_lib",
|
||||||
|
"directSubmitSkill": "fault-details-report.collect_fault_details"
|
||||||
|
}"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let settings = SgClawSettings::load(Some(config_path.as_path()))
|
||||||
|
.unwrap()
|
||||||
|
.expect("expected sgclaw settings from config file");
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
settings.direct_submit_skill.as_deref(),
|
||||||
|
Some("fault-details-report.collect_fault_details")
|
||||||
|
);
|
||||||
|
assert_eq!(settings.skills_dir, Some(root.join("skill_lib")));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sgclaw_settings_reject_invalid_direct_submit_skill_format() {
|
||||||
|
let root = std::env::temp_dir().join(format!(
|
||||||
|
"sgclaw-invalid-direct-submit-skill-{}",
|
||||||
|
Uuid::new_v4()
|
||||||
|
));
|
||||||
|
fs::create_dir_all(&root).unwrap();
|
||||||
|
let config_path = root.join("sgclaw_config.json");
|
||||||
|
|
||||||
|
fs::write(
|
||||||
|
&config_path,
|
||||||
|
r#"{
|
||||||
|
"providers": [],
|
||||||
|
"skillsDir": "skill_lib",
|
||||||
|
"directSubmitSkill": "fault-details-report"
|
||||||
|
}"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let err = SgClawSettings::load(Some(config_path.as_path()))
|
||||||
|
.expect_err("expected invalid directSubmitSkill format");
|
||||||
|
let message = err.to_string();
|
||||||
|
|
||||||
|
assert!(message.contains("directSubmitSkill"));
|
||||||
|
assert!(message.contains("skill.tool"));
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn sgclaw_settings_load_new_runtime_fields_from_browser_config() {
|
fn sgclaw_settings_load_new_runtime_fields_from_browser_config() {
|
||||||
let root = std::env::temp_dir().join(format!("sgclaw-runtime-config-{}", Uuid::new_v4()));
|
let root = std::env::temp_dir().join(format!("sgclaw-runtime-config-{}", Uuid::new_v4()));
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::process::Command as ProcessCommand;
|
use std::{fs::File, io::Read};
|
||||||
|
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use sgclaw::compat::openxml_office_tool::OpenXmlOfficeTool;
|
use sgclaw::compat::openxml_office_tool::OpenXmlOfficeTool;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
use zeroclaw::tools::Tool;
|
use zeroclaw::tools::Tool;
|
||||||
|
use zip::ZipArchive;
|
||||||
|
|
||||||
fn temp_workspace_root() -> PathBuf {
|
fn temp_workspace_root() -> PathBuf {
|
||||||
let root = std::env::temp_dir().join(format!("sgclaw-openxml-office-{}", Uuid::new_v4()));
|
let root = std::env::temp_dir().join(format!("sgclaw-openxml-office-{}", Uuid::new_v4()));
|
||||||
@@ -12,6 +13,15 @@ fn temp_workspace_root() -> PathBuf {
|
|||||||
root
|
root
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn read_sheet_xml(output_path: &std::path::Path) -> String {
|
||||||
|
let file = File::open(output_path).unwrap();
|
||||||
|
let mut archive = ZipArchive::new(file).unwrap();
|
||||||
|
let mut entry = archive.by_name("xl/worksheets/sheet1.xml").unwrap();
|
||||||
|
let mut xml = String::new();
|
||||||
|
entry.read_to_string(&mut xml).unwrap();
|
||||||
|
xml
|
||||||
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn openxml_office_tool_renders_hotlist_xlsx_from_rows() {
|
async fn openxml_office_tool_renders_hotlist_xlsx_from_rows() {
|
||||||
let workspace_root = temp_workspace_root();
|
let workspace_root = temp_workspace_root();
|
||||||
@@ -33,19 +43,12 @@ async fn openxml_office_tool_renders_hotlist_xlsx_from_rows() {
|
|||||||
|
|
||||||
assert!(result.success, "{result:?}");
|
assert!(result.success, "{result:?}");
|
||||||
assert!(output_path.exists());
|
assert!(output_path.exists());
|
||||||
assert!(result.output.contains(output_path.to_str().unwrap()));
|
let output_json: serde_json::Value = serde_json::from_str(&result.output).unwrap();
|
||||||
|
assert_eq!(output_json["row_count"], 2);
|
||||||
|
assert_eq!(output_json["renderer"], "openxml_office");
|
||||||
|
assert!(!output_json["output_path"].as_str().unwrap().is_empty());
|
||||||
|
|
||||||
let unzip = ProcessCommand::new("unzip")
|
let xml = read_sheet_xml(&output_path);
|
||||||
.args([
|
|
||||||
"-p",
|
|
||||||
output_path.to_str().unwrap(),
|
|
||||||
"xl/worksheets/sheet1.xml",
|
|
||||||
])
|
|
||||||
.output()
|
|
||||||
.unwrap();
|
|
||||||
assert!(unzip.status.success());
|
|
||||||
|
|
||||||
let xml = String::from_utf8(unzip.stdout).unwrap();
|
|
||||||
assert!(xml.contains("问题一"));
|
assert!(xml.contains("问题一"));
|
||||||
assert!(xml.contains("344万"));
|
assert!(xml.contains("344万"));
|
||||||
assert!(xml.contains("问题二"));
|
assert!(xml.contains("问题二"));
|
||||||
@@ -74,17 +77,7 @@ async fn openxml_office_tool_accepts_reordered_columns_when_rows_are_structured(
|
|||||||
assert!(result.success, "{result:?}");
|
assert!(result.success, "{result:?}");
|
||||||
assert!(output_path.exists());
|
assert!(output_path.exists());
|
||||||
|
|
||||||
let unzip = ProcessCommand::new("unzip")
|
let xml = read_sheet_xml(&output_path);
|
||||||
.args([
|
|
||||||
"-p",
|
|
||||||
output_path.to_str().unwrap(),
|
|
||||||
"xl/worksheets/sheet1.xml",
|
|
||||||
])
|
|
||||||
.output()
|
|
||||||
.unwrap();
|
|
||||||
assert!(unzip.status.success());
|
|
||||||
|
|
||||||
let xml = String::from_utf8(unzip.stdout).unwrap();
|
|
||||||
assert!(xml.contains("问题一"));
|
assert!(xml.contains("问题一"));
|
||||||
assert!(xml.contains("344万"));
|
assert!(xml.contains("344万"));
|
||||||
assert!(xml.contains(">1<"));
|
assert!(xml.contains(">1<"));
|
||||||
@@ -112,17 +105,7 @@ async fn openxml_office_tool_accepts_localized_hotlist_column_aliases() {
|
|||||||
assert!(result.success, "{result:?}");
|
assert!(result.success, "{result:?}");
|
||||||
assert!(output_path.exists());
|
assert!(output_path.exists());
|
||||||
|
|
||||||
let unzip = ProcessCommand::new("unzip")
|
let xml = read_sheet_xml(&output_path);
|
||||||
.args([
|
|
||||||
"-p",
|
|
||||||
output_path.to_str().unwrap(),
|
|
||||||
"xl/worksheets/sheet1.xml",
|
|
||||||
])
|
|
||||||
.output()
|
|
||||||
.unwrap();
|
|
||||||
assert!(unzip.status.success());
|
|
||||||
|
|
||||||
let xml = String::from_utf8(unzip.stdout).unwrap();
|
|
||||||
assert!(xml.contains("问题一"));
|
assert!(xml.contains("问题一"));
|
||||||
assert!(xml.contains("344万"));
|
assert!(xml.contains("344万"));
|
||||||
assert!(xml.contains(">1<"));
|
assert!(xml.contains(">1<"));
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -43,6 +43,7 @@ async fn screen_html_export_tool_renders_dashboard_html_with_presentation_contra
|
|||||||
.as_str()
|
.as_str()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.starts_with("file://"));
|
.starts_with("file://"));
|
||||||
|
assert!(html.contains("知乎热榜态势驾驶舱"));
|
||||||
assert!(html.contains("snapshot-20260329"));
|
assert!(html.contains("snapshot-20260329"));
|
||||||
assert!(html.contains("问题一"));
|
assert!(html.contains("问题一"));
|
||||||
assert!(html.contains("344万"));
|
assert!(html.contains("344万"));
|
||||||
|
|||||||
452
tests/deterministic_submit_test.rs
Normal file
452
tests/deterministic_submit_test.rs
Normal file
@@ -0,0 +1,452 @@
|
|||||||
|
mod common;
|
||||||
|
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use chrono::{Datelike, Local};
|
||||||
|
use zeroclaw::skills::load_skills_from_directory;
|
||||||
|
|
||||||
|
use sgclaw::compat::deterministic_submit::{
|
||||||
|
decide_deterministic_submit, DeterministicSubmitDecision,
|
||||||
|
};
|
||||||
|
use sgclaw::compat::tq_lineloss::{
|
||||||
|
contracts::{PeriodMode, ResolvedOrg, ResolvedPeriod},
|
||||||
|
org_resolver::resolve_org,
|
||||||
|
period_resolver::resolve_period,
|
||||||
|
};
|
||||||
|
use sgclaw::runtime::is_zhihu_hotlist_task;
|
||||||
|
|
||||||
|
fn expected_default_month() -> String {
|
||||||
|
let today = Local::now().date_naive();
|
||||||
|
let (year, month) = if today.month() == 1 {
|
||||||
|
(today.year() - 1, 12)
|
||||||
|
} else {
|
||||||
|
(today.year(), today.month() - 1)
|
||||||
|
};
|
||||||
|
format!("{year}-{month:02}")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn expected_default_week_range() -> (String, String, String) {
|
||||||
|
let today = Local::now().date_naive();
|
||||||
|
let month_start = today.with_day(1).expect("current month should have day 1");
|
||||||
|
let start = month_start.format("%Y-%m-%d").to_string();
|
||||||
|
let end = today.format("%Y-%m-%d").to_string();
|
||||||
|
(format!("{start}至{end}"), start, end)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_discovers_tq_lineloss_skill_contract() {
|
||||||
|
let skills_root = PathBuf::from("D:/data/ideaSpace/rust/sgClaw/claw/claw/skills/skill_staging/skills");
|
||||||
|
let skills = load_skills_from_directory(&skills_root, true);
|
||||||
|
|
||||||
|
let skill = skills
|
||||||
|
.iter()
|
||||||
|
.find(|skill| skill.name == "tq-lineloss-report")
|
||||||
|
.expect("tq-lineloss-report should be discoverable from staged skills root");
|
||||||
|
|
||||||
|
let tool = skill
|
||||||
|
.tools
|
||||||
|
.iter()
|
||||||
|
.find(|tool| tool.name == "collect_lineloss")
|
||||||
|
.expect("collect_lineloss tool should be discoverable");
|
||||||
|
|
||||||
|
assert_eq!(tool.kind, "browser_script");
|
||||||
|
assert_eq!(tool.command, "scripts/collect_lineloss.js");
|
||||||
|
|
||||||
|
let required_args = [
|
||||||
|
"expected_domain",
|
||||||
|
"org_label",
|
||||||
|
"org_code",
|
||||||
|
"period_mode",
|
||||||
|
"period_mode_code",
|
||||||
|
"period_value",
|
||||||
|
"period_payload",
|
||||||
|
];
|
||||||
|
|
||||||
|
for arg in required_args {
|
||||||
|
assert!(
|
||||||
|
tool.args.contains_key(arg),
|
||||||
|
"expected required arg {arg} in tq-lineloss-report.collect_lineloss"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(tool.args.len(), required_args.len());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_requires_exact_suffix() {
|
||||||
|
assert!(matches!(
|
||||||
|
decide_deterministic_submit("兰州公司 月累计 2026-03。。。", None, None),
|
||||||
|
DeterministicSubmitDecision::Execute(_)
|
||||||
|
));
|
||||||
|
|
||||||
|
assert!(matches!(
|
||||||
|
decide_deterministic_submit("兰州公司 月累计 2026-03", None, None),
|
||||||
|
DeterministicSubmitDecision::NotDeterministic
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_nonmatch_returns_supported_scene_message() {
|
||||||
|
let decision = decide_deterministic_submit("帮我打开百度。。。", None, None);
|
||||||
|
|
||||||
|
match decision {
|
||||||
|
DeterministicSubmitDecision::Prompt { summary } => {
|
||||||
|
assert!(summary.contains("台区线损") || summary.contains("支持场景"));
|
||||||
|
}
|
||||||
|
other => panic!("expected deterministic prompt for unsupported scene, got {other:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_rejects_page_context_mismatch() {
|
||||||
|
let decision = decide_deterministic_submit(
|
||||||
|
"兰州公司 月累计 2026-03。。。",
|
||||||
|
Some("https://www.zhihu.com/hot"),
|
||||||
|
Some("知乎热榜"),
|
||||||
|
);
|
||||||
|
|
||||||
|
match decision {
|
||||||
|
DeterministicSubmitDecision::Prompt { summary } => {
|
||||||
|
assert!(summary.contains("台区线损") || summary.contains("页面") || summary.contains("不匹配"));
|
||||||
|
}
|
||||||
|
other => panic!("expected deterministic mismatch prompt, got {other:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn zhihu_hotlist_request_without_suffix_keeps_existing_route() {
|
||||||
|
assert!(is_zhihu_hotlist_task(
|
||||||
|
"打开知乎热榜",
|
||||||
|
Some("https://www.zhihu.com/hot"),
|
||||||
|
Some("知乎热榜")
|
||||||
|
));
|
||||||
|
|
||||||
|
assert!(matches!(
|
||||||
|
decide_deterministic_submit(
|
||||||
|
"打开知乎热榜",
|
||||||
|
Some("https://www.zhihu.com/hot"),
|
||||||
|
Some("知乎热榜")
|
||||||
|
),
|
||||||
|
DeterministicSubmitDecision::NotDeterministic
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_rejects_non_exact_suffix_variants() {
|
||||||
|
for instruction in [
|
||||||
|
"兰州公司 月累计 2026-03...",
|
||||||
|
"兰州公司 月累计 2026-03。。。。",
|
||||||
|
"兰州公司。。。月累计 2026-03",
|
||||||
|
"兰州公司 月累计 2026-03。。。 ",
|
||||||
|
] {
|
||||||
|
assert!(matches!(
|
||||||
|
decide_deterministic_submit(instruction, None, None),
|
||||||
|
DeterministicSubmitDecision::NotDeterministic
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_org_resolver_matches_city_alias() {
|
||||||
|
assert_eq!(
|
||||||
|
resolve_org("兰州公司").unwrap(),
|
||||||
|
ResolvedOrg {
|
||||||
|
label: "国网兰州供电公司".to_string(),
|
||||||
|
code: "62401".to_string(),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
resolve_org("天水公司").unwrap(),
|
||||||
|
ResolvedOrg {
|
||||||
|
label: "国网天水供电公司".to_string(),
|
||||||
|
code: "62403".to_string(),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_org_resolver_matches_county_alias() {
|
||||||
|
assert_eq!(
|
||||||
|
resolve_org("榆中县公司").unwrap(),
|
||||||
|
ResolvedOrg {
|
||||||
|
label: "国网榆中县供电公司".to_string(),
|
||||||
|
code: "6240121".to_string(),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
resolve_org("城关供电分公司").unwrap(),
|
||||||
|
ResolvedOrg {
|
||||||
|
label: "城关供电分公司".to_string(),
|
||||||
|
code: "6240108".to_string(),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_org_resolver_prompts_on_ambiguity() {
|
||||||
|
let summary = resolve_org("城关")
|
||||||
|
.expect_err("ambiguous alias should prompt instead of guessing");
|
||||||
|
|
||||||
|
assert!(summary.contains("供电单位存在歧义") || summary.contains("更完整名称"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_submit_lineloss_missing_company_prompts() {
|
||||||
|
let decision = decide_deterministic_submit("月累计 2026-03。。。", None, None);
|
||||||
|
|
||||||
|
match decision {
|
||||||
|
DeterministicSubmitDecision::Prompt { summary } => {
|
||||||
|
assert!(summary.contains("缺少供电单位") || summary.contains("兰州公司"));
|
||||||
|
}
|
||||||
|
other => panic!("expected missing-company prompt, got {other:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_parses_month_text() {
|
||||||
|
assert_eq!(
|
||||||
|
resolve_period("月累计 2026-03").unwrap(),
|
||||||
|
ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Month,
|
||||||
|
mode_code: "1".to_string(),
|
||||||
|
value: "2026-03".to_string(),
|
||||||
|
payload: serde_json::json!({
|
||||||
|
"fdate": "2026-03",
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
resolve_period("月累计 2026年3月").unwrap(),
|
||||||
|
ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Month,
|
||||||
|
mode_code: "1".to_string(),
|
||||||
|
value: "2026-03".to_string(),
|
||||||
|
payload: serde_json::json!({
|
||||||
|
"fdate": "2026-03",
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_parses_week_text() {
|
||||||
|
let resolved = resolve_period("周累计 2026年第12周").unwrap();
|
||||||
|
|
||||||
|
assert_eq!(resolved.mode, PeriodMode::Week);
|
||||||
|
assert_eq!(resolved.mode_code, "2");
|
||||||
|
assert_eq!(resolved.value, "2026-W12");
|
||||||
|
assert_eq!(resolved.payload["tjzq"], "week");
|
||||||
|
assert_eq!(resolved.payload["level"], "00");
|
||||||
|
assert_eq!(resolved.payload["weekSfdate"], "2026-03-16");
|
||||||
|
assert_eq!(resolved.payload["weekEfdate"], "2026-03-22");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_defaults_month_period_from_page_semantics() {
|
||||||
|
let expected_month = expected_default_month();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
resolve_period("兰州公司 月累计").unwrap(),
|
||||||
|
ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Month,
|
||||||
|
mode_code: "1".to_string(),
|
||||||
|
value: expected_month.clone(),
|
||||||
|
payload: serde_json::json!({
|
||||||
|
"fdate": expected_month,
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_defaults_week_period_from_page_semantics() {
|
||||||
|
let (expected_value, expected_start, expected_end) = expected_default_week_range();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
resolve_period("兰州公司 周累计").unwrap(),
|
||||||
|
ResolvedPeriod {
|
||||||
|
mode: PeriodMode::Week,
|
||||||
|
mode_code: "2".to_string(),
|
||||||
|
value: expected_value,
|
||||||
|
payload: serde_json::json!({
|
||||||
|
"tjzq": "week",
|
||||||
|
"level": "00",
|
||||||
|
"weekSfdate": expected_start,
|
||||||
|
"weekEfdate": expected_end,
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_prompts_for_missing_year_on_week() {
|
||||||
|
let summary = resolve_period("周累计 第12周")
|
||||||
|
.expect_err("bare week should prompt for year instead of guessing");
|
||||||
|
|
||||||
|
assert!(summary.contains("年份") || summary.contains("第12周"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_rejects_contradictory_mode() {
|
||||||
|
let summary = resolve_period("月累计 周累计 2026-03")
|
||||||
|
.expect_err("contradictory month/week intent should not execute");
|
||||||
|
|
||||||
|
assert!(summary.contains("月/周") || summary.contains("冲突") || summary.contains("歧义"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_prompts_for_missing_mode() {
|
||||||
|
let summary = resolve_period("兰州公司 2026-03")
|
||||||
|
.expect_err("missing mode should prompt instead of guessing");
|
||||||
|
|
||||||
|
assert!(summary.contains("月/周类型") || summary.contains("月累计") || summary.contains("周累计"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn lineloss_period_resolver_prompts_for_missing_period() {
|
||||||
|
let summary = resolve_period("兰州公司 月累计")
|
||||||
|
.expect_err("missing period should prompt instead of guessing");
|
||||||
|
|
||||||
|
assert!(summary.contains("周期") || summary.contains("时间") || summary.contains("2026-03"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_lineloss_execution_plan_contains_canonical_args() {
|
||||||
|
let decision = decide_deterministic_submit(
|
||||||
|
"兰州公司 月累计 2026-03。。。",
|
||||||
|
Some("http://20.76.57.61:8080/#/lineloss"),
|
||||||
|
Some("台区线损报表"),
|
||||||
|
);
|
||||||
|
|
||||||
|
match decision {
|
||||||
|
DeterministicSubmitDecision::Execute(plan) => {
|
||||||
|
let debug = format!("{plan:?}");
|
||||||
|
assert!(debug.contains("国网兰州供电公司"), "missing canonical org label: {debug}");
|
||||||
|
assert!(debug.contains("62401"), "missing canonical org code: {debug}");
|
||||||
|
assert!(debug.contains("2026-03"), "missing canonical period value: {debug}");
|
||||||
|
assert!(debug.contains("month") || debug.contains("Month"), "missing canonical month mode: {debug}");
|
||||||
|
assert!(debug.contains("fdate"), "missing canonical month payload: {debug}");
|
||||||
|
}
|
||||||
|
other => panic!("expected deterministic execute plan, got {other:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_lineloss_missing_period_uses_default_month_execution_plan() {
|
||||||
|
let expected_month = expected_default_month();
|
||||||
|
let decision = decide_deterministic_submit("兰州公司 月累计。。。", None, None);
|
||||||
|
|
||||||
|
match decision {
|
||||||
|
DeterministicSubmitDecision::Execute(plan) => {
|
||||||
|
assert_eq!(plan.period_mode, "month");
|
||||||
|
assert_eq!(plan.period_mode_code, "1");
|
||||||
|
assert_eq!(plan.period_value, expected_month);
|
||||||
|
assert!(plan.period_payload.contains("fdate"));
|
||||||
|
}
|
||||||
|
other => panic!("expected missing month period to default into execution, got {other:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_lineloss_missing_period_uses_default_week_execution_plan() {
|
||||||
|
let (expected_value, expected_start, expected_end) = expected_default_week_range();
|
||||||
|
let decision = decide_deterministic_submit("兰州公司 周累计。。。", None, None);
|
||||||
|
|
||||||
|
match decision {
|
||||||
|
DeterministicSubmitDecision::Execute(plan) => {
|
||||||
|
assert_eq!(plan.period_mode, "week");
|
||||||
|
assert_eq!(plan.period_mode_code, "2");
|
||||||
|
assert_eq!(plan.period_value, expected_value);
|
||||||
|
assert!(plan.period_payload.contains(&expected_start));
|
||||||
|
assert!(plan.period_payload.contains(&expected_end));
|
||||||
|
}
|
||||||
|
other => panic!("expected missing week period to default into execution, got {other:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_lineloss_partial_artifact_summary_contract_is_locked() {
|
||||||
|
let artifact = serde_json::json!({
|
||||||
|
"type": "report-artifact",
|
||||||
|
"report_name": "tq-lineloss-report",
|
||||||
|
"status": "partial",
|
||||||
|
"org": {
|
||||||
|
"label": "国网兰州供电公司",
|
||||||
|
"code": "62401"
|
||||||
|
},
|
||||||
|
"period": {
|
||||||
|
"mode": "month",
|
||||||
|
"mode_code": "1",
|
||||||
|
"value": "2026-03",
|
||||||
|
"payload": {
|
||||||
|
"fdate": "2026-03"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"columns": ["ORG_NAME", "LINE_LOSS_RATE"],
|
||||||
|
"rows": [
|
||||||
|
{ "ORG_NAME": "国网兰州供电公司", "LINE_LOSS_RATE": "3.21" }
|
||||||
|
],
|
||||||
|
"counts": {
|
||||||
|
"rows": 1
|
||||||
|
},
|
||||||
|
"export": {
|
||||||
|
"attempted": true,
|
||||||
|
"status": "failed",
|
||||||
|
"message": "report_log_failed"
|
||||||
|
},
|
||||||
|
"reasons": ["report_log_failed"]
|
||||||
|
});
|
||||||
|
|
||||||
|
assert_eq!(artifact["type"], "report-artifact");
|
||||||
|
assert_eq!(artifact["report_name"], "tq-lineloss-report");
|
||||||
|
assert_eq!(artifact["status"], "partial");
|
||||||
|
assert_eq!(artifact["org"]["label"], "国网兰州供电公司");
|
||||||
|
assert_eq!(artifact["period"]["value"], "2026-03");
|
||||||
|
assert_eq!(artifact["counts"]["rows"], 1);
|
||||||
|
assert_eq!(artifact["reasons"][0], "report_log_failed");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deterministic_lineloss_blocked_and_error_artifact_statuses_are_failure_contracts() {
|
||||||
|
for status in ["blocked", "error"] {
|
||||||
|
let artifact = serde_json::json!({
|
||||||
|
"type": "report-artifact",
|
||||||
|
"report_name": "tq-lineloss-report",
|
||||||
|
"status": status,
|
||||||
|
"org": {
|
||||||
|
"label": "国网兰州供电公司",
|
||||||
|
"code": "62401"
|
||||||
|
},
|
||||||
|
"period": {
|
||||||
|
"mode": "week",
|
||||||
|
"mode_code": "2",
|
||||||
|
"value": "2026-W12",
|
||||||
|
"payload": {
|
||||||
|
"tjzq": "week",
|
||||||
|
"level": "00",
|
||||||
|
"weekSfdate": "2026-03-16",
|
||||||
|
"weekEfdate": "2026-03-22"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"columns": [],
|
||||||
|
"rows": [],
|
||||||
|
"counts": {
|
||||||
|
"rows": 0
|
||||||
|
},
|
||||||
|
"export": {
|
||||||
|
"attempted": false,
|
||||||
|
"status": "skipped",
|
||||||
|
"message": null
|
||||||
|
},
|
||||||
|
"reasons": ["selected_range_unavailable"]
|
||||||
|
});
|
||||||
|
|
||||||
|
assert_eq!(artifact["status"], status);
|
||||||
|
assert_eq!(artifact["type"], "report-artifact");
|
||||||
|
assert_eq!(artifact["period"]["mode"], "week");
|
||||||
|
assert_eq!(artifact["reasons"][0], "selected_range_unavailable");
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -51,7 +51,9 @@ fn submit_task_without_llm_configuration_returns_clear_error() {
|
|||||||
assert!(matches!(
|
assert!(matches!(
|
||||||
&sent[0],
|
&sent[0],
|
||||||
AgentMessage::LogEntry { level, message }
|
AgentMessage::LogEntry { level, message }
|
||||||
if level == "info" && message == "sgclaw runtime version=0.1.0 protocol=1.0"
|
if level == "info"
|
||||||
|
&& message.starts_with("sgclaw runtime version=")
|
||||||
|
&& message.ends_with(" protocol=1.0")
|
||||||
));
|
));
|
||||||
assert!(matches!(
|
assert!(matches!(
|
||||||
&sent[1],
|
&sent[1],
|
||||||
|
|||||||
Reference in New Issue
Block a user