feat: refactor sgclaw around zeroclaw compat runtime

This commit is contained in:
zyl
2026-03-26 16:23:31 +08:00
parent bca5b75801
commit ff0771a83f
1059 changed files with 409460 additions and 23 deletions

View File

@@ -0,0 +1,378 @@
//! End-to-end integration tests for agent orchestration.
//!
//! These tests exercise the full agent turn cycle through the public API,
//! using mock providers and tools to validate orchestration behavior without
//! external service dependencies. They complement the unit tests in
//! `src/agent/tests.rs` by running at the integration test boundary.
//!
//! Ref: https://github.com/zeroclaw-labs/zeroclaw/issues/618 (item 6)
use crate::support::helpers::{
build_agent, build_agent_xml, build_recording_agent, text_response, tool_response,
StaticMemoryLoader,
};
use crate::support::{CountingTool, EchoTool, MockProvider, RecordingProvider};
use zeroclaw::providers::traits::ChatMessage;
use zeroclaw::providers::{ChatResponse, ConversationMessage, ToolCall};
// ═════════════════════════════════════════════════════════════════════════════
// E2E smoke tests — full agent turn cycle
// ═════════════════════════════════════════════════════════════════════════════
/// Validates the simplest happy path: user message → LLM text response.
#[tokio::test]
async fn e2e_simple_text_response() {
    // One scripted text reply and no tool involvement.
    let scripted = vec![text_response("Hello from mock provider")];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    let reply = agent.turn("hi").await.unwrap();
    assert!(!reply.is_empty(), "Expected non-empty text response");
}
/// Validates single tool call → tool execution → final LLM response.
#[tokio::test]
async fn e2e_single_tool_call_cycle() {
    // First response asks for the echo tool; second closes the turn with text.
    let echo_call = ToolCall {
        id: String::from("tc1"),
        name: String::from("echo"),
        arguments: String::from(r#"{"message": "hello from tool"}"#),
    };
    let scripted = vec![
        tool_response(vec![echo_call]),
        text_response("Tool executed successfully"),
    ];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    let reply = agent.turn("run echo").await.unwrap();
    assert!(
        !reply.is_empty(),
        "Expected non-empty response after tool execution"
    );
}
/// Validates multi-step tool chain: tool A → tool B → tool C → final response.
#[tokio::test]
async fn e2e_multi_step_tool_chain() {
    let (tool, invocations) = CountingTool::new();
    // Small factory keeps the two scripted tool calls identical except for id.
    let counter_call = |id: &str| ToolCall {
        id: id.into(),
        name: "counter".into(),
        arguments: "{}".into(),
    };
    let provider = MockProvider::new(vec![
        tool_response(vec![counter_call("tc1")]),
        tool_response(vec![counter_call("tc2")]),
        text_response("Done after 2 tool calls"),
    ]);
    let mut agent = build_agent(Box::new(provider), vec![Box::new(tool)]);
    let reply = agent.turn("count twice").await.unwrap();
    assert!(
        !reply.is_empty(),
        "Expected non-empty response after tool chain"
    );
    // The shared counter proves the tool actually ran once per scripted call.
    assert_eq!(*invocations.lock().unwrap(), 2);
}
/// Validates that the XML dispatcher path also works end-to-end.
#[tokio::test]
async fn e2e_xml_dispatcher_tool_call() {
    // Tool call delivered inline as XML text rather than structured tool_calls.
    let xml_payload = r#"<tool_call>
{"name": "echo", "arguments": {"message": "xml dispatch"}}
</tool_call>"#;
    let provider = MockProvider::new(vec![
        ChatResponse {
            text: Some(xml_payload.into()),
            tool_calls: vec![],
            usage: None,
            reasoning_content: None,
        },
        text_response("XML tool executed"),
    ]);
    let mut agent = build_agent_xml(Box::new(provider), vec![Box::new(EchoTool)]);
    let reply = agent.turn("test xml dispatch").await.unwrap();
    assert!(
        !reply.is_empty(),
        "Expected non-empty response from XML dispatcher"
    );
}
/// Validates that multiple sequential turns maintain conversation coherence.
#[tokio::test]
async fn e2e_multi_turn_conversation() {
    let scripted = vec![
        text_response("First response"),
        text_response("Second response"),
        text_response("Third response"),
    ];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    // Each turn must yield text, and consecutive replies must differ.
    let first = agent.turn("turn 1").await.unwrap();
    assert!(!first.is_empty(), "Expected non-empty first response");
    let second = agent.turn("turn 2").await.unwrap();
    assert!(!second.is_empty(), "Expected non-empty second response");
    assert_ne!(first, second, "Sequential turn responses should be distinct");
    let third = agent.turn("turn 3").await.unwrap();
    assert!(!third.is_empty(), "Expected non-empty third response");
    assert_ne!(second, third, "Sequential turn responses should be distinct");
}
/// Validates that the agent handles unknown tool names gracefully.
#[tokio::test]
async fn e2e_unknown_tool_recovery() {
    // The provider requests a tool that is not registered with the agent.
    let unknown_call = ToolCall {
        id: String::from("tc1"),
        name: String::from("nonexistent_tool"),
        arguments: String::from("{}"),
    };
    let scripted = vec![
        tool_response(vec![unknown_call]),
        text_response("Recovered from unknown tool"),
    ];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    let reply = agent.turn("call missing tool").await.unwrap();
    assert!(
        !reply.is_empty(),
        "Expected non-empty response after unknown tool recovery"
    );
}
/// Validates parallel tool dispatch in a single response.
#[tokio::test]
async fn e2e_parallel_tool_dispatch() {
    let (tool, invocations) = CountingTool::new();
    let counter_call = |id: &str| ToolCall {
        id: id.into(),
        name: "counter".into(),
        arguments: "{}".into(),
    };
    // Both tool calls arrive in ONE provider response, unlike the chained test.
    let provider = MockProvider::new(vec![
        tool_response(vec![counter_call("tc1"), counter_call("tc2")]),
        text_response("Both tools ran"),
    ]);
    let mut agent = build_agent(Box::new(provider), vec![Box::new(tool)]);
    let reply = agent.turn("run both").await.unwrap();
    assert!(
        !reply.is_empty(),
        "Expected non-empty response after parallel dispatch"
    );
    // Both dispatched calls must have executed.
    assert_eq!(*invocations.lock().unwrap(), 2);
}
// ═════════════════════════════════════════════════════════════════════════════
// Multi-turn history fidelity & memory enrichment tests
// ═════════════════════════════════════════════════════════════════════════════
/// Validates that multi-turn conversation correctly accumulates history
/// and passes growing message sequences to the provider on each turn.
#[tokio::test]
async fn e2e_multi_turn_history_fidelity() {
    // RecordingProvider captures every outbound request so the exact message
    // sequence sent on each turn can be inspected after the conversation.
    let (provider, recorded) = RecordingProvider::new(vec![
        text_response("response 1"),
        text_response("response 2"),
        text_response("response 3"),
    ]);
    // No tools and no memory loader — history growth is the only variable.
    let mut agent = build_recording_agent(Box::new(provider), vec![], None);
    let r1 = agent.turn("msg 1").await.unwrap();
    assert_eq!(r1, "response 1");
    let r2 = agent.turn("msg 2").await.unwrap();
    assert_eq!(r2, "response 2");
    let r3 = agent.turn("msg 3").await.unwrap();
    assert_eq!(r3, "response 3");
    let requests = recorded.lock().unwrap();
    assert_eq!(requests.len(), 3, "Provider should receive 3 requests");
    // Request 1: system + user("msg 1")
    let req1 = &requests[0];
    assert!(req1.len() >= 2);
    assert_eq!(req1[0].role, "system");
    assert_eq!(req1[1].role, "user");
    // `contains` rather than equality: per the sibling empty-memory test, the
    // agent may prepend a per-turn prefix (e.g. datetime) to the raw user text.
    assert!(req1[1].content.contains("msg 1"));
    // Request 2: system + user("msg 1") + assistant("response 1") + user("msg 2")
    let req2 = &requests[1];
    let req2_users: Vec<&ChatMessage> = req2.iter().filter(|m| m.role == "user").collect();
    let req2_assts: Vec<&ChatMessage> = req2.iter().filter(|m| m.role == "assistant").collect();
    assert_eq!(req2_users.len(), 2, "Request 2: expected 2 user messages");
    assert_eq!(
        req2_assts.len(),
        1,
        "Request 2: expected 1 assistant message"
    );
    assert!(req2_users[0].content.contains("msg 1"));
    assert!(req2_users[1].content.contains("msg 2"));
    assert_eq!(req2_assts[0].content, "response 1");
    // Request 3: full history — 3 user + 2 assistant messages
    let req3 = &requests[2];
    let req3_users: Vec<&ChatMessage> = req3.iter().filter(|m| m.role == "user").collect();
    let req3_assts: Vec<&ChatMessage> = req3.iter().filter(|m| m.role == "assistant").collect();
    assert_eq!(req3_users.len(), 3, "Request 3: expected 3 user messages");
    assert_eq!(
        req3_assts.len(),
        2,
        "Request 3: expected 2 assistant messages"
    );
    assert!(req3_users[0].content.contains("msg 1"));
    assert!(req3_users[1].content.contains("msg 2"));
    assert!(req3_users[2].content.contains("msg 3"));
    assert_eq!(req3_assts[0].content, "response 1");
    assert_eq!(req3_assts[1].content, "response 2");
    // Verify agent history: system + 3*(user + assistant) = 7
    let history = agent.history();
    assert_eq!(history.len(), 7);
    assert!(matches!(&history[0], ConversationMessage::Chat(c) if c.role == "system"));
    assert!(matches!(&history[1], ConversationMessage::Chat(c) if c.role == "user"));
    assert!(matches!(&history[2], ConversationMessage::Chat(c) if c.role == "assistant"));
    // The final history entry is the assistant reply from turn 3.
    assert!(
        matches!(&history[6], ConversationMessage::Chat(c) if c.role == "assistant" && c.content == "response 3")
    );
}
/// Validates that a custom MemoryLoader injects RAG context into user
/// messages before they reach the provider.
#[tokio::test]
async fn e2e_memory_enrichment_injects_context() {
    let (provider, recorded) = RecordingProvider::new(vec![text_response("enriched response")]);
    let memory_context = "[Memory context]\n- user_name: test_user\n[/Memory context]\n\n";
    let mut agent = build_recording_agent(
        Box::new(provider),
        vec![],
        Some(Box::new(StaticMemoryLoader::new(memory_context))),
    );
    let response = agent.turn("hello").await.unwrap();
    assert_eq!(response, "enriched response");
    // The single outbound request must carry the enriched user payload.
    let requests = recorded.lock().unwrap();
    assert_eq!(requests.len(), 1);
    let user_msg = requests[0].iter().find(|m| m.role == "user").unwrap();
    assert!(
        user_msg.content.contains("[Memory context]"),
        "User message should contain memory context, got: {}",
        user_msg.content,
    );
    assert!(
        user_msg.content.contains("user_name: test_user"),
        "User message should contain memory key-value pair",
    );
    assert!(
        user_msg.content.ends_with("hello"),
        "User message should end with original text, got: {}",
        user_msg.content,
    );
    // The enriched form is also what the agent persists in its own history.
    let history = agent.history();
    match &history[1] {
        ConversationMessage::Chat(chat) => {
            assert_eq!(chat.role, "user");
            assert!(chat.content.contains("[Memory context]"));
            assert!(chat.content.ends_with("hello"));
        }
        other => panic!("Expected Chat variant for user message, got: {other:?}"),
    }
}
/// Validates multi-turn conversation with memory enrichment: every user
/// message is enriched, and the provider sees the full enriched history.
#[tokio::test]
async fn e2e_multi_turn_with_memory_enrichment() {
    let (provider, recorded) =
        RecordingProvider::new(vec![text_response("answer 1"), text_response("answer 2")]);
    let memory_context = "[Memory context]\n- project: zeroclaw\n[/Memory context]\n\n";
    let mut agent = build_recording_agent(
        Box::new(provider),
        vec![],
        Some(Box::new(StaticMemoryLoader::new(memory_context))),
    );
    assert_eq!(agent.turn("first question").await.unwrap(), "answer 1");
    assert_eq!(agent.turn("second question").await.unwrap(), "answer 2");
    let requests = recorded.lock().unwrap();
    assert_eq!(requests.len(), 2);
    // Turn 1: the sole user message carries the injected memory block.
    let req1_user = requests[0].iter().find(|m| m.role == "user").unwrap();
    assert!(req1_user.content.contains("[Memory context]"));
    assert!(req1_user.content.contains("project: zeroclaw"));
    assert!(req1_user.content.ends_with("first question"));
    // Turn 2: both user messages stay enriched, in conversation order.
    let req2_users: Vec<&ChatMessage> = requests[1].iter().filter(|m| m.role == "user").collect();
    assert_eq!(req2_users.len(), 2, "Request 2 should have 2 user messages");
    for (user, suffix) in req2_users.iter().zip(["first question", "second question"]) {
        assert!(user.content.contains("[Memory context]"));
        assert!(user.content.ends_with(suffix));
    }
    // Turn 1's assistant reply is replayed in turn 2's request.
    let req2_assts: Vec<&ChatMessage> = requests[1]
        .iter()
        .filter(|m| m.role == "assistant")
        .collect();
    assert_eq!(req2_assts.len(), 1);
    assert_eq!(req2_assts[0].content, "answer 1");
    // History layout: system + 2 × (enriched user + assistant) = 5 entries.
    assert_eq!(agent.history().len(), 5);
}
/// Validates that empty memory context does not prepend memory text.
/// A per-turn datetime prefix may still be present.
#[tokio::test]
async fn e2e_empty_memory_context_passthrough() {
    let (provider, recorded) = RecordingProvider::new(vec![text_response("plain response")]);
    // A loader that yields an empty context should leave the message alone.
    let mut agent = build_recording_agent(
        Box::new(provider),
        vec![],
        Some(Box::new(StaticMemoryLoader::new(""))),
    );
    assert_eq!(agent.turn("hello").await.unwrap(), "plain response");
    let requests = recorded.lock().unwrap();
    let user_msg = requests[0].iter().find(|m| m.role == "user").unwrap();
    assert!(
        user_msg.content.ends_with("hello"),
        "User payload should preserve original text suffix, got: {}",
        user_msg.content
    );
    assert!(
        !user_msg.content.contains("[Memory context]"),
        "Empty context should not prepend memory context text, got: {}",
        user_msg.content
    );
}

View File

@@ -0,0 +1,254 @@
//! TG4: Agent Loop Robustness Tests
//!
//! Prevents: Pattern 4 — Agent loop & tool call processing bugs (13% of user bugs).
//! Issues: #746, #418, #777, #848
//!
//! Tests agent behavior with malformed tool calls, empty responses,
//! max iteration limits, and cascading tool failures using mock providers.
//! Complements inline parse_tool_calls tests in `src/agent/loop_.rs`.
use crate::support::helpers::{build_agent, text_response, tool_response};
use crate::support::{CountingTool, EchoTool, FailingTool, MockProvider};
use zeroclaw::providers::{ChatResponse, ToolCall};
// ═════════════════════════════════════════════════════════════════════════════
// TG4.1: Malformed tool call recovery
// ═════════════════════════════════════════════════════════════════════════════
/// Agent should recover when LLM returns text with residual XML tags (#746)
#[tokio::test]
async fn agent_recovers_from_text_with_xml_residue() {
    // Plain text with a stray closing tag that must not derail parsing.
    let scripted = vec![text_response(
        "Here is the result. Some leftover </tool_call> text after.",
    )];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    let reply = agent.turn("test").await.unwrap();
    assert!(
        !reply.is_empty(),
        "agent should produce non-empty response despite XML residue"
    );
}
/// Agent should handle tool call with empty arguments gracefully
#[tokio::test]
async fn agent_handles_tool_call_with_empty_arguments() {
    let empty_args_call = ToolCall {
        id: String::from("tc1"),
        name: String::from("echo"),
        arguments: String::from("{}"),
    };
    let scripted = vec![
        tool_response(vec![empty_args_call]),
        text_response("Tool with empty args executed"),
    ];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    assert!(!agent.turn("call with empty args").await.unwrap().is_empty());
}
/// Agent should handle unknown tool name without crashing (#848 related)
#[tokio::test]
async fn agent_handles_nonexistent_tool_gracefully() {
    // The requested tool is not registered; the agent must still complete.
    let unknown_call = ToolCall {
        id: String::from("tc1"),
        name: String::from("absolutely_nonexistent_tool"),
        arguments: String::from("{}"),
    };
    let scripted = vec![
        tool_response(vec![unknown_call]),
        text_response("Recovered from unknown tool"),
    ];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    let reply = agent.turn("call missing tool").await.unwrap();
    assert!(
        !reply.is_empty(),
        "agent should recover from unknown tool"
    );
}
// ═════════════════════════════════════════════════════════════════════════════
// TG4.2: Tool failure cascade handling (#848)
// ═════════════════════════════════════════════════════════════════════════════
/// Agent should handle repeated tool failures without infinite loop
#[tokio::test]
async fn agent_handles_failing_tool() {
    let failing_call = ToolCall {
        id: String::from("tc1"),
        name: String::from("failing_tool"),
        arguments: String::from("{}"),
    };
    let scripted = vec![
        tool_response(vec![failing_call]),
        text_response("Tool failed but I recovered"),
    ];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(FailingTool)]);
    let reply = agent.turn("use failing tool").await.unwrap();
    assert!(
        !reply.is_empty(),
        "agent should produce response even after tool failure"
    );
}
/// Agent should handle mixed tool calls (some succeed, some fail)
#[tokio::test]
async fn agent_handles_mixed_tool_success_and_failure() {
    // One dispatch containing a succeeding echo and a failing tool.
    let ok_call = ToolCall {
        id: String::from("tc1"),
        name: String::from("echo"),
        arguments: String::from(r#"{"message": "success"}"#),
    };
    let failing_call = ToolCall {
        id: String::from("tc2"),
        name: String::from("failing_tool"),
        arguments: String::from("{}"),
    };
    let scripted = vec![
        tool_response(vec![ok_call, failing_call]),
        text_response("Mixed results processed"),
    ];
    let mut agent = build_agent(
        Box::new(MockProvider::new(scripted)),
        vec![Box::new(EchoTool), Box::new(FailingTool)],
    );
    assert!(!agent.turn("mixed tools").await.unwrap().is_empty());
}
// ═════════════════════════════════════════════════════════════════════════════
// TG4.3: Iteration limit enforcement (#777)
// ═════════════════════════════════════════════════════════════════════════════
/// Agent should not exceed max_tool_iterations (default=10) even with
/// a provider that keeps returning tool calls.
///
/// The meaningful invariant is the invocation bound checked at the end;
/// the turn itself is allowed to finish either Ok (fallback text) or Err
/// (iteration-limit error), as long as it returns rather than hangs.
#[tokio::test]
async fn agent_respects_max_tool_iterations() {
    let (counting_tool, count) = CountingTool::new();
    // Create 20 tool call responses — more than the default limit of 10.
    let mut responses: Vec<ChatResponse> = (0..20)
        .map(|i| {
            tool_response(vec![ToolCall {
                id: format!("tc_{i}"),
                name: "counter".into(),
                arguments: "{}".into(),
            }])
        })
        .collect();
    // Add a final text response that would be used if the limit is reached.
    responses.push(text_response("Final response after iterations"));
    let provider = Box::new(MockProvider::new(responses));
    let mut agent = build_agent(provider, vec![Box::new(counting_tool)]);
    // Either outcome is acceptable; the previous
    // `assert!(result.is_ok() || result.is_err())` was a tautology (always
    // true for any Result) and has been removed.
    let _ = agent.turn("keep calling tools").await;
    let invocations = *count.lock().unwrap();
    assert!(
        invocations <= 10,
        "tool invocations ({invocations}) should not exceed default max_tool_iterations (10)"
    );
}
// ═════════════════════════════════════════════════════════════════════════════
// TG4.4: Empty and whitespace responses
// ═════════════════════════════════════════════════════════════════════════════
/// Agent should handle empty text response from provider (#418 related)
#[tokio::test]
async fn agent_handles_empty_provider_response() {
    let empty_reply = ChatResponse {
        text: Some(String::new()),
        tool_calls: vec![],
        usage: None,
        reasoning_content: None,
    };
    let mut agent = build_agent(
        Box::new(MockProvider::new(vec![empty_reply])),
        vec![Box::new(EchoTool)],
    );
    // Only the absence of a panic is under test; the Result is ignored.
    let _result = agent.turn("test").await;
}
/// Agent should handle None text response from provider
#[tokio::test]
async fn agent_handles_none_text_response() {
    let no_text = ChatResponse {
        text: None,
        tool_calls: vec![],
        usage: None,
        reasoning_content: None,
    };
    let mut agent = build_agent(
        Box::new(MockProvider::new(vec![no_text])),
        vec![Box::new(EchoTool)],
    );
    // Only the absence of a panic is under test; the Result is ignored.
    let _result = agent.turn("test").await;
}
/// Agent should handle whitespace-only response
#[tokio::test]
async fn agent_handles_whitespace_only_response() {
    let scripted = vec![text_response(" \n\t ")];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    // Only the absence of a panic is under test; the Result is ignored.
    let _result = agent.turn("test").await;
}
// ═════════════════════════════════════════════════════════════════════════════
// TG4.5: Tool call with special content
// ═════════════════════════════════════════════════════════════════════════════
/// Agent should handle tool arguments with unicode content
#[tokio::test]
async fn agent_handles_unicode_tool_arguments() {
    let unicode_call = ToolCall {
        id: String::from("tc1"),
        name: String::from("echo"),
        arguments: String::from(r#"{"message": " 🌍"}"#),
    };
    let scripted = vec![
        tool_response(vec![unicode_call]),
        text_response("Unicode tool executed"),
    ];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    assert!(!agent.turn("unicode test").await.unwrap().is_empty());
}
/// Agent should handle tool arguments with nested JSON
#[tokio::test]
async fn agent_handles_nested_json_tool_arguments() {
    // Arguments whose "message" value is itself escaped JSON.
    let nested_call = ToolCall {
        id: String::from("tc1"),
        name: String::from("echo"),
        arguments: String::from(r#"{"message": "{\"nested\": true, \"deep\": {\"level\": 3}}"}"#),
    };
    let scripted = vec![
        tool_response(vec![nested_call]),
        text_response("Nested JSON tool executed"),
    ];
    let mut agent = build_agent(Box::new(MockProvider::new(scripted)), vec![Box::new(EchoTool)]);
    assert!(!agent.turn("nested json test").await.unwrap().is_empty());
}
/// Agent should complete a tool call followed by a final text response.
///
/// NOTE(review): the original doc said "(no second LLM call)", but the mock
/// scripts a second response ("Final answer after tool") that the agent
/// consumes after tool execution — i.e. a second provider call does happen.
#[tokio::test]
async fn agent_handles_sequential_tool_then_text() {
    let provider = Box::new(MockProvider::new(vec![
        tool_response(vec![ToolCall {
            id: "tc1".into(),
            name: "echo".into(),
            arguments: r#"{"message": "step 1"}"#.into(),
        }]),
        text_response("Final answer after tool"),
    ]));
    let mut agent = build_agent(provider, vec![Box::new(EchoTool)]);
    let response = agent.turn("two step").await.unwrap();
    assert!(
        !response.is_empty(),
        "should produce final text after tool execution"
    );
}

View File

@@ -0,0 +1,310 @@
use tempfile::TempDir;
use zeroclaw::config::schema::{CronJobDecl, CronScheduleDecl};
use zeroclaw::config::Config;
use zeroclaw::cron::{get_job, list_jobs, sync_declarative_jobs, JobType, Schedule};

/// Stable identifier of the builtin backup cron job synthesized from
/// `config.backup.schedule_cron` (was repeated as a literal in every test).
const BUILTIN_BACKUP_ID: &str = "__builtin_backup";

/// Builds a test `Config` rooted in `tmp` with the given backup cron
/// schedule, creating the workspace directory on disk.
fn test_config(tmp: &TempDir, schedule_cron: Option<String>) -> Config {
    let mut config = Config {
        workspace_dir: tmp.path().join("workspace"),
        config_path: tmp.path().join("config.toml"),
        ..Config::default()
    };
    config.backup.schedule_cron = schedule_cron;
    std::fs::create_dir_all(&config.workspace_dir).unwrap();
    config
}

/// Synthesizes the builtin backup job declaration for cron expression `expr`.
/// Extracted to remove the 13-field literal previously duplicated 7 times.
fn builtin_backup_job(expr: &str) -> CronJobDecl {
    CronJobDecl {
        id: BUILTIN_BACKUP_ID.to_string(),
        name: Some("Scheduled backup".to_string()),
        job_type: "shell".to_string(),
        schedule: CronScheduleDecl::Cron {
            expr: expr.to_string(),
            tz: None,
        },
        command: Some("backup create".to_string()),
        prompt: None,
        enabled: true,
        model: None,
        allowed_tools: None,
        session_target: None,
        delivery: None,
    }
}

/// Returns the declarative job list for `config`, appending the builtin
/// backup job iff `backup.schedule_cron` is set — mirroring the synthesis
/// each test previously performed inline.
fn declarative_jobs(config: &Config) -> Vec<CronJobDecl> {
    let mut jobs = config.cron.jobs.clone();
    if let Some(expr) = &config.backup.schedule_cron {
        jobs.push(builtin_backup_job(expr));
    }
    jobs
}

#[test]
fn backup_cron_job_synced_when_schedule_set() {
    let tmp = TempDir::new().unwrap();
    let config = test_config(&tmp, Some("0 3 * * *".to_string()));
    sync_declarative_jobs(&config, &declarative_jobs(&config)).unwrap();
    let job = get_job(&config, BUILTIN_BACKUP_ID).unwrap();
    assert_eq!(job.id, BUILTIN_BACKUP_ID);
    assert_eq!(job.command, "backup create");
    assert_eq!(job.source, "declarative");
    assert!(matches!(job.schedule, Schedule::Cron { ref expr, .. } if expr == "0 3 * * *"));
}

#[test]
fn backup_cron_job_not_synced_when_schedule_none() {
    let tmp = TempDir::new().unwrap();
    let config = test_config(&tmp, None);
    // With schedule_cron unset, declarative_jobs adds no builtin job.
    sync_declarative_jobs(&config, &declarative_jobs(&config)).unwrap();
    let result = get_job(&config, BUILTIN_BACKUP_ID);
    assert!(
        result.is_err(),
        "builtin backup job should not exist when schedule_cron is None"
    );
}

#[test]
fn backup_cron_job_removed_when_schedule_cleared() {
    let tmp = TempDir::new().unwrap();
    // First sync: schedule present → builtin job created.
    let config_with_schedule = test_config(&tmp, Some("0 3 * * *".to_string()));
    sync_declarative_jobs(&config_with_schedule, &declarative_jobs(&config_with_schedule))
        .unwrap();
    assert!(get_job(&config_with_schedule, BUILTIN_BACKUP_ID).is_ok());
    // Second sync: schedule_cron cleared → job must be removed.
    let config_without_schedule = test_config(&tmp, None);
    sync_declarative_jobs(&config_without_schedule, &declarative_jobs(&config_without_schedule))
        .unwrap();
    let result = get_job(&config_without_schedule, BUILTIN_BACKUP_ID);
    assert!(
        result.is_err(),
        "builtin backup job should be removed when schedule_cron is cleared"
    );
}

#[test]
fn backup_cron_job_schedule_updated() {
    let tmp = TempDir::new().unwrap();
    // First sync with schedule "0 3 * * *".
    let config_v1 = test_config(&tmp, Some("0 3 * * *".to_string()));
    sync_declarative_jobs(&config_v1, &declarative_jobs(&config_v1)).unwrap();
    let next_run_v1 = get_job(&config_v1, BUILTIN_BACKUP_ID).unwrap().next_run;
    // Second sync with schedule "0 2 * * *".
    let config_v2 = test_config(&tmp, Some("0 2 * * *".to_string()));
    sync_declarative_jobs(&config_v2, &declarative_jobs(&config_v2)).unwrap();
    let job_v2 = get_job(&config_v2, BUILTIN_BACKUP_ID).unwrap();
    assert!(matches!(job_v2.schedule, Schedule::Cron { ref expr, .. } if expr == "0 2 * * *"));
    assert_ne!(
        job_v2.next_run, next_run_v1,
        "next_run should be recalculated when schedule changes"
    );
}

#[test]
fn backup_cron_job_id_is_stable() {
    let tmp = TempDir::new().unwrap();
    let config = test_config(&tmp, Some("0 3 * * *".to_string()));
    // Syncing twice with the same config must be idempotent (no duplicates).
    for _ in 0..2 {
        sync_declarative_jobs(&config, &declarative_jobs(&config)).unwrap();
    }
    let job = get_job(&config, BUILTIN_BACKUP_ID).unwrap();
    assert_eq!(job.id, BUILTIN_BACKUP_ID);
    let backup_job_count = list_jobs(&config)
        .unwrap()
        .iter()
        .filter(|j| j.id == BUILTIN_BACKUP_ID)
        .count();
    assert_eq!(
        backup_job_count, 1,
        "should have exactly one builtin backup job, not duplicates"
    );
}

#[test]
fn backup_cron_job_command_is_backup_create() {
    let tmp = TempDir::new().unwrap();
    let config = test_config(&tmp, Some("0 3 * * *".to_string()));
    sync_declarative_jobs(&config, &declarative_jobs(&config)).unwrap();
    let job = get_job(&config, BUILTIN_BACKUP_ID).unwrap();
    assert_eq!(job.command, "backup create");
}

#[test]
fn backup_cron_job_type_is_shell() {
    let tmp = TempDir::new().unwrap();
    let config = test_config(&tmp, Some("0 3 * * *".to_string()));
    sync_declarative_jobs(&config, &declarative_jobs(&config)).unwrap();
    let job = get_job(&config, BUILTIN_BACKUP_ID).unwrap();
    assert_eq!(job.job_type, JobType::Shell);
}

#[test]
fn backup_cron_job_source_is_declarative() {
    let tmp = TempDir::new().unwrap();
    let config = test_config(&tmp, Some("0 3 * * *".to_string()));
    sync_declarative_jobs(&config, &declarative_jobs(&config)).unwrap();
    let job = get_job(&config, BUILTIN_BACKUP_ID).unwrap();
    assert_eq!(job.source, "declarative");
}

File diff suppressed because it is too large. [Load Diff]

View File

@@ -0,0 +1,328 @@
//! TG3: Channel Message Identity & Routing Tests
//!
//! Prevents: Pattern 3 — Channel message routing & identity bugs (17% of user bugs).
//! Issues: #496, #483, #620, #415, #503
//!
//! Tests that ChannelMessage fields are used consistently and that the
//! SendMessage → Channel trait contract preserves correct identity semantics.
//! Verifies sender/reply_target field contracts to prevent field swaps.
use async_trait::async_trait;
use zeroclaw::channels::traits::{Channel, ChannelMessage, SendMessage};
// ─────────────────────────────────────────────────────────────────────────────
// ChannelMessage construction and field semantics
// ─────────────────────────────────────────────────────────────────────────────
#[test]
fn channel_message_sender_field_holds_platform_user_id() {
    // Simulates Telegram: sender should be numeric chat_id, not username
    let message = ChannelMessage {
        id: String::from("msg_1"),
        sender: String::from("123456789"), // numeric chat_id
        reply_target: String::from("msg_0"),
        content: String::from("test message"),
        channel: String::from("telegram"),
        timestamp: 1700000000,
        thread_ts: None,
        interruption_scope_id: None,
        attachments: vec![],
    };
    assert_eq!(message.sender, "123456789");
    // Sender should be the platform-level user/chat identifier
    assert!(
        message.sender.bytes().all(|b| b.is_ascii_digit()),
        "Telegram sender should be numeric chat_id, got: {}",
        message.sender
    );
}
#[test]
fn channel_message_reply_target_distinct_from_sender() {
    // Simulates Discord: reply_target should be channel_id, not sender user_id
    let message = ChannelMessage {
        id: String::from("msg_1"),
        sender: String::from("user_987654"),       // Discord user ID
        reply_target: String::from("channel_123"), // Discord channel ID for replies
        content: String::from("test message"),
        channel: String::from("discord"),
        timestamp: 1700000000,
        thread_ts: None,
        interruption_scope_id: None,
        attachments: vec![],
    };
    assert_eq!(message.reply_target, "channel_123");
    assert_ne!(
        message.sender, message.reply_target,
        "sender and reply_target should be distinct for Discord"
    );
}
#[test]
fn channel_message_fields_not_swapped() {
    // Guards against #496 (Telegram) and #483 (Discord) field swap bugs
    let message = ChannelMessage {
        id: String::from("msg_42"),
        sender: String::from("sender_value"),
        reply_target: String::from("target_value"),
        content: String::from("payload"),
        channel: String::from("test"),
        timestamp: 1700000000,
        thread_ts: None,
        interruption_scope_id: None,
        attachments: vec![],
    };
    // Distinct marker values make any accidental swap immediately visible.
    assert_eq!(
        message.sender, "sender_value",
        "sender field should not be swapped"
    );
    assert_eq!(
        message.reply_target, "target_value",
        "reply_target field should not be swapped"
    );
    assert_ne!(
        message.sender, message.reply_target,
        "sender and reply_target should remain distinct"
    );
}
#[test]
fn channel_message_preserves_all_fields_on_clone() {
    // Cloning must copy every identity-relevant field verbatim.
    let source = ChannelMessage {
        id: "clone_test".into(),
        sender: "sender_123".into(),
        reply_target: "target_456".into(),
        content: "cloned content".into(),
        channel: "test_channel".into(),
        timestamp: 1700000001,
        thread_ts: None,
        interruption_scope_id: None,
        attachments: vec![],
    };
    let copy = source.clone();
    // Compare all string fields pairwise, then the numeric timestamp.
    for (got, want) in [
        (&copy.id, &source.id),
        (&copy.sender, &source.sender),
        (&copy.reply_target, &source.reply_target),
        (&copy.content, &source.content),
        (&copy.channel, &source.channel),
    ] {
        assert_eq!(got, want);
    }
    assert_eq!(copy.timestamp, source.timestamp);
}
// ─────────────────────────────────────────────────────────────────────────────
// SendMessage construction
// ─────────────────────────────────────────────────────────────────────────────
#[test]
fn send_message_new_sets_content_and_recipient() {
    // `new` populates content/recipient and leaves the optional subject unset.
    let message = SendMessage::new("Hello", "recipient_123");
    assert_eq!(message.recipient, "recipient_123");
    assert_eq!(message.content, "Hello");
    assert!(message.subject.is_none(), "subject should be None by default");
}
#[test]
fn send_message_with_subject_sets_all_fields() {
    // `with_subject` is the three-field constructor variant.
    let message = SendMessage::with_subject("Hello", "recipient_123", "Re: Test");
    assert_eq!(message.subject.as_deref(), Some("Re: Test"));
    assert_eq!(message.recipient, "recipient_123");
    assert_eq!(message.content, "Hello");
}
#[test]
fn send_message_recipient_carries_platform_target() {
    // `recipient` is the platform delivery target: a Telegram chat_id or a
    // Discord channel_id, depending on the channel.
    let cases = [
        ("123456789", "Telegram SendMessage recipient should be chat_id"),
        (
            "channel_987654",
            "Discord SendMessage recipient should be channel_id",
        ),
    ];
    for (target, label) in cases {
        let message = SendMessage::new("response", target);
        assert_eq!(message.recipient, target, "{label}");
    }
}
// ─────────────────────────────────────────────────────────────────────────────
// Channel trait contract: send/listen roundtrip via DummyChannel
// ─────────────────────────────────────────────────────────────────────────────
/// Test channel that records every outgoing message so tests can assert on it.
struct CapturingChannel {
    // Mutex-guarded log of everything passed to `send`.
    sent: std::sync::Mutex<Vec<SendMessage>>,
}
impl CapturingChannel {
    /// Creates a channel with an empty send log.
    fn new() -> Self {
        Self {
            sent: std::sync::Mutex::default(),
        }
    }
    /// Returns a snapshot of all messages sent so far, in send order.
    fn sent_messages(&self) -> Vec<SendMessage> {
        self.sent.lock().unwrap().clone()
    }
}
#[async_trait]
impl Channel for CapturingChannel {
    fn name(&self) -> &str {
        "capturing"
    }
    /// Records the outgoing message instead of delivering it anywhere.
    async fn send(&self, message: &SendMessage) -> anyhow::Result<()> {
        let mut log = self.sent.lock().unwrap();
        log.push(message.clone());
        Ok(())
    }
    /// Emits exactly one canned incoming message whose sender and
    /// reply_target are deliberately distinct.
    async fn listen(&self, tx: tokio::sync::mpsc::Sender<ChannelMessage>) -> anyhow::Result<()> {
        let incoming = ChannelMessage {
            id: "listen_1".into(),
            sender: "test_sender".into(),
            reply_target: "test_target".into(),
            content: "incoming".into(),
            channel: "capturing".into(),
            timestamp: 1700000000,
            thread_ts: None,
            interruption_scope_id: None,
            attachments: vec![],
        };
        tx.send(incoming)
            .await
            .map_err(|e| anyhow::anyhow!(e.to_string()))
    }
}
#[tokio::test]
async fn channel_send_preserves_recipient() {
    // The recipient handed to send() must come back unchanged from the log.
    let channel = CapturingChannel::new();
    channel
        .send(&SendMessage::new("Hello", "target_123"))
        .await
        .unwrap();
    let log = channel.sent_messages();
    assert_eq!(log.len(), 1);
    assert_eq!(log[0].content, "Hello");
    assert_eq!(log[0].recipient, "target_123");
}
#[tokio::test]
async fn channel_listen_produces_correct_identity_fields() {
    // listen() must populate sender and reply_target with distinct values.
    let (tx, mut rx) = tokio::sync::mpsc::channel(1);
    CapturingChannel::new().listen(tx).await.unwrap();
    let incoming = rx.recv().await.expect("should receive message");
    assert_eq!(incoming.reply_target, "test_target");
    assert_eq!(incoming.sender, "test_sender");
    assert_ne!(
        incoming.sender, incoming.reply_target,
        "listen() should populate sender and reply_target distinctly"
    );
}
#[tokio::test]
async fn channel_send_reply_uses_sender_from_listen() {
    // Full roundtrip: listen() → receive → reply. Per the routing contract,
    // the reply's recipient is taken from `reply_target` (not `sender`).
    let channel = CapturingChannel::new();
    let (tx, mut rx) = tokio::sync::mpsc::channel(1);
    channel.listen(tx).await.unwrap();
    let incoming = rx.recv().await.expect("should receive message");
    let reply = SendMessage::new("reply content", &incoming.reply_target);
    channel.send(&reply).await.unwrap();
    let log = channel.sent_messages();
    assert_eq!(log.len(), 1);
    assert_eq!(
        log[0].recipient, "test_target",
        "reply should use reply_target as recipient"
    );
}
// ─────────────────────────────────────────────────────────────────────────────
// Channel trait default methods
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn channel_health_check_default_returns_true() {
    // The trait's default health_check implementation reports healthy.
    let healthy = CapturingChannel::new().health_check().await;
    assert!(healthy, "default health_check should return true");
}
#[tokio::test]
async fn channel_typing_defaults_succeed() {
    // Default typing-indicator hooks are no-ops that must not error.
    let channel = CapturingChannel::new();
    let started = channel.start_typing("target").await;
    let stopped = channel.stop_typing("target").await;
    assert!(started.is_ok());
    assert!(stopped.is_ok());
}
#[tokio::test]
async fn channel_draft_defaults() {
    // Channels that don't opt into draft support get safe no-op defaults.
    let channel = CapturingChannel::new();
    assert!(!channel.supports_draft_updates());
    let draft_result = channel
        .send_draft(&SendMessage::new("draft", "target"))
        .await
        .unwrap();
    assert!(
        draft_result.is_none(),
        "default send_draft should return None"
    );
    let updated = channel.update_draft("target", "msg_1", "updated").await;
    assert!(updated.is_ok());
    let finalized = channel.finalize_draft("target", "msg_1", "final").await;
    assert!(finalized.is_ok());
}
// ─────────────────────────────────────────────────────────────────────────────
// Multiple messages: conversation context preservation
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn channel_multiple_sends_preserve_order_and_recipients() {
    // Three sends across two distinct targets: both ordering and addressing
    // must survive in the log.
    let channel = CapturingChannel::new();
    let outgoing = [
        ("msg 1", "target_a"),
        ("msg 2", "target_b"),
        ("msg 3", "target_a"),
    ];
    for (content, target) in outgoing {
        channel.send(&SendMessage::new(content, target)).await.unwrap();
    }
    let log = channel.sent_messages();
    assert_eq!(log.len(), 3);
    for (i, (content, target)) in outgoing.iter().enumerate() {
        assert_eq!(log[i].content, *content);
        assert_eq!(log[i].recipient, *target);
    }
}

View File

@@ -0,0 +1,96 @@
use async_trait::async_trait;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use zeroclaw::hooks::{HookHandler, HookResult, HookRunner};
use zeroclaw::tools::ToolResult;
/// Hook that counts lifecycle events so tests can assert they fired.
struct CounterHook {
    // Number of on_gateway_start invocations observed.
    gateway_starts: Arc<AtomicUsize>,
    // Number of on_after_tool_call invocations observed.
    tool_calls: Arc<AtomicUsize>,
}
#[async_trait]
impl HookHandler for CounterHook {
    fn name(&self) -> &str {
        "counter"
    }
    /// Counts each completed tool call.
    async fn on_after_tool_call(&self, _tool: &str, _result: &ToolResult, _duration: Duration) {
        self.tool_calls.fetch_add(1, Ordering::SeqCst);
    }
    /// Counts each gateway startup event.
    async fn on_gateway_start(&self, _host: &str, _port: u16) {
        self.gateway_starts.fetch_add(1, Ordering::SeqCst);
    }
}
/// Hook that cancels tool calls whose name appears in a deny-list.
struct ToolBlocker {
    // Tool names that before_tool_call should reject.
    blocked_tools: Vec<String>,
}
#[async_trait]
impl HookHandler for ToolBlocker {
    fn name(&self) -> &str {
        "tool-blocker"
    }
    /// High priority so the blocker runs ahead of other hooks.
    fn priority(&self) -> i32 {
        100
    }
    /// Cancels deny-listed tools; passes everything else through untouched.
    async fn before_tool_call(
        &self,
        name: String,
        args: serde_json::Value,
    ) -> HookResult<(String, serde_json::Value)> {
        let is_blocked = self.blocked_tools.iter().any(|blocked| *blocked == name);
        if is_blocked {
            HookResult::Cancel(format!("{name} is blocked"))
        } else {
            HookResult::Continue((name, args))
        }
    }
}
#[tokio::test]
async fn hook_runner_full_pipeline() {
    // Exercises the whole pipeline: void hooks fire-and-count, modifying
    // hooks pass safe tools through and cancel deny-listed ones.
    let gateway_starts = Arc::new(AtomicUsize::new(0));
    let tool_calls = Arc::new(AtomicUsize::new(0));
    let mut runner = HookRunner::new();
    runner.register(Box::new(CounterHook {
        gateway_starts: Arc::clone(&gateway_starts),
        tool_calls: Arc::clone(&tool_calls),
    }));
    runner.register(Box::new(ToolBlocker {
        blocked_tools: vec!["dangerous".into()],
    }));
    // Gateway start is a void hook; the counter must observe it.
    runner.fire_gateway_start("127.0.0.1", 8080).await;
    assert_eq!(gateway_starts.load(Ordering::SeqCst), 1);
    // A tool not on the deny-list flows through the modifying hook.
    let allowed = runner
        .run_before_tool_call("safe_tool".into(), serde_json::json!({}))
        .await;
    assert!(!allowed.is_cancel());
    // A deny-listed tool is cancelled.
    let denied = runner
        .run_before_tool_call("dangerous".into(), serde_json::json!({}))
        .await;
    assert!(denied.is_cancel());
    // After-tool-call is a void hook; the counter must observe it too.
    let tool_result = ToolResult {
        success: true,
        output: "ok".into(),
        error: None,
    };
    runner
        .fire_after_tool_call("safe_tool", &tool_result, Duration::from_millis(10))
        .await;
    assert_eq!(tool_calls.load(Ordering::SeqCst), 1);
}

View File

@@ -0,0 +1,453 @@
//! Head-to-head comparison: SQLite vs Markdown memory backends
//!
//! Run with: cargo test --test memory_comparison -- --nocapture
use std::time::Instant;
use tempfile::TempDir;
// We test both backends through the public memory module
use zeroclaw::memory::{markdown::MarkdownMemory, sqlite::SqliteMemory, Memory, MemoryCategory};
// ── Helpers ────────────────────────────────────────────────────
/// Opens (or creates) a SQLite-backed memory store rooted at `dir`.
fn sqlite_backend(dir: &std::path::Path) -> SqliteMemory {
    SqliteMemory::new(dir).expect("SQLite init failed")
}
/// Creates a Markdown-file-backed memory store rooted at `dir`.
fn markdown_backend(dir: &std::path::Path) -> MarkdownMemory {
    MarkdownMemory::new(dir)
}
// ── Test 1: Store performance ──────────────────────────────────
#[tokio::test]
async fn compare_store_speed() {
    // Time `n` stores against each backend and print a side-by-side summary.
    let tmp_sq = TempDir::new().unwrap();
    let tmp_md = TempDir::new().unwrap();
    let sq = sqlite_backend(tmp_sq.path());
    let md = markdown_backend(tmp_md.path());
    let n = 100;
    // Identical key/content generators for both backends.
    let key = |i: usize| format!("key_{i}");
    let body = |i: usize| format!("Memory entry number {i} about Rust programming");
    // SQLite: n stores
    let sq_start = Instant::now();
    for i in 0..n {
        sq.store(&key(i), &body(i), MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    let sq_dur = sq_start.elapsed();
    // Markdown: n stores
    let md_start = Instant::now();
    for i in 0..n {
        md.store(&key(i), &body(i), MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    let md_dur = md_start.elapsed();
    println!("\n============================================================");
    println!("STORE {n} entries:");
    println!(" SQLite: {:?}", sq_dur);
    println!(" Markdown: {:?}", md_dur);
    // Both should succeed
    assert_eq!(sq.count().await.unwrap(), n);
    // Markdown count parses lines, may differ slightly from n
    let md_count = md.count().await.unwrap();
    assert!(md_count >= n, "Markdown stored {md_count}, expected >= {n}");
}
// ── Test 2: Recall / search quality ────────────────────────────
/// Seeds both backends with identical entries, runs the same queries against
/// each, and prints result counts/scores side by side for manual comparison.
#[tokio::test]
async fn compare_recall_quality() {
    // Char-boundary-safe preview. The previous `&s[..s.len().min(50)]`
    // byte-slices and panics if byte 50 falls inside a multi-byte UTF-8
    // sequence; truncating by chars can never panic.
    fn preview(s: &str) -> &str {
        match s.char_indices().nth(50) {
            Some((idx, _)) => &s[..idx],
            None => s,
        }
    }
    let tmp_sq = TempDir::new().unwrap();
    let tmp_md = TempDir::new().unwrap();
    let sq = sqlite_backend(tmp_sq.path());
    let md = markdown_backend(tmp_md.path());
    // Seed both with identical data
    let entries = vec![
        (
            "lang_pref",
            "User prefers Rust over Python",
            MemoryCategory::Core,
        ),
        (
            "editor",
            "Uses VS Code with rust-analyzer",
            MemoryCategory::Core,
        ),
        ("tz", "Timezone is EST, works 9-5", MemoryCategory::Core),
        (
            "proj1",
            "Working on ZeroClaw AI assistant",
            MemoryCategory::Daily,
        ),
        (
            "proj2",
            "Previous project was a web scraper in Python",
            MemoryCategory::Daily,
        ),
        (
            "deploy",
            "Deploys to Hetzner VPS via Docker",
            MemoryCategory::Core,
        ),
        (
            "model",
            "Prefers Claude Sonnet for coding tasks",
            MemoryCategory::Core,
        ),
        (
            "style",
            "Likes concise responses, no fluff",
            MemoryCategory::Core,
        ),
        (
            "rust_note",
            "Rust's ownership model prevents memory bugs",
            MemoryCategory::Daily,
        ),
        (
            "perf",
            "Cares about binary size and startup time",
            MemoryCategory::Core,
        ),
    ];
    for (key, content, cat) in &entries {
        sq.store(key, content, cat.clone(), None).await.unwrap();
        md.store(key, content, cat.clone(), None).await.unwrap();
    }
    // Test queries and compare results
    let queries = vec![
        ("Rust", "Should find Rust-related entries"),
        ("Python", "Should find Python references"),
        ("deploy Docker", "Multi-keyword search"),
        ("Claude", "Specific tool reference"),
        ("javascript", "No matches expected"),
        ("binary size startup", "Multi-keyword partial match"),
    ];
    println!("\n============================================================");
    println!("RECALL QUALITY (10 entries seeded):\n");
    for (query, desc) in &queries {
        let sq_results = sq.recall(query, 10, None, None, None).await.unwrap();
        let md_results = md.recall(query, 10, None, None, None).await.unwrap();
        println!(" Query: \"{query}\"{desc}");
        println!(" SQLite: {} results", sq_results.len());
        for r in &sq_results {
            println!(
                " [{:.2}] {}: {}",
                r.score.unwrap_or(0.0),
                r.key,
                preview(&r.content)
            );
        }
        println!(" Markdown: {} results", md_results.len());
        for r in &md_results {
            println!(
                " [{:.2}] {}: {}",
                r.score.unwrap_or(0.0),
                r.key,
                preview(&r.content)
            );
        }
        println!();
    }
}
// ── Test 3: Recall speed at scale ──────────────────────────────
/// Seeds 200 entries into each backend, then times a single `recall` call
/// on each and prints the durations side by side.
#[tokio::test]
async fn compare_recall_speed() {
    let tmp_sq = TempDir::new().unwrap();
    let tmp_md = TempDir::new().unwrap();
    let sq = sqlite_backend(tmp_sq.path());
    let md = markdown_backend(tmp_md.path());
    // Seed 200 entries
    let n = 200;
    for i in 0..n {
        // Rotate through three topics so only ~1/3 match the query below.
        let content = if i % 3 == 0 {
            format!("Rust is great for systems programming, entry {i}")
        } else if i % 3 == 1 {
            format!("Python is popular for data science, entry {i}")
        } else {
            format!("TypeScript powers modern web apps, entry {i}")
        };
        sq.store(&format!("e{i}"), &content, MemoryCategory::Core, None)
            .await
            .unwrap();
        // NOTE(review): md uses Daily while sq uses Core — likely an
        // unintentional asymmetry; harmless here since recall() below
        // passes no category filter. TODO confirm.
        md.store(&format!("e{i}"), &content, MemoryCategory::Daily, None)
            .await
            .unwrap();
    }
    // Benchmark recall
    let start = Instant::now();
    let sq_results = sq
        .recall("Rust systems", 10, None, None, None)
        .await
        .unwrap();
    let sq_dur = start.elapsed();
    let start = Instant::now();
    let md_results = md
        .recall("Rust systems", 10, None, None, None)
        .await
        .unwrap();
    let md_dur = start.elapsed();
    println!("\n============================================================");
    println!("RECALL from {n} entries (query: \"Rust systems\", limit 10):");
    println!(" SQLite: {:?}{} results", sq_dur, sq_results.len())
;
    println!(" Markdown: {:?}{} results", md_dur, md_results.len());
    // Both should find results
    assert!(!sq_results.is_empty());
    assert!(!md_results.is_empty());
}
// ── Test 4: Persistence (SQLite wins by design) ────────────────
#[tokio::test]
async fn compare_persistence() {
    // Write with one backend instance, drop it, then re-open over the same
    // directory and verify the entry is still readable.
    let tmp_sq = TempDir::new().unwrap();
    let tmp_md = TempDir::new().unwrap();
    {
        let sq = sqlite_backend(tmp_sq.path());
        sq.store("persist_test", "I should survive", MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    {
        let md = markdown_backend(tmp_md.path());
        md.store("persist_test", "I should survive", MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    // Fresh instances over the same directories
    let sq2 = sqlite_backend(tmp_sq.path());
    let md2 = markdown_backend(tmp_md.path());
    let sq_entry = sq2.get("persist_test").await.unwrap();
    let md_entry = md2.get("persist_test").await.unwrap();
    let verdict = |found: bool| if found { "✅ Survived" } else { "❌ Lost" };
    println!("\n============================================================");
    println!("PERSISTENCE (store → drop → re-open → get):");
    println!(" SQLite: {}", verdict(sq_entry.is_some()));
    println!(" Markdown: {}", verdict(md_entry.is_some()));
    // SQLite should always persist by key
    assert!(sq_entry.is_some());
    assert_eq!(sq_entry.unwrap().content, "I should survive");
    // Markdown persists content to files (get uses content search)
    assert!(md_entry.is_some());
}
// ── Test 5: Upsert / update behavior ──────────────────────────
#[tokio::test]
async fn compare_upsert() {
    // Store the same key twice: SQLite replaces in place, Markdown appends.
    let tmp_sq = TempDir::new().unwrap();
    let tmp_md = TempDir::new().unwrap();
    let sq = sqlite_backend(tmp_sq.path());
    let md = markdown_backend(tmp_md.path());
    for content in ["likes Rust", "loves Rust"] {
        sq.store("pref", content, MemoryCategory::Core, None)
            .await
            .unwrap();
        md.store("pref", content, MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    let sq_count = sq.count().await.unwrap();
    let md_count = md.count().await.unwrap();
    let sq_entry = sq.get("pref").await.unwrap();
    let md_results = md.recall("loves Rust", 5, None, None, None).await.unwrap();
    println!("\n============================================================");
    println!("UPSERT (store same key twice):");
    println!(
        " SQLite: count={sq_count}, latest=\"{}\"",
        sq_entry.as_ref().map_or("none", |e| &e.content)
    );
    println!(" Markdown: count={md_count} (append-only, both entries kept)");
    println!(" Can still find latest: {}", !md_results.is_empty());
    // SQLite: upsert replaces, count stays at 1
    assert_eq!(sq_count, 1);
    assert_eq!(sq_entry.unwrap().content, "loves Rust");
    // Markdown: append-only, count increases
    assert!(md_count >= 2, "Markdown should keep both entries");
}
// ── Test 6: Forget / delete capability ─────────────────────────
/// Stores a sensitive entry in both backends, then calls forget():
/// SQLite deletes the row; Markdown is append-only and keeps it.
#[tokio::test]
async fn compare_forget() {
    let tmp_sq = TempDir::new().unwrap();
    let tmp_md = TempDir::new().unwrap();
    let sq = sqlite_backend(tmp_sq.path());
    let md = markdown_backend(tmp_md.path());
    sq.store("secret", "API key: sk-1234", MemoryCategory::Core, None)
        .await
        .unwrap();
    md.store("secret", "API key: sk-1234", MemoryCategory::Core, None)
        .await
        .unwrap();
    // forget() returns whether an entry was actually removed.
    let sq_forgot = sq.forget("secret").await.unwrap();
    let md_forgot = md.forget("secret").await.unwrap();
    println!("\n============================================================");
    println!("FORGET (delete sensitive data):");
    println!(
        " SQLite: {} (count={})",
        if sq_forgot { "✅ Deleted" } else { "❌ Kept" },
        sq.count().await.unwrap()
    );
    println!(
        " Markdown: {} (append-only by design)",
        if md_forgot {
            "✅ Deleted"
        } else {
            "⚠️ Cannot delete (audit trail)"
        },
    );
    // SQLite can delete
    assert!(sq_forgot);
    assert_eq!(sq.count().await.unwrap(), 0);
    // Markdown cannot delete (by design)
    assert!(!md_forgot);
}
// ── Test 7: Category filtering ─────────────────────────────────
#[tokio::test]
async fn compare_category_filter() {
    // SQLite filters by category via SQL; Markdown derives categories from
    // file layout. Seed a mix and compare list() results.
    let tmp_sq = TempDir::new().unwrap();
    let tmp_md = TempDir::new().unwrap();
    let sq = sqlite_backend(tmp_sq.path());
    let md = markdown_backend(tmp_md.path());
    // Entries seeded into BOTH backends.
    let shared = [
        ("a", "core fact 1", MemoryCategory::Core),
        ("b", "core fact 2", MemoryCategory::Core),
        ("c", "daily note", MemoryCategory::Daily),
    ];
    for (key, content, cat) in &shared {
        sq.store(key, content, cat.clone(), None).await.unwrap();
        md.store(key, content, cat.clone(), None).await.unwrap();
    }
    // The Conversation entry exists only in SQLite.
    sq.store("d", "convo msg", MemoryCategory::Conversation, None)
        .await
        .unwrap();
    let sq_core = sq.list(Some(&MemoryCategory::Core), None).await.unwrap();
    let sq_daily = sq.list(Some(&MemoryCategory::Daily), None).await.unwrap();
    let sq_conv = sq
        .list(Some(&MemoryCategory::Conversation), None)
        .await
        .unwrap();
    let sq_all = sq.list(None, None).await.unwrap();
    let md_core = md.list(Some(&MemoryCategory::Core), None).await.unwrap();
    let md_daily = md.list(Some(&MemoryCategory::Daily), None).await.unwrap();
    let md_all = md.list(None, None).await.unwrap();
    println!("\n============================================================");
    println!("CATEGORY FILTERING:");
    println!(
        " SQLite: core={}, daily={}, conv={}, all={}",
        sq_core.len(),
        sq_daily.len(),
        sq_conv.len(),
        sq_all.len()
    );
    println!(
        " Markdown: core={}, daily={}, all={}",
        md_core.len(),
        md_daily.len(),
        md_all.len()
    );
    // SQLite: precise category filtering via SQL WHERE
    assert_eq!(sq_core.len(), 2);
    assert_eq!(sq_daily.len(), 1);
    assert_eq!(sq_conv.len(), 1);
    assert_eq!(sq_all.len(), 4);
    // Markdown: categories determined by file location
    assert!(!md_core.is_empty());
    assert!(!md_all.is_empty());
}

View File

@@ -0,0 +1,375 @@
//! TG5: Memory Restart Resilience Tests
//!
//! Prevents: Pattern 5 — Memory & state persistence bugs (10% of user bugs).
//! Issues: #430, #693, #802
//!
//! Tests SqliteMemory deduplication on restart, session scoping, concurrent
//! message ordering, and recall behavior after re-initialization.
use std::sync::Arc;
use zeroclaw::memory::sqlite::SqliteMemory;
use zeroclaw::memory::traits::{Memory, MemoryCategory};
// ─────────────────────────────────────────────────────────────────────────────
// Deduplication: same key overwrites instead of duplicating (#430)
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn sqlite_memory_store_same_key_deduplicates() {
    // Re-storing a key must upsert, not insert a second row (#430).
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    for content in ["hello world", "hello updated"] {
        mem.store("greeting", content, MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    // Exactly one row survives the second store...
    assert_eq!(
        mem.count().await.unwrap(),
        1,
        "storing same key twice should not create duplicates"
    );
    // ...and it holds the most recent content.
    let entry = mem
        .get("greeting")
        .await
        .unwrap()
        .expect("entry should exist");
    assert_eq!(entry.content, "hello updated");
}
#[tokio::test]
async fn sqlite_memory_store_different_keys_creates_separate_entries() {
    // Distinct keys must never collapse into a single row.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    for (key, content) in [("key_a", "content a"), ("key_b", "content b")] {
        mem.store(key, content, MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    let count = mem.count().await.unwrap();
    assert_eq!(count, 2, "different keys should create separate entries");
}
// ─────────────────────────────────────────────────────────────────────────────
// Restart resilience: data persists across memory re-initialization
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn sqlite_memory_persists_across_reinitialization() {
    // Data written by one SqliteMemory instance must be readable by a fresh
    // instance opened over the same directory.
    let tmp = tempfile::TempDir::new().unwrap();
    // First "session": write, then drop the handle.
    {
        let mem = SqliteMemory::new(tmp.path()).unwrap();
        mem.store("persistent_fact", "Rust is great", MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    // Second "session": reopen from the same path and read back.
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    let entry = mem
        .get("persistent_fact")
        .await
        .unwrap()
        .expect("entry should survive reinitialization");
    assert_eq!(entry.content, "Rust is great");
}
#[tokio::test]
async fn sqlite_memory_restart_does_not_duplicate_on_rewrite() {
    // Simulates a channel replaying history after restart: re-storing the
    // same keys must upsert rather than append duplicates.
    let tmp = tempfile::TempDir::new().unwrap();
    let facts = [("fact_1", "original content"), ("fact_2", "another fact")];
    // First session writes the facts.
    {
        let mem = SqliteMemory::new(tmp.path()).unwrap();
        for (key, content) in facts {
            mem.store(key, content, MemoryCategory::Core, None)
                .await
                .unwrap();
        }
    }
    // Second session writes the identical facts again.
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    for (key, content) in facts {
        mem.store(key, content, MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    assert_eq!(
        mem.count().await.unwrap(),
        2,
        "re-storing same keys after restart should not create duplicates"
    );
}
// ─────────────────────────────────────────────────────────────────────────────
// Session scoping: messages scoped to sessions don't leak
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn sqlite_memory_session_scoped_store_and_recall() {
    // Entries tagged with a session id must not leak into another
    // session's scoped listing.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    let scoped = [
        ("session_a_fact", "fact from session A", "session_a"),
        ("session_b_fact", "fact from session B", "session_b"),
    ];
    for (key, content, session) in scoped {
        mem.store(key, content, MemoryCategory::Conversation, Some(session))
            .await
            .unwrap();
    }
    // Listing scoped to session_a must see only session_a's entry.
    let session_a_entries = mem
        .list(Some(&MemoryCategory::Conversation), Some("session_a"))
        .await
        .unwrap();
    assert_eq!(
        session_a_entries.len(),
        1,
        "session_a should have exactly 1 entry"
    );
    assert_eq!(session_a_entries[0].content, "fact from session A");
}
#[tokio::test]
async fn sqlite_memory_global_recall_includes_all_sessions() {
    // An unscoped count spans every session's entries.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    for (key, content, session) in [
        ("global_a", "alpha content", "s1"),
        ("global_b", "beta content", "s2"),
    ] {
        mem.store(key, content, MemoryCategory::Core, Some(session))
            .await
            .unwrap();
    }
    assert_eq!(
        mem.count().await.unwrap(),
        2,
        "global count should include entries from all sessions"
    );
}
// ─────────────────────────────────────────────────────────────────────────────
// Recall and search behavior
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn sqlite_memory_recall_returns_relevant_results() {
    // A keyword query must surface the entry containing those keywords.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    for (key, content) in [
        ("lang_pref", "User prefers Rust programming"),
        ("food_pref", "User likes sushi for lunch"),
    ] {
        mem.store(key, content, MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    let results = mem
        .recall("Rust programming", 10, None, None, None)
        .await
        .unwrap();
    assert!(!results.is_empty(), "recall should find matching entries");
    let has_rust = results.iter().any(|e| e.content.contains("Rust"));
    assert!(
        has_rust,
        "recall for 'Rust' should include the Rust-related entry"
    );
}
#[tokio::test]
async fn sqlite_memory_recall_respects_limit() {
    // Ten matching rows, limit of three: at most three may come back.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    for i in 0..10 {
        let key = format!("entry_{i}");
        let content = format!("test content number {i}");
        mem.store(&key, &content, MemoryCategory::Core, None)
            .await
            .unwrap();
    }
    let results = mem
        .recall("test content", 3, None, None, None)
        .await
        .unwrap();
    assert!(
        results.len() <= 3,
        "recall should respect limit of 3, got {}",
        results.len()
    );
}
#[tokio::test]
async fn sqlite_memory_recall_empty_query_returns_recent_entries() {
    // An empty query takes the time-only path: recent entries by
    // updated_at, no text matching.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    mem.store("fact", "some content", MemoryCategory::Core, None)
        .await
        .unwrap();
    let recent = mem.recall("", 10, None, None, None).await.unwrap();
    assert_eq!(recent.len(), 1, "empty query should return recent entries");
    assert_eq!(recent[0].key, "fact");
}
// ─────────────────────────────────────────────────────────────────────────────
// Forget and health check
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn sqlite_memory_forget_removes_entry() {
    // forget() on an existing key deletes the row and reports true.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    mem.store("to_forget", "temporary info", MemoryCategory::Core, None)
        .await
        .unwrap();
    assert_eq!(mem.count().await.unwrap(), 1);
    let was_removed = mem.forget("to_forget").await.unwrap();
    assert!(was_removed, "forget should return true for existing key");
    assert_eq!(mem.count().await.unwrap(), 0);
}
#[tokio::test]
async fn sqlite_memory_forget_nonexistent_returns_false() {
    // forget() on an unknown key is a no-op that reports false.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    let was_removed = mem.forget("nonexistent_key").await.unwrap();
    assert!(!was_removed, "forget should return false for nonexistent key");
}
#[tokio::test]
async fn sqlite_memory_health_check_returns_true() {
    // A freshly opened store reports itself healthy.
    let tmp = tempfile::TempDir::new().unwrap();
    let healthy = SqliteMemory::new(tmp.path()).unwrap().health_check().await;
    assert!(healthy, "health_check should return true");
}
// ─────────────────────────────────────────────────────────────────────────────
// Concurrent access
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn sqlite_memory_concurrent_stores_no_data_loss() {
let tmp = tempfile::TempDir::new().unwrap();
let mem = Arc::new(SqliteMemory::new(tmp.path()).unwrap());
let mut handles = Vec::new();
for i in 0..5 {
let mem_clone = mem.clone();
handles.push(tokio::spawn(async move {
mem_clone
.store(
&format!("concurrent_{i}"),
&format!("content from task {i}"),
MemoryCategory::Core,
None,
)
.await
.unwrap();
}));
}
for handle in handles {
handle.await.unwrap();
}
let count = mem.count().await.unwrap();
assert_eq!(
count, 5,
"all concurrent stores should succeed, got {count}"
);
}
// ─────────────────────────────────────────────────────────────────────────────
// Memory categories
// ─────────────────────────────────────────────────────────────────────────────
#[tokio::test]
async fn sqlite_memory_list_by_category() {
    // list() with a category filter returns only that category's entries.
    let tmp = tempfile::TempDir::new().unwrap();
    let mem = SqliteMemory::new(tmp.path()).unwrap();
    for (key, content, cat) in [
        ("core_fact", "core info", MemoryCategory::Core),
        ("daily_note", "daily note", MemoryCategory::Daily),
        ("conv_msg", "conversation msg", MemoryCategory::Conversation),
    ] {
        mem.store(key, content, cat, None).await.unwrap();
    }
    let core_entries = mem.list(Some(&MemoryCategory::Core), None).await.unwrap();
    assert_eq!(core_entries.len(), 1, "should have 1 Core entry");
    assert_eq!(core_entries[0].key, "core_fact");
    let daily_entries = mem.list(Some(&MemoryCategory::Daily), None).await.unwrap();
    assert_eq!(daily_entries.len(), 1, "should have 1 Daily entry");
}

View File

@@ -0,0 +1,11 @@
// Integration-test modules, one per subsystem under test.
mod agent;
mod agent_robustness;
mod backup_cron_scheduling;
mod channel_matrix;
mod channel_routing;
mod hooks;
mod memory_comparison;
mod memory_restart;
mod report_template_tool_test;
mod telegram_attachment_fallback;
mod telegram_finalize_draft;

View File

@@ -0,0 +1,238 @@
//! Integration tests for ReportTemplateTool.
use serde_json::json;
use zeroclaw::tools::{ReportTemplateTool, Tool};
#[tokio::test]
async fn render_weekly_status_en() {
    // English weekly-status template interpolates the supplied variables.
    let tool = ReportTemplateTool::new();
    let params = json!({
        "template": "weekly_status",
        "language": "en",
        "variables": {
            "project_name": "Acme Platform",
            "period": "2026-W10",
            "completed": "- Task A\n- Task B",
            "in_progress": "- Task C",
            "blocked": "None",
            "next_steps": "- Task D"
        }
    });
    let rendered = tool.execute(params).await.unwrap();
    assert!(rendered.success);
    for expected in [
        "Project: Acme Platform",
        "Period: 2026-W10",
        "- Task A",
        "## Completed",
    ] {
        assert!(rendered.output.contains(expected));
    }
}
#[tokio::test]
async fn render_sprint_review_de() {
    // German sprint-review template uses localized section headings.
    let tool = ReportTemplateTool::new();
    let params = json!({
        "template": "sprint_review",
        "language": "de",
        "variables": {
            "sprint_dates": "2026-03-01 bis 2026-03-14",
            "completed": "Feature X implementiert",
            "in_progress": "Feature Y",
            "blocked": "Keine",
            "velocity": "12 Story Points"
        }
    });
    let rendered = tool.execute(params).await.unwrap();
    assert!(rendered.success);
    for expected in ["## Sprint", "## Erledigt", "Feature X implementiert"] {
        assert!(rendered.output.contains(expected));
    }
}
#[tokio::test]
async fn render_risk_register_fr() {
    // French risk-register template uses localized section headings.
    let tool = ReportTemplateTool::new();
    let params = json!({
        "template": "risk_register",
        "language": "fr",
        "variables": {
            "project_name": "Projet Alpha",
            "risks": "Risque de retard",
            "mitigations": "Augmenter les ressources"
        }
    });
    let rendered = tool.execute(params).await.unwrap();
    assert!(rendered.success);
    for expected in ["## Projet", "## Risques", "Risque de retard"] {
        assert!(rendered.output.contains(expected));
    }
}
/// The Italian milestone-report template renders localized headings
/// and the interpolated milestone data.
#[tokio::test]
async fn render_milestone_report_it() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "template": "milestone_report",
        "language": "it",
        "variables": {
            "project_name": "Progetto Beta",
            "milestones": "M1: Completato\nM2: In corso",
            "status": "In linea con i tempi"
        }
    });

    let rendered = tool.execute(args).await.unwrap();

    assert!(rendered.success);
    for needle in ["## Progetto", "## Milestone", "M1: Completato"] {
        assert!(rendered.output.contains(needle));
    }
}
/// When no `language` parameter is given, the tool renders the
/// English variant of the template.
#[tokio::test]
async fn default_language_is_en() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "template": "weekly_status",
        "variables": {
            "project_name": "Test",
            "period": "W1",
            "completed": "Done",
            "in_progress": "WIP",
            "blocked": "None",
            "next_steps": "Next"
        }
    });

    let rendered = tool.execute(args).await.unwrap();

    assert!(rendered.success);
    for needle in ["## Summary", "## Completed"] {
        assert!(rendered.output.contains(needle));
    }
}
/// Omitting the mandatory `template` parameter is an error whose
/// message names the missing field.
#[tokio::test]
async fn missing_template_param_fails() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "variables": {
            "project_name": "Test"
        }
    });

    let outcome = tool.execute(args).await;

    assert!(outcome.is_err());
    let message = outcome.unwrap_err().to_string();
    assert!(message.contains("missing template"));
}
/// Omitting the `variables` object is an error whose message states
/// the expected type.
#[tokio::test]
async fn missing_variables_param_fails() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "template": "weekly_status"
    });

    let outcome = tool.execute(args).await;

    assert!(outcome.is_err());
    let message = outcome.unwrap_err().to_string();
    assert!(message.contains("variables must be object"));
}
/// Requesting a template name the tool does not know is rejected.
#[tokio::test]
async fn invalid_template_name_fails() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "template": "unknown_template",
        "variables": {
            "project_name": "Test"
        }
    });

    assert!(tool.execute(args).await.is_err());
}
/// Despite the test name, an unsupported language code does NOT fail:
/// the current implementation falls back to the English templates, and
/// this test pins that fallback behavior.
#[tokio::test]
async fn invalid_language_code_fails() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "template": "weekly_status",
        "language": "es",
        "variables": {
            "project_name": "Test"
        }
    });

    // Note: The current implementation doesn't fail on invalid language,
    // it falls back to English. We test this behavior.
    let rendered = tool.execute(args).await.unwrap();

    assert!(rendered.success);
    // Should render in English (default fallback)
    assert!(rendered.output.contains("## Summary"));
}
/// Rendering with an empty variables map succeeds and leaves the
/// `{{…}}` placeholders untouched in the output.
#[tokio::test]
async fn empty_variables_map_renders() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "template": "weekly_status",
        "variables": {}
    });

    let rendered = tool.execute(args).await.unwrap();

    assert!(rendered.success);
    // Placeholders should remain unchanged
    for placeholder in ["{{project_name}}", "{{period}}"] {
        assert!(rendered.output.contains(placeholder));
    }
}
/// Variable values are substituted literally: placeholder syntax that
/// appears inside a value must never be expanded a second time.
#[tokio::test]
async fn injection_protection_enforced() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "template": "weekly_status",
        "variables": {
            "project_name": "Test {{injected}}",
            "period": "W1",
            "completed": "{{nested_var}}",
            "in_progress": "WIP",
            "blocked": "None",
            "next_steps": "Next",
            "injected": "SHOULD_NOT_APPEAR",
            "nested_var": "SHOULD_NOT_EXPAND"
        }
    });

    let rendered = tool.execute(args).await.unwrap();
    assert!(rendered.success);

    // Values containing placeholder syntax are inserted verbatim …
    assert!(rendered.output.contains("Test {{injected}}"));
    assert!(rendered.output.contains("{{nested_var}}"));
    // … and are never expanded recursively against the variables map.
    assert!(!rendered.output.contains("SHOULD_NOT_APPEAR"));
    assert!(!rendered.output.contains("SHOULD_NOT_EXPAND"));
}
/// Non-string variable values (numbers, booleans, null, arrays) do not
/// break rendering; the tool still produces a successful report.
#[tokio::test]
async fn non_string_variable_values_coerced() {
    let tool = ReportTemplateTool::new();
    let args = json!({
        "template": "weekly_status",
        "variables": {
            "project_name": "Test",
            "period": 123,
            "completed": true,
            "in_progress": false,
            "blocked": null,
            "next_steps": ["array", "not", "supported"]
        }
    });

    let rendered = tool.execute(args).await.unwrap();

    assert!(rendered.success);
    // Numbers and booleans should be coerced to strings;
    // null and arrays should result in empty strings
    assert!(rendered.output.contains("Project: Test"));
}

View File

@@ -0,0 +1,298 @@
//! Regression tests for Telegram attachment fallback behavior.
//!
//! When sending media by URL fails (e.g. Telegram can't fetch the URL or the
//! content type is wrong), the channel should fall back to sending the URL as
//! a text link instead of losing the entire reply.
//!
//! Bug: Previously, `send_attachment()` would propagate the error from
//! `send_document_by_url()` immediately via `?`, causing the entire reply
//! (including already-sent text) to fail with no fallback.
use wiremock::matchers::{method, path_regex};
use wiremock::{Mock, MockServer, ResponseTemplate};
use zeroclaw::channels::telegram::TelegramChannel;
use zeroclaw::channels::traits::{Channel, SendMessage};
/// Helper: build a `TelegramChannel` whose Telegram API calls are
/// redirected to the given mock-server base URL.
fn test_channel(mock_url: &str) -> TelegramChannel {
    let channel = TelegramChannel::new("TEST_TOKEN".into(), vec!["*".into()], false);
    channel.with_api_base(mock_url.to_string())
}
/// Helper: mount a mock that accepts any number of sendMessage requests
/// (the text/fallback path) and answers with a minimal success payload.
async fn mock_send_message_ok(server: &MockServer) {
    let ok_body = serde_json::json!({
        "ok": true,
        "result": {
            "message_id": 1,
            "chat": {"id": 123},
            "text": "ok"
        }
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendMessage$"))
        .respond_with(ResponseTemplate::new(200).set_body_json(ok_body))
        .expect(1..)
        .mount(server)
        .await;
}
/// When sendDocument by URL fails with "wrong type of the web page content",
/// the channel should fall back to sending the URL as a text link.
#[tokio::test]
async fn document_url_failure_falls_back_to_text_link() {
    let server = MockServer::start().await;

    // sendDocument returns 400 (simulates Telegram rejecting the URL).
    let rejection = serde_json::json!({
        "ok": false,
        "error_code": 400,
        "description": "Bad Request: wrong type of the web page content"
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendDocument$"))
        .respond_with(ResponseTemplate::new(400).set_body_json(rejection))
        .expect(1)
        .mount(&server)
        .await;
    // sendMessage should succeed (this is the fallback).
    mock_send_message_ok(&server).await;

    let channel = test_channel(&server.uri());
    let outgoing = SendMessage::new(
        "Here is the report [DOCUMENT:https://example.com/page.html]",
        "123",
    );

    // This should NOT error — it should fall back to text.
    let result = channel.send(&outgoing).await;
    assert!(
        result.is_ok(),
        "send should succeed via text fallback, got: {result:?}"
    );
}
/// When sendPhoto by URL fails, the channel should fall back to sending
/// the image URL as a text link.
#[tokio::test]
async fn photo_url_failure_falls_back_to_text_link() {
    let server = MockServer::start().await;

    // sendPhoto returns 400 (Telegram could not fetch the URL).
    let rejection = serde_json::json!({
        "ok": false,
        "error_code": 400,
        "description": "Bad Request: failed to get HTTP URL content"
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendPhoto$"))
        .respond_with(ResponseTemplate::new(400).set_body_json(rejection))
        .expect(1)
        .mount(&server)
        .await;
    mock_send_message_ok(&server).await;

    let channel = test_channel(&server.uri());
    let outgoing = SendMessage::new(
        "Check this [IMAGE:https://internal-server.local/screenshot.png]",
        "456",
    );

    let result = channel.send(&outgoing).await;
    assert!(
        result.is_ok(),
        "send should succeed via text fallback, got: {result:?}"
    );
}
/// The text portion of a message with attachments is still delivered
/// even when the attachment itself fails.
#[tokio::test]
async fn text_portion_delivered_before_attachment_failure() {
    let server = MockServer::start().await;

    // sendDocument fails.
    let rejection = serde_json::json!({
        "ok": false,
        "error_code": 400,
        "description": "Bad Request: wrong type of the web page content"
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendDocument$"))
        .respond_with(ResponseTemplate::new(400).set_body_json(rejection))
        .expect(1)
        .mount(&server)
        .await;

    // sendMessage should be called at least twice:
    // 1. for the text portion ("Here is the file")
    // 2. for the fallback text link
    let ok_body = serde_json::json!({
        "ok": true,
        "result": {
            "message_id": 1,
            "chat": {"id": 789},
            "text": "ok"
        }
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendMessage$"))
        .respond_with(ResponseTemplate::new(200).set_body_json(ok_body))
        .expect(2)
        .mount(&server)
        .await;

    let channel = test_channel(&server.uri());
    let outgoing = SendMessage::new(
        "Here is the file [DOCUMENT:https://example.com/report.html]",
        "789",
    );

    let result = channel.send(&outgoing).await;
    assert!(result.is_ok(), "send should succeed, got: {result:?}");
}
/// When multiple attachments are present and one fails, the others are
/// still attempted — each failed attachment gets its own text fallback.
#[tokio::test]
async fn multiple_attachments_independent_fallback() {
    let server = MockServer::start().await;

    // sendDocument fails (for the .html attachment).
    let doc_rejection = serde_json::json!({
        "ok": false,
        "error_code": 400,
        "description": "Bad Request: wrong type of the web page content"
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendDocument$"))
        .respond_with(ResponseTemplate::new(400).set_body_json(doc_rejection))
        .expect(1)
        .mount(&server)
        .await;

    // sendPhoto also fails.
    let photo_rejection = serde_json::json!({
        "ok": false,
        "error_code": 400,
        "description": "Bad Request: failed to get HTTP URL content"
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendPhoto$"))
        .respond_with(ResponseTemplate::new(400).set_body_json(photo_rejection))
        .expect(1)
        .mount(&server)
        .await;

    // sendMessage succeeds (text + 2 fallback links).
    let ok_body = serde_json::json!({
        "ok": true,
        "result": {
            "message_id": 1,
            "chat": {"id": 100},
            "text": "ok"
        }
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendMessage$"))
        .respond_with(ResponseTemplate::new(200).set_body_json(ok_body))
        .expect(3) // text + doc fallback + image fallback
        .mount(&server)
        .await;

    let channel = test_channel(&server.uri());
    let outgoing = SendMessage::new(
        "Files: [DOCUMENT:https://example.com/page.html] and [IMAGE:https://internal.local/pic.png]",
        "100",
    );

    let result = channel.send(&outgoing).await;
    assert!(
        result.is_ok(),
        "send should succeed with fallbacks for all attachments, got: {result:?}"
    );
}
/// When the attachment upload succeeds, no fallback text link is sent —
/// sendMessage fires only once, for the text portion.
#[tokio::test]
async fn successful_attachment_no_fallback() {
    let server = MockServer::start().await;

    // sendDocument succeeds.
    let doc_ok = serde_json::json!({
        "ok": true,
        "result": {
            "message_id": 2,
            "chat": {"id": 200},
            "document": {"file_id": "abc"}
        }
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendDocument$"))
        .respond_with(ResponseTemplate::new(200).set_body_json(doc_ok))
        .expect(1)
        .mount(&server)
        .await;

    // sendMessage should only be called once (for the text portion),
    // NOT a second time for a fallback.
    let text_ok = serde_json::json!({
        "ok": true,
        "result": {
            "message_id": 1,
            "chat": {"id": 200},
            "text": "ok"
        }
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendMessage$"))
        .respond_with(ResponseTemplate::new(200).set_body_json(text_ok))
        .expect(1) // only the text portion, no fallback
        .mount(&server)
        .await;

    let channel = test_channel(&server.uri());
    let outgoing = SendMessage::new(
        "Report attached [DOCUMENT:https://example.com/report.pdf]",
        "200",
    );

    let result = channel.send(&outgoing).await;
    assert!(
        result.is_ok(),
        "send should succeed normally, got: {result:?}"
    );
}
/// A document-only message (no surrounding text) whose URL upload fails
/// must still produce a fallback text link.
#[tokio::test]
async fn document_only_message_falls_back_to_text() {
    let server = MockServer::start().await;

    // sendDocument fails.
    let rejection = serde_json::json!({
        "ok": false,
        "error_code": 400,
        "description": "Bad Request: failed to get HTTP URL content"
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendDocument$"))
        .respond_with(ResponseTemplate::new(400).set_body_json(rejection))
        .expect(1)
        .mount(&server)
        .await;

    // Fallback text link.
    let ok_body = serde_json::json!({
        "ok": true,
        "result": {
            "message_id": 1,
            "chat": {"id": 300},
            "text": "ok"
        }
    });
    Mock::given(method("POST"))
        .and(path_regex(r"/botTEST_TOKEN/sendMessage$"))
        .respond_with(ResponseTemplate::new(200).set_body_json(ok_body))
        .expect(1)
        .mount(&server)
        .await;

    let channel = test_channel(&server.uri());
    // Message is ONLY the attachment marker — no surrounding text.
    let outgoing = SendMessage::new("[DOCUMENT:https://example.com/file.html]", "300");

    let result = channel.send(&outgoing).await;
    assert!(
        result.is_ok(),
        "document-only message should fall back to text, got: {result:?}"
    );
}

View File

@@ -0,0 +1,208 @@
use serde_json::json;
use wiremock::matchers::{body_partial_json, method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};
use zeroclaw::channels::telegram::TelegramChannel;
use zeroclaw::channels::traits::Channel;
/// Helper: build a `TelegramChannel` whose Telegram API calls are
/// redirected to the given mock-server base URL.
fn test_channel(mock_url: &str) -> TelegramChannel {
    let channel = TelegramChannel::new("TEST_TOKEN".into(), vec!["*".into()], false);
    channel.with_api_base(mock_url.to_string())
}
/// Minimal successful Telegram API payload carrying `message_id`.
fn telegram_ok_response(message_id: i64) -> serde_json::Value {
    let result = json!({
        "message_id": message_id,
        "chat": {"id": 123},
        "text": "ok"
    });
    json!({
        "ok": true,
        "result": result
    })
}
/// Minimal failed Telegram API payload (HTTP 400 semantics) carrying
/// the given error `description`.
fn telegram_error_response(description: &str) -> serde_json::Value {
    // Build the object field by field, keeping the same key order as the
    // `json!` literal form would produce.
    let mut body = serde_json::Map::new();
    body.insert("ok".into(), json!(false));
    body.insert("error_code".into(), json!(400));
    body.insert("description".into(), json!(description));
    serde_json::Value::Object(body)
}
/// "message is not modified" from editMessageText means the draft already
/// holds the final text — finalize_draft must treat that as success and
/// issue no further API calls.
#[tokio::test]
async fn finalize_draft_treats_not_modified_as_success() {
    let server = MockServer::start().await;

    let not_modified = telegram_error_response("Bad Request: message is not modified");
    Mock::given(method("POST"))
        .and(path("/botTEST_TOKEN/editMessageText"))
        .respond_with(ResponseTemplate::new(400).set_body_json(not_modified))
        .mount(&server)
        .await;

    let channel = test_channel(&server.uri());
    let result = channel.finalize_draft("123", "42", "final text").await;
    assert!(
        result.is_ok(),
        "not modified should be treated as success, got: {result:?}"
    );

    let captured = server
        .received_requests()
        .await
        .expect("requests should be captured");
    assert_eq!(captured.len(), 1, "should stop after first edit response");
    assert_eq!(captured[0].url.path(), "/botTEST_TOKEN/editMessageText");
}
/// If the HTML edit is rejected for entity-parse reasons and the plain-text
/// retry answers "message is not modified", finalize_draft still reports
/// success and makes no further calls.
#[tokio::test]
async fn finalize_draft_plain_retry_treats_not_modified_as_success() {
    let server = MockServer::start().await;

    // First attempt: the HTML parse_mode edit is rejected.
    Mock::given(method("POST"))
        .and(path("/botTEST_TOKEN/editMessageText"))
        .and(body_partial_json(json!({
            "chat_id": "123",
            "message_id": 42,
            "parse_mode": "HTML",
        })))
        .respond_with(
            ResponseTemplate::new(400)
                .set_body_json(telegram_error_response("Bad Request: can't parse entities")),
        )
        .expect(1)
        .mount(&server)
        .await;

    // Plain-text retry: the message content is already up to date.
    Mock::given(method("POST"))
        .and(path("/botTEST_TOKEN/editMessageText"))
        .and(body_partial_json(json!({
            "chat_id": "123",
            "message_id": 42,
            "text": "Use **bold**",
        })))
        .respond_with(
            ResponseTemplate::new(400).set_body_json(telegram_error_response(
                "Bad Request: message is not modified",
            )),
        )
        .expect(1)
        .mount(&server)
        .await;

    let channel = test_channel(&server.uri());
    let result = channel.finalize_draft("123", "42", "Use **bold**").await;
    assert!(
        result.is_ok(),
        "plain retry should accept not modified, got: {result:?}"
    );

    let captured = server
        .received_requests()
        .await
        .expect("requests should be captured");
    assert_eq!(captured.len(), 2, "should only attempt the two edit calls");
}
/// When the draft can neither be edited nor deleted, finalize_draft must
/// NOT fall back to sendMessage, and must still return Ok.
#[tokio::test]
async fn finalize_draft_skips_send_message_when_delete_fails() {
    let server = MockServer::start().await;

    // Both edit attempts (HTML + plain retry) are rejected.
    Mock::given(method("POST"))
        .and(path("/botTEST_TOKEN/editMessageText"))
        .respond_with(
            ResponseTemplate::new(400).set_body_json(telegram_error_response(
                "Bad Request: message cannot be edited",
            )),
        )
        .expect(2)
        .mount(&server)
        .await;

    // Deleting the draft fails as well.
    Mock::given(method("POST"))
        .and(path("/botTEST_TOKEN/deleteMessage"))
        .respond_with(
            ResponseTemplate::new(400).set_body_json(telegram_error_response(
                "Bad Request: message to delete not found",
            )),
        )
        .expect(1)
        .mount(&server)
        .await;

    let channel = test_channel(&server.uri());
    let result = channel.finalize_draft("123", "42", "final text").await;
    assert!(
        result.is_ok(),
        "delete failure should skip sendMessage instead of erroring, got: {result:?}"
    );

    let captured = server
        .received_requests()
        .await
        .expect("requests should be captured");
    let send_calls = captured
        .iter()
        .filter(|req| req.url.path() == "/botTEST_TOKEN/sendMessage")
        .count();
    assert_eq!(
        send_calls, 0,
        "sendMessage should be skipped when deleteMessage fails"
    );
}
/// When both edits fail but the draft is successfully deleted, it is safe
/// to send the final text as a fresh message — exactly once.
#[tokio::test]
async fn finalize_draft_sends_fresh_message_after_successful_delete() {
    let server = MockServer::start().await;

    // Both edit attempts (HTML + plain retry) are rejected.
    Mock::given(method("POST"))
        .and(path("/botTEST_TOKEN/editMessageText"))
        .respond_with(
            ResponseTemplate::new(400).set_body_json(telegram_error_response(
                "Bad Request: message cannot be edited",
            )),
        )
        .expect(2)
        .mount(&server)
        .await;

    // Deleting the draft succeeds.
    Mock::given(method("POST"))
        .and(path("/botTEST_TOKEN/deleteMessage"))
        .respond_with(ResponseTemplate::new(200).set_body_json(telegram_ok_response(42)))
        .expect(1)
        .mount(&server)
        .await;

    // The fresh replacement message succeeds.
    Mock::given(method("POST"))
        .and(path("/botTEST_TOKEN/sendMessage"))
        .respond_with(ResponseTemplate::new(200).set_body_json(telegram_ok_response(43)))
        .expect(1)
        .mount(&server)
        .await;

    let channel = test_channel(&server.uri());
    let result = channel.finalize_draft("123", "42", "final text").await;
    assert!(
        result.is_ok(),
        "successful delete should allow safe sendMessage fallback, got: {result:?}"
    );

    let captured = server
        .received_requests()
        .await
        .expect("requests should be captured");
    let send_calls = captured
        .iter()
        .filter(|req| req.url.path() == "/botTEST_TOKEN/sendMessage")
        .count();
    assert_eq!(
        send_calls, 1,
        "sendMessage should be attempted exactly once after delete succeeds"
    );
}