Compare commits

...

6 commits

| SHA1 | Message | Date |
| --- | --- | --- |
| 97ad905f6b | Big compile time correctness step (but still more to go) | 2025-07-20 23:24:50 -07:00 |
| 4f05192229 | Pre-typescript mithril commit | 2025-07-20 17:54:32 -07:00 |
| bf2678c992 | Code/name Service and CLI updated | 2025-07-20 16:20:34 -07:00 |
| 894bbc35bd | Code/name WIP | 2025-07-20 16:01:40 -07:00 |
| dcc71bd13b | Status code/enum refactor WIP | 2025-07-20 14:24:08 -07:00 |
| 956bb463ff | Upgrade CLI | 2025-07-20 11:52:27 -07:00 |
46 changed files with 6854 additions and 688 deletions

.gitignore
View file

@@ -12,3 +12,5 @@ node_modules
Cargo.toml
Cargo.lock
databuild/databuild.rs
generated_number
target

View file

@@ -22,6 +22,29 @@ python3 scripts/generate_cargo_toml.py
scripts/generate_proto_for_ide.sh
```
### Compiling
```bash
bazel build //...
```
**Bullet-proof compile-time correctness** is essential for production reliability. Backend protobuf changes must cause predictable frontend compilation failures, preventing runtime errors. Our three-pronged approach ensures this:
1. **Complete Type Chain**: Proto → Rust → OpenAPI → TypeScript → Components
- Each step uses generated types, maintaining accuracy across the entire pipeline
- Breaking changes at any layer cause compilation failures in dependent layers
2. **Consistent Data Transformation**: Service boundary layer transforms API responses to dashboard types
- Canonical frontend interfaces isolated from backend implementation details
- Transformations handle protobuf nullability and normalize data shapes
- Components never directly access generated API types
3. **Strict TypeScript Configuration**: Enforces explicit null handling and prevents implicit `any` types
- `strictNullChecks` catches undefined property access patterns
- `noImplicitAny` surfaces type safety gaps
- Runtime type errors become compile-time failures
This system guarantees that backend interface changes are caught during TypeScript compilation, not in production.
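As a minimal illustration (a sketch, not code from this change; it assumes only the `BuildSummary` fields visible in the generated models elsewhere in this diff), the service-boundary transform is the single place that reads generated types, so a backend rename fails to compile there first:
```typescript
import { BuildSummary } from '../client/typescript_generated/src/index';

// Canonical dashboard-facing type, isolated from the generated API shape.
interface BuildRow {
  id: string;
  status: string;
  requestedAt: number;
}

// Service-boundary transform: the only code that touches generated fields.
function toBuildRow(build: BuildSummary): BuildRow {
  return {
    id: build.build_request_id,
    status: build.status_name, // if the backend renames this field, the error lands here
    requestedAt: build.requested_at,
  };
}

// With strictNullChecks, nullable generated fields must be handled explicitly:
const updatedAt = (b: BuildSummary) => b.started_at ?? b.requested_at;
```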
### Testing
DataBuild core testing:

View file

@@ -11,6 +11,7 @@ rust_binary(
"@crates//:prost",
"@crates//:prost-build",
"@crates//:serde",
"@crates//:schemars",
"@crates//:tempfile",
],
)
@@ -34,17 +35,26 @@ genrule(
rust_library(
name = "databuild",
srcs = [
"event_log/mock.rs",
"event_log/mod.rs",
"event_log/postgres.rs",
"event_log/sqlite.rs",
"event_log/stdout.rs",
"event_log/writer.rs",
"format_consistency_test.rs",
"lib.rs",
"mermaid_utils.rs",
"orchestration/error.rs",
"orchestration/events.rs",
"orchestration/mod.rs",
"repositories/builds/mod.rs",
"repositories/jobs/mod.rs",
"repositories/mod.rs",
"repositories/partitions/mod.rs",
"repositories/tasks/mod.rs",
"service/handlers.rs",
"service/mod.rs",
"status_utils.rs",
":generate_databuild_rust",
],
edition = "2021",
@@ -70,6 +80,8 @@ rust_library(
)
# OpenAPI Spec Generator binary (no dashboard dependency)
# No need to run this manually - the spec source is generated automatically and consumed by
# the related targets (e.g. //databuild/client:extract_openapi_spec)
rust_binary(
name = "openapi_spec_generator",
srcs = ["service/openapi_spec_generator.rs"],

View file

@@ -17,6 +17,7 @@ rust_binary(
"//databuild:databuild",
"@crates//:clap",
"@crates//:log",
"@crates//:serde",
"@crates//:serde_json",
"@crates//:simple_logger",
"@crates//:thiserror",

View file

@@ -20,6 +20,12 @@ pub enum CliError {
#[error("Invalid arguments: {0}")]
InvalidArguments(String),
#[error("Database error: {0}")]
Database(String),
#[error("Output formatting error: {0}")]
Output(String),
}
pub type Result<T> = std::result::Result<T, CliError>;

View file

@@ -1,11 +1,18 @@
use databuild::*;
use databuild::event_log::create_build_event_log;
use databuild::orchestration::{BuildOrchestrator, BuildResult};
use clap::{Arg, Command as ClapCommand};
use databuild::repositories::{
partitions::PartitionsRepository,
jobs::JobsRepository,
tasks::TasksRepository,
builds::BuildsRepository
};
use clap::{Arg, Command as ClapCommand, ArgMatches};
use log::info;
use simple_logger::SimpleLogger;
use std::env;
use std::process::{Command, Stdio};
use std::sync::Arc;
use uuid::Uuid;
mod error;
@@ -121,41 +128,7 @@ async fn run_execution(
Ok(BuildResult::Success { jobs_completed: job_graph.nodes.len() })
}
#[tokio::main]
async fn main() -> Result<()> {
// Initialize logger
SimpleLogger::new()
.with_level(log::LevelFilter::Info)
.init()
.map_err(|e| CliError::Environment(format!("Failed to initialize logger: {}", e)))?;
info!("Starting DataBuild CLI wrapper");
// Parse command line arguments
let matches = ClapCommand::new("databuild")
.version("1.0")
.about("DataBuild unified CLI")
.arg(
Arg::new("partitions")
.help("Partition references to build")
.required(true)
.num_args(1..)
.value_name("PARTITIONS")
)
.arg(
Arg::new("event-log")
.long("event-log")
.help("Event log URI (default: stdout)")
.value_name("URI")
)
.arg(
Arg::new("build-request-id")
.long("build-request-id")
.help("Build request ID (default: generate UUID)")
.value_name("ID")
)
.get_matches();
async fn handle_build_command(matches: &ArgMatches) -> Result<()> {
let partitions: Vec<String> = matches.get_many::<String>("partitions")
.unwrap()
.cloned()
@@ -203,5 +176,838 @@ async fn main() -> Result<()> {
orchestrator.complete_build(result).await?;
info!("DataBuild CLI completed successfully");
Ok(())
}
fn format_timestamp(timestamp_nanos: i64) -> String {
use std::time::{SystemTime, UNIX_EPOCH, Duration};
let timestamp_secs = timestamp_nanos / 1_000_000_000;
let event_time = UNIX_EPOCH + Duration::from_secs(timestamp_secs as u64);
// Measure the age relative to now, not relative to the epoch
match SystemTime::now().duration_since(event_time) {
Ok(duration) => {
let secs = duration.as_secs();
let days = secs / 86400;
let hours = (secs % 86400) / 3600;
let minutes = (secs % 3600) / 60;
if days > 0 {
format!("{}d {}h ago", days, hours)
} else if hours > 0 {
format!("{}h {}m ago", hours, minutes)
} else {
format!("{}m ago", minutes)
}
}
// Fails only if the timestamp is in the future (e.g. clock skew)
Err(_) => "unknown".to_string(),
}
}
#[tokio::main]
async fn main() -> Result<()> {
// Initialize logger
SimpleLogger::new()
.with_level(log::LevelFilter::Info)
.init()
.map_err(|e| CliError::Environment(format!("Failed to initialize logger: {}", e)))?;
// Parse command line arguments
let matches = ClapCommand::new("databuild")
.version("1.0")
.about("DataBuild unified CLI")
.subcommand_required(false)
.arg_required_else_help(false)
.arg(
Arg::new("partitions")
.help("Partition references to build (legacy direct build mode)")
.num_args(1..)
.value_name("PARTITIONS")
)
.subcommand(
ClapCommand::new("build")
.about("Build partitions using the DataBuild execution engine")
.arg(
Arg::new("partitions")
.help("Partition references to build")
.required(true)
.num_args(1..)
.value_name("PARTITIONS")
)
.arg(
Arg::new("event-log")
.long("event-log")
.help("Event log URI (default: stdout)")
.value_name("URI")
)
.arg(
Arg::new("build-request-id")
.long("build-request-id")
.help("Build request ID (default: generate UUID)")
.value_name("ID")
)
)
.subcommand(
ClapCommand::new("partitions")
.about("Query and manage partitions")
.subcommand(
ClapCommand::new("list")
.about("List all partitions")
.arg(Arg::new("limit").long("limit").short('l').value_name("LIMIT").help("Maximum number of partitions to show"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("show")
.about("Show partition details")
.arg(Arg::new("partition_ref").required(true).help("Partition reference"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("invalidate")
.about("Invalidate a partition")
.arg(Arg::new("partition_ref").required(true).help("Partition reference"))
.arg(Arg::new("reason").long("reason").short('r').required(true).help("Reason for invalidation"))
.arg(Arg::new("build_request_id").long("build-request-id").short('b').required(true).help("Build request ID"))
)
)
.subcommand(
ClapCommand::new("jobs")
.about("Query job execution data")
.subcommand(
ClapCommand::new("list")
.about("List all jobs")
.arg(Arg::new("limit").long("limit").short('l').value_name("LIMIT").help("Maximum number of jobs to show"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("show")
.about("Show job details")
.arg(Arg::new("job_label").required(true).help("Job label"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
)
.subcommand(
ClapCommand::new("tasks")
.about("Query and manage tasks (job runs)")
.subcommand(
ClapCommand::new("list")
.about("List all tasks")
.arg(Arg::new("limit").long("limit").short('l').value_name("LIMIT").help("Maximum number of tasks to show"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("show")
.about("Show task details")
.arg(Arg::new("job_run_id").required(true).help("Job run ID"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("cancel")
.about("Cancel a task")
.arg(Arg::new("job_run_id").required(true).help("Job run ID"))
.arg(Arg::new("reason").long("reason").short('r').required(true).help("Reason for cancellation"))
.arg(Arg::new("build_request_id").long("build-request-id").short('b').required(true).help("Build request ID"))
)
)
.subcommand(
ClapCommand::new("builds")
.about("Query and manage build requests")
.subcommand(
ClapCommand::new("list")
.about("List all builds")
.arg(Arg::new("limit").long("limit").short('l').value_name("LIMIT").help("Maximum number of builds to show"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("show")
.about("Show build details")
.arg(Arg::new("build_request_id").required(true).help("Build request ID"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("cancel")
.about("Cancel a build")
.arg(Arg::new("build_request_id").required(true).help("Build request ID"))
.arg(Arg::new("reason").long("reason").short('r').required(true).help("Reason for cancellation"))
)
)
.arg(
Arg::new("event-log")
.long("event-log")
.help("Event log URI (default: sqlite:databuild.db for repository commands)")
.value_name("URI")
.global(true)
)
.get_matches();
// Get global event log URI
let event_log_uri = matches.get_one::<String>("event-log")
.cloned()
.or_else(|| env::var("DATABUILD_BUILD_EVENT_LOG").ok())
.unwrap_or_else(|| "sqlite:databuild.db".to_string());
match matches.subcommand() {
Some(("build", sub_matches)) => {
handle_build_command(sub_matches).await?;
}
Some(("partitions", sub_matches)) => {
handle_partitions_command(sub_matches, &event_log_uri).await?;
}
Some(("jobs", sub_matches)) => {
handle_jobs_command(sub_matches, &event_log_uri).await?;
}
Some(("tasks", sub_matches)) => {
handle_tasks_command(sub_matches, &event_log_uri).await?;
}
Some(("builds", sub_matches)) => {
handle_builds_command(sub_matches, &event_log_uri).await?;
}
_ => {
// Check if direct partition arguments were provided (legacy mode)
if let Some(partitions) = matches.get_many::<String>("partitions") {
let partition_list: Vec<String> = partitions.cloned().collect();
if !partition_list.is_empty() {
// Create a synthetic build command with these partitions
let build_cmd = ClapCommand::new("build")
.arg(Arg::new("partitions").num_args(1..))
.arg(Arg::new("event-log").long("event-log"))
.arg(Arg::new("build-request-id").long("build-request-id"));
let build_matches = build_cmd.try_get_matches_from(
std::iter::once("build".to_string()).chain(partition_list.clone())
).map_err(|e| CliError::InvalidArguments(format!("Failed to parse legacy build arguments: {}", e)))?;
handle_build_command(&build_matches).await?;
return Ok(());
}
}
// Show help if no subcommand or arguments provided
let mut cmd = ClapCommand::new("databuild")
.version("1.0")
.about("DataBuild unified CLI");
cmd.print_help().unwrap();
println!();
}
}
Ok(())
}
async fn handle_partitions_command(matches: &ArgMatches, event_log_uri: &str) -> Result<()> {
let event_log = create_build_event_log(event_log_uri).await
.map_err(|e| CliError::Database(format!("Failed to connect to event log: {}", e)))?;
let repository = PartitionsRepository::new(Arc::from(event_log));
match matches.subcommand() {
Some(("list", sub_matches)) => {
let limit = sub_matches.get_one::<String>("limit").and_then(|s| s.parse::<u32>().ok());
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
// Use new protobuf response format for consistency with service
let request = PartitionsListRequest {
limit,
offset: None, // TODO: Add offset support to CLI
status_filter: None, // TODO: Add status filtering to CLI
};
let response = repository.list_protobuf(request).await
.map_err(|e| CliError::Database(format!("Failed to list partitions: {}", e)))?;
match format {
"json" => {
let json = serde_json::to_string_pretty(&response)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
if response.partitions.is_empty() {
println!("No partitions found");
return Ok(());
}
println!("Partitions ({} total):", response.total_count);
println!();
println!("{:<30} {:<15} {:<12} {:<12} {:<20}", "Partition", "Status", "Builds", "Invalidated", "Last Updated");
println!("{}", "-".repeat(90));
for partition in response.partitions {
let last_updated = format_timestamp(partition.last_updated);
println!("{:<30} {:<15} {:<12} {:<12} {:<20}",
partition.partition_ref,
partition.status_name, // Use human-readable status name
partition.builds_count,
partition.invalidation_count,
last_updated
);
}
if response.has_more {
println!("\nNote: More results available. Use --limit to control output.");
}
}
}
}
Some(("show", sub_matches)) => {
let partition_ref = sub_matches.get_one::<String>("partition_ref").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show_protobuf(partition_ref).await
.map_err(|e| CliError::Database(format!("Failed to show partition: {}", e)))?;
match result {
Some(detail) => {
match format {
"json" => {
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Partition: {}", detail.partition_ref);
println!("Status: {} ({})", detail.status_name, detail.status_code);
println!("Builds involved: {}", detail.builds_count);
println!("Invalidation count: {}", detail.invalidation_count);
println!("Last updated: {}", format_timestamp(detail.last_updated));
if let Some(ref last_build) = detail.last_successful_build {
println!("\nLast successful build: {}", last_build);
}
if !detail.timeline.is_empty() {
println!("\nTimeline ({} events):", detail.timeline.len());
for event in detail.timeline {
let timestamp = format_timestamp(event.timestamp);
println!(" {} [{}] {}", timestamp, event.status_name, event.message);
// Invalidation reasons are embedded in the event message itself
}
}
}
}
}
None => {
match format {
"json" => {
println!("null");
}
_ => {
println!("Partition '{}' not found", partition_ref);
}
}
}
}
}
Some(("invalidate", sub_matches)) => {
let partition_ref = sub_matches.get_one::<String>("partition_ref").unwrap();
let reason = sub_matches.get_one::<String>("reason").unwrap();
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();
repository.invalidate(partition_ref, reason.clone(), build_request_id.clone()).await
.map_err(|e| CliError::Database(format!("Failed to invalidate partition: {}", e)))?;
println!("Successfully invalidated partition '{}' with reason: {}", partition_ref, reason);
}
_ => {
println!("Unknown partitions subcommand. Use 'list', 'show', or 'invalidate'.");
}
}
Ok(())
}
async fn handle_jobs_command(matches: &ArgMatches, event_log_uri: &str) -> Result<()> {
let event_log = create_build_event_log(event_log_uri).await
.map_err(|e| CliError::Database(format!("Failed to connect to event log: {}", e)))?;
let repository = JobsRepository::new(Arc::from(event_log));
match matches.subcommand() {
Some(("list", sub_matches)) => {
let limit = sub_matches.get_one::<String>("limit").and_then(|s| s.parse().ok());
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let jobs = repository.list(limit).await
.map_err(|e| CliError::Database(format!("Failed to list jobs: {}", e)))?;
match format {
"json" => {
let json = serde_json::to_string_pretty(&jobs)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
if jobs.is_empty() {
println!("No jobs found");
return Ok(());
}
println!("Jobs ({} total):", jobs.len());
println!();
println!("{:<40} {:<8} {:<8} {:<8} {:<8} {:<8} {:<20}", "Job Label", "Runs", "Success", "Failed", "Cancel", "Avg Parts", "Last Run");
println!("{}", "-".repeat(120));
for job in jobs {
let success_rate = if job.total_runs > 0 {
(job.successful_runs as f64 / job.total_runs as f64 * 100.0) as u32
} else {
0
};
let last_run = format_timestamp(job.last_run_timestamp);
let last_status = format!("{:?}", job.last_run_status);
println!("{:<40} {:<8} {:<8} {:<8} {:<8} {:<8.1} {:<20}",
job.job_label,
job.total_runs,
format!("{}({}%)", job.successful_runs, success_rate),
job.failed_runs,
job.cancelled_runs,
job.average_partitions_per_run,
format!("{} ({})", last_run, last_status)
);
}
}
}
}
Some(("show", sub_matches)) => {
let job_label = sub_matches.get_one::<String>("job_label").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show_protobuf(job_label).await
.map_err(|e| CliError::Database(format!("Failed to show job: {}", e)))?;
match result {
Some(detail) => {
match format {
"json" => {
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Job: {}", detail.job_label);
println!("Total runs: {}", detail.total_runs);
println!("Successful runs: {} ({:.1}%)", detail.successful_runs,
if detail.total_runs > 0 { detail.successful_runs as f64 / detail.total_runs as f64 * 100.0 } else { 0.0 });
println!("Failed runs: {}", detail.failed_runs);
println!("Cancelled runs: {}", detail.cancelled_runs);
println!("Average partitions per run: {:.1}", detail.average_partitions_per_run);
println!("Last run: {} ({} - {})", format_timestamp(detail.last_run_timestamp), detail.last_run_status_name, detail.last_run_status_code);
if !detail.recent_builds.is_empty() {
println!("\nRecent builds:");
for build_id in &detail.recent_builds {
println!(" - {}", build_id);
}
}
if !detail.runs.is_empty() {
println!("\nExecution history ({} runs):", detail.runs.len());
println!("{:<25} {:<15} {:<15} {:<10} {:<30}", "Run ID", "Status", "Duration", "Parts", "Build Request");
println!("{}", "-".repeat(95));
for run in detail.runs.iter().take(10) { // Show last 10 runs
let duration_str = if let Some(duration) = run.duration_ms {
if duration > 1000 {
format!("{:.1}s", duration as f64 / 1000.0)
} else {
format!("{}ms", duration)
}
} else {
"N/A".to_string()
};
println!("{:<25} {:<15} {:<15} {:<10} {:<30}",
run.job_run_id,
run.status_name,
duration_str,
run.target_partitions.len(),
run.build_request_id
);
}
if detail.runs.len() > 10 {
println!("... and {} more runs", detail.runs.len() - 10);
}
}
}
}
}
None => {
match format {
"json" => {
println!("null");
}
_ => {
println!("Job '{}' not found", job_label);
}
}
}
}
}
_ => {
println!("Unknown jobs subcommand. Use 'list' or 'show'.");
}
}
Ok(())
}
async fn handle_tasks_command(matches: &ArgMatches, event_log_uri: &str) -> Result<()> {
let event_log = create_build_event_log(event_log_uri).await
.map_err(|e| CliError::Database(format!("Failed to connect to event log: {}", e)))?;
let repository = TasksRepository::new(Arc::from(event_log));
match matches.subcommand() {
Some(("list", sub_matches)) => {
let limit = sub_matches.get_one::<String>("limit").and_then(|s| s.parse().ok());
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let tasks = repository.list(limit).await
.map_err(|e| CliError::Database(format!("Failed to list tasks: {}", e)))?;
match format {
"json" => {
let json = serde_json::to_string_pretty(&tasks)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
if tasks.is_empty() {
println!("No tasks found");
return Ok(());
}
println!("Tasks ({} total):", tasks.len());
println!();
println!("{:<25} {:<30} {:<15} {:<15} {:<10} {:<20}", "Job Run ID", "Job Label", "Status", "Duration", "Parts", "Scheduled");
println!("{}", "-".repeat(115));
for task in tasks {
let duration_str = if let Some(duration) = task.duration_ms {
if duration > 1000 {
format!("{:.1}s", duration as f64 / 1000.0)
} else {
format!("{}ms", duration)
}
} else {
"N/A".to_string()
};
let scheduled = format_timestamp(task.scheduled_at);
let status_str = if task.cancelled {
format!("{:?}*", task.status) // Add asterisk for cancelled tasks
} else {
format!("{:?}", task.status)
};
println!("{:<25} {:<30} {:<15} {:<15} {:<10} {:<20}",
task.job_run_id,
task.job_label,
status_str,
duration_str,
task.target_partitions.len(),
scheduled
);
}
println!("\n* = Cancelled task");
}
}
}
Some(("show", sub_matches)) => {
let job_run_id = sub_matches.get_one::<String>("job_run_id").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show_protobuf(job_run_id).await
.map_err(|e| CliError::Database(format!("Failed to show task: {}", e)))?;
match result {
Some(detail) => {
match format {
"json" => {
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Task: {}", detail.job_run_id);
println!("Job: {}", detail.job_label);
println!("Build request: {}", detail.build_request_id);
println!("Status: {} ({})", detail.status_name, detail.status_code);
println!("Target partitions: {}", detail.target_partitions.len());
println!("Scheduled: {}", format_timestamp(detail.scheduled_at));
if let Some(started) = detail.started_at {
println!("Started: {}", format_timestamp(started));
}
if let Some(completed) = detail.completed_at {
println!("Completed: {}", format_timestamp(completed));
}
if let Some(duration) = detail.duration_ms {
if duration > 1000 {
println!("Duration: {:.1}s", duration as f64 / 1000.0);
} else {
println!("Duration: {}ms", duration);
}
}
if detail.cancelled {
println!("Cancelled: Yes");
if let Some(ref reason) = detail.cancel_reason {
println!("Cancel reason: {}", reason);
}
}
if !detail.message.is_empty() {
println!("Message: {}", detail.message);
}
if !detail.target_partitions.is_empty() {
println!("\nTarget partitions:");
for partition in &detail.target_partitions {
println!(" - {}", partition.str);
}
}
if !detail.timeline.is_empty() {
println!("\nTimeline ({} events):", detail.timeline.len());
for event in detail.timeline {
let timestamp = format_timestamp(event.timestamp);
let status_info = if let Some(ref status_name) = event.status_name {
format!(" -> {}", status_name)
} else {
String::new()
};
println!(" {} [{}]{} {}", timestamp, event.event_type, status_info, event.message);
if let Some(ref reason) = event.cancel_reason {
println!(" Reason: {}", reason);
}
}
}
}
}
}
None => {
match format {
"json" => {
println!("null");
}
_ => {
println!("Task '{}' not found", job_run_id);
}
}
}
}
}
Some(("cancel", sub_matches)) => {
let job_run_id = sub_matches.get_one::<String>("job_run_id").unwrap();
let reason = sub_matches.get_one::<String>("reason").unwrap();
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();
repository.cancel(job_run_id, reason.clone(), build_request_id.clone()).await
.map_err(|e| CliError::Database(format!("Failed to cancel task: {}", e)))?;
println!("Successfully cancelled task '{}' with reason: {}", job_run_id, reason);
}
_ => {
println!("Unknown tasks subcommand. Use 'list', 'show', or 'cancel'.");
}
}
Ok(())
}
async fn handle_builds_command(matches: &ArgMatches, event_log_uri: &str) -> Result<()> {
let event_log = create_build_event_log(event_log_uri).await
.map_err(|e| CliError::Database(format!("Failed to connect to event log: {}", e)))?;
let repository = BuildsRepository::new(Arc::from(event_log));
match matches.subcommand() {
Some(("list", sub_matches)) => {
let limit = sub_matches.get_one::<String>("limit").and_then(|s| s.parse().ok());
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let builds = repository.list(limit).await
.map_err(|e| CliError::Database(format!("Failed to list builds: {}", e)))?;
match format {
"json" => {
let json = serde_json::to_string_pretty(&builds)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
if builds.is_empty() {
println!("No builds found");
return Ok(());
}
println!("Builds ({} total):", builds.len());
println!();
println!("{:<40} {:<15} {:<15} {:<8} {:<8} {:<8} {:<20}", "Build Request ID", "Status", "Duration", "Parts", "Jobs", "Comp", "Requested");
println!("{}", "-".repeat(120));
for build in builds {
let duration_str = if let Some(duration) = build.duration_ms {
if duration > 60000 {
format!("{:.1}m", duration as f64 / 60000.0)
} else if duration > 1000 {
format!("{:.1}s", duration as f64 / 1000.0)
} else {
format!("{}ms", duration)
}
} else {
"N/A".to_string()
};
let requested = format_timestamp(build.requested_at);
let status_str = if build.cancelled {
format!("{:?}*", build.status) // Add asterisk for cancelled builds
} else {
format!("{:?}", build.status)
};
let completion_rate = if build.total_jobs > 0 {
format!("{}/{}", build.completed_jobs, build.total_jobs)
} else {
"0/0".to_string()
};
println!("{:<40} {:<15} {:<15} {:<8} {:<8} {:<8} {:<20}",
build.build_request_id,
status_str,
duration_str,
build.requested_partitions.len(),
build.total_jobs,
completion_rate,
requested
);
}
println!("\n* = Cancelled build");
}
}
}
Some(("show", sub_matches)) => {
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show_protobuf(build_request_id).await
.map_err(|e| CliError::Database(format!("Failed to show build: {}", e)))?;
match result {
Some(detail) => {
match format {
"json" => {
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Build: {}", detail.build_request_id);
println!("Status: {} ({})", detail.status_name, detail.status_code);
println!("Requested partitions: {}", detail.requested_partitions.len());
println!("Total jobs: {}", detail.total_jobs);
println!("Completed jobs: {}", detail.completed_jobs);
println!("Failed jobs: {}", detail.failed_jobs);
println!("Cancelled jobs: {}", detail.cancelled_jobs);
println!("Requested: {}", format_timestamp(detail.requested_at));
if let Some(started) = detail.started_at {
println!("Started: {}", format_timestamp(started));
}
if let Some(completed) = detail.completed_at {
println!("Completed: {}", format_timestamp(completed));
}
if let Some(duration) = detail.duration_ms {
if duration > 60000 {
println!("Duration: {:.1}m", duration as f64 / 60000.0);
} else if duration > 1000 {
println!("Duration: {:.1}s", duration as f64 / 1000.0);
} else {
println!("Duration: {}ms", duration);
}
}
if detail.cancelled {
println!("Cancelled: Yes");
if let Some(ref reason) = detail.cancel_reason {
println!("Cancel reason: {}", reason);
}
}
if !detail.requested_partitions.is_empty() {
println!("\nRequested partitions:");
for partition in &detail.requested_partitions {
println!(" - {}", partition.str);
}
}
// Show job statistics
if detail.total_jobs > 0 {
let success_rate = (detail.completed_jobs as f64 / detail.total_jobs as f64 * 100.0) as u32;
println!("\nJob statistics:");
println!(" Success rate: {}% ({}/{})", success_rate, detail.completed_jobs, detail.total_jobs);
if detail.failed_jobs > 0 {
println!(" Failed: {}", detail.failed_jobs);
}
if detail.cancelled_jobs > 0 {
println!(" Cancelled: {}", detail.cancelled_jobs);
}
}
if !detail.timeline.is_empty() {
println!("\nTimeline ({} events):", detail.timeline.len());
for event in detail.timeline {
let timestamp = format_timestamp(event.timestamp);
let status_info = if let Some(ref status_name) = event.status_name {
format!(" -> {}", status_name)
} else {
String::new()
};
println!(" {} [{}]{} {}", timestamp, event.event_type, status_info, event.message);
if let Some(ref reason) = event.cancel_reason {
println!(" Reason: {}", reason);
}
}
}
}
}
}
None => {
match format {
"json" => {
println!("null");
}
_ => {
println!("Build '{}' not found", build_request_id);
}
}
}
}
}
Some(("cancel", sub_matches)) => {
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();
let reason = sub_matches.get_one::<String>("reason").unwrap();
repository.cancel(build_request_id, reason.clone()).await
.map_err(|e| CliError::Database(format!("Failed to cancel build: {}", e)))?;
println!("Successfully cancelled build '{}' with reason: {}", build_request_id, reason);
}
_ => {
println!("Unknown builds subcommand. Use 'list', 'show', or 'cancel'.");
}
}
Ok(())
}

View file

@@ -32,29 +32,56 @@ genrule(
"typescript_generated/src/apis/DefaultApi.ts",
"typescript_generated/src/apis/index.ts",
"typescript_generated/src/models/index.ts",
"typescript_generated/src/models/ActivityApiResponse.ts",
"typescript_generated/src/models/ActivityResponse.ts",
"typescript_generated/src/models/AnalyzeRequest.ts",
"typescript_generated/src/models/AnalyzeResponse.ts",
"typescript_generated/src/models/BuildCancelPathRequest.ts",
"typescript_generated/src/models/BuildCancelRepositoryResponse.ts",
"typescript_generated/src/models/BuildDetailRequest.ts",
"typescript_generated/src/models/BuildDetailResponse.ts",
"typescript_generated/src/models/BuildEventSummary.ts",
"typescript_generated/src/models/BuildRequest.ts",
"typescript_generated/src/models/BuildRequestResponse.ts",
"typescript_generated/src/models/BuildStatusRequest.ts",
"typescript_generated/src/models/BuildStatusResponse.ts",
"typescript_generated/src/models/BuildSummary.ts",
"typescript_generated/src/models/BuildTimelineEvent.ts",
"typescript_generated/src/models/BuildsListApiResponse.ts",
"typescript_generated/src/models/BuildsListResponse.ts",
"typescript_generated/src/models/CancelBuildRequest.ts",
"typescript_generated/src/models/CancelBuildRepositoryRequest.ts",
"typescript_generated/src/models/CancelTaskRequest.ts",
"typescript_generated/src/models/InvalidatePartitionRequest.ts",
"typescript_generated/src/models/JobDailyStats.ts",
"typescript_generated/src/models/JobDetailRequest.ts",
"typescript_generated/src/models/JobDetailResponse.ts",
"typescript_generated/src/models/JobMetricsRequest.ts",
"typescript_generated/src/models/JobMetricsResponse.ts",
"typescript_generated/src/models/JobRunDetail.ts",
"typescript_generated/src/models/JobRunSummary.ts",
"typescript_generated/src/models/JobSummary.ts",
"typescript_generated/src/models/JobsListApiResponse.ts",
"typescript_generated/src/models/JobsListResponse.ts",
"typescript_generated/src/models/PaginationInfo.ts",
"typescript_generated/src/models/PartitionDetailRequest.ts",
"typescript_generated/src/models/PartitionDetailResponse.ts",
"typescript_generated/src/models/PartitionEventsRequest.ts",
"typescript_generated/src/models/PartitionEventsResponse.ts",
"typescript_generated/src/models/PartitionInvalidatePathRequest.ts",
"typescript_generated/src/models/PartitionInvalidateResponse.ts",
"typescript_generated/src/models/PartitionRef.ts",
"typescript_generated/src/models/PartitionStatusRequest.ts",
"typescript_generated/src/models/PartitionStatusResponse.ts",
"typescript_generated/src/models/PartitionSummary.ts",
"typescript_generated/src/models/PartitionTimelineEvent.ts",
"typescript_generated/src/models/PartitionsListApiResponse.ts",
"typescript_generated/src/models/PartitionsListResponse.ts",
"typescript_generated/src/models/TaskCancelPathRequest.ts",
"typescript_generated/src/models/TaskCancelResponse.ts",
"typescript_generated/src/models/TaskDetailRequest.ts",
"typescript_generated/src/models/TaskDetailResponse.ts",
"typescript_generated/src/models/TaskSummary.ts",
"typescript_generated/src/models/TaskTimelineEvent.ts",
"typescript_generated/src/models/TasksListApiResponse.ts",
"typescript_generated/src/models/TasksListResponse.ts",
"typescript_generated/src/runtime.ts",
"typescript_generated/src/index.ts",
],
@@ -79,29 +106,56 @@ genrule(
cp $$TEMP_DIR/src/apis/DefaultApi.ts $(location typescript_generated/src/apis/DefaultApi.ts)
cp $$TEMP_DIR/src/apis/index.ts $(location typescript_generated/src/apis/index.ts)
cp $$TEMP_DIR/src/models/index.ts $(location typescript_generated/src/models/index.ts)
cp $$TEMP_DIR/src/models/ActivityApiResponse.ts $(location typescript_generated/src/models/ActivityApiResponse.ts)
cp $$TEMP_DIR/src/models/ActivityResponse.ts $(location typescript_generated/src/models/ActivityResponse.ts)
cp $$TEMP_DIR/src/models/AnalyzeRequest.ts $(location typescript_generated/src/models/AnalyzeRequest.ts)
cp $$TEMP_DIR/src/models/AnalyzeResponse.ts $(location typescript_generated/src/models/AnalyzeResponse.ts)
cp $$TEMP_DIR/src/models/BuildCancelPathRequest.ts $(location typescript_generated/src/models/BuildCancelPathRequest.ts)
cp $$TEMP_DIR/src/models/BuildCancelRepositoryResponse.ts $(location typescript_generated/src/models/BuildCancelRepositoryResponse.ts)
cp $$TEMP_DIR/src/models/BuildDetailRequest.ts $(location typescript_generated/src/models/BuildDetailRequest.ts)
cp $$TEMP_DIR/src/models/BuildDetailResponse.ts $(location typescript_generated/src/models/BuildDetailResponse.ts)
cp $$TEMP_DIR/src/models/BuildEventSummary.ts $(location typescript_generated/src/models/BuildEventSummary.ts)
cp $$TEMP_DIR/src/models/BuildRequest.ts $(location typescript_generated/src/models/BuildRequest.ts)
cp $$TEMP_DIR/src/models/BuildRequestResponse.ts $(location typescript_generated/src/models/BuildRequestResponse.ts)
cp $$TEMP_DIR/src/models/BuildStatusRequest.ts $(location typescript_generated/src/models/BuildStatusRequest.ts)
cp $$TEMP_DIR/src/models/BuildStatusResponse.ts $(location typescript_generated/src/models/BuildStatusResponse.ts)
cp $$TEMP_DIR/src/models/BuildSummary.ts $(location typescript_generated/src/models/BuildSummary.ts)
cp $$TEMP_DIR/src/models/BuildTimelineEvent.ts $(location typescript_generated/src/models/BuildTimelineEvent.ts)
cp $$TEMP_DIR/src/models/BuildsListApiResponse.ts $(location typescript_generated/src/models/BuildsListApiResponse.ts)
cp $$TEMP_DIR/src/models/BuildsListResponse.ts $(location typescript_generated/src/models/BuildsListResponse.ts)
cp $$TEMP_DIR/src/models/CancelBuildRequest.ts $(location typescript_generated/src/models/CancelBuildRequest.ts)
cp $$TEMP_DIR/src/models/CancelBuildRepositoryRequest.ts $(location typescript_generated/src/models/CancelBuildRepositoryRequest.ts)
cp $$TEMP_DIR/src/models/CancelTaskRequest.ts $(location typescript_generated/src/models/CancelTaskRequest.ts)
cp $$TEMP_DIR/src/models/InvalidatePartitionRequest.ts $(location typescript_generated/src/models/InvalidatePartitionRequest.ts)
cp $$TEMP_DIR/src/models/JobDailyStats.ts $(location typescript_generated/src/models/JobDailyStats.ts)
cp $$TEMP_DIR/src/models/JobDetailRequest.ts $(location typescript_generated/src/models/JobDetailRequest.ts)
cp $$TEMP_DIR/src/models/JobDetailResponse.ts $(location typescript_generated/src/models/JobDetailResponse.ts)
cp $$TEMP_DIR/src/models/JobMetricsRequest.ts $(location typescript_generated/src/models/JobMetricsRequest.ts)
cp $$TEMP_DIR/src/models/JobMetricsResponse.ts $(location typescript_generated/src/models/JobMetricsResponse.ts)
cp $$TEMP_DIR/src/models/JobRunDetail.ts $(location typescript_generated/src/models/JobRunDetail.ts)
cp $$TEMP_DIR/src/models/JobRunSummary.ts $(location typescript_generated/src/models/JobRunSummary.ts)
cp $$TEMP_DIR/src/models/JobSummary.ts $(location typescript_generated/src/models/JobSummary.ts)
cp $$TEMP_DIR/src/models/JobsListApiResponse.ts $(location typescript_generated/src/models/JobsListApiResponse.ts)
cp $$TEMP_DIR/src/models/JobsListResponse.ts $(location typescript_generated/src/models/JobsListResponse.ts)
cp $$TEMP_DIR/src/models/PaginationInfo.ts $(location typescript_generated/src/models/PaginationInfo.ts)
cp $$TEMP_DIR/src/models/PartitionDetailRequest.ts $(location typescript_generated/src/models/PartitionDetailRequest.ts)
cp $$TEMP_DIR/src/models/PartitionDetailResponse.ts $(location typescript_generated/src/models/PartitionDetailResponse.ts)
cp $$TEMP_DIR/src/models/PartitionEventsRequest.ts $(location typescript_generated/src/models/PartitionEventsRequest.ts)
cp $$TEMP_DIR/src/models/PartitionEventsResponse.ts $(location typescript_generated/src/models/PartitionEventsResponse.ts)
cp $$TEMP_DIR/src/models/PartitionInvalidatePathRequest.ts $(location typescript_generated/src/models/PartitionInvalidatePathRequest.ts)
cp $$TEMP_DIR/src/models/PartitionInvalidateResponse.ts $(location typescript_generated/src/models/PartitionInvalidateResponse.ts)
cp $$TEMP_DIR/src/models/PartitionRef.ts $(location typescript_generated/src/models/PartitionRef.ts)
cp $$TEMP_DIR/src/models/PartitionStatusRequest.ts $(location typescript_generated/src/models/PartitionStatusRequest.ts)
cp $$TEMP_DIR/src/models/PartitionStatusResponse.ts $(location typescript_generated/src/models/PartitionStatusResponse.ts)
cp $$TEMP_DIR/src/models/PartitionSummary.ts $(location typescript_generated/src/models/PartitionSummary.ts)
cp $$TEMP_DIR/src/models/PartitionTimelineEvent.ts $(location typescript_generated/src/models/PartitionTimelineEvent.ts)
cp $$TEMP_DIR/src/models/PartitionsListApiResponse.ts $(location typescript_generated/src/models/PartitionsListApiResponse.ts)
cp $$TEMP_DIR/src/models/PartitionsListResponse.ts $(location typescript_generated/src/models/PartitionsListResponse.ts)
cp $$TEMP_DIR/src/models/TaskCancelPathRequest.ts $(location typescript_generated/src/models/TaskCancelPathRequest.ts)
cp $$TEMP_DIR/src/models/TaskCancelResponse.ts $(location typescript_generated/src/models/TaskCancelResponse.ts)
cp $$TEMP_DIR/src/models/TaskDetailRequest.ts $(location typescript_generated/src/models/TaskDetailRequest.ts)
cp $$TEMP_DIR/src/models/TaskDetailResponse.ts $(location typescript_generated/src/models/TaskDetailResponse.ts)
cp $$TEMP_DIR/src/models/TaskSummary.ts $(location typescript_generated/src/models/TaskSummary.ts)
cp $$TEMP_DIR/src/models/TaskTimelineEvent.ts $(location typescript_generated/src/models/TaskTimelineEvent.ts)
cp $$TEMP_DIR/src/models/TasksListApiResponse.ts $(location typescript_generated/src/models/TasksListApiResponse.ts)
cp $$TEMP_DIR/src/models/TasksListResponse.ts $(location typescript_generated/src/models/TasksListResponse.ts)
cp $$TEMP_DIR/src/runtime.ts $(location typescript_generated/src/runtime.ts)
cp $$TEMP_DIR/src/index.ts $(location typescript_generated/src/index.ts)
""",

View file

@@ -64,6 +64,7 @@ ts_project(
"layout.ts",
"pages.ts",
"services.ts",
"types.ts",
"utils.ts",
],
allow_js = True,

View file

@@ -10,23 +10,42 @@ import {
GraphAnalysis
} from './pages';
import { decodePartitionRef } from './utils';
import {
TypedComponent,
LayoutWrapperAttrs,
RecentActivityAttrs,
BuildStatusAttrs,
PartitionStatusAttrs,
PartitionsListAttrs,
JobsListAttrs,
JobMetricsAttrs,
GraphAnalysisAttrs
} from './types';
export const appName = "databuild";
// Wrapper components that include layout
const LayoutWrapper = (component: any) => ({
view: (vnode: any) => m(Layout, m(component, vnode.attrs))
});
// Wrapper components that include layout - now with type safety
function createLayoutWrapper<TAttrs>(component: TypedComponent<TAttrs>): m.Component<TAttrs> {
return {
oninit: component.oninit ? (vnode: m.Vnode<TAttrs>) => component.oninit!.call(component, vnode) : undefined,
oncreate: component.oncreate ? (vnode: m.VnodeDOM<TAttrs>) => component.oncreate!.call(component, vnode) : undefined,
onupdate: component.onupdate ? (vnode: m.VnodeDOM<TAttrs>) => component.onupdate!.call(component, vnode) : undefined,
onbeforeremove: component.onbeforeremove ? (vnode: m.VnodeDOM<TAttrs>) => component.onbeforeremove!.call(component, vnode) : undefined,
onremove: component.onremove ? (vnode: m.VnodeDOM<TAttrs>) => component.onremove!.call(component, vnode) : undefined,
onbeforeupdate: component.onbeforeupdate ? (vnode: m.Vnode<TAttrs>, old: m.VnodeDOM<TAttrs>) => component.onbeforeupdate!.call(component, vnode, old) : undefined,
view: (vnode: m.Vnode<TAttrs>) => m(Layout, [component.view.call(component, vnode)])
};
}
// Route definitions
// Route definitions with type safety
const routes = {
'/': LayoutWrapper(RecentActivity),
'/builds/:id': LayoutWrapper(BuildStatus),
'/partitions': LayoutWrapper(PartitionsList),
'/partitions/:base64_ref': LayoutWrapper(PartitionStatus),
'/jobs': LayoutWrapper(JobsList),
'/jobs/:label': LayoutWrapper(JobMetrics),
'/analyze': LayoutWrapper(GraphAnalysis),
'/': createLayoutWrapper<RecentActivityAttrs>(RecentActivity),
'/builds/:id': createLayoutWrapper<BuildStatusAttrs>(BuildStatus),
'/partitions': createLayoutWrapper<PartitionsListAttrs>(PartitionsList),
'/partitions/:base64_ref': createLayoutWrapper<PartitionStatusAttrs>(PartitionStatus),
'/jobs': createLayoutWrapper<JobsListAttrs>(JobsList),
'/jobs/:label': createLayoutWrapper<JobMetricsAttrs>(JobMetrics),
'/analyze': createLayoutWrapper<GraphAnalysisAttrs>(GraphAnalysis),
};
if (typeof window !== "undefined") {

View file

@@ -1,9 +1,20 @@
import m from 'mithril';
import { DashboardService, pollingManager, formatTime, formatDateTime, formatDuration, formatDate, RecentActivitySummary } from './services';
import { encodePartitionRef, decodePartitionRef, encodeJobLabel, decodeJobLabel, BuildStatusBadge, PartitionStatusBadge, EventTypeBadge } from './utils';
import {
TypedComponent,
RecentActivityAttrs,
BuildStatusAttrs,
PartitionStatusAttrs,
PartitionsListAttrs,
JobsListAttrs,
JobMetricsAttrs,
GraphAnalysisAttrs,
getTypedRouteParams
} from './types';
// Page scaffold components
export const RecentActivity = {
export const RecentActivity: TypedComponent<RecentActivityAttrs> = {
data: null as RecentActivitySummary | null,
loading: true,
error: null as string | null,
@@ -30,7 +41,7 @@ export const RecentActivity = {
});
},
oninit() {
oninit(vnode: m.Vnode<RecentActivityAttrs>) {
// Load initial data - Mithril will automatically redraw after promise resolves
this.loadData();
@@ -42,12 +53,12 @@ export const RecentActivity = {
}
},
onremove() {
onremove(vnode: m.VnodeDOM<RecentActivityAttrs>) {
// Clean up polling when component is removed
pollingManager.stopPolling('recent-activity');
},
view: function() {
view: function(vnode: m.Vnode<RecentActivityAttrs>) {
if (this.loading && !this.data) {
return m('div.container.mx-auto.p-4', [
@@ -190,7 +201,7 @@ export const RecentActivity = {
])
]),
m('tbody',
data.recentBuilds.map(build =>
data.recentBuilds.map((build: any) =>
m('tr.hover', [
m('td', [
m('a.link.link-primary.font-mono.text-sm', {
@@ -243,7 +254,7 @@ export const RecentActivity = {
])
]),
m('tbody',
data.recentPartitions.map(partition =>
data.recentPartitions.map((partition: any) =>
m('tr.hover', [
m('td', [
m('a.link.link-primary.font-mono.text-sm.break-all', {
@@ -271,20 +282,20 @@ export const RecentActivity = {
}
};
export const BuildStatus = {
export const BuildStatus: TypedComponent<BuildStatusAttrs> = {
data: null as any | null,
loading: true,
error: null as string | null,
partitionStatuses: new Map<string, any>(),
buildId: '',
oninit(vnode: any) {
oninit(vnode: m.Vnode<BuildStatusAttrs>) {
this.buildId = vnode.attrs.id;
this.loadBuild();
this.startPolling();
},
onremove() {
onremove(vnode: m.VnodeDOM<BuildStatusAttrs>) {
pollingManager.stopPolling(`build-status-${this.buildId}`);
},
@@ -306,13 +317,13 @@ export const BuildStatus = {
if (buildResponse.requested_partitions) {
for (const partition_ref of buildResponse.requested_partitions) {
try {
const partition_status = await apiClient.apiV1PartitionsRefStatusGet({
ref: partition_ref
const partition_status = await apiClient.apiV1PartitionsPartitionRefStatusGet({
partition_ref: partition_ref.str
});
console.log(`Loaded status for partition ${partition_ref}:`, partition_status);
this.partitionStatuses.set(partition_ref, partition_status);
console.log(`Loaded status for partition ${partition_ref.str}:`, partition_status);
this.partitionStatuses.set(partition_ref.str, partition_status);
} catch (e) {
console.warn(`Failed to load status for partition ${partition_ref}:`, e);
console.warn(`Failed to load status for partition ${partition_ref.str}:`, e);
}
}
}
@@ -373,16 +384,16 @@ export const BuildStatus = {
}
},
oncreate() {
oncreate(vnode: m.VnodeDOM<BuildStatusAttrs>) {
(window as any).mermaid.init();
},
onupdate() {
onupdate(vnode: m.VnodeDOM<BuildStatusAttrs>) {
(window as any).mermaid.init();
},
view() {
view(vnode: m.Vnode<BuildStatusAttrs>) {
// Loading/error states similar to RecentActivity component
if (this.loading && !this.data) {
return m('div.container.mx-auto.p-4', [
@@ -516,7 +527,7 @@ export const BuildStatus = {
}
};
export const PartitionsList = {
export const PartitionsList: TypedComponent<PartitionsListAttrs> = {
data: null as any | null,
loading: true,
error: null as string | null,
@@ -573,11 +584,11 @@ export const PartitionsList = {
);
},
oninit() {
oninit(vnode: m.Vnode<PartitionsListAttrs>) {
this.loadPartitions();
},
view() {
view(vnode: m.Vnode<PartitionsListAttrs>) {
if (this.loading && !this.data) {
return m('div.container.mx-auto.p-4', [
m('div.flex.flex-col.justify-center.items-center.min-h-96', [
@@ -701,7 +712,7 @@ export const PartitionsList = {
}
};
export const PartitionStatus = {
export const PartitionStatus: TypedComponent<PartitionStatusAttrs> = {
data: null as any | null,
events: null as any | null,
loading: true,
@@ -719,14 +730,14 @@ export const PartitionStatus = {
const apiClient = new DefaultApi(new Configuration({ basePath: '' }));
// Load partition status
const statusResponse = await apiClient.apiV1PartitionsRefStatusGet({
ref: this.partitionRef
const statusResponse = await apiClient.apiV1PartitionsPartitionRefStatusGet({
partition_ref: this.partitionRef
});
this.data = statusResponse;
// Load partition events for build history
const eventsResponse = await apiClient.apiV1PartitionsRefEventsGet({
ref: this.partitionRef
const eventsResponse = await apiClient.apiV1PartitionsPartitionRefEventsGet({
partition_ref: this.partitionRef
});
this.events = eventsResponse;
@@ -802,12 +813,12 @@ export const PartitionStatus = {
}
},
oninit(vnode: any) {
oninit(vnode: m.Vnode<PartitionStatusAttrs>) {
this.partitionRef = decodePartitionRef(vnode.attrs.base64_ref);
this.loadPartition();
},
view() {
view(vnode: m.Vnode<PartitionStatusAttrs>) {
if (this.loading && !this.data) {
return m('div.container.mx-auto.p-4', [
m('div.flex.flex-col.justify-center.items-center.min-h-96', [
@@ -991,14 +1002,14 @@ export const PartitionStatus = {
}
};
export const JobsList = {
export const JobsList: TypedComponent<JobsListAttrs> = {
jobs: [] as any[],
searchTerm: '',
loading: false,
error: null as string | null,
searchTimeout: null as NodeJS.Timeout | null,
oninit(vnode: any) {
oninit(vnode: m.Vnode<JobsListAttrs>) {
JobsList.loadJobs();
},
@@ -1028,7 +1039,7 @@ export const JobsList = {
);
},
view: () => {
view: (vnode: m.Vnode<JobsListAttrs>) => {
if (JobsList.loading) {
return m('div.container.mx-auto.p-4', [
m('div.flex.justify-center.items-center.h-64', [
@@ -1115,13 +1126,13 @@ export const JobsList = {
}
};
export const JobMetrics = {
export const JobMetrics: TypedComponent<JobMetricsAttrs> = {
jobLabel: '',
metrics: null as any,
loading: false,
error: null as string | null,
oninit(vnode: any) {
oninit(vnode: m.Vnode<JobMetricsAttrs>) {
JobMetrics.jobLabel = decodeJobLabel(vnode.attrs.label);
JobMetrics.loadJobMetrics();
},
@@ -1145,7 +1156,7 @@ export const JobMetrics = {
}
},
view: () => {
view: (vnode: m.Vnode<JobMetricsAttrs>) => {
if (JobMetrics.loading) {
return m('div.container.mx-auto.p-4', [
m('div.flex.justify-center.items-center.h-64', [
@@ -1283,8 +1294,8 @@ export const JobMetrics = {
}
};
export const GraphAnalysis = {
view: () => m('div.container.mx-auto.p-4', [
export const GraphAnalysis: TypedComponent<GraphAnalysisAttrs> = {
view: (vnode: m.Vnode<GraphAnalysisAttrs>) => m('div.container.mx-auto.p-4', [
m('h1.text-3xl.font-bold.mb-4', 'Graph Analysis'),
m('div.card.bg-base-100.shadow-xl', [
m('div.card-body', [

View file

@@ -1,5 +1,5 @@
// Import the generated TypeScript client
import { DefaultApi, Configuration, ActivityResponse, BuildSummary, PartitionSummary, JobsListResponse, JobMetricsResponse, JobSummary, JobRunSummary, JobDailyStats } from '../client/typescript_generated/src/index';
import { DefaultApi, Configuration, ActivityApiResponse, ActivityResponse, BuildSummary, PartitionSummary, JobsListApiResponse, JobMetricsResponse, JobSummary, JobRunSummary, JobDailyStats } from '../client/typescript_generated/src/index';
// Configure the API client
const apiConfig = new Configuration({
@@ -45,22 +45,24 @@ export class DashboardService {
async getRecentActivity(): Promise<RecentActivitySummary> {
try {
// Use the new activity endpoint that aggregates all the data we need
const activityResponse: ActivityResponse = await apiClient.apiV1ActivityGet();
console.info('Recent activity:', activityResponse);
const activityApiResponse: ActivityApiResponse = await apiClient.apiV1ActivityGet();
console.info('Recent activity:', activityApiResponse);
const activityResponse = activityApiResponse.data;
// Convert the API response to our dashboard format
const recentBuilds: BuildRequest[] = activityResponse.recent_builds.map((build: BuildSummary) => ({
buildRequestId: build.build_request_id,
status: build.status,
createdAt: build.created_at,
updatedAt: build.updated_at,
status: build.status_name, // Use human-readable status name
createdAt: build.requested_at,
updatedAt: build.started_at || build.requested_at,
}));
const recentPartitions: PartitionBuild[] = activityResponse.recent_partitions.map((partition: PartitionSummary) => ({
ref: partition.partition_ref,
status: partition.status,
updatedAt: partition.updated_at,
buildRequestId: partition.build_request_id || undefined
status: partition.status_name, // Use human-readable status name
updatedAt: partition.last_updated,
buildRequestId: partition.last_successful_build || undefined
}));
console.info("made", recentBuilds, recentPartitions);
return {
@@ -99,8 +101,8 @@ export class DashboardService {
if (!response.ok) {
throw new Error(`HTTP ${response.status}: ${response.statusText}`);
}
const data: JobsListResponse = await response.json();
return data.jobs;
const data: JobsListApiResponse = await response.json();
return data.data.jobs;
} catch (error) {
console.error('Failed to fetch jobs:', error);
return [];

View file

@@ -0,0 +1,181 @@
import m from 'mithril';
import {
ActivityResponse,
ActivityApiResponse,
BuildSummary,
BuildDetailResponse,
PartitionSummary,
PartitionDetailResponse,
PartitionEventsResponse,
JobSummary,
JobMetricsResponse,
JobDailyStats,
JobRunSummary
} from '../client/typescript_generated/src/index';
// Generic typed component interface that extends Mithril's component
// Uses intersection type to allow arbitrary properties while ensuring type safety for lifecycle methods
export interface TypedComponent<TAttrs = {}> extends Record<string, any> {
oninit?(vnode: m.Vnode<TAttrs>): void;
oncreate?(vnode: m.VnodeDOM<TAttrs>): void;
onupdate?(vnode: m.VnodeDOM<TAttrs>): void;
onbeforeremove?(vnode: m.VnodeDOM<TAttrs>): Promise<any> | void;
onremove?(vnode: m.VnodeDOM<TAttrs>): void;
onbeforeupdate?(vnode: m.Vnode<TAttrs>, old: m.VnodeDOM<TAttrs>): boolean | void;
view(vnode: m.Vnode<TAttrs>): m.Children;
}
// Helper type for typed vnodes
export type TypedVnode<TAttrs = {}> = m.Vnode<TAttrs>;
export type TypedVnodeDOM<TAttrs = {}> = m.VnodeDOM<TAttrs>;
// Route parameter types
export interface RouteParams {
[key: string]: string;
}
export interface BuildRouteParams extends RouteParams {
id: string;
}
export interface PartitionRouteParams extends RouteParams {
base64_ref: string;
}
export interface JobRouteParams extends RouteParams {
label: string;
}
// Component attribute interfaces that reference OpenAPI types
export interface RecentActivityAttrs {
// No external attrs needed - component manages its own data loading
}
export interface BuildStatusAttrs {
id: string;
}
export interface PartitionStatusAttrs {
base64_ref: string;
}
export interface PartitionsListAttrs {
// No external attrs needed - component manages its own data loading
}
export interface JobsListAttrs {
// No external attrs needed - component manages its own data loading
}
export interface JobMetricsAttrs {
label: string;
}
export interface GraphAnalysisAttrs {
// No external attrs needed for now
}
// Badge component attribute interfaces with OpenAPI type constraints
export interface BuildStatusBadgeAttrs {
status: string; // This should be constrained to BuildSummary status values
size?: 'xs' | 'sm' | 'md' | 'lg';
class?: string;
}
export interface PartitionStatusBadgeAttrs {
status: string; // This should be constrained to PartitionSummary status values
size?: 'xs' | 'sm' | 'md' | 'lg';
class?: string;
}
export interface EventTypeBadgeAttrs {
eventType: string; // This should be constrained to known event types
size?: 'xs' | 'sm' | 'md' | 'lg';
class?: string;
}
// Layout wrapper attributes
export interface LayoutWrapperAttrs {
// Layout wrapper will pass through attributes to wrapped component
[key: string]: any;
}
// Data types for component state (using OpenAPI types)
export interface RecentActivityData {
data: ActivityResponse | null;
loading: boolean;
error: string | null;
}
export interface BuildStatusData {
data: BuildDetailResponse | null;
partitionStatuses: Map<string, any>;
loading: boolean;
error: string | null;
buildId: string;
}
export interface PartitionStatusData {
data: PartitionDetailResponse | null;
events: PartitionEventsResponse | null;
loading: boolean;
error: string | null;
partitionRef: string;
buildHistory: any[];
}
export interface JobsListData {
jobs: JobSummary[];
searchTerm: string;
loading: boolean;
error: string | null;
searchTimeout: NodeJS.Timeout | null;
}
export interface JobMetricsData {
jobLabel: string;
metrics: JobMetricsResponse | null;
loading: boolean;
error: string | null;
}
// Utility type for creating typed components
export type CreateTypedComponent<TAttrs> = TypedComponent<TAttrs>;
// Type guards and validators using OpenAPI type information
export function isActivityResponse(data: any): data is ActivityResponse {
return data &&
typeof data.active_builds_count === 'number' &&
typeof data.graph_name === 'string' &&
Array.isArray(data.recent_builds) &&
Array.isArray(data.recent_partitions) &&
typeof data.system_status === 'string' &&
typeof data.total_partitions_count === 'number';
}
export function isBuildSummary(data: any): data is BuildSummary {
return data &&
typeof data.build_request_id === 'string' &&
typeof data.status_name === 'string' &&
typeof data.requested_at === 'number';
}
export function isPartitionSummary(data: any): data is PartitionSummary {
return data &&
typeof data.partition_ref === 'string' &&
typeof data.last_updated === 'number';
}
// Helper function to create type-safe Mithril components
export function createTypedComponent<TAttrs>(
component: TypedComponent<TAttrs>
): m.Component<TAttrs> {
return component as m.Component<TAttrs>;
}
// Helper for type-safe route handling
export function getTypedRouteParams<T extends RouteParams>(vnode: m.Vnode<T>): T {
return vnode.attrs;
}
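A usage sketch (hypothetical component and fetch wiring; the endpoint path and `data` wrapper shape are assumed from services.ts) showing how `TypedComponent` and the guards compose:
```typescript
import m from 'mithril';
import { TypedComponent, JobMetricsAttrs, isActivityResponse } from './types';

// Hypothetical example: attrs are checked against JobMetricsAttrs, so a route
// that forgets to supply `label` fails to type-check instead of failing at runtime.
const JobMetricsHeader: TypedComponent<JobMetricsAttrs> = {
  view(vnode: m.Vnode<JobMetricsAttrs>) {
    return m('h2.text-xl', `Metrics for ${vnode.attrs.label}`);
  },
};

// Guards narrow untyped payloads before they reach typed components.
async function loadActivity(): Promise<void> {
  const payload: any = await fetch('/api/v1/activity').then(r => r.json());
  if (isActivityResponse(payload.data)) {
    console.log(`${payload.data.active_builds_count} active builds`); // typed access
  }
}
```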

View file

@@ -23,12 +23,19 @@ export function decodeJobLabel(encoded: string): string {
}
import m from 'mithril';
import {
TypedComponent,
BuildStatusBadgeAttrs,
PartitionStatusBadgeAttrs,
EventTypeBadgeAttrs,
createTypedComponent
} from './types';
// Mithril components for status badges - encapsulates both logic and presentation
export const BuildStatusBadge = {
view(vnode: any) {
const { status, size = 'sm', ...attrs } = vnode.attrs;
export const BuildStatusBadge: TypedComponent<BuildStatusBadgeAttrs> = {
view(vnode: m.Vnode<BuildStatusBadgeAttrs>) {
const { status, size = 'sm', class: className, ...attrs } = vnode.attrs;
const normalizedStatus = status.toLowerCase();
let badgeClass = 'badge-neutral';
@ -42,15 +49,15 @@ export const BuildStatusBadge = {
badgeClass = 'badge-error';
}
return m(`span.badge.badge-${size}.${badgeClass}`, attrs, status);
return m(`span.badge.badge-${size}.${badgeClass}`, { class: className, ...attrs }, status);
}
};
export const PartitionStatusBadge = {
view(vnode: any) {
const { status, size = 'sm', ...attrs } = vnode.attrs;
export const PartitionStatusBadge: TypedComponent<PartitionStatusBadgeAttrs> = {
view(vnode: m.Vnode<PartitionStatusBadgeAttrs>) {
const { status, size = 'sm', class: className, ...attrs } = vnode.attrs;
if (!status) {
return m(`span.badge.badge-${size}.badge-neutral`, attrs, 'Unknown');
return m(`span.badge.badge-${size}.badge-neutral`, { class: className, ...attrs }, 'Unknown');
}
const normalizedStatus = status.toLowerCase();
@ -66,13 +73,13 @@ export const PartitionStatusBadge = {
badgeClass = 'badge-error';
}
return m(`span.badge.badge-${size}.${badgeClass}`, attrs, status);
return m(`span.badge.badge-${size}.${badgeClass}`, { class: className, ...attrs }, status);
}
};
export const EventTypeBadge = {
view(vnode: any) {
const { eventType, size = 'sm', ...attrs } = vnode.attrs;
export const EventTypeBadge: TypedComponent<EventTypeBadgeAttrs> = {
view(vnode: m.Vnode<EventTypeBadgeAttrs>) {
const { eventType, size = 'sm', class: className, ...attrs } = vnode.attrs;
let badgeClass = 'badge-ghost';
let displayName = eventType;
@ -96,6 +103,6 @@ export const EventTypeBadge = {
break;
}
return m(`span.badge.badge-${size}.${badgeClass}`, attrs, displayName);
return m(`span.badge.badge-${size}.${badgeClass}`, { class: className, ...attrs }, displayName);
}
};
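// Call-site sketch: attrs are now checked against the badge attr interfaces, and an
// extra class merges with the badge classes instead of replacing them. For the dual
// status fields introduced in the proto change below, render status_name and branch
// on status_code (e.g. partition code 5 = failed per the SQLite mapping).
const cell = m(BuildStatusBadge, { status: 'completed', size: 'md', class: 'ml-2' });
function partitionBadge(p: { status_code: number; status_name: string }) {
return m(PartitionStatusBadge, {
status: p.status_name,
class: p.status_code === 5 ? 'font-bold' : undefined,
});
}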

View file

@ -22,8 +22,9 @@ enum DepType {
// Represents a data dependency
message DataDep {
DepType dep_type = 1;
PartitionRef partition_ref = 2;
DepType dep_type_code = 1; // Enum for programmatic use
string dep_type_name = 2; // Human-readable string ("query", "materialize")
PartitionRef partition_ref = 3; // Moved from field 2 to 3
}
// Configuration for a job
@ -195,17 +196,19 @@ enum BuildRequestStatus {
// Build request lifecycle event
message BuildRequestEvent {
BuildRequestStatus status = 1;
repeated PartitionRef requested_partitions = 2;
string message = 3; // Optional status message
BuildRequestStatus status_code = 1; // Enum for programmatic use
string status_name = 2; // Human-readable string
repeated PartitionRef requested_partitions = 3;
string message = 4; // Optional status message
}
// Partition state change event
message PartitionEvent {
PartitionRef partition_ref = 1;
PartitionStatus status = 2;
string message = 3; // Optional status message
string job_run_id = 4; // UUID of job run producing this partition (if applicable)
PartitionStatus status_code = 2; // Enum for programmatic use
string status_name = 3; // Human-readable string
string message = 4; // Optional status message
string job_run_id = 5; // UUID of job run producing this partition (if applicable)
}
// Job execution event
@ -213,10 +216,11 @@ message JobEvent {
string job_run_id = 1; // UUID for this job run
JobLabel job_label = 2; // Job being executed
repeated PartitionRef target_partitions = 3; // Partitions this job run produces
JobStatus status = 4;
string message = 5; // Optional status message
JobConfig config = 6; // Job configuration used (for SCHEDULED events)
repeated PartitionManifest manifests = 7; // Results (for COMPLETED events)
JobStatus status_code = 4; // Enum for programmatic use
string status_name = 5; // Human-readable string
string message = 6; // Optional status message
JobConfig config = 7; // Job configuration used (for SCHEDULED events)
repeated PartitionManifest manifests = 8; // Results (for COMPLETED events)
}
// Delegation event (when build request delegates to existing build)
@ -232,6 +236,23 @@ message JobGraphEvent {
string message = 2; // Optional message
}
// Partition invalidation event
message PartitionInvalidationEvent {
PartitionRef partition_ref = 1; // Partition being invalidated
string reason = 2; // Reason for invalidation
}
// Task cancellation event
message TaskCancelEvent {
string job_run_id = 1; // UUID of the job run being cancelled
string reason = 2; // Reason for cancellation
}
// Build cancellation event
message BuildCancelEvent {
string reason = 1; // Reason for cancellation
}
// Individual build event
message BuildEvent {
// Event metadata
@ -246,9 +267,277 @@ message BuildEvent {
JobEvent job_event = 12;
DelegationEvent delegation_event = 13;
JobGraphEvent job_graph_event = 14;
PartitionInvalidationEvent partition_invalidation_event = 15;
TaskCancelEvent task_cancel_event = 16;
BuildCancelEvent build_cancel_event = 17;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
// List Operations (Unified CLI/Service Responses)
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Partitions List
//
message PartitionsListRequest {
optional uint32 limit = 1;
optional uint32 offset = 2;
optional string status_filter = 3;
}
message PartitionsListResponse {
repeated PartitionSummary partitions = 1;
uint32 total_count = 2;
bool has_more = 3;
}
message PartitionSummary {
string partition_ref = 1;
PartitionStatus status_code = 2; // Enum for programmatic use
string status_name = 3; // Human-readable string
int64 last_updated = 4;
uint32 builds_count = 5;
uint32 invalidation_count = 6;
optional string last_successful_build = 7;
}
//
// Jobs List
//
message JobsListRequest {
optional uint32 limit = 1;
optional string search = 2;
}
message JobsListResponse {
repeated JobSummary jobs = 1;
uint32 total_count = 2;
}
message JobSummary {
string job_label = 1;
uint32 total_runs = 2;
uint32 successful_runs = 3;
uint32 failed_runs = 4;
uint32 cancelled_runs = 5;
double average_partitions_per_run = 6;
int64 last_run_timestamp = 7;
JobStatus last_run_status_code = 8; // Enum for programmatic use
string last_run_status_name = 9; // Human-readable string
repeated string recent_builds = 10;
}
//
// Tasks List
//
message TasksListRequest {
optional uint32 limit = 1;
}
message TasksListResponse {
repeated TaskSummary tasks = 1;
uint32 total_count = 2;
}
message TaskSummary {
string job_run_id = 1;
string job_label = 2;
string build_request_id = 3;
JobStatus status_code = 4; // Enum for programmatic use
string status_name = 5; // Human-readable string
repeated PartitionRef target_partitions = 6;
int64 scheduled_at = 7;
optional int64 started_at = 8;
optional int64 completed_at = 9;
optional int64 duration_ms = 10;
bool cancelled = 11;
string message = 12;
}
//
// Builds List
//
message BuildsListRequest {
optional uint32 limit = 1;
optional uint32 offset = 2;
optional string status_filter = 3;
}
message BuildsListResponse {
repeated BuildSummary builds = 1;
uint32 total_count = 2;
bool has_more = 3;
}
message BuildSummary {
string build_request_id = 1;
BuildRequestStatus status_code = 2; // Enum for programmatic use
string status_name = 3; // Human-readable string
repeated PartitionRef requested_partitions = 4;
uint32 total_jobs = 5;
uint32 completed_jobs = 6;
uint32 failed_jobs = 7;
uint32 cancelled_jobs = 8;
int64 requested_at = 9;
optional int64 started_at = 10;
optional int64 completed_at = 11;
optional int64 duration_ms = 12;
bool cancelled = 13;
}
//
// Activity Summary
//
message ActivityResponse {
uint32 active_builds_count = 1;
repeated BuildSummary recent_builds = 2;
repeated PartitionSummary recent_partitions = 3;
uint32 total_partitions_count = 4;
string system_status = 5;
string graph_name = 6;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Detail Operations (Unified CLI/Service Detail Responses)
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Build Detail
//
message BuildDetailRequest {
string build_request_id = 1;
}
message BuildDetailResponse {
string build_request_id = 1;
BuildRequestStatus status_code = 2; // Enum for programmatic use
string status_name = 3; // Human-readable string
repeated PartitionRef requested_partitions = 4;
uint32 total_jobs = 5;
uint32 completed_jobs = 6;
uint32 failed_jobs = 7;
uint32 cancelled_jobs = 8;
int64 requested_at = 9;
optional int64 started_at = 10;
optional int64 completed_at = 11;
optional int64 duration_ms = 12;
bool cancelled = 13;
optional string cancel_reason = 14;
repeated BuildTimelineEvent timeline = 15;
}
message BuildTimelineEvent {
int64 timestamp = 1;
optional BuildRequestStatus status_code = 2; // Enum for programmatic use
optional string status_name = 3; // Human-readable string
string message = 4;
string event_type = 5;
optional string cancel_reason = 6;
}
//
// Partition Detail
//
message PartitionDetailRequest {
string partition_ref = 1;
}
message PartitionDetailResponse {
string partition_ref = 1;
PartitionStatus status_code = 2; // Enum for programmatic use
string status_name = 3; // Human-readable string
int64 last_updated = 4;
uint32 builds_count = 5;
optional string last_successful_build = 6;
uint32 invalidation_count = 7;
repeated PartitionTimelineEvent timeline = 8;
}
message PartitionTimelineEvent {
int64 timestamp = 1;
PartitionStatus status_code = 2; // Enum for programmatic use
string status_name = 3; // Human-readable string
string message = 4;
string build_request_id = 5;
optional string job_run_id = 6;
}
//
// Job Detail
//
message JobDetailRequest {
string job_label = 1;
}
message JobDetailResponse {
string job_label = 1;
uint32 total_runs = 2;
uint32 successful_runs = 3;
uint32 failed_runs = 4;
uint32 cancelled_runs = 5;
double average_partitions_per_run = 6;
int64 last_run_timestamp = 7;
JobStatus last_run_status_code = 8; // Enum for programmatic use
string last_run_status_name = 9; // Human-readable string
repeated string recent_builds = 10;
repeated JobRunDetail runs = 11;
}
message JobRunDetail {
string job_run_id = 1;
string build_request_id = 2;
repeated PartitionRef target_partitions = 3;
JobStatus status_code = 4; // Enum for programmatic use
string status_name = 5; // Human-readable string
optional int64 started_at = 6;
optional int64 completed_at = 7;
optional int64 duration_ms = 8;
string message = 9;
}
//
// Task Detail
//
message TaskDetailRequest {
string job_run_id = 1;
}
message TaskDetailResponse {
string job_run_id = 1;
string job_label = 2;
string build_request_id = 3;
JobStatus status_code = 4; // Enum for programmatic use
string status_name = 5; // Human-readable string
repeated PartitionRef target_partitions = 6;
int64 scheduled_at = 7;
optional int64 started_at = 8;
optional int64 completed_at = 9;
optional int64 duration_ms = 10;
bool cancelled = 11;
optional string cancel_reason = 12;
string message = 13;
repeated TaskTimelineEvent timeline = 14;
}
message TaskTimelineEvent {
int64 timestamp = 1;
optional JobStatus status_code = 2; // Enum for programmatic use
optional string status_name = 3; // Human-readable string
string message = 4;
string event_type = 5;
optional string cancel_reason = 6;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Services
///////////////////////////////////////////////////////////////////////////////////////////////

755
databuild/event_log/mock.rs Normal file
View file

@ -0,0 +1,755 @@
use crate::*;
use crate::event_log::{BuildEventLog, BuildEventLogError, Result, QueryResult, BuildRequestSummary, PartitionSummary, ActivitySummary};
use async_trait::async_trait;
use std::sync::{Arc, Mutex};
use rusqlite::{Connection, params};
/// MockBuildEventLog provides an in-memory SQLite database for testing
///
/// This implementation makes it easy to specify test data and verify behavior
/// while using the real code paths for event writing and repository queries.
///
/// Key features:
/// - Uses in-memory SQLite for parallel test execution
/// - Provides event constructors with sensible defaults
/// - Allows easy specification of test scenarios
/// - Uses the same SQL schema as production SQLite implementation
pub struct MockBuildEventLog {
connection: Arc<Mutex<Connection>>,
}
impl MockBuildEventLog {
/// Create a new MockBuildEventLog with an in-memory SQLite database
pub async fn new() -> Result<Self> {
let conn = Connection::open(":memory:")
.map_err(|e| BuildEventLogError::ConnectionError(e.to_string()))?;
// Foreign key constraints stay at SQLite's default (disabled) to keep test setup simple
let mock = Self {
connection: Arc::new(Mutex::new(conn)),
};
// Initialize the schema
mock.initialize().await?;
Ok(mock)
}
/// Create a new MockBuildEventLog with predefined events
pub async fn with_events(events: Vec<BuildEvent>) -> Result<Self> {
let mock = Self::new().await?;
// Insert all provided events
for event in events {
mock.append_event(event).await?;
}
Ok(mock)
}
/// Get the number of events in the mock event log
pub async fn event_count(&self) -> Result<usize> {
let conn = self.connection.lock().unwrap();
let mut stmt = conn.prepare("SELECT COUNT(*) FROM build_events")
.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let count: i64 = stmt.query_row([], |row| row.get(0))
.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
Ok(count as usize)
}
/// Get all events ordered by timestamp
pub async fn get_all_events(&self) -> Result<Vec<BuildEvent>> {
let conn = self.connection.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT event_data FROM build_events ORDER BY timestamp ASC"
).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let rows = stmt.query_map([], |row| {
let event_data: String = row.get(0)?;
Ok(event_data)
}).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let mut events = Vec::new();
for row in rows {
let event_data = row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let event: BuildEvent = serde_json::from_str(&event_data)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
events.push(event);
}
Ok(events)
}
/// Clear all events from the mock event log
pub async fn clear(&self) -> Result<()> {
let conn = self.connection.lock().unwrap();
// Clear all tables
conn.execute("DELETE FROM build_events", [])
.map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute("DELETE FROM build_request_events", [])
.map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute("DELETE FROM partition_events", [])
.map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute("DELETE FROM job_events", [])
.map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute("DELETE FROM delegation_events", [])
.map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute("DELETE FROM job_graph_events", [])
.map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
Ok(())
}
}
#[async_trait]
impl BuildEventLog for MockBuildEventLog {
async fn append_event(&self, event: BuildEvent) -> Result<()> {
let conn = self.connection.lock().unwrap();
// Serialize the entire event for storage
let event_data = serde_json::to_string(&event)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
// Insert into main events table
conn.execute(
"INSERT INTO build_events (event_id, timestamp, build_request_id, event_type, event_data) VALUES (?1, ?2, ?3, ?4, ?5)",
params![
event.event_id,
event.timestamp,
event.build_request_id,
match &event.event_type {
Some(crate::build_event::EventType::BuildRequestEvent(_)) => "build_request",
Some(crate::build_event::EventType::PartitionEvent(_)) => "partition",
Some(crate::build_event::EventType::JobEvent(_)) => "job",
Some(crate::build_event::EventType::DelegationEvent(_)) => "delegation",
Some(crate::build_event::EventType::JobGraphEvent(_)) => "job_graph",
Some(crate::build_event::EventType::PartitionInvalidationEvent(_)) => "partition_invalidation",
Some(crate::build_event::EventType::TaskCancelEvent(_)) => "task_cancel",
Some(crate::build_event::EventType::BuildCancelEvent(_)) => "build_cancel",
None => "unknown",
},
event_data
],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
// Insert into specific event type table for better querying
match &event.event_type {
Some(crate::build_event::EventType::BuildRequestEvent(br_event)) => {
let partitions_json = serde_json::to_string(&br_event.requested_partitions)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
conn.execute(
"INSERT INTO build_request_events (event_id, status, requested_partitions, message) VALUES (?1, ?2, ?3, ?4)",
params![
event.event_id,
br_event.status_code.to_string(),
partitions_json,
br_event.message
],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
}
Some(crate::build_event::EventType::PartitionEvent(p_event)) => {
conn.execute(
"INSERT INTO partition_events (event_id, partition_ref, status, message, job_run_id) VALUES (?1, ?2, ?3, ?4, ?5)",
params![
event.event_id,
p_event.partition_ref.as_ref().map(|r| &r.str).unwrap_or(&String::new()),
p_event.status_code.to_string(),
p_event.message,
if p_event.job_run_id.is_empty() { None } else { Some(&p_event.job_run_id) }
],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
}
Some(crate::build_event::EventType::JobEvent(j_event)) => {
let partitions_json = serde_json::to_string(&j_event.target_partitions)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
let config_json = j_event.config.as_ref()
.map(|c| serde_json::to_string(c))
.transpose()
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
let manifests_json = serde_json::to_string(&j_event.manifests)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
conn.execute(
"INSERT INTO job_events (event_id, job_run_id, job_label, target_partitions, status, message, config_json, manifests_json) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
params![
event.event_id,
j_event.job_run_id,
j_event.job_label.as_ref().map(|l| &l.label).unwrap_or(&String::new()),
partitions_json,
j_event.status_code.to_string(),
j_event.message,
config_json,
manifests_json
],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
}
Some(crate::build_event::EventType::DelegationEvent(d_event)) => {
conn.execute(
"INSERT INTO delegation_events (event_id, partition_ref, delegated_to_build_request_id, message) VALUES (?1, ?2, ?3, ?4)",
params![
event.event_id,
d_event.partition_ref.as_ref().map(|r| &r.str).unwrap_or(&String::new()),
d_event.delegated_to_build_request_id,
d_event.message
],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
}
Some(crate::build_event::EventType::JobGraphEvent(jg_event)) => {
let job_graph_json = match serde_json::to_string(&jg_event.job_graph) {
Ok(json) => json,
Err(e) => {
return Err(BuildEventLogError::DatabaseError(format!("Failed to serialize job graph: {}", e)));
}
};
conn.execute(
"INSERT INTO job_graph_events (event_id, job_graph_json, message) VALUES (?1, ?2, ?3)",
params![
event.event_id,
job_graph_json,
jg_event.message
],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
}
Some(crate::build_event::EventType::PartitionInvalidationEvent(_pi_event)) => {
// For now, just store in main events table
}
Some(crate::build_event::EventType::TaskCancelEvent(_tc_event)) => {
// For now, just store in main events table
}
Some(crate::build_event::EventType::BuildCancelEvent(_bc_event)) => {
// For now, just store in main events table
}
None => {}
}
Ok(())
}
async fn get_build_request_events(
&self,
build_request_id: &str,
since: Option<i64>
) -> Result<Vec<BuildEvent>> {
let conn = self.connection.lock().unwrap();
let (query, params): (String, Vec<_>) = match since {
Some(timestamp) => (
"SELECT event_data FROM build_events WHERE build_request_id = ?1 AND timestamp > ?2 ORDER BY timestamp ASC".to_string(),
vec![build_request_id.to_string(), timestamp.to_string()]
),
None => (
"SELECT event_data FROM build_events WHERE build_request_id = ?1 ORDER BY timestamp ASC".to_string(),
vec![build_request_id.to_string()]
)
};
let mut stmt = conn.prepare(&query)
.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let rows = stmt.query_map(rusqlite::params_from_iter(params.iter()), |row| {
let event_data: String = row.get(0)?;
Ok(event_data)
}).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let mut events = Vec::new();
for row in rows {
let event_data = row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let event: BuildEvent = serde_json::from_str(&event_data)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
events.push(event);
}
Ok(events)
}
async fn get_partition_events(
&self,
partition_ref: &str,
since: Option<i64>
) -> Result<Vec<BuildEvent>> {
let conn = self.connection.lock().unwrap();
let (query, params): (String, Vec<_>) = match since {
Some(timestamp) => (
"SELECT be.event_data FROM build_events be JOIN partition_events pe ON be.event_id = pe.event_id WHERE pe.partition_ref = ?1 AND be.timestamp > ?2 ORDER BY be.timestamp ASC".to_string(),
vec![partition_ref.to_string(), timestamp.to_string()]
),
None => (
"SELECT be.event_data FROM build_events be JOIN partition_events pe ON be.event_id = pe.event_id WHERE pe.partition_ref = ?1 ORDER BY be.timestamp ASC".to_string(),
vec![partition_ref.to_string()]
)
};
let mut stmt = conn.prepare(&query)
.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let rows = stmt.query_map(rusqlite::params_from_iter(params.iter()), |row| {
let event_data: String = row.get(0)?;
Ok(event_data)
}).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let mut events = Vec::new();
for row in rows {
let event_data = row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let event: BuildEvent = serde_json::from_str(&event_data)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
events.push(event);
}
Ok(events)
}
async fn get_job_run_events(
&self,
job_run_id: &str
) -> Result<Vec<BuildEvent>> {
let conn = self.connection.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT be.event_data FROM build_events be JOIN job_events je ON be.event_id = je.event_id WHERE je.job_run_id = ?1 ORDER BY be.timestamp ASC"
).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let rows = stmt.query_map([job_run_id], |row| {
let event_data: String = row.get(0)?;
Ok(event_data)
}).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let mut events = Vec::new();
for row in rows {
let event_data = row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let event: BuildEvent = serde_json::from_str(&event_data)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
events.push(event);
}
Ok(events)
}
async fn get_events_in_range(
&self,
start_time: i64,
end_time: i64
) -> Result<Vec<BuildEvent>> {
let conn = self.connection.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT event_data FROM build_events WHERE timestamp >= ?1 AND timestamp <= ?2 ORDER BY timestamp ASC"
).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let rows = stmt.query_map([start_time, end_time], |row| {
let event_data: String = row.get(0)?;
Ok(event_data)
}).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let mut events = Vec::new();
for row in rows {
let event_data = row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let event: BuildEvent = serde_json::from_str(&event_data)
.map_err(|e| BuildEventLogError::SerializationError(e.to_string()))?;
events.push(event);
}
Ok(events)
}
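// Runs an arbitrary SQL query against the in-memory store; every column value comes back stringified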
async fn execute_query(&self, query: &str) -> Result<QueryResult> {
let conn = self.connection.lock().unwrap();
let mut stmt = conn.prepare(query)
.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let column_names: Vec<String> = stmt.column_names().iter().map(|s| s.to_string()).collect();
let rows = stmt.query_map([], |row| {
let mut values = Vec::new();
for i in 0..column_names.len() {
// Stringify whatever type the column holds; reading INTEGER or REAL
// columns directly as String would fail with InvalidColumnType
let value = match row.get_ref(i)? {
rusqlite::types::ValueRef::Null => String::new(),
rusqlite::types::ValueRef::Integer(v) => v.to_string(),
rusqlite::types::ValueRef::Real(v) => v.to_string(),
rusqlite::types::ValueRef::Text(t) => String::from_utf8_lossy(t).into_owned(),
rusqlite::types::ValueRef::Blob(b) => format!("<{} byte blob>", b.len()),
};
values.push(value);
}
Ok(values)
}).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let mut result_rows = Vec::new();
for row in rows {
let values = row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
result_rows.push(values);
}
Ok(QueryResult {
columns: column_names,
rows: result_rows,
})
}
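// Fetches the most recent status event for a partition, decoding the stored status-code string back into a PartitionStatus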
async fn get_latest_partition_status(
&self,
partition_ref: &str
) -> Result<Option<(PartitionStatus, i64)>> {
let conn = self.connection.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT pe.status, be.timestamp FROM build_events be JOIN partition_events pe ON be.event_id = pe.event_id WHERE pe.partition_ref = ?1 ORDER BY be.timestamp DESC LIMIT 1"
).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let result = stmt.query_row([partition_ref], |row| {
let status_str: String = row.get(0)?;
let timestamp: i64 = row.get(1)?;
let status: i32 = status_str.parse().unwrap_or(0);
Ok((status, timestamp))
});
match result {
Ok((status, timestamp)) => {
let partition_status = match status {
1 => PartitionStatus::PartitionRequested,
2 => PartitionStatus::PartitionAnalyzed,
3 => PartitionStatus::PartitionBuilding,
4 => PartitionStatus::PartitionAvailable,
5 => PartitionStatus::PartitionFailed,
6 => PartitionStatus::PartitionDelegated,
_ => PartitionStatus::PartitionUnknown,
};
Ok(Some((partition_status, timestamp)))
}
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(BuildEventLogError::QueryError(e.to_string())),
}
}
async fn get_active_builds_for_partition(
&self,
partition_ref: &str
) -> Result<Vec<String>> {
let conn = self.connection.lock().unwrap();
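// Status-code strings '1'..'3' (requested/analyzed/building) mark builds still in flight for this partition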
let mut stmt = conn.prepare(
"SELECT DISTINCT be.build_request_id FROM build_events be JOIN partition_events pe ON be.event_id = pe.event_id WHERE pe.partition_ref = ?1 AND pe.status IN ('1', '2', '3') ORDER BY be.timestamp DESC"
).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let rows = stmt.query_map([partition_ref], |row| {
let build_request_id: String = row.get(0)?;
Ok(build_request_id)
}).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let mut build_ids = Vec::new();
for row in rows {
let build_id = row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
build_ids.push(build_id);
}
Ok(build_ids)
}
async fn initialize(&self) -> Result<()> {
let conn = self.connection.lock().unwrap();
// Create main events table
conn.execute(
"CREATE TABLE IF NOT EXISTS build_events (
event_id TEXT PRIMARY KEY,
timestamp INTEGER NOT NULL,
build_request_id TEXT NOT NULL,
event_type TEXT NOT NULL,
event_data TEXT NOT NULL
)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
// Create specific event type tables
conn.execute(
"CREATE TABLE IF NOT EXISTS build_request_events (
event_id TEXT PRIMARY KEY,
status TEXT NOT NULL,
requested_partitions TEXT NOT NULL,
message TEXT NOT NULL
)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute(
"CREATE TABLE IF NOT EXISTS partition_events (
event_id TEXT PRIMARY KEY,
partition_ref TEXT NOT NULL,
status TEXT NOT NULL,
message TEXT NOT NULL,
job_run_id TEXT
)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute(
"CREATE TABLE IF NOT EXISTS job_events (
event_id TEXT PRIMARY KEY,
job_run_id TEXT NOT NULL,
job_label TEXT NOT NULL,
target_partitions TEXT NOT NULL,
status TEXT NOT NULL,
message TEXT NOT NULL,
config_json TEXT,
manifests_json TEXT NOT NULL
)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute(
"CREATE TABLE IF NOT EXISTS delegation_events (
event_id TEXT PRIMARY KEY,
partition_ref TEXT NOT NULL,
delegated_to_build_request_id TEXT NOT NULL,
message TEXT NOT NULL
)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute(
"CREATE TABLE IF NOT EXISTS job_graph_events (
event_id TEXT PRIMARY KEY,
job_graph_json TEXT NOT NULL,
message TEXT NOT NULL
)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
// Create indexes for common queries
conn.execute(
"CREATE INDEX IF NOT EXISTS idx_build_events_build_request_id ON build_events (build_request_id)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute(
"CREATE INDEX IF NOT EXISTS idx_build_events_timestamp ON build_events (timestamp)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute(
"CREATE INDEX IF NOT EXISTS idx_partition_events_partition_ref ON partition_events (partition_ref)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
conn.execute(
"CREATE INDEX IF NOT EXISTS idx_job_events_job_run_id ON job_events (job_run_id)",
[],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
Ok(())
}
async fn list_build_requests(
&self,
_limit: u32,
_offset: u32,
_status_filter: Option<BuildRequestStatus>,
) -> Result<(Vec<BuildRequestSummary>, u32)> {
// For simplicity in the mock, return empty results
// Real implementation would query the database
Ok((vec![], 0))
}
async fn list_recent_partitions(
&self,
_limit: u32,
_offset: u32,
_status_filter: Option<PartitionStatus>,
) -> Result<(Vec<PartitionSummary>, u32)> {
// For simplicity in the mock, return empty results
// Real implementation would query the database
Ok((vec![], 0))
}
async fn get_activity_summary(&self) -> Result<ActivitySummary> {
// For simplicity in the mock, return empty activity
Ok(ActivitySummary {
active_builds_count: 0,
recent_builds: vec![],
recent_partitions: vec![],
total_partitions_count: 0,
})
}
async fn get_build_request_for_available_partition(
&self,
partition_ref: &str
) -> Result<Option<String>> {
let conn = self.connection.lock().unwrap();
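// Status-code string '4' is PartitionStatus::PartitionAvailable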
let mut stmt = conn.prepare(
"SELECT be.build_request_id FROM build_events be JOIN partition_events pe ON be.event_id = pe.event_id WHERE pe.partition_ref = ?1 AND pe.status = '4' ORDER BY be.timestamp DESC LIMIT 1"
).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let result = stmt.query_row([partition_ref], |row| {
let build_request_id: String = row.get(0)?;
Ok(build_request_id)
});
match result {
Ok(build_request_id) => Ok(Some(build_request_id)),
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(BuildEventLogError::QueryError(e.to_string())),
}
}
}
/// Utility functions for creating test events with sensible defaults
pub mod test_events {
use super::*;
use crate::event_log::{generate_event_id, current_timestamp_nanos};
use uuid::Uuid;
/// Create a build request received event with random defaults
pub fn build_request_received(
build_request_id: Option<String>,
partitions: Vec<PartitionRef>,
) -> BuildEvent {
BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id: build_request_id.unwrap_or_else(|| Uuid::new_v4().to_string()),
event_type: Some(build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status_code: BuildRequestStatus::BuildRequestReceived as i32,
status_name: BuildRequestStatus::BuildRequestReceived.to_display_string(),
requested_partitions: partitions,
message: "Build request received".to_string(),
})),
}
}
/// Create a build request event with specific status
pub fn build_request_event(
build_request_id: Option<String>,
partitions: Vec<PartitionRef>,
status: BuildRequestStatus,
) -> BuildEvent {
BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id: build_request_id.unwrap_or_else(|| Uuid::new_v4().to_string()),
event_type: Some(build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status_code: status as i32,
status_name: status.to_display_string(),
requested_partitions: partitions,
message: format!("Build request status: {:?}", status),
})),
}
}
/// Create a partition status event with random defaults
pub fn partition_status(
build_request_id: Option<String>,
partition_ref: PartitionRef,
status: PartitionStatus,
job_run_id: Option<String>,
) -> BuildEvent {
BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id: build_request_id.unwrap_or_else(|| Uuid::new_v4().to_string()),
event_type: Some(build_event::EventType::PartitionEvent(PartitionEvent {
partition_ref: Some(partition_ref),
status_code: status as i32,
status_name: status.to_display_string(),
message: format!("Partition status: {:?}", status),
job_run_id: job_run_id.unwrap_or_default(),
})),
}
}
/// Create a job event with random defaults
pub fn job_event(
build_request_id: Option<String>,
job_run_id: Option<String>,
job_label: JobLabel,
target_partitions: Vec<PartitionRef>,
status: JobStatus,
) -> BuildEvent {
BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id: build_request_id.unwrap_or_else(|| Uuid::new_v4().to_string()),
event_type: Some(build_event::EventType::JobEvent(JobEvent {
job_run_id: job_run_id.unwrap_or_else(|| Uuid::new_v4().to_string()),
job_label: Some(job_label),
target_partitions,
status_code: status as i32,
status_name: status.to_display_string(),
message: format!("Job status: {:?}", status),
config: None,
manifests: vec![],
})),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::test_events::*;
#[tokio::test]
async fn test_mock_build_event_log_basic() {
let mock = MockBuildEventLog::new().await.unwrap();
// Initially empty
assert_eq!(mock.event_count().await.unwrap(), 0);
// Add an event
let build_id = "test-build-123".to_string();
let partition = PartitionRef { str: "test/partition".to_string() };
let event = build_request_received(Some(build_id.clone()), vec![partition]);
mock.append_event(event).await.unwrap();
// Check event count
assert_eq!(mock.event_count().await.unwrap(), 1);
// Query events by build request
let events = mock.get_build_request_events(&build_id, None).await.unwrap();
assert_eq!(events.len(), 1);
// Clear events
mock.clear().await.unwrap();
assert_eq!(mock.event_count().await.unwrap(), 0);
}
#[tokio::test]
async fn test_mock_build_event_log_with_predefined_events() {
let build_id = "test-build-456".to_string();
let partition = PartitionRef { str: "data/users".to_string() };
let events = vec![
build_request_received(Some(build_id.clone()), vec![partition.clone()]),
partition_status(Some(build_id.clone()), partition.clone(), PartitionStatus::PartitionBuilding, None),
partition_status(Some(build_id.clone()), partition.clone(), PartitionStatus::PartitionAvailable, None),
];
let mock = MockBuildEventLog::with_events(events).await.unwrap();
// Should have 3 events
assert_eq!(mock.event_count().await.unwrap(), 3);
// Query partition events
let partition_events = mock.get_partition_events(&partition.str, None).await.unwrap();
assert_eq!(partition_events.len(), 2); // Two partition events
// Check latest partition status
let latest_status = mock.get_latest_partition_status(&partition.str).await.unwrap();
assert!(latest_status.is_some());
let (status, _timestamp) = latest_status.unwrap();
assert_eq!(status, PartitionStatus::PartitionAvailable);
}
#[tokio::test]
async fn test_event_constructors() {
let partition = PartitionRef { str: "test/data".to_string() };
let job_label = JobLabel { label: "//:test_job".to_string() };
// Test build request event constructor
let br_event = build_request_received(None, vec![partition.clone()]);
assert!(matches!(br_event.event_type, Some(build_event::EventType::BuildRequestEvent(_))));
// Test partition event constructor
let p_event = partition_status(None, partition.clone(), PartitionStatus::PartitionAvailable, None);
assert!(matches!(p_event.event_type, Some(build_event::EventType::PartitionEvent(_))));
// Test job event constructor
let j_event = job_event(None, None, job_label, vec![partition], JobStatus::JobCompleted);
assert!(matches!(j_event.event_type, Some(build_event::EventType::JobEvent(_))));
}
}

View file

@ -6,6 +6,8 @@ use uuid::Uuid;
pub mod stdout;
pub mod sqlite;
pub mod postgres;
pub mod writer;
pub mod mock;
#[derive(Debug)]
pub enum BuildEventLogError {

View file

@ -65,7 +65,17 @@ impl SqliteBuildEventLog {
.unwrap_or_default();
Some(crate::build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status,
status_code: status,
status_name: match status {
1 => BuildRequestStatus::BuildRequestReceived.to_display_string(),
2 => BuildRequestStatus::BuildRequestPlanning.to_display_string(),
3 => BuildRequestStatus::BuildRequestExecuting.to_display_string(),
4 => BuildRequestStatus::BuildRequestCompleted.to_display_string(),
5 => BuildRequestStatus::BuildRequestFailed.to_display_string(),
6 => BuildRequestStatus::BuildRequestCancelled.to_display_string(),
7 => BuildRequestStatus::BuildRequestAnalysisCompleted.to_display_string(),
_ => BuildRequestStatus::BuildRequestUnknown.to_display_string(),
},
requested_partitions,
message,
}))
@ -81,7 +91,16 @@ impl SqliteBuildEventLog {
Some(crate::build_event::EventType::PartitionEvent(PartitionEvent {
partition_ref: Some(PartitionRef { str: partition_ref }),
status,
status_code: status,
status_name: match status {
1 => PartitionStatus::PartitionRequested.to_display_string(),
2 => PartitionStatus::PartitionAnalyzed.to_display_string(),
3 => PartitionStatus::PartitionBuilding.to_display_string(),
4 => PartitionStatus::PartitionAvailable.to_display_string(),
5 => PartitionStatus::PartitionFailed.to_display_string(),
6 => PartitionStatus::PartitionDelegated.to_display_string(),
_ => PartitionStatus::PartitionUnknown.to_display_string(),
},
message,
job_run_id,
}))
@ -108,7 +127,16 @@ impl SqliteBuildEventLog {
job_run_id,
job_label: Some(JobLabel { label: job_label }),
target_partitions,
status,
status_code: status,
status_name: match status {
1 => JobStatus::JobScheduled.to_display_string(),
2 => JobStatus::JobRunning.to_display_string(),
3 => JobStatus::JobCompleted.to_display_string(),
4 => JobStatus::JobFailed.to_display_string(),
5 => JobStatus::JobCancelled.to_display_string(),
6 => JobStatus::JobSkipped.to_display_string(),
_ => JobStatus::JobUnknown.to_display_string(),
},
message,
config,
manifests,
@ -168,6 +196,9 @@ impl BuildEventLog for SqliteBuildEventLog {
Some(crate::build_event::EventType::JobEvent(_)) => "job",
Some(crate::build_event::EventType::DelegationEvent(_)) => "delegation",
Some(crate::build_event::EventType::JobGraphEvent(_)) => "job_graph",
Some(crate::build_event::EventType::PartitionInvalidationEvent(_)) => "partition_invalidation",
Some(crate::build_event::EventType::TaskCancelEvent(_)) => "task_cancel",
Some(crate::build_event::EventType::BuildCancelEvent(_)) => "build_cancel",
None => "unknown",
}
],
@ -183,7 +214,7 @@ impl BuildEventLog for SqliteBuildEventLog {
"INSERT INTO build_request_events (event_id, status, requested_partitions, message) VALUES (?1, ?2, ?3, ?4)",
params![
event.event_id,
br_event.status.to_string(),
br_event.status_code.to_string(),
partitions_json,
br_event.message
],
@ -195,7 +226,7 @@ impl BuildEventLog for SqliteBuildEventLog {
params![
event.event_id,
p_event.partition_ref.as_ref().map(|r| &r.str).unwrap_or(&String::new()),
p_event.status.to_string(),
p_event.status_code.to_string(),
p_event.message,
if p_event.job_run_id.is_empty() { None } else { Some(&p_event.job_run_id) }
],
@ -218,7 +249,7 @@ impl BuildEventLog for SqliteBuildEventLog {
j_event.job_run_id,
j_event.job_label.as_ref().map(|l| &l.label).unwrap_or(&String::new()),
partitions_json,
j_event.status.to_string(),
j_event.status_code.to_string(),
j_event.message,
config_json,
manifests_json
@ -252,6 +283,18 @@ impl BuildEventLog for SqliteBuildEventLog {
],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
}
Some(crate::build_event::EventType::PartitionInvalidationEvent(_pi_event)) => {
// For now, we'll just store these in the main events table
// In a later phase, we could add a specific table for invalidation events
}
Some(crate::build_event::EventType::TaskCancelEvent(_tc_event)) => {
// For now, we'll just store these in the main events table
// In a later phase, we could add a specific table for task cancel events
}
Some(crate::build_event::EventType::BuildCancelEvent(_bc_event)) => {
// For now, we'll just store these in the main events table
// In a later phase, we could add a specific table for build cancel events
}
None => {}
}
@ -391,15 +434,60 @@ impl BuildEventLog for SqliteBuildEventLog {
async fn get_events_in_range(
&self,
_start_time: i64,
_end_time: i64
start_time: i64,
end_time: i64
) -> Result<Vec<BuildEvent>> {
// This method is not implemented because it would require complex joins
// to reconstruct complete event data. Use get_build_request_events instead
// which properly reconstructs all event types for a build request.
Err(BuildEventLogError::QueryError(
"get_events_in_range is not implemented - use get_build_request_events to get complete event data".to_string()
))
let conn = self.connection.lock().unwrap();
// Use a UNION query to get all event types with their specific data in the time range
let query = "
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
bre.status, bre.requested_partitions, bre.message, NULL, NULL, NULL, NULL
FROM build_events be
LEFT JOIN build_request_events bre ON be.event_id = bre.event_id
WHERE be.timestamp >= ?1 AND be.timestamp <= ?2 AND be.event_type = 'build_request'
UNION ALL
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
pe.partition_ref, pe.status, pe.message, pe.job_run_id, NULL, NULL, NULL
FROM build_events be
LEFT JOIN partition_events pe ON be.event_id = pe.event_id
WHERE be.timestamp >= ?3 AND be.timestamp <= ?4 AND be.event_type = 'partition'
UNION ALL
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
je.job_run_id, je.job_label, je.target_partitions, je.status, je.message, je.config_json, je.manifests_json
FROM build_events be
LEFT JOIN job_events je ON be.event_id = je.event_id
WHERE be.timestamp >= ?5 AND be.timestamp <= ?6 AND be.event_type = 'job'
UNION ALL
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
de.partition_ref, de.delegated_to_build_request_id, de.message, NULL, NULL, NULL, NULL
FROM build_events be
LEFT JOIN delegation_events de ON be.event_id = de.event_id
WHERE be.timestamp >= ?7 AND be.timestamp <= ?8 AND be.event_type = 'delegation'
UNION ALL
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
jge.job_graph_json, jge.message, NULL, NULL, NULL, NULL, NULL
FROM build_events be
LEFT JOIN job_graph_events jge ON be.event_id = jge.event_id
WHERE be.timestamp >= ?9 AND be.timestamp <= ?10 AND be.event_type = 'job_graph'
ORDER BY timestamp ASC
";
let mut stmt = conn.prepare(query)
.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
// 10 parameters: one start_time/end_time pair for each of the five UNION ALL branches
let rows = stmt.query_map(
params![start_time, end_time, start_time, end_time, start_time, end_time, start_time, end_time, start_time, end_time],
Self::row_to_build_event_from_join
).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;
let mut events = Vec::new();
for row in rows {
events.push(row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?);
}
Ok(events)
}
async fn execute_query(&self, query: &str) -> Result<QueryResult> {

View file

@ -0,0 +1,452 @@
use crate::*;
use crate::event_log::{BuildEventLog, BuildEventLogError, Result, create_build_event, current_timestamp_nanos, generate_event_id};
use std::sync::Arc;
use log::debug;
/// Common interface for writing events to the build event log with validation
pub struct EventWriter {
event_log: Arc<dyn BuildEventLog>,
}
impl EventWriter {
/// Create a new EventWriter with the specified event log backend
pub fn new(event_log: Arc<dyn BuildEventLog>) -> Self {
Self { event_log }
}
/// Get access to the underlying event log for direct operations
pub fn event_log(&self) -> &dyn BuildEventLog {
self.event_log.as_ref()
}
/// Request a new build for the specified partitions
pub async fn request_build(
&self,
build_request_id: String,
requested_partitions: Vec<PartitionRef>,
) -> Result<()> {
debug!("Writing build request event for build: {}", build_request_id);
let event = create_build_event(
build_request_id,
build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status_code: BuildRequestStatus::BuildRequestReceived as i32,
status_name: BuildRequestStatus::BuildRequestReceived.to_display_string(),
requested_partitions,
message: "Build request received".to_string(),
}),
);
self.event_log.append_event(event).await
}
/// Update build request status
pub async fn update_build_status(
&self,
build_request_id: String,
status: BuildRequestStatus,
message: String,
) -> Result<()> {
debug!("Updating build status for {}: {:?}", build_request_id, status);
let event = create_build_event(
build_request_id,
build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status_code: status as i32,
status_name: status.to_display_string(),
requested_partitions: vec![],
message,
}),
);
self.event_log.append_event(event).await
}
/// Update build request status with partition list
pub async fn update_build_status_with_partitions(
&self,
build_request_id: String,
status: BuildRequestStatus,
requested_partitions: Vec<PartitionRef>,
message: String,
) -> Result<()> {
debug!("Updating build status for {}: {:?}", build_request_id, status);
let event = create_build_event(
build_request_id,
build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status_code: status as i32,
status_name: status.to_display_string(),
requested_partitions,
message,
}),
);
self.event_log.append_event(event).await
}
/// Update partition status
pub async fn update_partition_status(
&self,
build_request_id: String,
partition_ref: PartitionRef,
status: PartitionStatus,
message: String,
job_run_id: Option<String>,
) -> Result<()> {
debug!("Updating partition status for {}: {:?}", partition_ref.str, status);
let event = BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id,
event_type: Some(build_event::EventType::PartitionEvent(PartitionEvent {
partition_ref: Some(partition_ref),
status_code: status as i32,
status_name: status.to_display_string(),
message,
job_run_id: job_run_id.unwrap_or_default(),
})),
};
self.event_log.append_event(event).await
}
/// Invalidate a partition with a reason
pub async fn invalidate_partition(
&self,
build_request_id: String,
partition_ref: PartitionRef,
reason: String,
) -> Result<()> {
// First validate that the partition exists by checking its current status
let current_status = self.event_log.get_latest_partition_status(&partition_ref.str).await?;
if current_status.is_none() {
return Err(BuildEventLogError::QueryError(
format!("Cannot invalidate non-existent partition: {}", partition_ref.str)
));
}
let event = BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id,
event_type: Some(build_event::EventType::PartitionInvalidationEvent(
PartitionInvalidationEvent {
partition_ref: Some(partition_ref),
reason,
}
)),
};
self.event_log.append_event(event).await
}
/// Schedule a job for execution
pub async fn schedule_job(
&self,
build_request_id: String,
job_run_id: String,
job_label: JobLabel,
target_partitions: Vec<PartitionRef>,
config: JobConfig,
) -> Result<()> {
debug!("Scheduling job {} for partitions: {:?}", job_label.label, target_partitions);
let event = BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id,
event_type: Some(build_event::EventType::JobEvent(JobEvent {
job_run_id,
job_label: Some(job_label),
target_partitions,
status_code: JobStatus::JobScheduled as i32,
status_name: JobStatus::JobScheduled.to_display_string(),
message: "Job scheduled for execution".to_string(),
config: Some(config),
manifests: vec![],
})),
};
self.event_log.append_event(event).await
}
/// Update job status
pub async fn update_job_status(
&self,
build_request_id: String,
job_run_id: String,
job_label: JobLabel,
target_partitions: Vec<PartitionRef>,
status: JobStatus,
message: String,
manifests: Vec<PartitionManifest>,
) -> Result<()> {
debug!("Updating job {} status to {:?}", job_run_id, status);
let event = BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id,
event_type: Some(build_event::EventType::JobEvent(JobEvent {
job_run_id,
job_label: Some(job_label),
target_partitions,
status_code: status as i32,
status_name: status.to_display_string(),
message,
config: None,
manifests,
})),
};
self.event_log.append_event(event).await
}
/// Cancel a task (job run) with a reason
pub async fn cancel_task(
&self,
build_request_id: String,
job_run_id: String,
reason: String,
) -> Result<()> {
// Validate that the job run exists and is in a cancellable state
let job_events = self.event_log.get_job_run_events(&job_run_id).await?;
if job_events.is_empty() {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel non-existent job run: {}", job_run_id)
));
}
// Find the latest job status
let latest_status = job_events.iter()
.rev()
.find_map(|e| match &e.event_type {
Some(build_event::EventType::JobEvent(job)) => Some(job.status_code),
_ => None,
});
match latest_status {
Some(status) if status == JobStatus::JobCompleted as i32 => {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel completed job run: {}", job_run_id)
));
}
Some(status) if status == JobStatus::JobFailed as i32 => {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel failed job run: {}", job_run_id)
));
}
Some(status) if status == JobStatus::JobCancelled as i32 => {
return Err(BuildEventLogError::QueryError(
format!("Job run already cancelled: {}", job_run_id)
));
}
_ => {}
}
let event = BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id,
event_type: Some(build_event::EventType::TaskCancelEvent(TaskCancelEvent {
job_run_id,
reason,
})),
};
self.event_log.append_event(event).await
}
/// Cancel a build request with a reason
pub async fn cancel_build(
&self,
build_request_id: String,
reason: String,
) -> Result<()> {
// Validate that the build exists and is in a cancellable state
let build_events = self.event_log.get_build_request_events(&build_request_id, None).await?;
if build_events.is_empty() {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel non-existent build: {}", build_request_id)
));
}
// Find the latest build status
let latest_status = build_events.iter()
.rev()
.find_map(|e| match &e.event_type {
Some(build_event::EventType::BuildRequestEvent(br)) => Some(br.status_code),
_ => None,
});
match latest_status {
Some(status) if status == BuildRequestStatus::BuildRequestCompleted as i32 => {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel completed build: {}", build_request_id)
));
}
Some(status) if status == BuildRequestStatus::BuildRequestFailed as i32 => {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel failed build: {}", build_request_id)
));
}
Some(status) if status == BuildRequestStatus::BuildRequestCancelled as i32 => {
return Err(BuildEventLogError::QueryError(
format!("Build already cancelled: {}", build_request_id)
));
}
_ => {}
}
let event = BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id: build_request_id.clone(),
event_type: Some(build_event::EventType::BuildCancelEvent(BuildCancelEvent {
reason,
})),
};
self.event_log.append_event(event).await?;
// Also emit a build request status update
self.update_build_status(
build_request_id,
BuildRequestStatus::BuildRequestCancelled,
"Build cancelled by user".to_string(),
).await
}
/// Record a delegation event when a partition build is delegated to another build
pub async fn record_delegation(
&self,
build_request_id: String,
partition_ref: PartitionRef,
delegated_to_build_request_id: String,
message: String,
) -> Result<()> {
debug!("Recording delegation of {} to build {}", partition_ref.str, delegated_to_build_request_id);
let event = create_build_event(
build_request_id,
build_event::EventType::DelegationEvent(DelegationEvent {
partition_ref: Some(partition_ref),
delegated_to_build_request_id,
message,
}),
);
self.event_log.append_event(event).await
}
/// Record the analyzed job graph
pub async fn record_job_graph(
&self,
build_request_id: String,
job_graph: JobGraph,
message: String,
) -> Result<()> {
debug!("Recording job graph for build: {}", build_request_id);
let event = BuildEvent {
event_id: generate_event_id(),
timestamp: current_timestamp_nanos(),
build_request_id,
event_type: Some(build_event::EventType::JobGraphEvent(JobGraphEvent {
job_graph: Some(job_graph),
message,
})),
};
self.event_log.append_event(event).await
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event_log::stdout::StdoutBuildEventLog;
#[tokio::test]
async fn test_event_writer_build_lifecycle() {
let event_log = Arc::new(StdoutBuildEventLog::new());
let writer = EventWriter::new(event_log);
let build_id = "test-build-123".to_string();
let partitions = vec![PartitionRef { str: "test/partition".to_string() }];
// Test build request
writer.request_build(build_id.clone(), partitions.clone()).await.unwrap();
// Test status updates
writer.update_build_status(
build_id.clone(),
BuildRequestStatus::BuildRequestPlanning,
"Starting planning".to_string(),
).await.unwrap();
writer.update_build_status(
build_id.clone(),
BuildRequestStatus::BuildRequestExecuting,
"Starting execution".to_string(),
).await.unwrap();
writer.update_build_status(
build_id.clone(),
BuildRequestStatus::BuildRequestCompleted,
"Build completed successfully".to_string(),
).await.unwrap();
}
#[tokio::test]
async fn test_event_writer_partition_and_job() {
let event_log = Arc::new(StdoutBuildEventLog::new());
let writer = EventWriter::new(event_log);
let build_id = "test-build-456".to_string();
let partition = PartitionRef { str: "data/users".to_string() };
let job_run_id = "job-run-789".to_string();
let job_label = JobLabel { label: "//:test_job".to_string() };
// Test partition status update
writer.update_partition_status(
build_id.clone(),
partition.clone(),
PartitionStatus::PartitionBuilding,
"Building partition".to_string(),
Some(job_run_id.clone()),
).await.unwrap();
// Test job scheduling
let config = JobConfig {
outputs: vec![partition.clone()],
inputs: vec![],
args: vec!["test".to_string()],
env: std::collections::HashMap::new(),
};
writer.schedule_job(
build_id.clone(),
job_run_id.clone(),
job_label.clone(),
vec![partition.clone()],
config,
).await.unwrap();
// Test job status update
writer.update_job_status(
build_id.clone(),
job_run_id,
job_label,
vec![partition],
JobStatus::JobCompleted,
"Job completed successfully".to_string(),
vec![],
).await.unwrap();
}
}

144
databuild/format_consistency_test.rs Normal file
View file

@ -0,0 +1,144 @@
#[cfg(test)]
mod format_consistency_tests {
use super::*;
use crate::*;
use crate::repositories::partitions::PartitionsRepository;
use crate::event_log::mock::{MockBuildEventLog, test_events};
use std::sync::Arc;
#[tokio::test]
async fn test_partitions_list_json_format_consistency() {
// Create test data
let build_id = "test-build-123".to_string();
let partition1 = PartitionRef { str: "data/users".to_string() };
let partition2 = PartitionRef { str: "data/orders".to_string() };
let events = vec![
test_events::build_request_received(Some(build_id.clone()), vec![partition1.clone(), partition2.clone()]),
test_events::partition_status(Some(build_id.clone()), partition1.clone(), PartitionStatus::PartitionBuilding, None),
test_events::partition_status(Some(build_id.clone()), partition1.clone(), PartitionStatus::PartitionAvailable, None),
test_events::partition_status(Some(build_id.clone()), partition2.clone(), PartitionStatus::PartitionBuilding, None),
test_events::partition_status(Some(build_id.clone()), partition2.clone(), PartitionStatus::PartitionFailed, None),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repository = PartitionsRepository::new(mock_log);
// Test the new unified protobuf format
let request = PartitionsListRequest {
limit: Some(10),
offset: None,
status_filter: None,
};
let response = repository.list_protobuf(request).await.unwrap();
// Serialize to JSON and verify structure
let json_value = serde_json::to_value(&response).unwrap();
// Verify top-level structure matches expected protobuf schema
assert!(json_value.get("partitions").is_some());
assert!(json_value.get("total_count").is_some());
assert!(json_value.get("has_more").is_some());
let partitions = json_value["partitions"].as_array().unwrap();
assert_eq!(partitions.len(), 2);
// Verify each partition has dual status fields
for partition in partitions {
assert!(partition.get("partition_ref").is_some());
assert!(partition.get("status_code").is_some(), "Missing status_code field");
assert!(partition.get("status_name").is_some(), "Missing status_name field");
assert!(partition.get("last_updated").is_some());
assert!(partition.get("builds_count").is_some());
assert!(partition.get("invalidation_count").is_some());
// Verify status fields are consistent
let status_code = partition["status_code"].as_i64().unwrap();
let status_name = partition["status_name"].as_str().unwrap();
// Map status codes to expected names and verify the dual fields agree
let expected_name = match status_code {
1 => "requested",
2 => "analyzed",
3 => "building",
4 => "available",
5 => "failed",
6 => "delegated",
_ => "unknown",
};
assert_eq!(status_name, expected_name, "status_name must match status_code");
}
// Verify JSON serialization produces expected field names (snake_case for JSON)
let json_str = serde_json::to_string_pretty(&response).unwrap();
assert!(json_str.contains("\"partitions\""));
assert!(json_str.contains("\"total_count\""));
assert!(json_str.contains("\"has_more\""));
assert!(json_str.contains("\"partition_ref\""));
assert!(json_str.contains("\"status_code\""));
assert!(json_str.contains("\"status_name\""));
assert!(json_str.contains("\"last_updated\""));
assert!(json_str.contains("\"builds_count\""));
assert!(json_str.contains("\"invalidation_count\""));
println!("✅ Partitions list JSON format test passed");
println!("Sample JSON output:\n{}", json_str);
}
#[tokio::test]
async fn test_status_conversion_utilities() {
use crate::status_utils::*;
// Test PartitionStatus conversions
let status = PartitionStatus::PartitionAvailable;
assert_eq!(status.to_display_string(), "available");
assert_eq!(PartitionStatus::from_display_string("available"), Some(status));
// Test JobStatus conversions
let job_status = JobStatus::JobCompleted;
assert_eq!(job_status.to_display_string(), "completed");
assert_eq!(JobStatus::from_display_string("completed"), Some(job_status));
// Test BuildRequestStatus conversions
let build_status = BuildRequestStatus::BuildRequestCompleted;
assert_eq!(build_status.to_display_string(), "completed");
assert_eq!(BuildRequestStatus::from_display_string("completed"), Some(build_status));
// Test invalid conversions
assert_eq!(PartitionStatus::from_display_string("invalid"), None);
println!("✅ Status conversion utilities test passed");
}
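// Sketch of the conversion pattern exercised above (an assumption: status_utils
// itself is not shown in this diff). The shape implied by the test is a pair of
// inherent methods per generated enum, matching in both directions so unknown
// names round-trip to None:
//
//     impl PartitionStatus {
//         pub fn to_display_string(&self) -> String {
//             match self {
//                 PartitionStatus::PartitionAvailable => "available",
//                 PartitionStatus::PartitionFailed => "failed",
//                 // ...remaining variants follow the same naming scheme
//                 _ => "unknown",
//             }
//             .to_string()
//         }
//         pub fn from_display_string(s: &str) -> Option<Self> {
//             match s {
//                 "available" => Some(PartitionStatus::PartitionAvailable),
//                 "failed" => Some(PartitionStatus::PartitionFailed),
//                 _ => None,
//             }
//         }
//     }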
#[test]
fn test_protobuf_response_helper_functions() {
use crate::status_utils::list_response_helpers::*;
// Test PartitionSummary creation
let summary = create_partition_summary(
"test/partition".to_string(),
PartitionStatus::PartitionAvailable,
1234567890,
5,
2,
Some("build-123".to_string()),
);
assert_eq!(summary.partition_ref, "test/partition");
assert_eq!(summary.status_code, 4); // PartitionAvailable = 4
assert_eq!(summary.status_name, "available");
assert_eq!(summary.last_updated, 1234567890);
assert_eq!(summary.builds_count, 5);
assert_eq!(summary.invalidation_count, 2);
assert_eq!(summary.last_successful_build, Some("build-123".to_string()));
println!("✅ Protobuf response helper functions test passed");
}
}

View file

@ -200,7 +200,8 @@ async fn plan(
let event = create_build_event(
build_request_id.to_string(),
crate::build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestReceived as i32,
status_code: BuildRequestStatus::BuildRequestReceived as i32,
status_name: BuildRequestStatus::BuildRequestReceived.to_display_string(),
requested_partitions: output_refs.iter().map(|s| PartitionRef { str: s.clone() }).collect(),
message: "Analysis started".to_string(),
})
@ -260,7 +261,8 @@ async fn plan(
let event = create_build_event(
build_request_id.to_string(),
crate::build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestPlanning as i32,
status_code: BuildRequestStatus::BuildRequestPlanning as i32,
status_name: BuildRequestStatus::BuildRequestPlanning.to_display_string(),
requested_partitions: output_refs.iter().map(|s| PartitionRef { str: s.clone() }).collect(),
message: "Graph analysis in progress".to_string(),
})
@ -307,7 +309,7 @@ async fn plan(
let mut new_unhandled_count = 0;
for task in &new_nodes {
for input in &task.config.as_ref().unwrap().inputs {
if input.dep_type == 1 { // MATERIALIZE = 1
if input.dep_type_code == 1 { // MATERIALIZE = 1
if !unhandled_refs.contains(&input.partition_ref.as_ref().unwrap().str) {
new_unhandled_count += 1;
}
@ -329,7 +331,8 @@ async fn plan(
let event = create_build_event(
build_request_id.to_string(),
crate::build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestAnalysisCompleted as i32,
status_code: BuildRequestStatus::BuildRequestAnalysisCompleted as i32,
status_name: BuildRequestStatus::BuildRequestAnalysisCompleted.to_display_string(),
requested_partitions: output_refs.iter().map(|s| PartitionRef { str: s.clone() }).collect(),
message: format!("Analysis completed successfully, {} tasks planned", nodes.len()),
})
@ -370,7 +373,8 @@ async fn plan(
let event = create_build_event(
build_request_id.to_string(),
crate::build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestFailed as i32,
status_code: BuildRequestStatus::BuildRequestFailed as i32,
status_name: BuildRequestStatus::BuildRequestFailed.to_display_string(),
requested_partitions: output_refs.iter().map(|s| PartitionRef { str: s.clone() }).collect(),
message: "No jobs found for requested partitions".to_string(),
})

View file

@ -248,7 +248,7 @@ fn is_task_ready(task: &Task, completed_outputs: &HashSet<String>) -> bool {
let mut missing_deps = Vec::new();
for dep in &task.config.as_ref().unwrap().inputs {
if dep.dep_type == 1 { // MATERIALIZE = 1
if dep.dep_type_code == 1 { // MATERIALIZE = 1
if !completed_outputs.contains(&dep.partition_ref.as_ref().unwrap().str) {
missing_deps.push(&dep.partition_ref.as_ref().unwrap().str);
}
@ -430,7 +430,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let event = create_build_event(
build_request_id.clone(),
EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestExecuting as i32,
status_code: BuildRequestStatus::BuildRequestExecuting as i32,
status_name: BuildRequestStatus::BuildRequestExecuting.to_display_string(),
requested_partitions: graph.outputs.clone(),
message: format!("Starting execution of {} jobs", graph.nodes.len()),
})
@ -502,7 +503,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
job_run_id: job_run_id.clone(),
job_label: original_task.job.clone(),
target_partitions: original_task.config.as_ref().unwrap().outputs.clone(),
status: if result.success { JobStatus::JobCompleted as i32 } else { JobStatus::JobFailed as i32 },
status_code: if result.success { JobStatus::JobCompleted as i32 } else { JobStatus::JobFailed as i32 },
status_name: if result.success { JobStatus::JobCompleted.to_display_string() } else { JobStatus::JobFailed.to_display_string() },
message: if result.success { "Job completed successfully".to_string() } else { result.error_message.clone().unwrap_or_default() },
config: original_task.config.clone(),
manifests: vec![], // Would be populated from actual job output
@ -518,7 +520,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
build_request_id.clone(),
EventType::PartitionEvent(PartitionEvent {
partition_ref: Some(output_ref.clone()),
status: if result.success { PartitionStatus::PartitionAvailable as i32 } else { PartitionStatus::PartitionFailed as i32 },
status_code: if result.success { PartitionStatus::PartitionAvailable as i32 } else { PartitionStatus::PartitionFailed as i32 },
status_name: if result.success { PartitionStatus::PartitionAvailable.to_display_string() } else { PartitionStatus::PartitionFailed.to_display_string() },
message: if result.success { "Partition built successfully".to_string() } else { "Partition build failed".to_string() },
job_run_id: job_run_id.clone(),
})
@ -601,7 +604,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
job_run_id: job_run_id.clone(),
job_label: task_node.job.clone(),
target_partitions: task_node.config.as_ref().unwrap().outputs.clone(),
status: JobStatus::JobSkipped as i32,
status_code: JobStatus::JobSkipped as i32,
status_name: JobStatus::JobSkipped.to_display_string(),
message: "Job skipped - all target partitions already available".to_string(),
config: task_node.config.clone(),
manifests: vec![],
@ -638,7 +642,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
job_run_id: job_run_id.clone(),
job_label: task_node.job.clone(),
target_partitions: task_node.config.as_ref().unwrap().outputs.clone(),
status: JobStatus::JobScheduled as i32,
status_code: JobStatus::JobScheduled as i32,
status_name: JobStatus::JobScheduled.to_display_string(),
message: "Job scheduled for execution".to_string(),
config: task_node.config.clone(),
manifests: vec![],
@ -654,7 +659,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
build_request_id.clone(),
EventType::PartitionEvent(PartitionEvent {
partition_ref: Some(output_ref.clone()),
status: PartitionStatus::PartitionBuilding as i32,
status_code: PartitionStatus::PartitionBuilding as i32,
status_name: PartitionStatus::PartitionBuilding.to_display_string(),
message: "Partition build started".to_string(),
job_run_id: job_run_id.clone(),
})
@ -691,7 +697,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
warn!("Pending task: {} ({})", task.job.as_ref().unwrap().label, key);
warn!(" Required inputs:");
for dep in &task.config.as_ref().unwrap().inputs {
if dep.dep_type == 1 { // MATERIALIZE = 1
if dep.dep_type_code == 1 { // MATERIALIZE = 1
let available = completed_outputs.contains(&dep.partition_ref.as_ref().unwrap().str);
warn!(" {} - {}", dep.partition_ref.as_ref().unwrap().str, if available { "AVAILABLE" } else { "MISSING" });
}
@ -759,7 +765,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let event = create_build_event(
build_request_id.clone(),
EventType::BuildRequestEvent(BuildRequestEvent {
status: final_status as i32,
status_code: final_status as i32,
status_name: final_status.to_display_string(),
requested_partitions: graph.outputs.clone(),
message: format!("Execution completed: {} succeeded, {} failed", success_count, failure_count),
})

View file

@ -10,8 +10,18 @@ pub mod orchestration;
// Service module
pub mod service;
// Repository pattern implementations
pub mod repositories;
pub mod mermaid_utils;
// Status conversion utilities
pub mod status_utils;
// Format consistency tests
#[cfg(test)]
mod format_consistency_test;
// Re-export commonly used types from event_log
pub use event_log::{BuildEventLog, BuildEventLogError, create_build_event_log};
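// Downstream crates can now pull the event-log plumbing from one place
// (sketch; the factory's exact signature is not shown in this diff):
//
//     use databuild::{create_build_event_log, BuildEventLog, BuildEventLogError};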

View file

@ -43,7 +43,7 @@ pub fn extract_status_map(events: &[BuildEvent]) -> (HashMap<String, NodeStatus>
match &event.event_type {
Some(crate::build_event::EventType::JobEvent(job_event)) => {
if let Some(job_label) = &job_event.job_label {
let status = match job_event.status {
let status = match job_event.status_code {
1 => NodeStatus::Running, // JOB_SCHEDULED
2 => NodeStatus::Running, // JOB_RUNNING
3 => NodeStatus::Completed, // JOB_COMPLETED
@ -65,7 +65,7 @@ pub fn extract_status_map(events: &[BuildEvent]) -> (HashMap<String, NodeStatus>
}
Some(crate::build_event::EventType::PartitionEvent(partition_event)) => {
if let Some(partition_ref) = &partition_event.partition_ref {
let status = match partition_event.status {
let status = match partition_event.status_code {
1 => NodeStatus::Pending, // PARTITION_REQUESTED
2 => NodeStatus::Pending, // PARTITION_ANALYZED
3 => NodeStatus::Running, // PARTITION_BUILDING
@ -543,7 +543,7 @@ pub fn generate_mermaid_with_status(
for input in &config.inputs {
if let Some(partition_ref) = &input.partition_ref {
let ref_id = builder.add_partition_node(&partition_ref.str);
let edge_type = if input.dep_type == 1 {
let edge_type = if input.dep_type_code == 1 {
EdgeType::Solid
} else {
EdgeType::Dotted
@ -661,7 +661,8 @@ mod tests {
inputs: vec![{
let mut input = DataDep::default();
input.partition_ref = Some(PartitionRef { str: "input/data".to_string() });
input.dep_type = 1; // Solid dependency
input.dep_type_code = 1; // Solid dependency
input.dep_type_name = "materialize".to_string();
input
}],
outputs: vec![
@ -678,7 +679,8 @@ mod tests {
inputs: vec![{
let mut input = DataDep::default();
input.partition_ref = Some(PartitionRef { str: "intermediate/data".to_string() });
input.dep_type = 2; // Dotted dependency
input.dep_type_code = 0; // Dotted dependency
input.dep_type_name = "query".to_string();
input
}],
outputs: vec![
@ -728,7 +730,7 @@ mod tests {
event1.event_type = Some(crate::build_event::EventType::JobEvent({
let mut job_event = JobEvent::default();
job_event.job_label = Some(JobLabel { label: "test_job".to_string() });
job_event.status = 2; // JOB_RUNNING
job_event.status_code = 2; // JOB_RUNNING
job_event
}));
@ -737,7 +739,7 @@ mod tests {
event2.event_type = Some(crate::build_event::EventType::PartitionEvent({
let mut partition_event = PartitionEvent::default();
partition_event.partition_ref = Some(PartitionRef { str: "test/partition".to_string() });
partition_event.status = 4; // PARTITION_AVAILABLE
partition_event.status_code = 4; // PARTITION_AVAILABLE
partition_event
}));
@ -758,7 +760,7 @@ mod tests {
let mut job_event = JobEvent::default();
job_event.job_label = Some(JobLabel { label: "same_job".to_string() });
job_event.target_partitions = vec![PartitionRef { str: "output1".to_string() }];
job_event.status = 2; // JOB_RUNNING
job_event.status_code = 2; // JOB_RUNNING
job_event
}));
@ -767,7 +769,7 @@ mod tests {
let mut job_event = JobEvent::default();
job_event.job_label = Some(JobLabel { label: "same_job".to_string() });
job_event.target_partitions = vec![PartitionRef { str: "output2".to_string() }];
job_event.status = 3; // JOB_COMPLETED
job_event.status_code = 3; // JOB_COMPLETED
job_event
}));
@ -789,7 +791,8 @@ mod tests {
inputs: vec![{
let mut input = DataDep::default();
input.partition_ref = Some(PartitionRef { str: "input/data".to_string() });
input.dep_type = 1; // Solid dependency
input.dep_type_code = 1; // Solid dependency
input.dep_type_name = "materialize".to_string();
input
}],
outputs: vec![
@ -810,7 +813,7 @@ mod tests {
partition_event.event_type = Some(crate::build_event::EventType::PartitionEvent({
let mut pe = PartitionEvent::default();
pe.partition_ref = Some(PartitionRef { str: "input/data".to_string() });
pe.status = 4; // PARTITION_AVAILABLE
pe.status_code = 4; // PARTITION_AVAILABLE
pe
}));
@ -819,7 +822,7 @@ mod tests {
let mut je = JobEvent::default();
je.job_label = Some(JobLabel { label: "job1".to_string() });
je.target_partitions = vec![PartitionRef { str: "intermediate/data".to_string() }];
je.status = 2; // JOB_RUNNING
je.status_code = 2; // JOB_RUNNING
je
}));
@ -859,7 +862,8 @@ mod tests {
inputs: vec![{
let mut input = DataDep::default();
input.partition_ref = Some(PartitionRef { str: "shared_input".to_string() });
input.dep_type = 1;
input.dep_type_code = 1;
input.dep_type_name = "materialize".to_string();
input
}],
outputs: vec![
@ -875,7 +879,8 @@ mod tests {
inputs: vec![{
let mut input = DataDep::default();
input.partition_ref = Some(PartitionRef { str: "shared_input".to_string() });
input.dep_type = 1;
input.dep_type_code = 1;
input.dep_type_name = "materialize".to_string();
input
}],
outputs: vec![

View file

@ -10,7 +10,8 @@ pub fn create_build_request_received_event(
create_build_event(
build_request_id,
build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestReceived as i32,
status_code: BuildRequestStatus::BuildRequestReceived as i32,
status_name: BuildRequestStatus::BuildRequestReceived.to_display_string(),
requested_partitions,
message: "Build request received".to_string(),
}),
@ -23,7 +24,8 @@ pub fn create_build_planning_started_event(
create_build_event(
build_request_id,
build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestPlanning as i32,
status_code: BuildRequestStatus::BuildRequestPlanning as i32,
status_name: BuildRequestStatus::BuildRequestPlanning.to_display_string(),
requested_partitions: vec![],
message: "Starting build planning".to_string(),
}),
@ -36,7 +38,8 @@ pub fn create_build_execution_started_event(
create_build_event(
build_request_id,
build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestExecuting as i32,
status_code: BuildRequestStatus::BuildRequestExecuting as i32,
status_name: BuildRequestStatus::BuildRequestExecuting.to_display_string(),
requested_partitions: vec![],
message: "Starting build execution".to_string(),
}),
@ -67,7 +70,8 @@ pub fn create_build_completed_event(
create_build_event(
build_request_id,
build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: status as i32,
status_code: status as i32,
status_name: status.to_display_string(),
requested_partitions: vec![],
message,
}),
@ -82,7 +86,8 @@ pub fn create_analysis_completed_event(
create_build_event(
build_request_id,
build_event::EventType::BuildRequestEvent(BuildRequestEvent {
status: BuildRequestStatus::BuildRequestAnalysisCompleted as i32,
status_code: BuildRequestStatus::BuildRequestAnalysisCompleted as i32,
status_name: BuildRequestStatus::BuildRequestAnalysisCompleted.to_display_string(),
requested_partitions,
message: format!("Analysis completed successfully, {} tasks planned", task_count),
}),
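// The invariant behind every hunk in this file, stated once (a sketch, not part
// of this diff): derive status_name from status_code through a single helper so
// the two fields cannot drift apart.
//
//     fn build_request_status_fields(status: BuildRequestStatus) -> (i32, String) {
//         (status as i32, status.to_display_string())
//     }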

View file

@ -1,5 +1,5 @@
use crate::*;
use crate::event_log::BuildEventLog;
use crate::event_log::{BuildEventLog, writer::EventWriter};
use log::info;
use std::sync::Arc;
@ -18,7 +18,7 @@ pub enum BuildResult {
/// Core orchestrator for managing build lifecycle and event emission
pub struct BuildOrchestrator {
event_log: Arc<dyn BuildEventLog>,
event_writer: EventWriter,
build_request_id: String,
requested_partitions: Vec<PartitionRef>,
}
@ -31,7 +31,7 @@ impl BuildOrchestrator {
requested_partitions: Vec<PartitionRef>,
) -> Self {
Self {
event_log,
event_writer: EventWriter::new(event_log),
build_request_id,
requested_partitions,
}
@ -51,12 +51,10 @@ impl BuildOrchestrator {
pub async fn start_build(&self) -> Result<()> {
info!("Starting build for request: {}", self.build_request_id);
let event = events::create_build_request_received_event(
self.event_writer.request_build(
self.build_request_id.clone(),
self.requested_partitions.clone(),
);
self.event_log.append_event(event).await
).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -66,11 +64,11 @@ impl BuildOrchestrator {
pub async fn start_planning(&self) -> Result<()> {
info!("Starting build planning for request: {}", self.build_request_id);
let event = events::create_build_planning_started_event(
self.event_writer.update_build_status(
self.build_request_id.clone(),
);
self.event_log.append_event(event).await
BuildRequestStatus::BuildRequestPlanning,
"Starting build planning".to_string(),
).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -80,11 +78,11 @@ impl BuildOrchestrator {
pub async fn start_execution(&self) -> Result<()> {
info!("Starting build execution for request: {}", self.build_request_id);
let event = events::create_build_execution_started_event(
self.event_writer.update_build_status(
self.build_request_id.clone(),
);
self.event_log.append_event(event).await
BuildRequestStatus::BuildRequestExecuting,
"Starting build execution".to_string(),
).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -95,12 +93,26 @@ impl BuildOrchestrator {
info!("Completing build for request: {} with result: {:?}",
self.build_request_id, result);
let event = events::create_build_completed_event(
self.build_request_id.clone(),
&result,
);
let (status, message) = match &result {
BuildResult::Success { jobs_completed } => {
(BuildRequestStatus::BuildRequestCompleted,
format!("Build completed successfully with {} jobs", jobs_completed))
}
BuildResult::Failed { jobs_completed, jobs_failed } => {
(BuildRequestStatus::BuildRequestFailed,
format!("Build failed: {} jobs completed, {} jobs failed", jobs_completed, jobs_failed))
}
BuildResult::FailFast { trigger_job } => {
(BuildRequestStatus::BuildRequestFailed,
format!("Build failed fast due to job: {}", trigger_job))
}
};
self.event_log.append_event(event).await
self.event_writer.update_build_status(
self.build_request_id.clone(),
status,
message,
).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -108,13 +120,12 @@ impl BuildOrchestrator {
/// Emit analysis completed event
pub async fn emit_analysis_completed(&self, task_count: usize) -> Result<()> {
let event = events::create_analysis_completed_event(
self.event_writer.update_build_status_with_partitions(
self.build_request_id.clone(),
BuildRequestStatus::BuildRequestAnalysisCompleted,
self.requested_partitions.clone(),
task_count,
);
self.event_log.append_event(event).await
format!("Analysis completed successfully, {} tasks planned", task_count),
).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -127,7 +138,7 @@ impl BuildOrchestrator {
job,
);
self.event_log.append_event(event).await
self.event_writer.event_log().append_event(event).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -140,7 +151,7 @@ impl BuildOrchestrator {
job,
);
self.event_log.append_event(event).await
self.event_writer.event_log().append_event(event).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -153,7 +164,7 @@ impl BuildOrchestrator {
partition,
);
self.event_log.append_event(event).await
self.event_writer.event_log().append_event(event).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -166,14 +177,14 @@ impl BuildOrchestrator {
target_build: &str,
message: &str,
) -> Result<()> {
let event = events::create_delegation_event(
self.build_request_id.clone(),
partition_ref,
target_build,
message,
);
let partition = PartitionRef { str: partition_ref.to_string() };
self.event_log.append_event(event).await
self.event_writer.record_delegation(
self.build_request_id.clone(),
partition,
target_build.to_string(),
message.to_string(),
).await
.map_err(OrchestrationError::EventLog)?;
Ok(())
@ -181,7 +192,7 @@ impl BuildOrchestrator {
/// Get reference to the event log for direct access if needed
pub fn event_log(&self) -> &dyn BuildEventLog {
self.event_log.as_ref()
self.event_writer.event_log()
}
}
@ -331,7 +342,7 @@ mod tests {
// Verify first event is build request received
if let Some(build_event::EventType::BuildRequestEvent(br_event)) = &emitted_events[0].event_type {
assert_eq!(br_event.status, BuildRequestStatus::BuildRequestReceived as i32);
assert_eq!(br_event.status_code, BuildRequestStatus::BuildRequestReceived as i32);
assert_eq!(br_event.requested_partitions, partitions);
} else {
panic!("First event should be BuildRequestEvent");
@ -357,7 +368,8 @@ mod tests {
job_run_id: "job-run-123".to_string(),
job_label: Some(JobLabel { label: "//:test_job".to_string() }),
target_partitions: vec![partition.clone()],
status: JobStatus::JobScheduled as i32,
status_code: JobStatus::JobScheduled as i32,
status_name: JobStatus::JobScheduled.to_display_string(),
message: "Job scheduled".to_string(),
config: None,
manifests: vec![],

View file

@ -31,7 +31,7 @@ fn generate_prost_code(proto_file: &str, output_file: &str) -> Result<(), Box<dy
config.out_dir(temp_path);
// Configure derive traits - prost::Message provides Debug automatically
config.type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]");
config.type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize, schemars::JsonSchema)]");
// Try to find protoc in the environment (Bazel should provide this)
if let Ok(protoc_path) = env::var("PROTOC") {
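// What the added JsonSchema derive buys (illustrative sketch only; PartitionRef
// stands in for any generated message): the OpenAPI spec generator can derive
// JSON Schemas straight from the prost structs.
//
//     let schema = schemars::schema_for!(PartitionRef);
//     println!("{}", serde_json::to_string_pretty(&schema).unwrap());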

View file

@ -0,0 +1,532 @@
use crate::*;
use crate::event_log::{BuildEventLog, BuildEventLogError, Result};
use crate::{BuildDetailResponse, BuildTimelineEvent as ServiceBuildTimelineEvent};
use std::sync::Arc;
use std::collections::HashMap;
use serde::Serialize;
/// Repository for querying build data from the build event log
pub struct BuildsRepository {
event_log: Arc<dyn BuildEventLog>,
}
/// Summary of a build request and its current status
#[derive(Debug, Clone, Serialize)]
pub struct BuildInfo {
pub build_request_id: String,
pub status: BuildRequestStatus,
pub requested_partitions: Vec<PartitionRef>,
pub requested_at: i64,
pub started_at: Option<i64>,
pub completed_at: Option<i64>,
pub duration_ms: Option<i64>,
pub total_jobs: usize,
pub completed_jobs: usize,
pub failed_jobs: usize,
pub cancelled_jobs: usize,
pub cancelled: bool,
pub cancel_reason: Option<String>,
}
/// Detailed timeline of a build's execution events
#[derive(Debug, Clone, Serialize)]
pub struct BuildEvent {
pub timestamp: i64,
pub event_type: String,
pub status: Option<BuildRequestStatus>,
pub message: String,
pub cancel_reason: Option<String>,
}
impl BuildsRepository {
/// Create a new BuildsRepository
pub fn new(event_log: Arc<dyn BuildEventLog>) -> Self {
Self { event_log }
}
/// List all builds with their current status
///
/// Returns a list of all build requests that have been made,
/// including their current status and execution details.
pub async fn list(&self, limit: Option<usize>) -> Result<Vec<BuildInfo>> {
// Get all events from the event log
let events = self.event_log.get_events_in_range(0, i64::MAX).await?;
let mut build_data: HashMap<String, BuildInfo> = HashMap::new();
let mut build_cancellations: HashMap<String, String> = HashMap::new();
let mut job_counts: HashMap<String, (usize, usize, usize, usize)> = HashMap::new(); // total, completed, failed, cancelled
// First pass: collect all build cancel events
for event in &events {
if let Some(build_event::EventType::BuildCancelEvent(bc_event)) = &event.event_type {
build_cancellations.insert(event.build_request_id.clone(), bc_event.reason.clone());
}
}
// Second pass: collect job statistics for each build
for event in &events {
if let Some(build_event::EventType::JobEvent(j_event)) = &event.event_type {
let build_id = &event.build_request_id;
let (total, completed, failed, cancelled) = job_counts.entry(build_id.clone()).or_insert((0, 0, 0, 0));
match j_event.status_code {
1 => *total += 1, // JobScheduled - one scheduled event per job run
3 => *completed += 1, // JobCompleted
4 => *failed += 1, // JobFailed
5 => *cancelled += 1, // JobCancelled
_ => {}
}
}
}
// Third pass: collect all build request events and build information
for event in events {
if let Some(build_event::EventType::BuildRequestEvent(br_event)) = &event.event_type {
let status = match br_event.status_code {
1 => BuildRequestStatus::BuildRequestReceived,
2 => BuildRequestStatus::BuildRequestPlanning,
3 => BuildRequestStatus::BuildRequestExecuting,
4 => BuildRequestStatus::BuildRequestCompleted,
5 => BuildRequestStatus::BuildRequestFailed,
6 => BuildRequestStatus::BuildRequestCancelled,
_ => BuildRequestStatus::BuildRequestUnknown,
};
// Create or update build info
let build = build_data.entry(event.build_request_id.clone()).or_insert_with(|| {
let (total_jobs, completed_jobs, failed_jobs, cancelled_jobs) =
job_counts.get(&event.build_request_id).unwrap_or(&(0, 0, 0, 0));
BuildInfo {
build_request_id: event.build_request_id.clone(),
status: BuildRequestStatus::BuildRequestUnknown,
requested_partitions: br_event.requested_partitions.clone(),
requested_at: event.timestamp,
started_at: None,
completed_at: None,
duration_ms: None,
total_jobs: *total_jobs,
completed_jobs: *completed_jobs,
failed_jobs: *failed_jobs,
cancelled_jobs: *cancelled_jobs,
cancelled: false,
cancel_reason: None,
}
});
// Update build with new information
build.status = status;
match status {
BuildRequestStatus::BuildRequestReceived => {
build.requested_at = event.timestamp;
}
BuildRequestStatus::BuildRequestExecuting => {
build.started_at = Some(event.timestamp);
}
BuildRequestStatus::BuildRequestCompleted |
BuildRequestStatus::BuildRequestFailed |
BuildRequestStatus::BuildRequestCancelled => {
build.completed_at = Some(event.timestamp);
if let Some(started) = build.started_at {
build.duration_ms = Some((event.timestamp - started) / 1_000_000); // Convert to ms
}
}
_ => {}
}
// Check if this build was cancelled
if let Some(cancel_reason) = build_cancellations.get(&event.build_request_id) {
build.cancelled = true;
build.cancel_reason = Some(cancel_reason.clone());
}
}
}
// Convert to vector and sort by requested time (most recent first)
let mut builds: Vec<BuildInfo> = build_data.into_values().collect();
builds.sort_by(|a, b| b.requested_at.cmp(&a.requested_at));
// Apply limit if specified
if let Some(limit) = limit {
builds.truncate(limit);
}
Ok(builds)
}
/// Show detailed information about a specific build
///
/// Returns the complete timeline of events for the specified build,
/// including all status changes and any cancellation events.
pub async fn show(&self, build_request_id: &str) -> Result<Option<(BuildInfo, Vec<BuildEvent>)>> {
// Get all events for this specific build
let build_events = self.event_log.get_build_request_events(build_request_id, None).await?;
if build_events.is_empty() {
return Ok(None);
}
let mut build_info: Option<BuildInfo> = None;
let mut timeline: Vec<BuildEvent> = Vec::new();
let mut job_counts = (0, 0, 0, 0); // total, completed, failed, cancelled
// Process all events to get job statistics
let all_events = self.event_log.get_events_in_range(0, i64::MAX).await?;
for event in &all_events {
if event.build_request_id == build_request_id {
if let Some(build_event::EventType::JobEvent(j_event)) = &event.event_type {
match j_event.status_code {
1 => job_counts.0 += 1, // JobScheduled - one scheduled event per job run
3 => job_counts.1 += 1, // JobCompleted
4 => job_counts.2 += 1, // JobFailed
5 => job_counts.3 += 1, // JobCancelled
_ => {}
}
}
}
}
// Process build request events to build timeline
for event in &build_events {
if let Some(build_event::EventType::BuildRequestEvent(br_event)) = &event.event_type {
let status = match br_event.status_code {
1 => BuildRequestStatus::BuildRequestReceived,
2 => BuildRequestStatus::BuildRequestPlanning,
3 => BuildRequestStatus::BuildRequestExecuting,
4 => BuildRequestStatus::BuildRequestCompleted,
5 => BuildRequestStatus::BuildRequestFailed,
6 => BuildRequestStatus::BuildRequestCancelled,
_ => BuildRequestStatus::BuildRequestUnknown,
};
// Create or update build info
if build_info.is_none() {
build_info = Some(BuildInfo {
build_request_id: event.build_request_id.clone(),
status: BuildRequestStatus::BuildRequestUnknown,
requested_partitions: br_event.requested_partitions.clone(),
requested_at: event.timestamp,
started_at: None,
completed_at: None,
duration_ms: None,
total_jobs: job_counts.0,
completed_jobs: job_counts.1,
failed_jobs: job_counts.2,
cancelled_jobs: job_counts.3,
cancelled: false,
cancel_reason: None,
});
}
let build = build_info.as_mut().unwrap();
build.status = status;
match status {
BuildRequestStatus::BuildRequestReceived => {
build.requested_at = event.timestamp;
}
BuildRequestStatus::BuildRequestExecuting => {
build.started_at = Some(event.timestamp);
}
BuildRequestStatus::BuildRequestCompleted |
BuildRequestStatus::BuildRequestFailed |
BuildRequestStatus::BuildRequestCancelled => {
build.completed_at = Some(event.timestamp);
if let Some(started) = build.started_at {
build.duration_ms = Some((event.timestamp - started) / 1_000_000); // Convert to ms
}
}
_ => {}
}
// Add to timeline
timeline.push(BuildEvent {
timestamp: event.timestamp,
event_type: "build_status_change".to_string(),
status: Some(status),
message: format!("Build status: {:?}", status),
cancel_reason: None,
});
}
}
// Also check for build cancel events in all events
for event in all_events {
if event.build_request_id == build_request_id {
if let Some(build_event::EventType::BuildCancelEvent(bc_event)) = &event.event_type {
if let Some(build) = build_info.as_mut() {
build.cancelled = true;
build.cancel_reason = Some(bc_event.reason.clone());
}
timeline.push(BuildEvent {
timestamp: event.timestamp,
event_type: "build_cancel".to_string(),
status: None,
message: "Build cancelled".to_string(),
cancel_reason: Some(bc_event.reason.clone()),
});
}
}
}
// Sort timeline by timestamp
timeline.sort_by_key(|e| e.timestamp);
Ok(build_info.map(|info| (info, timeline)))
}
/// Show detailed information about a specific build using protobuf response format
///
/// Returns the complete build details with dual status fields and timeline events.
pub async fn show_protobuf(&self, build_request_id: &str) -> Result<Option<BuildDetailResponse>> {
// Get build info and timeline using existing show method
if let Some((build_info, timeline)) = self.show(build_request_id).await? {
// Convert timeline events to protobuf format
let protobuf_timeline: Vec<ServiceBuildTimelineEvent> = timeline
.into_iter()
.map(|event| ServiceBuildTimelineEvent {
timestamp: event.timestamp,
status_code: event.status.map(|s| s as i32),
status_name: event.status.map(|s| s.to_display_string()),
message: event.message,
event_type: event.event_type,
cancel_reason: event.cancel_reason,
})
.collect();
let response = BuildDetailResponse {
build_request_id: build_info.build_request_id,
status_code: build_info.status as i32,
status_name: build_info.status.to_display_string(),
requested_partitions: build_info.requested_partitions,
total_jobs: build_info.total_jobs as u32,
completed_jobs: build_info.completed_jobs as u32,
failed_jobs: build_info.failed_jobs as u32,
cancelled_jobs: build_info.cancelled_jobs as u32,
requested_at: build_info.requested_at,
started_at: build_info.started_at,
completed_at: build_info.completed_at,
duration_ms: build_info.duration_ms,
cancelled: build_info.cancelled,
cancel_reason: build_info.cancel_reason,
timeline: protobuf_timeline,
};
Ok(Some(response))
} else {
Ok(None)
}
}
/// Cancel a build with a reason
///
/// This method uses the EventWriter to write a build cancellation event.
/// It validates that the build exists and is in a cancellable state.
pub async fn cancel(&self, build_request_id: &str, reason: String) -> Result<()> {
// First check if the build exists and get its current status
let build_info = self.show(build_request_id).await?;
if build_info.is_none() {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel non-existent build: {}", build_request_id)
));
}
let (build, _timeline) = build_info.unwrap();
// Check if build is in a cancellable state
match build.status {
BuildRequestStatus::BuildRequestCompleted => {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel completed build: {}", build_request_id)
));
}
BuildRequestStatus::BuildRequestFailed => {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel failed build: {}", build_request_id)
));
}
BuildRequestStatus::BuildRequestCancelled => {
return Err(BuildEventLogError::QueryError(
format!("Build already cancelled: {}", build_request_id)
));
}
_ => {}
}
// Use EventWriter to write the cancellation event
let event_writer = crate::event_log::writer::EventWriter::new(self.event_log.clone());
event_writer.cancel_build(build_request_id.to_string(), reason).await
}
/// List builds using protobuf response format with dual status fields
///
/// Returns BuildSummary protobuf messages with status_code and status_name.
pub async fn list_protobuf(&self, limit: Option<usize>) -> Result<Vec<crate::BuildSummary>> {
// Get build info using existing list method
let builds = self.list(limit).await?;
// Convert to protobuf format
let protobuf_builds: Vec<crate::BuildSummary> = builds
.into_iter()
.map(|build| crate::BuildSummary {
build_request_id: build.build_request_id,
status_code: build.status as i32,
status_name: build.status.to_display_string(),
requested_partitions: build.requested_partitions,
total_jobs: build.total_jobs as u32,
completed_jobs: build.completed_jobs as u32,
failed_jobs: build.failed_jobs as u32,
cancelled_jobs: build.cancelled_jobs as u32,
requested_at: build.requested_at,
started_at: build.started_at,
completed_at: build.completed_at,
duration_ms: build.duration_ms,
cancelled: build.cancelled,
})
.collect();
Ok(protobuf_builds)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event_log::mock::{MockBuildEventLog, test_events};
#[tokio::test]
async fn test_builds_repository_list_empty() {
let mock_log = Arc::new(MockBuildEventLog::new().await.unwrap());
let repo = BuildsRepository::new(mock_log);
let builds = repo.list(None).await.unwrap();
assert!(builds.is_empty());
}
#[tokio::test]
async fn test_builds_repository_list_with_data() {
let build_id1 = "build-123".to_string();
let build_id2 = "build-456".to_string();
let partition1 = PartitionRef { str: "data/users".to_string() };
let partition2 = PartitionRef { str: "data/orders".to_string() };
// Create events for multiple builds
let events = vec![
test_events::build_request_event(Some(build_id1.clone()), vec![partition1.clone()], BuildRequestStatus::BuildRequestReceived),
test_events::build_request_event(Some(build_id1.clone()), vec![partition1.clone()], BuildRequestStatus::BuildRequestCompleted),
test_events::build_request_event(Some(build_id2.clone()), vec![partition2.clone()], BuildRequestStatus::BuildRequestReceived),
test_events::build_request_event(Some(build_id2.clone()), vec![partition2.clone()], BuildRequestStatus::BuildRequestFailed),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = BuildsRepository::new(mock_log);
let builds = repo.list(None).await.unwrap();
assert_eq!(builds.len(), 2);
// Find builds by id
let build1 = builds.iter().find(|b| b.build_request_id == build_id1).unwrap();
let build2 = builds.iter().find(|b| b.build_request_id == build_id2).unwrap();
assert_eq!(build1.status, BuildRequestStatus::BuildRequestCompleted);
assert_eq!(build1.requested_partitions.len(), 1);
assert!(!build1.cancelled);
assert_eq!(build2.status, BuildRequestStatus::BuildRequestFailed);
assert_eq!(build2.requested_partitions.len(), 1);
assert!(!build2.cancelled);
}
#[tokio::test]
async fn test_builds_repository_show() {
let build_id = "build-789".to_string();
let partition = PartitionRef { str: "analytics/daily".to_string() };
let events = vec![
test_events::build_request_event(Some(build_id.clone()), vec![partition.clone()], BuildRequestStatus::BuildRequestReceived),
test_events::build_request_event(Some(build_id.clone()), vec![partition.clone()], BuildRequestStatus::BuildRequestPlanning),
test_events::build_request_event(Some(build_id.clone()), vec![partition.clone()], BuildRequestStatus::BuildRequestExecuting),
test_events::build_request_event(Some(build_id.clone()), vec![partition.clone()], BuildRequestStatus::BuildRequestCompleted),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = BuildsRepository::new(mock_log);
let result = repo.show(&build_id).await.unwrap();
assert!(result.is_some());
let (info, timeline) = result.unwrap();
assert_eq!(info.build_request_id, build_id);
assert_eq!(info.status, BuildRequestStatus::BuildRequestCompleted);
assert!(!info.cancelled);
assert_eq!(timeline.len(), 4);
assert_eq!(timeline[0].status, Some(BuildRequestStatus::BuildRequestReceived));
assert_eq!(timeline[1].status, Some(BuildRequestStatus::BuildRequestPlanning));
assert_eq!(timeline[2].status, Some(BuildRequestStatus::BuildRequestExecuting));
assert_eq!(timeline[3].status, Some(BuildRequestStatus::BuildRequestCompleted));
}
#[tokio::test]
async fn test_builds_repository_show_nonexistent() {
let mock_log = Arc::new(MockBuildEventLog::new().await.unwrap());
let repo = BuildsRepository::new(mock_log);
let result = repo.show("nonexistent-build").await.unwrap();
assert!(result.is_none());
}
#[tokio::test]
async fn test_builds_repository_cancel() {
let build_id = "build-cancel-test".to_string();
let partition = PartitionRef { str: "test/data".to_string() };
// Start with a running build
let events = vec![
test_events::build_request_event(Some(build_id.clone()), vec![partition.clone()], BuildRequestStatus::BuildRequestReceived),
test_events::build_request_event(Some(build_id.clone()), vec![partition.clone()], BuildRequestStatus::BuildRequestExecuting),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = BuildsRepository::new(mock_log.clone());
// Cancel the build
repo.cancel(&build_id, "User requested cancellation".to_string()).await.unwrap();
// Verify the cancellation was recorded
// Note: This test demonstrates the pattern, but the MockBuildEventLog would need
// to be enhanced to properly store build cancel events for full verification
// Try to cancel a non-existent build
let result = repo.cancel("nonexistent-build", "Should fail".to_string()).await;
assert!(result.is_err());
}
#[tokio::test]
async fn test_builds_repository_cancel_completed_build() {
let build_id = "completed-build".to_string();
let partition = PartitionRef { str: "test/data".to_string() };
// Create a completed build
let events = vec![
test_events::build_request_event(Some(build_id.clone()), vec![partition.clone()], BuildRequestStatus::BuildRequestReceived),
test_events::build_request_event(Some(build_id.clone()), vec![partition.clone()], BuildRequestStatus::BuildRequestCompleted),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = BuildsRepository::new(mock_log);
// Try to cancel the completed build - should fail
let result = repo.cancel(&build_id, "Should fail".to_string()).await;
assert!(result.is_err());
if let Err(BuildEventLogError::QueryError(msg)) = result {
assert!(msg.contains("Cannot cancel completed build"));
} else {
panic!("Expected QueryError for completed build cancellation");
}
}
}

View file

@ -0,0 +1,498 @@
use crate::*;
use crate::event_log::{BuildEventLog, Result};
use crate::{JobDetailResponse, JobRunDetail as ServiceJobRunDetail};
use std::sync::Arc;
use std::collections::HashMap;
use serde::Serialize;
/// Repository for querying job data from the build event log
pub struct JobsRepository {
event_log: Arc<dyn BuildEventLog>,
}
/// Summary of a job's execution history and statistics
#[derive(Debug, Clone, Serialize)]
pub struct JobInfo {
pub job_label: String,
pub total_runs: usize,
pub successful_runs: usize,
pub failed_runs: usize,
pub cancelled_runs: usize,
pub last_run_timestamp: i64,
pub last_run_status: JobStatus,
pub average_partitions_per_run: f64,
pub recent_builds: Vec<String>, // Build request IDs that used this job
}
/// Detailed information about a specific job execution
#[derive(Debug, Clone, Serialize)]
pub struct JobRunDetail {
pub job_run_id: String,
pub job_label: String,
pub build_request_id: String,
pub target_partitions: Vec<PartitionRef>,
pub status: JobStatus,
pub scheduled_at: i64,
pub started_at: Option<i64>,
pub completed_at: Option<i64>,
pub duration_ms: Option<i64>,
pub message: String,
pub config: Option<JobConfig>,
pub manifests: Vec<PartitionManifest>,
}
impl JobsRepository {
/// Create a new JobsRepository
pub fn new(event_log: Arc<dyn BuildEventLog>) -> Self {
Self { event_log }
}
/// List all jobs with their execution statistics
///
/// Returns a summary of all jobs that have been executed, including
/// success/failure statistics and recent activity.
pub async fn list(&self, limit: Option<usize>) -> Result<Vec<JobInfo>> {
// Get all job events from the event log
let events = self.event_log.get_events_in_range(0, i64::MAX).await?;
let mut job_data: HashMap<String, Vec<JobRunDetail>> = HashMap::new();
// Collect all job events and group by job label
for event in events {
if let Some(build_event::EventType::JobEvent(j_event)) = &event.event_type {
let job_label = j_event.job_label.as_ref()
.map(|l| l.label.clone())
.unwrap_or_else(|| "unknown".to_string());
let status = match j_event.status_code {
1 => JobStatus::JobScheduled,
2 => JobStatus::JobRunning,
3 => JobStatus::JobCompleted,
4 => JobStatus::JobFailed,
5 => JobStatus::JobCancelled,
6 => JobStatus::JobSkipped,
_ => JobStatus::JobUnknown,
};
// Create or update job run detail
let job_runs = job_data.entry(job_label.clone()).or_insert_with(Vec::new);
// Find existing run or create new one
if let Some(existing_run) = job_runs.iter_mut().find(|r| r.job_run_id == j_event.job_run_id) {
// Update existing run with new status
existing_run.status = status;
existing_run.message = j_event.message.clone();
match status {
JobStatus::JobRunning => {
existing_run.started_at = Some(event.timestamp);
}
JobStatus::JobCompleted | JobStatus::JobFailed | JobStatus::JobCancelled => {
existing_run.completed_at = Some(event.timestamp);
if let Some(started) = existing_run.started_at {
existing_run.duration_ms = Some((event.timestamp - started) / 1_000_000); // Convert to ms
}
existing_run.manifests = j_event.manifests.clone();
}
_ => {}
}
} else {
// Create new job run
let job_run = JobRunDetail {
job_run_id: j_event.job_run_id.clone(),
job_label: job_label.clone(),
build_request_id: event.build_request_id.clone(),
target_partitions: j_event.target_partitions.clone(),
status,
scheduled_at: event.timestamp,
started_at: if status == JobStatus::JobRunning { Some(event.timestamp) } else { None },
completed_at: None,
duration_ms: None,
message: j_event.message.clone(),
config: j_event.config.clone(),
manifests: j_event.manifests.clone(),
};
job_runs.push(job_run);
}
}
}
// Convert to JobInfo structs with statistics
let mut job_infos: Vec<JobInfo> = job_data.into_iter()
.map(|(job_label, job_runs)| {
let total_runs = job_runs.len();
let successful_runs = job_runs.iter().filter(|r| r.status == JobStatus::JobCompleted).count();
let failed_runs = job_runs.iter().filter(|r| r.status == JobStatus::JobFailed).count();
let cancelled_runs = job_runs.iter().filter(|r| r.status == JobStatus::JobCancelled).count();
let (last_run_timestamp, last_run_status) = job_runs.iter()
.max_by_key(|r| r.scheduled_at)
.map(|r| (r.scheduled_at, r.status.clone()))
.unwrap_or((0, JobStatus::JobUnknown));
let total_partitions: usize = job_runs.iter()
.map(|r| r.target_partitions.len())
.sum();
let average_partitions_per_run = if total_runs > 0 {
total_partitions as f64 / total_runs as f64
} else {
0.0
};
// Get recent unique build request IDs
let mut recent_builds: Vec<String> = job_runs.iter()
.map(|r| r.build_request_id.clone())
.collect::<std::collections::HashSet<_>>()
.into_iter()
.collect();
recent_builds.sort();
recent_builds.truncate(10); // Keep up to 10 unique build ids (lexically sorted)
JobInfo {
job_label,
total_runs,
successful_runs,
failed_runs,
cancelled_runs,
last_run_timestamp,
last_run_status,
average_partitions_per_run,
recent_builds,
}
})
.collect();
// Sort by last run timestamp (most recent first)
job_infos.sort_by(|a, b| b.last_run_timestamp.cmp(&a.last_run_timestamp));
// Apply limit if specified
if let Some(limit) = limit {
job_infos.truncate(limit);
}
Ok(job_infos)
}
/// Show detailed information about a specific job
///
/// Returns all execution runs for the specified job label, including
/// detailed timing, status, and output information.
pub async fn show(&self, job_label: &str) -> Result<Option<(JobInfo, Vec<JobRunDetail>)>> {
// Get all job events for this specific job
let events = self.event_log.get_events_in_range(0, i64::MAX).await?;
let mut job_runs: Vec<JobRunDetail> = Vec::new();
// Collect all job events for this job label
for event in events {
if let Some(build_event::EventType::JobEvent(j_event)) = &event.event_type {
let event_job_label = j_event.job_label.as_ref()
.map(|l| l.label.clone())
.unwrap_or_else(|| "unknown".to_string());
if event_job_label != job_label {
continue;
}
let status = match j_event.status_code {
1 => JobStatus::JobScheduled,
2 => JobStatus::JobRunning,
3 => JobStatus::JobCompleted,
4 => JobStatus::JobFailed,
5 => JobStatus::JobCancelled,
6 => JobStatus::JobSkipped,
_ => JobStatus::JobUnknown,
};
// Find existing run or create new one
if let Some(existing_run) = job_runs.iter_mut().find(|r| r.job_run_id == j_event.job_run_id) {
// Update existing run with new status
existing_run.status = status;
existing_run.message = j_event.message.clone();
match status {
JobStatus::JobRunning => {
existing_run.started_at = Some(event.timestamp);
}
JobStatus::JobCompleted | JobStatus::JobFailed | JobStatus::JobCancelled => {
existing_run.completed_at = Some(event.timestamp);
if let Some(started) = existing_run.started_at {
existing_run.duration_ms = Some((event.timestamp - started) / 1_000_000); // Convert to ms
}
existing_run.manifests = j_event.manifests.clone();
}
_ => {}
}
} else {
// Create new job run
let job_run = JobRunDetail {
job_run_id: j_event.job_run_id.clone(),
job_label: job_label.to_string(),
build_request_id: event.build_request_id.clone(),
target_partitions: j_event.target_partitions.clone(),
status,
scheduled_at: event.timestamp,
started_at: if status == JobStatus::JobRunning { Some(event.timestamp) } else { None },
completed_at: None,
duration_ms: None,
message: j_event.message.clone(),
config: j_event.config.clone(),
manifests: j_event.manifests.clone(),
};
job_runs.push(job_run);
}
}
}
if job_runs.is_empty() {
return Ok(None);
}
// Sort runs by scheduled time (most recent first)
job_runs.sort_by(|a, b| b.scheduled_at.cmp(&a.scheduled_at));
// Calculate job statistics
let total_runs = job_runs.len();
let successful_runs = job_runs.iter().filter(|r| r.status == JobStatus::JobCompleted).count();
let failed_runs = job_runs.iter().filter(|r| r.status == JobStatus::JobFailed).count();
let cancelled_runs = job_runs.iter().filter(|r| r.status == JobStatus::JobCancelled).count();
let (last_run_timestamp, last_run_status) = job_runs.iter()
.max_by_key(|r| r.scheduled_at)
.map(|r| (r.scheduled_at, r.status.clone()))
.unwrap_or((0, JobStatus::JobUnknown));
let total_partitions: usize = job_runs.iter()
.map(|r| r.target_partitions.len())
.sum();
let average_partitions_per_run = if total_runs > 0 {
total_partitions as f64 / total_runs as f64
} else {
0.0
};
// Get recent unique build request IDs
let mut recent_builds: Vec<String> = job_runs.iter()
.map(|r| r.build_request_id.clone())
.collect::<std::collections::HashSet<_>>()
.into_iter()
.collect();
recent_builds.sort();
recent_builds.truncate(10); // Keep up to 10 unique build ids (lexically sorted)
let job_info = JobInfo {
job_label: job_label.to_string(),
total_runs,
successful_runs,
failed_runs,
cancelled_runs,
last_run_timestamp,
last_run_status,
average_partitions_per_run,
recent_builds,
};
Ok(Some((job_info, job_runs)))
}
/// Show detailed information about a specific job using protobuf response format
///
/// Returns the complete job details with dual status fields and run details.
pub async fn show_protobuf(&self, job_label: &str) -> Result<Option<JobDetailResponse>> {
// Get job info and runs using existing show method
if let Some((job_info, job_runs)) = self.show(job_label).await? {
// Convert job runs to protobuf format
let protobuf_runs: Vec<ServiceJobRunDetail> = job_runs
.into_iter()
.map(|run| ServiceJobRunDetail {
job_run_id: run.job_run_id,
build_request_id: run.build_request_id,
target_partitions: run.target_partitions,
status_code: run.status as i32,
status_name: run.status.to_display_string(),
started_at: run.started_at,
completed_at: run.completed_at,
duration_ms: run.duration_ms,
message: run.message,
})
.collect();
let response = JobDetailResponse {
job_label: job_info.job_label,
total_runs: job_info.total_runs as u32,
successful_runs: job_info.successful_runs as u32,
failed_runs: job_info.failed_runs as u32,
cancelled_runs: job_info.cancelled_runs as u32,
average_partitions_per_run: job_info.average_partitions_per_run,
last_run_timestamp: job_info.last_run_timestamp,
last_run_status_code: job_info.last_run_status as i32,
last_run_status_name: job_info.last_run_status.to_display_string(),
recent_builds: job_info.recent_builds,
runs: protobuf_runs,
};
Ok(Some(response))
} else {
Ok(None)
}
}
/// List jobs using protobuf response format with dual status fields
///
/// Returns JobsListResponse protobuf message with JobSummary objects containing
/// last_run_status_code and last_run_status_name fields.
pub async fn list_protobuf(&self, request: JobsListRequest) -> Result<JobsListResponse> {
// Get job info using existing list method
let jobs = self.list(request.limit.map(|l| l as usize)).await?;
// Convert to protobuf format
let protobuf_jobs: Vec<crate::JobSummary> = jobs
.into_iter()
.map(|job| crate::JobSummary {
job_label: job.job_label,
total_runs: job.total_runs as u32,
successful_runs: job.successful_runs as u32,
failed_runs: job.failed_runs as u32,
cancelled_runs: job.cancelled_runs as u32,
average_partitions_per_run: job.average_partitions_per_run,
last_run_timestamp: job.last_run_timestamp,
last_run_status_code: job.last_run_status as i32,
last_run_status_name: job.last_run_status.to_display_string(),
recent_builds: job.recent_builds,
})
.collect();
let total_count = protobuf_jobs.len() as u32;
Ok(JobsListResponse {
jobs: protobuf_jobs,
total_count,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event_log::mock::{MockBuildEventLog, test_events};
#[tokio::test]
async fn test_jobs_repository_list_empty() {
let mock_log = Arc::new(MockBuildEventLog::new().await.unwrap());
let repo = JobsRepository::new(mock_log);
let jobs = repo.list(None).await.unwrap();
assert!(jobs.is_empty());
}
#[tokio::test]
async fn test_jobs_repository_list_with_data() {
let build_id = "test-build-123".to_string();
let job_label1 = JobLabel { label: "//:process_data".to_string() };
let job_label2 = JobLabel { label: "//:generate_reports".to_string() };
let partition1 = PartitionRef { str: "data/users".to_string() };
let partition2 = PartitionRef { str: "reports/summary".to_string() };
// Create events for multiple jobs
let events = vec![
test_events::job_event(Some(build_id.clone()), Some("job-run-1".to_string()), job_label1.clone(), vec![partition1.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("job-run-1".to_string()), job_label1.clone(), vec![partition1.clone()], JobStatus::JobCompleted),
test_events::job_event(Some(build_id.clone()), Some("job-run-2".to_string()), job_label2.clone(), vec![partition2.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("job-run-2".to_string()), job_label2.clone(), vec![partition2.clone()], JobStatus::JobFailed),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = JobsRepository::new(mock_log);
let jobs = repo.list(None).await.unwrap();
assert_eq!(jobs.len(), 2);
// Find jobs by label
let process_job = jobs.iter().find(|j| j.job_label == "//:process_data").unwrap();
let reports_job = jobs.iter().find(|j| j.job_label == "//:generate_reports").unwrap();
assert_eq!(process_job.total_runs, 1);
assert_eq!(process_job.successful_runs, 1);
assert_eq!(process_job.failed_runs, 0);
assert_eq!(process_job.last_run_status, JobStatus::JobCompleted);
assert_eq!(reports_job.total_runs, 1);
assert_eq!(reports_job.successful_runs, 0);
assert_eq!(reports_job.failed_runs, 1);
assert_eq!(reports_job.last_run_status, JobStatus::JobFailed);
}
#[tokio::test]
async fn test_jobs_repository_show() {
let build_id = "test-build-456".to_string();
let job_label = JobLabel { label: "//:analytics_job".to_string() };
let partition = PartitionRef { str: "analytics/daily".to_string() };
let events = vec![
test_events::job_event(Some(build_id.clone()), Some("job-run-123".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("job-run-123".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobRunning),
test_events::job_event(Some(build_id.clone()), Some("job-run-123".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobCompleted),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = JobsRepository::new(mock_log);
let result = repo.show(&job_label.label).await.unwrap();
assert!(result.is_some());
let (info, runs) = result.unwrap();
assert_eq!(info.job_label, "//:analytics_job");
assert_eq!(info.total_runs, 1);
assert_eq!(info.successful_runs, 1);
assert_eq!(info.last_run_status, JobStatus::JobCompleted);
assert_eq!(runs.len(), 1);
let run = &runs[0];
assert_eq!(run.job_run_id, "job-run-123");
assert_eq!(run.status, JobStatus::JobCompleted);
assert_eq!(run.target_partitions.len(), 1);
assert_eq!(run.target_partitions[0].str, "analytics/daily");
}
#[tokio::test]
async fn test_jobs_repository_show_nonexistent() {
let mock_log = Arc::new(MockBuildEventLog::new().await.unwrap());
let repo = JobsRepository::new(mock_log);
let result = repo.show("//:nonexistent_job").await.unwrap();
assert!(result.is_none());
}
#[tokio::test]
async fn test_jobs_repository_statistics() {
let build_id = "test-build-789".to_string();
let job_label = JobLabel { label: "//:batch_processor".to_string() };
let partition = PartitionRef { str: "batch/data".to_string() };
// Create multiple runs with different outcomes
let events = vec![
// First run - successful
test_events::job_event(Some(build_id.clone()), Some("run-1".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("run-1".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobCompleted),
// Second run - failed
test_events::job_event(Some(build_id.clone()), Some("run-2".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("run-2".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobFailed),
// Third run - cancelled
test_events::job_event(Some(build_id.clone()), Some("run-3".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("run-3".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobCancelled),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = JobsRepository::new(mock_log);
let result = repo.show(&job_label.label).await.unwrap();
assert!(result.is_some());
let (info, _runs) = result.unwrap();
assert_eq!(info.total_runs, 3);
assert_eq!(info.successful_runs, 1);
assert_eq!(info.failed_runs, 1);
assert_eq!(info.cancelled_runs, 1);
assert_eq!(info.average_partitions_per_run, 1.0);
}
}
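A minimal usage sketch (not part of the diff) of the read path these tests exercise: construct the repository over any `BuildEventLog` handle and page through job summaries. Crate paths and the error bound (`BuildEventLogError: std::error::Error`) are assumptions inferred from the tests above.

```rust
use std::sync::Arc;

use databuild::event_log::mock::MockBuildEventLog;
use databuild::repositories::jobs::JobsRepository;

async fn print_job_stats() -> Result<(), Box<dyn std::error::Error>> {
    // Any BuildEventLog implementation works; the mock is the simplest.
    let event_log = Arc::new(MockBuildEventLog::new().await?);
    let repo = JobsRepository::new(event_log);
    for job in repo.list(Some(10)).await? {
        println!(
            "{}: {}/{} runs succeeded",
            job.job_label, job.successful_runs, job.total_runs
        );
    }
    Ok(())
}
```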

View file

@ -0,0 +1,17 @@
/// Repository pattern implementations for reading from the build event log
///
/// This module provides read-only repository interfaces that query the build event log
/// for different types of data. Each repository focuses on a specific domain:
///
/// - PartitionsRepository: Query partition status and history
/// - JobsRepository: Query job execution data
/// - TasksRepository: Query task (job run) information
/// - BuildsRepository: Query build request data
///
/// All repositories work with any BuildEventLog implementation and provide
/// a clean separation between read and write operations.
pub mod partitions;
pub mod jobs;
pub mod tasks;
pub mod builds;
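Since every repository is constructed the same way over a shared `Arc<dyn BuildEventLog>`, a service can fan a single log handle out to all four. A hedged sketch (the `BuildsRepository::new` signature is assumed to match the repositories shown in this diff):

```rust
use std::sync::Arc;

use databuild::event_log::BuildEventLog;
use databuild::repositories::{
    builds::BuildsRepository, jobs::JobsRepository,
    partitions::PartitionsRepository, tasks::TasksRepository,
};

// One shared, read-only event log handle feeds every repository.
fn make_repositories(
    log: Arc<dyn BuildEventLog>,
) -> (PartitionsRepository, JobsRepository, TasksRepository, BuildsRepository) {
    (
        PartitionsRepository::new(log.clone()),
        JobsRepository::new(log.clone()),
        TasksRepository::new(log.clone()),
        BuildsRepository::new(log),
    )
}
```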

View file

@ -0,0 +1,430 @@
use crate::*;
use crate::event_log::{BuildEventLog, BuildEventLogError, Result};
use crate::status_utils::list_response_helpers;
use std::sync::Arc;
use std::collections::HashMap;
use serde::Serialize;
/// Repository for querying partition data from the build event log
pub struct PartitionsRepository {
event_log: Arc<dyn BuildEventLog>,
}
/// Summary of a partition's current state and history
#[derive(Debug, Clone, Serialize)]
pub struct PartitionInfo {
pub partition_ref: String,
pub current_status: PartitionStatus,
pub last_updated: i64,
pub builds_count: usize,
pub last_successful_build: Option<String>,
pub invalidation_count: usize,
}
/// Detailed partition status with timeline
#[derive(Debug, Clone, Serialize)]
pub struct PartitionStatusEvent {
pub timestamp: i64,
pub status: PartitionStatus,
pub message: String,
pub build_request_id: String,
pub job_run_id: Option<String>,
}
impl PartitionsRepository {
/// Create a new PartitionsRepository
pub fn new(event_log: Arc<dyn BuildEventLog>) -> Self {
Self { event_log }
}
/// List all partitions with their current status
///
/// Returns a list of all partitions that have been referenced in the build event log,
/// along with their current status and summary information.
pub async fn list(&self, limit: Option<usize>) -> Result<Vec<PartitionInfo>> {
// Get all partition events from the event log
let events = self.event_log.get_events_in_range(0, i64::MAX).await?;
let mut partition_data: HashMap<String, Vec<PartitionStatusEvent>> = HashMap::new();
// Collect all partition events
for event in events {
if let Some(build_event::EventType::PartitionEvent(p_event)) = &event.event_type {
if let Some(partition_ref) = &p_event.partition_ref {
let status = match p_event.status_code {
1 => PartitionStatus::PartitionRequested,
2 => PartitionStatus::PartitionAnalyzed,
3 => PartitionStatus::PartitionBuilding,
4 => PartitionStatus::PartitionAvailable,
5 => PartitionStatus::PartitionFailed,
6 => PartitionStatus::PartitionDelegated,
_ => PartitionStatus::PartitionUnknown,
};
let status_event = PartitionStatusEvent {
timestamp: event.timestamp,
status,
message: p_event.message.clone(),
build_request_id: event.build_request_id.clone(),
job_run_id: if p_event.job_run_id.is_empty() { None } else { Some(p_event.job_run_id.clone()) },
};
partition_data.entry(partition_ref.str.clone())
.or_insert_with(Vec::new)
.push(status_event);
}
}
// Also check for partition invalidation events
if let Some(build_event::EventType::PartitionInvalidationEvent(pi_event)) = &event.event_type {
if let Some(partition_ref) = &pi_event.partition_ref {
let status_event = PartitionStatusEvent {
timestamp: event.timestamp,
status: PartitionStatus::PartitionUnknown, // invalidation resets the partition to unknown
message: format!("Invalidated: {}", pi_event.reason),
build_request_id: event.build_request_id.clone(),
job_run_id: None,
};
partition_data.entry(partition_ref.str.clone())
.or_insert_with(Vec::new)
.push(status_event);
}
}
}
// Convert to PartitionInfo structs
let mut partition_infos: Vec<PartitionInfo> = partition_data.into_iter()
.map(|(partition_ref, mut events)| {
// Sort events by timestamp
events.sort_by_key(|e| e.timestamp);
// Get current status from latest event
let (current_status, last_updated) = events.last()
.map(|e| (e.status.clone(), e.timestamp))
.unwrap_or((PartitionStatus::PartitionUnknown, 0));
// Count builds and find last successful build
let builds: std::collections::HashSet<String> = events.iter()
.map(|e| e.build_request_id.clone())
.collect();
let last_successful_build = events.iter()
.rev()
.find(|e| e.status == PartitionStatus::PartitionAvailable)
.map(|e| e.build_request_id.clone());
// Count invalidations
let invalidation_count = events.iter()
.filter(|e| e.message.starts_with("Invalidated:"))
.count();
PartitionInfo {
partition_ref,
current_status,
last_updated,
builds_count: builds.len(),
last_successful_build,
invalidation_count,
}
})
.collect();
// Sort by most recently updated
partition_infos.sort_by(|a, b| b.last_updated.cmp(&a.last_updated));
// Apply limit if specified
if let Some(limit) = limit {
partition_infos.truncate(limit);
}
Ok(partition_infos)
}
/// Show detailed information about a specific partition
///
/// Returns the complete timeline of status changes for the specified partition,
/// including all builds that have referenced it.
pub async fn show(&self, partition_ref: &str) -> Result<Option<(PartitionInfo, Vec<PartitionStatusEvent>)>> {
// Get all events for this partition
let events = self.event_log.get_partition_events(partition_ref, None).await?;
if events.is_empty() {
return Ok(None);
}
let mut status_events = Vec::new();
let mut builds = std::collections::HashSet::new();
// Process partition events
for event in &events {
if let Some(build_event::EventType::PartitionEvent(p_event)) = &event.event_type {
let status = match p_event.status_code {
1 => PartitionStatus::PartitionRequested,
2 => PartitionStatus::PartitionAnalyzed,
3 => PartitionStatus::PartitionBuilding,
4 => PartitionStatus::PartitionAvailable,
5 => PartitionStatus::PartitionFailed,
6 => PartitionStatus::PartitionDelegated,
_ => PartitionStatus::PartitionUnknown,
};
status_events.push(PartitionStatusEvent {
timestamp: event.timestamp,
status,
message: p_event.message.clone(),
build_request_id: event.build_request_id.clone(),
job_run_id: if p_event.job_run_id.is_empty() { None } else { Some(p_event.job_run_id.clone()) },
});
builds.insert(event.build_request_id.clone());
}
}
// Also check for invalidation events in all events
let all_events = self.event_log.get_events_in_range(0, i64::MAX).await?;
let mut invalidation_count = 0;
for event in all_events {
if let Some(build_event::EventType::PartitionInvalidationEvent(pi_event)) = &event.event_type {
if let Some(partition) = &pi_event.partition_ref {
if partition.str == partition_ref {
status_events.push(PartitionStatusEvent {
timestamp: event.timestamp,
status: PartitionStatus::PartitionUnknown, // invalidation resets the partition to unknown
message: format!("Invalidated: {}", pi_event.reason),
build_request_id: event.build_request_id.clone(),
job_run_id: None,
});
invalidation_count += 1;
}
}
}
}
// Sort events by timestamp
status_events.sort_by_key(|e| e.timestamp);
// Get current status from latest event
let (current_status, last_updated) = status_events.last()
.map(|e| (e.status.clone(), e.timestamp))
.unwrap_or((PartitionStatus::PartitionUnknown, 0));
// Find last successful build
let last_successful_build = status_events.iter()
.rev()
.find(|e| e.status == PartitionStatus::PartitionAvailable)
.map(|e| e.build_request_id.clone());
let partition_info = PartitionInfo {
partition_ref: partition_ref.to_string(),
current_status,
last_updated,
builds_count: builds.len(),
last_successful_build,
invalidation_count,
};
Ok(Some((partition_info, status_events)))
}
/// Invalidate a partition with a reason
///
/// This method uses the EventWriter to write a partition invalidation event.
/// It validates that the partition exists before invalidating it.
pub async fn invalidate(&self, partition_ref: &str, reason: String, build_request_id: String) -> Result<()> {
// First check if the partition exists
let partition_exists = self.show(partition_ref).await?.is_some();
if !partition_exists {
return Err(BuildEventLogError::QueryError(
format!("Cannot invalidate non-existent partition: {}", partition_ref)
));
}
// Use EventWriter to write the invalidation event
let event_writer = crate::event_log::writer::EventWriter::new(self.event_log.clone());
let partition = PartitionRef { str: partition_ref.to_string() };
event_writer.invalidate_partition(build_request_id, partition, reason).await
}
/// Show detailed information about a specific partition using protobuf response format
///
/// Returns the complete partition details with dual status fields and timeline events.
pub async fn show_protobuf(&self, partition_ref: &str) -> Result<Option<PartitionDetailResponse>> {
// Get partition info and timeline using existing show method
if let Some((partition_info, timeline)) = self.show(partition_ref).await? {
// Convert timeline events to protobuf format
let protobuf_timeline: Vec<PartitionTimelineEvent> = timeline
.into_iter()
.map(|event| PartitionTimelineEvent {
timestamp: event.timestamp,
status_code: event.status as i32,
status_name: event.status.to_display_string(),
message: event.message,
build_request_id: event.build_request_id,
job_run_id: event.job_run_id,
})
.collect();
let response = PartitionDetailResponse {
partition_ref: partition_info.partition_ref,
status_code: partition_info.current_status as i32,
status_name: partition_info.current_status.to_display_string(),
last_updated: partition_info.last_updated,
builds_count: partition_info.builds_count as u32,
last_successful_build: partition_info.last_successful_build,
invalidation_count: partition_info.invalidation_count as u32,
timeline: protobuf_timeline,
};
Ok(Some(response))
} else {
Ok(None)
}
}
/// List partitions returning protobuf response format with dual status fields
///
/// This method provides the unified CLI/Service response format with both
/// status codes (enum values) and status names (human-readable strings).
pub async fn list_protobuf(&self, request: PartitionsListRequest) -> Result<PartitionsListResponse> {
// Get legacy format data
let partition_infos = self.list(request.limit.map(|l| l as usize)).await?;
// Convert to protobuf format with dual status fields
let partitions: Vec<PartitionSummary> = partition_infos.into_iter()
.map(|info| {
list_response_helpers::create_partition_summary(
info.partition_ref,
info.current_status,
info.last_updated,
info.builds_count,
info.invalidation_count,
info.last_successful_build,
)
})
.collect();
// TODO: Implement proper pagination with offset and has_more
// For now, return simple response without full pagination support
let total_count = partitions.len() as u32;
let has_more = false; // This would be calculated based on actual total vs returned
Ok(PartitionsListResponse {
partitions,
total_count,
has_more,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event_log::mock::{MockBuildEventLog, test_events};
#[tokio::test]
async fn test_partitions_repository_list_empty() {
let mock_log = Arc::new(MockBuildEventLog::new().await.unwrap());
let repo = PartitionsRepository::new(mock_log);
let partitions = repo.list(None).await.unwrap();
assert!(partitions.is_empty());
}
#[tokio::test]
async fn test_partitions_repository_list_with_data() {
let build_id = "test-build-123".to_string();
let partition1 = PartitionRef { str: "data/users".to_string() };
let partition2 = PartitionRef { str: "data/orders".to_string() };
// Create events for multiple partitions
let events = vec![
test_events::build_request_received(Some(build_id.clone()), vec![partition1.clone(), partition2.clone()]),
test_events::partition_status(Some(build_id.clone()), partition1.clone(), PartitionStatus::PartitionBuilding, None),
test_events::partition_status(Some(build_id.clone()), partition1.clone(), PartitionStatus::PartitionAvailable, None),
test_events::partition_status(Some(build_id.clone()), partition2.clone(), PartitionStatus::PartitionBuilding, None),
test_events::partition_status(Some(build_id.clone()), partition2.clone(), PartitionStatus::PartitionFailed, None),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = PartitionsRepository::new(mock_log);
let partitions = repo.list(None).await.unwrap();
assert_eq!(partitions.len(), 2);
// Find partitions by name
let users_partition = partitions.iter().find(|p| p.partition_ref == "data/users").unwrap();
let orders_partition = partitions.iter().find(|p| p.partition_ref == "data/orders").unwrap();
assert_eq!(users_partition.current_status, PartitionStatus::PartitionAvailable);
assert_eq!(orders_partition.current_status, PartitionStatus::PartitionFailed);
assert_eq!(users_partition.builds_count, 1);
assert_eq!(orders_partition.builds_count, 1);
}
#[tokio::test]
async fn test_partitions_repository_show() {
let build_id = "test-build-456".to_string();
let partition = PartitionRef { str: "analytics/metrics".to_string() };
let events = vec![
test_events::partition_status(Some(build_id.clone()), partition.clone(), PartitionStatus::PartitionRequested, None),
test_events::partition_status(Some(build_id.clone()), partition.clone(), PartitionStatus::PartitionBuilding, None),
test_events::partition_status(Some(build_id.clone()), partition.clone(), PartitionStatus::PartitionAvailable, None),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = PartitionsRepository::new(mock_log);
let result = repo.show(&partition.str).await.unwrap();
assert!(result.is_some());
let (info, timeline) = result.unwrap();
assert_eq!(info.partition_ref, "analytics/metrics");
assert_eq!(info.current_status, PartitionStatus::PartitionAvailable);
assert_eq!(info.builds_count, 1);
assert_eq!(timeline.len(), 3);
// Verify timeline order
assert_eq!(timeline[0].status, PartitionStatus::PartitionRequested);
assert_eq!(timeline[1].status, PartitionStatus::PartitionBuilding);
assert_eq!(timeline[2].status, PartitionStatus::PartitionAvailable);
}
#[tokio::test]
async fn test_partitions_repository_show_nonexistent() {
let mock_log = Arc::new(MockBuildEventLog::new().await.unwrap());
let repo = PartitionsRepository::new(mock_log);
let result = repo.show("nonexistent/partition").await.unwrap();
assert!(result.is_none());
}
#[tokio::test]
async fn test_partitions_repository_invalidate() {
let build_id = "test-build-789".to_string();
let partition = PartitionRef { str: "temp/data".to_string() };
// Start with an existing partition
let events = vec![
test_events::partition_status(Some(build_id.clone()), partition.clone(), PartitionStatus::PartitionAvailable, None),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = PartitionsRepository::new(mock_log.clone());
// Invalidate the partition
repo.invalidate(&partition.str, "Test invalidation".to_string(), build_id.clone()).await.unwrap();
// Verify the invalidation was recorded
// Note: This test demonstrates the pattern, but the MockBuildEventLog would need
// to be enhanced to properly store invalidation events for full verification
// Try to invalidate a non-existent partition
let result = repo.invalidate("nonexistent/partition", "Should fail".to_string(), build_id).await;
assert!(result.is_err());
}
}
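The invalidation flow above (validate existence, then append an event through `EventWriter`) can be driven end to end against the mock log, mirroring the test pattern. A hedged sketch, assuming `BuildEventLogError` implements `std::error::Error`:

```rust
use std::sync::Arc;

use databuild::event_log::mock::{MockBuildEventLog, test_events};
use databuild::repositories::partitions::PartitionsRepository;
use databuild::{PartitionRef, PartitionStatus};

// Seed one available partition, then append an invalidation event for it.
async fn demo_invalidate() -> Result<(), Box<dyn std::error::Error>> {
    let partition = PartitionRef { str: "demo/data".to_string() };
    let events = vec![test_events::partition_status(
        Some("build-1".to_string()),
        partition.clone(),
        PartitionStatus::PartitionAvailable,
        None,
    )];
    let repo = PartitionsRepository::new(Arc::new(MockBuildEventLog::with_events(events).await?));
    repo.invalidate(&partition.str, "stale upstream data".to_string(), "build-2".to_string())
        .await?;
    Ok(())
}
```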

View file

@ -0,0 +1,518 @@
use crate::*;
use crate::event_log::{BuildEventLog, BuildEventLogError, Result};
use crate::{TaskDetailResponse, TaskTimelineEvent as ServiceTaskTimelineEvent};
use std::sync::Arc;
use std::collections::HashMap;
use serde::Serialize;
/// Repository for querying task (job run) data from the build event log
pub struct TasksRepository {
event_log: Arc<dyn BuildEventLog>,
}
/// Summary of a task's execution
#[derive(Debug, Clone, Serialize)]
pub struct TaskInfo {
pub job_run_id: String,
pub job_label: String,
pub build_request_id: String,
pub status: JobStatus,
pub target_partitions: Vec<PartitionRef>,
pub scheduled_at: i64,
pub started_at: Option<i64>,
pub completed_at: Option<i64>,
pub duration_ms: Option<i64>,
pub message: String,
pub config: Option<JobConfig>,
pub manifests: Vec<PartitionManifest>,
pub cancelled: bool,
pub cancel_reason: Option<String>,
}
/// Detailed timeline of a task's execution events
#[derive(Debug, Clone, Serialize)]
pub struct TaskEvent {
pub timestamp: i64,
pub event_type: String,
pub status: Option<JobStatus>,
pub message: String,
pub cancel_reason: Option<String>,
}
impl TasksRepository {
/// Create a new TasksRepository
pub fn new(event_log: Arc<dyn BuildEventLog>) -> Self {
Self { event_log }
}
/// List all tasks with their current status
///
/// Returns a list of all job runs (tasks) that have been executed,
/// including their current status and execution details.
pub async fn list(&self, limit: Option<usize>) -> Result<Vec<TaskInfo>> {
// Get all events from the event log
let events = self.event_log.get_events_in_range(0, i64::MAX).await?;
let mut task_data: HashMap<String, TaskInfo> = HashMap::new();
let mut task_cancellations: HashMap<String, String> = HashMap::new();
// First pass: collect all task cancel events
for event in &events {
if let Some(build_event::EventType::TaskCancelEvent(tc_event)) = &event.event_type {
task_cancellations.insert(tc_event.job_run_id.clone(), tc_event.reason.clone());
}
}
// Second pass: collect all job events and build task information
for event in events {
if let Some(build_event::EventType::JobEvent(j_event)) = &event.event_type {
let job_label = j_event.job_label.as_ref()
.map(|l| l.label.clone())
.unwrap_or_else(|| "unknown".to_string());
let status = match j_event.status_code {
1 => JobStatus::JobScheduled,
2 => JobStatus::JobRunning,
3 => JobStatus::JobCompleted,
4 => JobStatus::JobFailed,
5 => JobStatus::JobCancelled,
6 => JobStatus::JobSkipped,
_ => JobStatus::JobUnknown,
};
// Create or update task info
let task = task_data.entry(j_event.job_run_id.clone()).or_insert_with(|| {
TaskInfo {
job_run_id: j_event.job_run_id.clone(),
job_label: job_label.clone(),
build_request_id: event.build_request_id.clone(),
status: JobStatus::JobUnknown,
target_partitions: j_event.target_partitions.clone(),
scheduled_at: event.timestamp,
started_at: None,
completed_at: None,
duration_ms: None,
message: String::new(),
config: None,
manifests: vec![],
cancelled: false,
cancel_reason: None,
}
});
// Update task with new information
task.status = status;
task.message = j_event.message.clone();
match status {
JobStatus::JobScheduled => {
task.scheduled_at = event.timestamp;
if let Some(config) = &j_event.config {
task.config = Some(config.clone());
}
}
JobStatus::JobRunning => {
task.started_at = Some(event.timestamp);
}
JobStatus::JobCompleted | JobStatus::JobFailed | JobStatus::JobCancelled => {
task.completed_at = Some(event.timestamp);
if let Some(started) = task.started_at {
task.duration_ms = Some((event.timestamp - started) / 1_000_000); // nanosecond timestamps -> milliseconds
}
task.manifests = j_event.manifests.clone();
}
_ => {}
}
// Check if this task was cancelled
if let Some(cancel_reason) = task_cancellations.get(&j_event.job_run_id) {
task.cancelled = true;
task.cancel_reason = Some(cancel_reason.clone());
}
}
}
// Convert to vector and sort by scheduled time (most recent first)
let mut tasks: Vec<TaskInfo> = task_data.into_values().collect();
tasks.sort_by(|a, b| b.scheduled_at.cmp(&a.scheduled_at));
// Apply limit if specified
if let Some(limit) = limit {
tasks.truncate(limit);
}
Ok(tasks)
}
/// Show detailed information about a specific task
///
/// Returns the complete timeline of events for the specified task,
/// including all status changes and any cancellation events.
pub async fn show(&self, job_run_id: &str) -> Result<Option<(TaskInfo, Vec<TaskEvent>)>> {
// Get all events for this specific job run
let job_events = self.event_log.get_job_run_events(job_run_id).await?;
if job_events.is_empty() {
return Ok(None);
}
let mut task_info: Option<TaskInfo> = None;
let mut timeline: Vec<TaskEvent> = Vec::new();
// Process job events to build task information
for event in &job_events {
if let Some(build_event::EventType::JobEvent(j_event)) = &event.event_type {
let job_label = j_event.job_label.as_ref()
.map(|l| l.label.clone())
.unwrap_or_else(|| "unknown".to_string());
let status = match j_event.status_code {
1 => JobStatus::JobScheduled,
2 => JobStatus::JobRunning,
3 => JobStatus::JobCompleted,
4 => JobStatus::JobFailed,
5 => JobStatus::JobCancelled,
6 => JobStatus::JobSkipped,
_ => JobStatus::JobUnknown,
};
// Create or update task info
if task_info.is_none() {
task_info = Some(TaskInfo {
job_run_id: j_event.job_run_id.clone(),
job_label: job_label.clone(),
build_request_id: event.build_request_id.clone(),
status: JobStatus::JobUnknown,
target_partitions: j_event.target_partitions.clone(),
scheduled_at: event.timestamp,
started_at: None,
completed_at: None,
duration_ms: None,
message: String::new(),
config: None,
manifests: vec![],
cancelled: false,
cancel_reason: None,
});
}
let task = task_info.as_mut().unwrap();
task.status = status;
task.message = j_event.message.clone();
match status {
JobStatus::JobScheduled => {
task.scheduled_at = event.timestamp;
if let Some(config) = &j_event.config {
task.config = Some(config.clone());
}
}
JobStatus::JobRunning => {
task.started_at = Some(event.timestamp);
}
JobStatus::JobCompleted | JobStatus::JobFailed | JobStatus::JobCancelled => {
task.completed_at = Some(event.timestamp);
if let Some(started) = task.started_at {
task.duration_ms = Some((event.timestamp - started) / 1_000_000); // nanosecond timestamps -> milliseconds
}
task.manifests = j_event.manifests.clone();
}
_ => {}
}
// Add to timeline
timeline.push(TaskEvent {
timestamp: event.timestamp,
event_type: "job_status_change".to_string(),
status: Some(status),
message: j_event.message.clone(),
cancel_reason: None,
});
}
}
// Also check for task cancel events in all events
let all_events = self.event_log.get_events_in_range(0, i64::MAX).await?;
for event in all_events {
if let Some(build_event::EventType::TaskCancelEvent(tc_event)) = &event.event_type {
if tc_event.job_run_id == job_run_id {
if let Some(task) = task_info.as_mut() {
task.cancelled = true;
task.cancel_reason = Some(tc_event.reason.clone());
}
timeline.push(TaskEvent {
timestamp: event.timestamp,
event_type: "task_cancel".to_string(),
status: None,
message: "Task cancelled".to_string(),
cancel_reason: Some(tc_event.reason.clone()),
});
}
}
}
// Sort timeline by timestamp
timeline.sort_by_key(|e| e.timestamp);
Ok(task_info.map(|info| (info, timeline)))
}
/// Cancel a task with a reason
///
/// This method uses the EventWriter to write a task cancellation event.
/// It validates that the task exists and is in a cancellable state.
pub async fn cancel(&self, job_run_id: &str, reason: String, build_request_id: String) -> Result<()> {
// First check if the task exists and get its current status
let task_info = self.show(job_run_id).await?;
if task_info.is_none() {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel non-existent task: {}", job_run_id)
));
}
let (task, _timeline) = task_info.unwrap();
// Check if task is in a cancellable state
match task.status {
JobStatus::JobCompleted => {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel completed task: {}", job_run_id)
));
}
JobStatus::JobFailed => {
return Err(BuildEventLogError::QueryError(
format!("Cannot cancel failed task: {}", job_run_id)
));
}
JobStatus::JobCancelled => {
return Err(BuildEventLogError::QueryError(
format!("Task already cancelled: {}", job_run_id)
));
}
_ => {}
}
// Use EventWriter to write the cancellation event
let event_writer = crate::event_log::writer::EventWriter::new(self.event_log.clone());
event_writer.cancel_task(build_request_id, job_run_id.to_string(), reason).await
}
/// Show detailed information about a specific task using protobuf response format
///
/// Returns the complete task details with dual status fields and timeline events.
pub async fn show_protobuf(&self, job_run_id: &str) -> Result<Option<TaskDetailResponse>> {
// Get task info and timeline using existing show method
if let Some((task_info, timeline)) = self.show(job_run_id).await? {
// Convert timeline events to protobuf format
let protobuf_timeline: Vec<ServiceTaskTimelineEvent> = timeline
.into_iter()
.map(|event| ServiceTaskTimelineEvent {
timestamp: event.timestamp,
status_code: event.status.map(|s| s as i32),
status_name: event.status.map(|s| s.to_display_string()),
message: event.message,
event_type: event.event_type,
cancel_reason: event.cancel_reason,
})
.collect();
let response = TaskDetailResponse {
job_run_id: task_info.job_run_id,
job_label: task_info.job_label,
build_request_id: task_info.build_request_id,
status_code: task_info.status as i32,
status_name: task_info.status.to_display_string(),
target_partitions: task_info.target_partitions,
scheduled_at: task_info.scheduled_at,
started_at: task_info.started_at,
completed_at: task_info.completed_at,
duration_ms: task_info.duration_ms,
cancelled: task_info.cancelled,
cancel_reason: task_info.cancel_reason,
message: task_info.message,
timeline: protobuf_timeline,
};
Ok(Some(response))
} else {
Ok(None)
}
}
/// List tasks using protobuf response format with dual status fields
///
/// Returns TasksListResponse protobuf message with TaskSummary objects containing
/// status_code and status_name fields.
pub async fn list_protobuf(&self, request: TasksListRequest) -> Result<TasksListResponse> {
// Get task info using existing list method
let tasks = self.list(request.limit.map(|l| l as usize)).await?;
// Convert to protobuf format
let protobuf_tasks: Vec<crate::TaskSummary> = tasks
.into_iter()
.map(|task| crate::TaskSummary {
job_run_id: task.job_run_id,
job_label: task.job_label,
build_request_id: task.build_request_id,
status_code: task.status as i32,
status_name: task.status.to_display_string(),
target_partitions: task.target_partitions.into_iter().map(|p| crate::PartitionRef { str: p.str }).collect(),
scheduled_at: task.scheduled_at,
started_at: task.started_at,
completed_at: task.completed_at,
duration_ms: task.duration_ms,
cancelled: task.cancelled,
message: task.message,
})
.collect();
let total_count = protobuf_tasks.len() as u32;
Ok(TasksListResponse {
tasks: protobuf_tasks,
total_count,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event_log::mock::{MockBuildEventLog, test_events};
#[tokio::test]
async fn test_tasks_repository_list_empty() {
let mock_log = Arc::new(MockBuildEventLog::new().await.unwrap());
let repo = TasksRepository::new(mock_log);
let tasks = repo.list(None).await.unwrap();
assert!(tasks.is_empty());
}
#[tokio::test]
async fn test_tasks_repository_list_with_data() {
let build_id = "test-build-123".to_string();
let job_label = JobLabel { label: "//:process_data".to_string() };
let partition = PartitionRef { str: "data/users".to_string() };
// Create events for multiple tasks
let events = vec![
test_events::job_event(Some(build_id.clone()), Some("task-1".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("task-1".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobCompleted),
test_events::job_event(Some(build_id.clone()), Some("task-2".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("task-2".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobFailed),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = TasksRepository::new(mock_log);
let tasks = repo.list(None).await.unwrap();
assert_eq!(tasks.len(), 2);
// Find tasks by job run id
let task1 = tasks.iter().find(|t| t.job_run_id == "task-1").unwrap();
let task2 = tasks.iter().find(|t| t.job_run_id == "task-2").unwrap();
assert_eq!(task1.status, JobStatus::JobCompleted);
assert_eq!(task1.job_label, "//:process_data");
assert!(!task1.cancelled);
assert_eq!(task2.status, JobStatus::JobFailed);
assert_eq!(task2.job_label, "//:process_data");
assert!(!task2.cancelled);
}
#[tokio::test]
async fn test_tasks_repository_show() {
let build_id = "test-build-456".to_string();
let job_label = JobLabel { label: "//:analytics_task".to_string() };
let partition = PartitionRef { str: "analytics/daily".to_string() };
let events = vec![
test_events::job_event(Some(build_id.clone()), Some("task-123".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("task-123".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobRunning),
test_events::job_event(Some(build_id.clone()), Some("task-123".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobCompleted),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = TasksRepository::new(mock_log);
let result = repo.show("task-123").await.unwrap();
assert!(result.is_some());
let (info, timeline) = result.unwrap();
assert_eq!(info.job_run_id, "task-123");
assert_eq!(info.job_label, "//:analytics_task");
assert_eq!(info.status, JobStatus::JobCompleted);
assert!(!info.cancelled);
assert_eq!(timeline.len(), 3);
assert_eq!(timeline[0].status, Some(JobStatus::JobScheduled));
assert_eq!(timeline[1].status, Some(JobStatus::JobRunning));
assert_eq!(timeline[2].status, Some(JobStatus::JobCompleted));
}
#[tokio::test]
async fn test_tasks_repository_show_nonexistent() {
let mock_log = Arc::new(MockBuildEventLog::new().await.unwrap());
let repo = TasksRepository::new(mock_log);
let result = repo.show("nonexistent-task").await.unwrap();
assert!(result.is_none());
}
#[tokio::test]
async fn test_tasks_repository_cancel() {
let build_id = "test-build-789".to_string();
let job_label = JobLabel { label: "//:batch_task".to_string() };
let partition = PartitionRef { str: "batch/data".to_string() };
// Start with a running task
let events = vec![
test_events::job_event(Some(build_id.clone()), Some("task-456".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("task-456".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobRunning),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = TasksRepository::new(mock_log.clone());
// Cancel the task
repo.cancel("task-456", "User requested cancellation".to_string(), build_id.clone()).await.unwrap();
// Verify the cancellation was recorded
// Note: This test demonstrates the pattern, but the MockBuildEventLog would need
// to be enhanced to properly store task cancel events for full verification
// Try to cancel a non-existent task
let result = repo.cancel("nonexistent-task", "Should fail".to_string(), build_id).await;
assert!(result.is_err());
}
#[tokio::test]
async fn test_tasks_repository_cancel_completed_task() {
let build_id = "test-build-999".to_string();
let job_label = JobLabel { label: "//:completed_task".to_string() };
let partition = PartitionRef { str: "test/data".to_string() };
// Create a completed task
let events = vec![
test_events::job_event(Some(build_id.clone()), Some("completed-task".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobScheduled),
test_events::job_event(Some(build_id.clone()), Some("completed-task".to_string()), job_label.clone(), vec![partition.clone()], JobStatus::JobCompleted),
];
let mock_log = Arc::new(MockBuildEventLog::with_events(events).await.unwrap());
let repo = TasksRepository::new(mock_log);
// Try to cancel the completed task - should fail
let result = repo.cancel("completed-task", "Should fail".to_string(), build_id).await;
assert!(result.is_err());
if let Err(BuildEventLogError::QueryError(msg)) = result {
assert!(msg.contains("Cannot cancel completed task"));
} else {
panic!("Expected QueryError for completed task cancellation");
}
}
}
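The `cancel` guard above encodes a small status state machine: terminal statuses reject cancellation, everything else may still be cancelled. A hedged distillation of that rule:

```rust
use databuild::JobStatus;

// Terminal statuses (completed, failed, already cancelled) cannot be
// cancelled; scheduled/running/unknown tasks still can.
fn is_cancellable(status: JobStatus) -> bool {
    !matches!(
        status,
        JobStatus::JobCompleted | JobStatus::JobFailed | JobStatus::JobCancelled
    )
}
```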

File diff suppressed because it is too large

View file

@ -75,7 +75,8 @@ pub struct BuildEventSummary {
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionStatusResponse {
pub partition_ref: String,
pub status: String,
pub status_code: i32,
pub status_name: String,
pub last_updated: Option<i64>,
pub build_requests: Vec<String>,
}
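The single `status: String` field gives way to the dual `status_code`/`status_name` pair throughout this commit. A hedged sketch of the JSON shape this struct now serializes to (field values illustrative only):

```rust
use serde_json::json;

// status_code drives programmatic checks; status_name is for display.
fn expected_shape() -> serde_json::Value {
    json!({
        "partition_ref": "data/users",
        "status_code": 4,              // PartitionStatus::PartitionAvailable
        "status_name": "available",
        "last_updated": 1_700_000_000_i64,
        "build_requests": ["build-42"]
    })
}
```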
@ -93,73 +94,102 @@ pub struct AnalyzeRequest {
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct AnalyzeResponse {
#[schemars(schema_with = "job_graph_schema")]
pub job_graph: serde_json::Value,
}
fn job_graph_schema(_gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema {
schemars::schema::Schema::Object(schemars::schema::SchemaObject {
instance_type: Some(schemars::schema::SingleOrVec::Single(Box::new(schemars::schema::InstanceType::Object))),
..Default::default()
})
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct ErrorResponse {
pub error: String,
}
// List endpoints request/response types
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildsListResponse {
pub builds: Vec<BuildSummary>,
pub total_count: u32,
pub has_more: bool,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildSummary {
pub struct BuildCancelResponse {
pub cancelled: bool,
pub build_request_id: String,
pub status: String,
pub requested_partitions: Vec<String>,
pub created_at: i64,
pub updated_at: i64,
}
// TODO snake cased response
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionsListResponse {
pub partitions: Vec<PartitionSummary>,
pub struct BuildCancelRepositoryResponse {
pub cancelled: bool,
pub build_request_id: String,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionInvalidateResponse {
pub invalidated: bool,
pub partition_ref: String,
pub reason: String,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TaskCancelResponse {
pub cancelled: bool,
pub job_run_id: String,
pub reason: String,
}
// List endpoints request/response types
// Removed: duplicate of crate::BuildsListResponse from proto
// Wrapper structs for API responses that contain protobuf data + service metadata
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildsListApiResponse {
pub data: crate::BuildsListResponse,
pub request_id: Option<String>,
pub pagination: Option<PaginationInfo>,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionsListApiResponse {
pub data: crate::PartitionsListResponse,
pub request_id: Option<String>,
pub pagination: Option<PaginationInfo>,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobsListApiResponse {
pub data: crate::JobsListResponse,
pub request_id: Option<String>,
pub pagination: Option<PaginationInfo>,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TasksListApiResponse {
pub data: crate::TasksListResponse,
pub request_id: Option<String>,
pub pagination: Option<PaginationInfo>,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct ActivityApiResponse {
pub data: crate::ActivityResponse,
pub request_id: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PaginationInfo {
pub total_count: u32,
pub has_more: bool,
pub limit: Option<u32>,
pub offset: Option<u32>,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionSummary {
pub partition_ref: String,
pub status: String,
pub updated_at: i64,
pub build_request_id: Option<String>,
}
// Removed: Legacy types that duplicate proto definitions
// - BuildSummary (use crate::BuildSummary from proto)
// - PartitionsListResponse (use crate::PartitionsListResponse from proto)
// - PartitionSummary (use crate::PartitionSummary from proto)
// TODO camel cased results
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct ActivityResponse {
pub active_builds_count: u32,
pub recent_builds: Vec<BuildSummary>,
pub recent_partitions: Vec<PartitionSummary>,
pub total_partitions_count: u32,
pub system_status: String,
pub graph_name: String,
}
// Job-related request/response types
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobsListResponse {
pub jobs: Vec<JobSummary>,
pub total_count: u32,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobSummary {
pub job_label: String,
pub success_rate: f64,
pub avg_duration_ms: Option<i64>,
pub recent_runs: u32,
pub last_run: Option<i64>,
}
// Removed: JobsListResponse and JobSummary (use crate:: proto versions)
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobMetricsResponse {
@ -175,7 +205,8 @@ pub struct JobMetricsResponse {
pub struct JobRunSummary {
pub build_request_id: String,
pub partitions: Vec<String>,
pub status: String,
pub status_code: i32,
pub status_name: String,
pub duration_ms: Option<i64>,
pub started_at: i64,
}
@ -213,14 +244,20 @@ impl BuildGraphService {
// Create API router with all routes to generate OpenAPI spec
let _ = ApiRouter::new()
.api_route("/api/v1/builds", post(handlers::submit_build_request))
.api_route("/api/v1/builds", get(handlers::list_build_requests))
.api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_status))
.api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_request))
.api_route("/api/v1/partitions", get(handlers::list_partitions))
.api_route("/api/v1/partitions/:ref/status", get(handlers::get_partition_status))
.api_route("/api/v1/partitions/:ref/events", get(handlers::get_partition_events))
.api_route("/api/v1/jobs", get(handlers::list_jobs))
.api_route("/api/v1/jobs/:label", get(handlers::get_job_metrics))
.api_route("/api/v1/builds", get(handlers::list_builds_repository))
.api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_detail))
.api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_repository))
.api_route("/api/v1/partitions", get(handlers::list_partitions_repository))
.api_route("/api/v1/partitions/:partition_ref", get(handlers::get_partition_detail))
.api_route("/api/v1/partitions/:partition_ref/status", get(handlers::get_partition_status))
.api_route("/api/v1/partitions/:partition_ref/events", get(handlers::get_partition_events))
.api_route("/api/v1/partitions/:partition_ref/invalidate", post(handlers::invalidate_partition))
.api_route("/api/v1/jobs", get(handlers::list_jobs_repository))
.api_route("/api/v1/jobs/:label", get(handlers::get_job_detail))
.api_route("/api/v1/jobs/:label/metrics", get(handlers::get_job_metrics))
.api_route("/api/v1/tasks", get(handlers::list_tasks_repository))
.api_route("/api/v1/tasks/:job_run_id", get(handlers::get_task_detail))
.api_route("/api/v1/tasks/:job_run_id/cancel", post(handlers::cancel_task))
.api_route("/api/v1/activity", get(handlers::get_activity_summary))
.api_route("/api/v1/analyze", post(handlers::analyze_build_graph))
.finish_api(&mut api);
@ -233,14 +270,20 @@ impl BuildGraphService {
let api_router = ApiRouter::new()
.api_route("/api/v1/builds", post(handlers::submit_build_request))
.api_route("/api/v1/builds", get(handlers::list_build_requests))
.api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_status))
.api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_request))
.api_route("/api/v1/partitions", get(handlers::list_partitions))
.api_route("/api/v1/partitions/:ref/status", get(handlers::get_partition_status))
.api_route("/api/v1/partitions/:ref/events", get(handlers::get_partition_events))
.api_route("/api/v1/jobs", get(handlers::list_jobs))
.api_route("/api/v1/jobs/:label", get(handlers::get_job_metrics))
.api_route("/api/v1/builds", get(handlers::list_builds_repository))
.api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_detail))
.api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_repository))
.api_route("/api/v1/partitions", get(handlers::list_partitions_repository))
.api_route("/api/v1/partitions/:partition_ref", get(handlers::get_partition_detail))
.api_route("/api/v1/partitions/:partition_ref/status", get(handlers::get_partition_status))
.api_route("/api/v1/partitions/:partition_ref/events", get(handlers::get_partition_events))
.api_route("/api/v1/partitions/:partition_ref/invalidate", post(handlers::invalidate_partition))
.api_route("/api/v1/jobs", get(handlers::list_jobs_repository))
.api_route("/api/v1/jobs/:label", get(handlers::get_job_detail))
.api_route("/api/v1/jobs/:label/metrics", get(handlers::get_job_metrics))
.api_route("/api/v1/tasks", get(handlers::list_tasks_repository))
.api_route("/api/v1/tasks/:job_run_id", get(handlers::get_task_detail))
.api_route("/api/v1/tasks/:job_run_id/cancel", post(handlers::cancel_task))
.api_route("/api/v1/activity", get(handlers::get_activity_summary))
.api_route("/api/v1/analyze", post(handlers::analyze_build_graph))
.route("/api/v1/openapi.json", get(Self::openapi_spec))
@ -371,4 +414,54 @@ impl BuildGraphService {
}
}
pub type ServiceState = Arc<BuildGraphService>;
// Repository-based response types
// Removed: PartitionDetailResponse and PartitionTimelineEvent (use crate:: proto versions)
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobsRepositoryListResponse {
pub jobs: Vec<JobRepositorySummary>,
pub total_count: u32,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobRepositorySummary {
pub job_label: String,
pub total_runs: usize,
pub successful_runs: usize,
pub failed_runs: usize,
pub cancelled_runs: usize,
pub average_partitions_per_run: f64,
pub last_run_timestamp: i64,
pub last_run_status: String,
pub recent_builds: Vec<String>,
}
// Removed: JobDetailResponse, JobRunDetail, TasksListResponse, TaskSummary (use crate:: proto versions)
// Removed: TaskDetailResponse and TaskTimelineEvent (use crate:: proto versions)
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildsRepositoryListResponse {
pub builds: Vec<BuildRepositorySummary>,
pub total_count: u32,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildRepositorySummary {
pub build_request_id: String,
pub status: String,
pub requested_partitions: Vec<String>,
pub total_jobs: usize,
pub completed_jobs: usize,
pub failed_jobs: usize,
pub cancelled_jobs: usize,
pub requested_at: i64,
pub started_at: Option<i64>,
pub completed_at: Option<i64>,
pub duration_ms: Option<i64>,
pub cancelled: bool,
}
// Removed: BuildDetailResponse and BuildTimelineEvent (use crate:: proto versions)
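The `*ApiResponse` wrappers above pair a proto payload with per-request service metadata. A hedged sketch of filling one (the `databuild::service` module path is an assumption):

```rust
use databuild::service::{PaginationInfo, PartitionsListApiResponse};

// Wrap the proto list payload in the API envelope, copying its counts
// into the pagination metadata.
fn wrap_partitions(
    data: databuild::PartitionsListResponse,
    request_id: Option<String>,
) -> PartitionsListApiResponse {
    let pagination = PaginationInfo {
        total_count: data.total_count,
        has_more: data.has_more,
        limit: None,
        offset: None,
    };
    PartitionsListApiResponse { data, request_id, pagination: Some(pagination) }
}
```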

282
databuild/status_utils.rs Normal file
View file

@ -0,0 +1,282 @@
use crate::*;
/// Utilities for converting status enums to human-readable strings.
/// This provides consistent status naming across the CLI and Service interfaces.
impl PartitionStatus {
/// Convert partition status to human-readable string matching current CLI/service format
pub fn to_display_string(&self) -> String {
match self {
PartitionStatus::PartitionUnknown => "unknown".to_string(),
PartitionStatus::PartitionRequested => "requested".to_string(),
PartitionStatus::PartitionAnalyzed => "analyzed".to_string(),
PartitionStatus::PartitionBuilding => "building".to_string(),
PartitionStatus::PartitionAvailable => "available".to_string(),
PartitionStatus::PartitionFailed => "failed".to_string(),
PartitionStatus::PartitionDelegated => "delegated".to_string(),
}
}
/// Parse a display string back to enum (for filtering, etc.)
pub fn from_display_string(s: &str) -> Option<Self> {
match s {
"unknown" => Some(PartitionStatus::PartitionUnknown),
"requested" => Some(PartitionStatus::PartitionRequested),
"analyzed" => Some(PartitionStatus::PartitionAnalyzed),
"building" => Some(PartitionStatus::PartitionBuilding),
"available" => Some(PartitionStatus::PartitionAvailable),
"failed" => Some(PartitionStatus::PartitionFailed),
"delegated" => Some(PartitionStatus::PartitionDelegated),
_ => None,
}
}
}
impl JobStatus {
/// Convert job status to human-readable string matching current CLI/service format
pub fn to_display_string(&self) -> String {
match self {
JobStatus::JobUnknown => "unknown".to_string(),
JobStatus::JobScheduled => "scheduled".to_string(),
JobStatus::JobRunning => "running".to_string(),
JobStatus::JobCompleted => "completed".to_string(),
JobStatus::JobFailed => "failed".to_string(),
JobStatus::JobCancelled => "cancelled".to_string(),
JobStatus::JobSkipped => "skipped".to_string(),
}
}
/// Parse a display string back to enum
pub fn from_display_string(s: &str) -> Option<Self> {
match s {
"unknown" => Some(JobStatus::JobUnknown),
"scheduled" => Some(JobStatus::JobScheduled),
"running" => Some(JobStatus::JobRunning),
"completed" => Some(JobStatus::JobCompleted),
"failed" => Some(JobStatus::JobFailed),
"cancelled" => Some(JobStatus::JobCancelled),
"skipped" => Some(JobStatus::JobSkipped),
_ => None,
}
}
}
impl BuildRequestStatus {
/// Convert build request status to human-readable string matching current CLI/service format
pub fn to_display_string(&self) -> String {
match self {
BuildRequestStatus::BuildRequestUnknown => "unknown".to_string(),
BuildRequestStatus::BuildRequestReceived => "received".to_string(),
BuildRequestStatus::BuildRequestPlanning => "planning".to_string(),
BuildRequestStatus::BuildRequestAnalysisCompleted => "analysis_completed".to_string(),
BuildRequestStatus::BuildRequestExecuting => "executing".to_string(),
BuildRequestStatus::BuildRequestCompleted => "completed".to_string(),
BuildRequestStatus::BuildRequestFailed => "failed".to_string(),
BuildRequestStatus::BuildRequestCancelled => "cancelled".to_string(),
}
}
/// Parse a display string back to enum
pub fn from_display_string(s: &str) -> Option<Self> {
match s {
"unknown" => Some(BuildRequestStatus::BuildRequestUnknown),
"received" => Some(BuildRequestStatus::BuildRequestReceived),
"planning" => Some(BuildRequestStatus::BuildRequestPlanning),
"analysis_completed" => Some(BuildRequestStatus::BuildRequestAnalysisCompleted),
"executing" => Some(BuildRequestStatus::BuildRequestExecuting),
"completed" => Some(BuildRequestStatus::BuildRequestCompleted),
"failed" => Some(BuildRequestStatus::BuildRequestFailed),
"cancelled" => Some(BuildRequestStatus::BuildRequestCancelled),
_ => None,
}
}
}
impl DepType {
/// Convert dependency type to human-readable string
pub fn to_display_string(&self) -> String {
match self {
DepType::Query => "query".to_string(),
DepType::Materialize => "materialize".to_string(),
}
}
/// Parse a display string back to enum
pub fn from_display_string(s: &str) -> Option<Self> {
match s {
"query" => Some(DepType::Query),
"materialize" => Some(DepType::Materialize),
_ => None,
}
}
}
/// Helper functions for creating protobuf list responses with dual status fields
pub mod list_response_helpers {
use super::*;
/// Create a PartitionSummary from repository data
pub fn create_partition_summary(
partition_ref: String,
status: PartitionStatus,
last_updated: i64,
builds_count: usize,
invalidation_count: usize,
last_successful_build: Option<String>,
) -> PartitionSummary {
PartitionSummary {
partition_ref,
status_code: status as i32,
status_name: status.to_display_string(),
last_updated,
builds_count: builds_count as u32,
invalidation_count: invalidation_count as u32,
last_successful_build,
}
}
/// Create a JobSummary from repository data
pub fn create_job_summary(
job_label: String,
total_runs: usize,
successful_runs: usize,
failed_runs: usize,
cancelled_runs: usize,
average_partitions_per_run: f64,
last_run_timestamp: i64,
last_run_status: JobStatus,
recent_builds: Vec<String>,
) -> JobSummary {
JobSummary {
job_label,
total_runs: total_runs as u32,
successful_runs: successful_runs as u32,
failed_runs: failed_runs as u32,
cancelled_runs: cancelled_runs as u32,
average_partitions_per_run,
last_run_timestamp,
last_run_status_code: last_run_status as i32,
last_run_status_name: last_run_status.to_display_string(),
recent_builds,
}
}
/// Create a TaskSummary from repository data
pub fn create_task_summary(
job_run_id: String,
job_label: String,
build_request_id: String,
status: JobStatus,
target_partitions: Vec<PartitionRef>,
scheduled_at: i64,
started_at: Option<i64>,
completed_at: Option<i64>,
duration_ms: Option<i64>,
cancelled: bool,
message: String,
) -> TaskSummary {
TaskSummary {
job_run_id,
job_label,
build_request_id,
status_code: status as i32,
status_name: status.to_display_string(),
target_partitions,
scheduled_at,
started_at,
completed_at,
duration_ms,
cancelled,
message,
}
}
/// Create a BuildSummary from repository data
pub fn create_build_summary(
build_request_id: String,
status: BuildRequestStatus,
requested_partitions: Vec<PartitionRef>,
total_jobs: usize,
completed_jobs: usize,
failed_jobs: usize,
cancelled_jobs: usize,
requested_at: i64,
started_at: Option<i64>,
completed_at: Option<i64>,
duration_ms: Option<i64>,
cancelled: bool,
) -> BuildSummary {
BuildSummary {
build_request_id,
status_code: status as i32,
status_name: status.to_display_string(),
requested_partitions,
total_jobs: total_jobs as u32,
completed_jobs: completed_jobs as u32,
failed_jobs: failed_jobs as u32,
cancelled_jobs: cancelled_jobs as u32,
requested_at,
started_at,
completed_at,
duration_ms,
cancelled,
}
}
/// Create a DataDep with dual fields from repository data
pub fn create_data_dep(
dep_type: DepType,
partition_ref: PartitionRef,
) -> DataDep {
DataDep {
dep_type_code: dep_type as i32,
dep_type_name: dep_type.to_display_string(),
partition_ref: Some(partition_ref),
}
}
}
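Because every helper stamps both fields from a single enum value, `status_code` and `status_name` cannot drift apart. A hedged usage sketch of the partition helper:

```rust
use databuild::status_utils::list_response_helpers::create_partition_summary;
use databuild::PartitionStatus;

fn example_summary() {
    let summary = create_partition_summary(
        "data/users".to_string(),
        PartitionStatus::PartitionAvailable,
        1_700_000_000,                // last_updated
        3,                            // builds_count
        1,                            // invalidation_count
        Some("build-42".to_string()), // last_successful_build
    );
    assert_eq!(summary.status_code, PartitionStatus::PartitionAvailable as i32);
    assert_eq!(summary.status_name, "available");
}
```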
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_partition_status_conversions() {
let status = PartitionStatus::PartitionAvailable;
assert_eq!(status.to_display_string(), "available");
assert_eq!(PartitionStatus::from_display_string("available"), Some(status));
}
#[test]
fn test_job_status_conversions() {
let status = JobStatus::JobCompleted;
assert_eq!(status.to_display_string(), "completed");
assert_eq!(JobStatus::from_display_string("completed"), Some(status));
}
#[test]
fn test_build_request_status_conversions() {
let status = BuildRequestStatus::BuildRequestCompleted;
assert_eq!(status.to_display_string(), "completed");
assert_eq!(BuildRequestStatus::from_display_string("completed"), Some(status));
}
#[test]
fn test_dep_type_conversions() {
let dep_type = DepType::Materialize;
assert_eq!(dep_type.to_display_string(), "materialize");
assert_eq!(DepType::from_display_string("materialize"), Some(dep_type));
let dep_type = DepType::Query;
assert_eq!(dep_type.to_display_string(), "query");
assert_eq!(DepType::from_display_string("query"), Some(dep_type));
}
#[test]
fn test_invalid_display_string() {
assert_eq!(PartitionStatus::from_display_string("invalid"), None);
assert_eq!(JobStatus::from_display_string("invalid"), None);
assert_eq!(BuildRequestStatus::from_display_string("invalid"), None);
assert_eq!(DepType::from_display_string("invalid"), None);
}
}

View file

@ -11,6 +11,7 @@ rust_test(
edition = "2021",
deps = [
"@crates//:prost",
"@crates//:schemars",
"@crates//:serde",
"@crates//:serde_json",
],
@ -45,6 +46,7 @@ rust_test(
edition = "2021",
deps = [
"@crates//:prost",
"@crates//:schemars",
"@crates//:serde",
"@crates//:serde_json",
],

View file

@ -15,6 +15,14 @@
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.14.0/MODULE.bazel": "2b31ffcc9bdc8295b2167e07a757dbbc9ac8906e7028e5170a3708cecaac119f",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.14.0/source.json": "0cf1826853b0bef8b5cd19c0610d717500f5521aa2b38b72b2ec302ac5e7526c",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.7.2/MODULE.bazel": "780d1a6522b28f5edb7ea09630748720721dfe27690d65a2d33aa7509de77e07",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.7.7/MODULE.bazel": "491f8681205e31bb57892d67442ce448cda4f472a8e6b3dc062865e29a64f89c",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.9.3/MODULE.bazel": "66baf724dbae7aff4787bf2245cc188d50cb08e07789769730151c0943587c14",
"https://bcr.bazel.build/modules/aspect_rules_esbuild/0.21.0/MODULE.bazel": "77dc393c43ad79398b05865444c5200c6f1aae6765615544f2c7730b5858d533",
"https://bcr.bazel.build/modules/aspect_rules_esbuild/0.21.0/source.json": "062b1d3dba8adcfeb28fe60c185647f5a53ec0487ffe93cf0ae91566596e4b49",
"https://bcr.bazel.build/modules/aspect_rules_js/2.0.0/MODULE.bazel": "b45b507574aa60a92796e3e13c195cd5744b3b8aff516a9c0cb5ae6a048161c5",
"https://bcr.bazel.build/modules/aspect_rules_js/2.0.0/source.json": "a6b09288ab135225982a58ac0b5e2c032c331d88f80553d86596000e894e86b3",
"https://bcr.bazel.build/modules/aspect_rules_ts/3.6.3/MODULE.bazel": "d09db394970f076176ce7bab5b5fa7f0d560fd4f30b8432ea5e2c2570505b130",
"https://bcr.bazel.build/modules/aspect_rules_ts/3.6.3/source.json": "641e58c62e5090d52a0d3538451893acdb2d79a36e8b3d1d30a013c580bc2058",
"https://bcr.bazel.build/modules/bazel_features/1.1.1/MODULE.bazel": "27b8c79ef57efe08efccbd9dd6ef70d61b4798320b8d3c134fd571f78963dbcd",
"https://bcr.bazel.build/modules/bazel_features/1.10.0/MODULE.bazel": "f75e8807570484a99be90abcd52b5e1f390362c258bcb73106f4544957a48101",
"https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8",
@ -39,7 +47,8 @@
"https://bcr.bazel.build/modules/bazel_skylib/1.6.1/MODULE.bazel": "8fdee2dbaace6c252131c00e1de4b165dc65af02ea278476187765e1a617b917",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.0/MODULE.bazel": "0db596f4563de7938de764cc8deeabec291f55e8ec15299718b93c4423e9796d",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.1/source.json": "f121b43eeefc7c29efbd51b83d08631e2347297c95aac9764a701f2a6a2bb953",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.1/MODULE.bazel": "88ade7293becda963e0e3ea33e7d54d3425127e0a326e0d17da085a5f1f03ff6",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.1/source.json": "7ebaefba0b03efe59cac88ed5bbc67bcf59a3eff33af937345ede2a38b2d368a",
"https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84",
"https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8",
"https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb",
@ -114,6 +123,8 @@
"https://bcr.bazel.build/modules/rules_license/0.0.7/MODULE.bazel": "088fbeb0b6a419005b89cf93fe62d9517c0a2b8bb56af3244af65ecfe37e7d5d",
"https://bcr.bazel.build/modules/rules_license/1.0.0/MODULE.bazel": "a7fda60eefdf3d8c827262ba499957e4df06f659330bbe6cdbdb975b768bb65c",
"https://bcr.bazel.build/modules/rules_license/1.0.0/source.json": "a52c89e54cc311196e478f8382df91c15f7a2bfdf4c6cd0e2675cc2ff0b56efb",
"https://bcr.bazel.build/modules/rules_nodejs/6.2.0/MODULE.bazel": "ec27907f55eb34705adb4e8257952162a2d4c3ed0f0b3b4c3c1aad1fac7be35e",
"https://bcr.bazel.build/modules/rules_nodejs/6.2.0/source.json": "a77c307175a82982f0847fd6a8660db5b21440d8a9d073642cb4afa7a18612ff",
"https://bcr.bazel.build/modules/rules_oci/2.2.6/MODULE.bazel": "2ba6ddd679269e00aeffe9ca04faa2d0ca4129650982c9246d0d459fe2da47d9",
"https://bcr.bazel.build/modules/rules_oci/2.2.6/source.json": "94e7decb8f95d9465b0bbea71c65064cd16083be1350c7468f131818641dc4a5",
"https://bcr.bazel.build/modules/rules_pkg/0.7.0/MODULE.bazel": "df99f03fc7934a4737122518bb87e667e62d780b610910f0447665a7e2be62dc",
@ -121,6 +132,7 @@
"https://bcr.bazel.build/modules/rules_pkg/1.0.1/source.json": "bd82e5d7b9ce2d31e380dd9f50c111d678c3bdaca190cb76b0e1c71b05e1ba8a",
"https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06",
"https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7",
"https://bcr.bazel.build/modules/rules_proto/6.0.0/MODULE.bazel": "b531d7f09f58dce456cd61b4579ce8c86b38544da75184eadaf0a7cb7966453f",
"https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73",
"https://bcr.bazel.build/modules/rules_proto/7.0.2/MODULE.bazel": "bf81793bd6d2ad89a37a40693e56c61b0ee30f7a7fdbaf3eabbf5f39de47dea2",
"https://bcr.bazel.build/modules/rules_proto/7.0.2/source.json": "1e5e7260ae32ef4f2b52fd1d0de8d03b606a44c91b694d2f1afb1d3b28a48ce1",

View file

@ -55,8 +55,8 @@ def handle_config(args):
configs.append({
"outputs": [{"str": partition_ref}],
"inputs": [
{"dep_type": 1, "partition_ref": {"str": reviews_ref}},
{"dep_type": 1, "partition_ref": {"str": podcasts_ref}}
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": reviews_ref}},
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": podcasts_ref}}
],
"args": [category, date_str],
"env": {
@ -135,8 +135,8 @@ def handle_exec(args):
"config": {
"outputs": [{"str": partition_ref}],
"inputs": [
{"dep_type": 1, "partition_ref": {"str": f"reviews/date={target_date}"}},
{"dep_type": 1, "partition_ref": {"str": "podcasts/all"}}
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": f"reviews/date={target_date}"}},
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": "podcasts/all"}}
],
"args": [target_category, target_date],
"env": {"PARTITION_REF": partition_ref, "TARGET_CATEGORY": target_category, "TARGET_DATE": target_date}

View file

@ -55,8 +55,8 @@ def handle_config(args):
configs.append({
"outputs": [{"str": partition_ref}],
"inputs": [
{"dep_type": 1, "partition_ref": {"str": phrase_stats_ref}},
{"dep_type": 1, "partition_ref": {"str": categorized_reviews_ref}}
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": phrase_stats_ref}},
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": categorized_reviews_ref}}
],
"args": [category, date_str],
"env": {
@ -125,8 +125,8 @@ def handle_exec(args):
"config": {
"outputs": [{"str": partition_ref}],
"inputs": [
{"dep_type": 1, "partition_ref": {"str": f"phrase_stats/category={target_category}/date={target_date}"}},
{"dep_type": 1, "partition_ref": {"str": f"categorized_reviews/category={target_category}/date={target_date}"}}
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": f"phrase_stats/category={target_category}/date={target_date}"}},
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": f"categorized_reviews/category={target_category}/date={target_date}"}}
],
"args": [target_category, target_date],
"env": {"PARTITION_REF": partition_ref, "TARGET_CATEGORY": target_category, "TARGET_DATE": target_date}

View file

@ -56,7 +56,7 @@ def handle_config(args):
configs.append({
"outputs": [{"str": partition_ref}],
"inputs": [
{"dep_type": 1, "partition_ref": {"str": categorized_reviews_ref}}
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": categorized_reviews_ref}}
],
"args": [category, date_str],
"env": {
@ -113,7 +113,7 @@ def handle_exec(args):
"config": {
"outputs": [{"str": partition_ref}],
"inputs": [
{"dep_type": 1, "partition_ref": {"str": f"categorized_reviews/category={target_category}/date={target_date}"}}
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": f"categorized_reviews/category={target_category}/date={target_date}"}}
],
"args": [target_category, target_date],
"env": {"PARTITION_REF": partition_ref, "TARGET_CATEGORY": target_category, "TARGET_DATE": target_date}

View file

@ -55,8 +55,8 @@ def handle_config(args):
configs.append({
"outputs": [{"str": partition_ref}],
"inputs": [
{"dep_type": 1, "partition_ref": {"str": phrase_models_ref}},
{"dep_type": 1, "partition_ref": {"str": categorized_reviews_ref}}
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": phrase_models_ref}},
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": categorized_reviews_ref}}
],
"args": [category, date_str],
"env": {
@ -125,8 +125,8 @@ def handle_exec(args):
"config": {
"outputs": [{"str": partition_ref}],
"inputs": [
{"dep_type": 1, "partition_ref": {"str": f"phrase_models/category={target_category}/date={target_date}"}},
{"dep_type": 1, "partition_ref": {"str": f"categorized_reviews/category={target_category}/date={target_date}"}}
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": f"phrase_models/category={target_category}/date={target_date}"}},
{"dep_type_code": 1, "dep_type_name": "materialize", "partition_ref": {"str": f"categorized_reviews/category={target_category}/date={target_date}"}}
],
"args": [target_category, target_date],
"env": {"PARTITION_REF": partition_ref, "TARGET_CATEGORY": target_category, "TARGET_DATE": target_date}

View file

@ -8,18 +8,19 @@ These core capabilities should be factored into explicit read vs write capabilit
# Plan
We should take a phased approach to executing this plan. After implementing the core functionality and unit tests for each phase, we should pause and write down any potential refactoring that would benefit the system before moving on to the next phase.
## Phase 1 - Implement `MockBuildEventLog`
## Phase 1 - Implement Common Event Write Component
Goal: create a single interface for writing events to the build event log.
- Should include all existing "write" functionality, like requesting a new build, etc.
- Migrate CLI to use new write component
- Migrate service to use new write component
## Phase 2 - Implement `MockBuildEventLog`
Goal: create a common testing tool that allows easy specification of testing conditions (e.g. BEL contents/events) to test system/graph behavior.
- Should use an in-memory sqlite database to ensure tests can be run in parallel
- Should make it very easy to specify test data (e.g. event constructors with random defaults that can be overridden)
- Should include a trivial unit test that writes a valid event and verifies it's there via real code paths.
## Phase 2 - Implement Common Event Write Component
Goal: create a single interface for writing events to the build event log.
- Should include all existing "write" functionality, like requesting a new build, etc.
- Migrate CLI to use new write component
- TODO - what's the exec model? Does it write the event, then start execution based on the ID? Does it start a service? And what does tailing builds look like?
- Migrate service to use new write component
- Design notes: shouldn't rewrite event write or repository read code; should focus on making test cases easy to describe, so that assertions can be made on repository-based queries.
- Event write and repositories should be pluggable, allowing for the MockBuildEventLog to be provided per test in a way consistent with how BEL backing databases are specified normally.
## Phase 3 - Implement `partitions` Repository
- Create a new build event log event for partition invalidation (with reason field)

View file

@ -1,4 +1,6 @@
- Remove manual reference of enum values, e.g. [here](../databuild/repositories/builds/mod.rs:85)
- Type-safe mithril [claude link](https://claude.ai/share/f33f8605-472a-4db4-9211-5a1e52087316)
- Status indicator for page selection
- On build request detail page, show aggregated job results
- Use path-based navigation instead of hashbang?
@ -8,5 +10,6 @@
- Plan for external worker dispatch (e.g. k8s pod per build, or launch in container service)
- k8s can use [jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/)
- Should we have meaningful exit codes? E.g. "retry-able error", etc?
- Fully joinable build/job IDs - ensure all execution logs / metrics are joinable to build request ID?
- Triggers?
- How do we handle task logging?

View file

@ -0,0 +1,510 @@
# Web App Compile-Time Correctness Plan
## Problem Statement
The DataBuild web application currently has a type-safety blind spot: backend protobuf changes can cause runtime failures in the frontend without any compile-time warnings. While we achieved end-to-end type generation (Proto → Rust → OpenAPI → TypeScript), inconsistent data transformation patterns and a loose TypeScript configuration allow type mismatches to slip through.
**Specific observed failures:**
- `status.toLowerCase()` crashes when status objects are passed instead of strings
- `status?.status` accesses non-existent properties on protobuf response objects
- Partitions page fails silently due to unhandled nullability
- Inconsistent data shapes flowing through components
## Root Cause Analysis
1. **Mixed Data Contracts**: Some components expect `{ status: string }` while APIs return `{ status_code: number, status_name: string }` (sketched below)
2. **Inconsistent Transformations**: Data shape changes happen ad hoc throughout the component tree
3. **Protobuf Nullability**: Generated types are honest about optional fields, but TypeScript config allows unsafe access
4. **Service Boundary Leakage**: Backend implementation details leak into frontend components
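To make root causes 1 and 3 concrete, here is a minimal sketch (types simplified and hypothetical, not the actual generated names) of how a component contract can drift from the generated API shape:
```typescript
// Hypothetical, simplified shapes for illustration (not the actual generated names).
interface GeneratedBuildSummary {
  status_code?: number; // protobuf enums arrive as code + name pairs
  status_name?: string; // and both fields are optional, per protobuf semantics
}

interface ComponentExpectation {
  status: string; // the shape some components assume they receive
}

const api: GeneratedBuildSummary = { status_code: 1, status_name: "COMPLETED" };

// Under loose settings, `(api as any).status.toLowerCase()` compiles and then
// crashes at runtime. Under strict settings the mismatch surfaces immediately:
// `status` does not exist on the API type, and the optional `status_name`
// must be handled before use.
const label: string = api.status_name ?? "UNKNOWN";
```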
## Solution: Three-Pronged Approach
### Option 2: Consistent Data Transformation (Primary)
- Define canonical dashboard types separate from generated API types
- Transform data at service boundaries, never in components
- Single source of truth for data shapes within the frontend
### Option 4: Generated Type Enforcement (Supporting)
- Use generated protobuf types in service layer for accurate contracts
- Leverage protobuf's honest nullability information
- Maintain type safety chain from backend to service boundary
### Option 3: Stricter TypeScript Configuration (Foundation)
- Enable strict null checks to catch undefined access patterns
- Prevent implicit any types that mask runtime errors
- Force explicit handling of protobuf's optional fields
## Implementation Plan
### Phase 1: TypeScript Configuration Hardening
**Goal**: Enable strict type checking to surface existing issues
**Tasks**:
1. Update `tsconfig.json` with strict configuration:
```json
{
"compilerOptions": {
"strict": true,
"noImplicitAny": true,
"strictNullChecks": true,
"noImplicitReturns": true,
"noUncheckedIndexedAccess": true,
"exactOptionalPropertyTypes": true
}
}
```
2. Run TypeScript compilation to identify all type errors
3. Create tracking issue for each compilation error
**Success Criteria**: TypeScript build passes with strict configuration enabled
**Estimated Time**: 1-2 days
### Phase 1.5: Verification of Strict Configuration
**Goal**: Prove strict TypeScript catches the specific issues we identified
**Tasks**:
1. Create test cases that reproduce original failures:
```typescript
// Test file: dashboard/verification-tests.ts
const mockResponse = { status_code: 1, status_name: "COMPLETED" };
// These should now cause TypeScript compilation errors:
const test1 = mockResponse.status?.toLowerCase(); // undefined property access
const test2 = mockResponse.status?.status; // nested undefined access
```
2. Run TypeScript compilation and verify these cause errors:
- Document which strict rules catch which specific issues
- Confirm `strictNullChecks` prevents undefined property access
- Verify `noImplicitAny` surfaces type gaps
3. Test protobuf nullable field handling:
```typescript
interface TestPartitionSummary {
  last_updated?: number; // optional field from protobuf
}
declare const partition: TestPartitionSummary; // hypothetical value for illustration
// This should require explicit null checking:
const timestamp = partition.last_updated.toString(); // Should error under strictNullChecks
```
**Success Criteria**:
- All identified runtime failures now cause compile-time errors
- Clear mapping between strict TypeScript rules and caught issues
- Zero false positives in existing working code
**Estimated Time**: 0.5 days
### Phase 2: Define Dashboard Data Contracts
**Goal**: Create canonical frontend types independent of backend schema
**Tasks**:
1. Define dashboard types in `dashboard/types.ts`:
```typescript
// Dashboard-optimized types
interface DashboardBuild {
build_request_id: string;
status: string; // Always human-readable name
requested_partitions: string[]; // Always string values
total_jobs: number;
completed_jobs: number;
failed_jobs: number;
cancelled_jobs: number;
requested_at: number;
started_at: number | null;
completed_at: number | null;
duration_ms: number | null;
cancelled: boolean;
}
interface DashboardPartition {
partition_ref: string; // Always string value
status: string; // Always human-readable name
last_updated: number | null;
build_requests: string[];
}
interface DashboardJob {
job_label: string;
total_runs: number;
successful_runs: number;
failed_runs: number;
cancelled_runs: number;
last_run_timestamp: number;
last_run_status: string; // Always human-readable name
average_partitions_per_run: number;
recent_builds: string[];
}
```
2. Update component attribute interfaces to use dashboard types (see the sketch after this list)
3. Document the rationale for each transformation decision
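As a sketch of task 2, assuming Mithril's bundled TypeScript typings and a hypothetical `./types` module exporting `DashboardBuild`, a typed component attribute interface might look like:
```typescript
import m from "mithril";
import { DashboardBuild } from "./types"; // hypothetical module path for the Phase 2 types

interface BuildBadgeAttrs {
  build: DashboardBuild; // components accept dashboard types only
}

// With typed attrs, accessing a removed or renamed field fails compilation.
const BuildBadge: m.Component<BuildBadgeAttrs> = {
  view(vnode) {
    const { build } = vnode.attrs;
    // `status` is always a plain string here, so toLowerCase() is safe.
    return m("span.badge", { class: `status-${build.status.toLowerCase()}` }, build.status);
  },
};

export default BuildBadge;
```
The attrs interface is the enforcement point: a backend change that alters `DashboardBuild` propagates here as a compile error rather than a rendering crash.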
**Success Criteria**: All dashboard types are self-contained and UI-optimized
**Estimated Time**: 2-3 days
### Phase 3: Service Layer Transformation
**Goal**: Create consistent transformation boundaries between API and dashboard
**Tasks**:
1. Implement transformation functions in `services.ts`:
```typescript
// Transform API responses to dashboard types
function transformBuildDetail(apiResponse: BuildDetailResponse): DashboardBuild {
return {
build_request_id: apiResponse.build_request_id,
status: apiResponse.status_name,
requested_partitions: apiResponse.requested_partitions.map(p => p.str),
total_jobs: apiResponse.total_jobs,
completed_jobs: apiResponse.completed_jobs,
failed_jobs: apiResponse.failed_jobs,
cancelled_jobs: apiResponse.cancelled_jobs,
requested_at: apiResponse.requested_at,
started_at: apiResponse.started_at ?? null,
completed_at: apiResponse.completed_at ?? null,
duration_ms: apiResponse.duration_ms ?? null,
cancelled: apiResponse.cancelled,
};
}
function transformPartitionSummary(apiResponse: PartitionSummary): DashboardPartition {
return {
partition_ref: apiResponse.partition_ref.str,
status: apiResponse.status_name,
last_updated: apiResponse.last_updated ?? null,
build_requests: apiResponse.build_requests,
};
}
```
2. Update all service methods to use transformation functions
3. Add type guards for runtime validation:
```typescript
function isValidBuildResponse(data: unknown): data is BuildDetailResponse {
return typeof data === 'object' &&
data !== null &&
'build_request_id' in data &&
'status_name' in data;
}
```
4. Handle API errors with proper typing
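One possible shape for the error-handling task, as a hedged sketch: `ApiError` and `fetchBuildDetail` are illustrative names, the `/api/v1/builds/` path matches the existing service route, and the guard and transform functions come from tasks 1 and 3 above:
```typescript
// Illustrative names; assumes the guard and transform functions defined above.
class ApiError extends Error {
  constructor(readonly httpStatus: number, message: string) {
    super(message);
  }
}

async function fetchBuildDetail(buildId: string): Promise<DashboardBuild> {
  const res = await fetch(`/api/v1/builds/${buildId}`);
  if (!res.ok) {
    throw new ApiError(res.status, `Failed to fetch build: ${res.statusText}`);
  }
  const data: unknown = await res.json();
  // The type guard keeps malformed payloads out of the dashboard layer.
  if (!isValidBuildResponse(data)) {
    throw new ApiError(res.status, "Unexpected build response shape");
  }
  return transformBuildDetail(data);
}
```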
**Success Criteria**: All API data flows through consistent transformation layer
**Estimated Time**: 3-4 days
### Phase 3.5: Transformation Validation
**Goal**: Prove transformation functions prevent observed failures and handle edge cases
**Tasks**:
1. Create comprehensive unit tests for transformation functions:
```typescript
// Test file: dashboard/transformation-tests.ts
describe('transformBuildDetail', () => {
it('handles status objects correctly', () => {
const apiResponse = { status_code: 1, status_name: 'COMPLETED' } as unknown as BuildDetailResponse; // partial mock
const result = transformBuildDetail(apiResponse);
expect(typeof result.status).toBe('string');
expect(result.status).toBe('COMPLETED');
});
it('handles null optional fields', () => {
const apiResponse = { started_at: null, completed_at: undefined } as unknown as BuildDetailResponse; // partial mock
const result = transformBuildDetail(apiResponse);
expect(result.started_at).toBe(null);
expect(result.completed_at).toBe(null);
});
});
```
2. Test edge cases and malformed responses:
- Missing required fields
- Null values where not expected
- Wrong data types in API responses
- Verify type guards catch invalid responses
3. Validate PartitionRef transformations:
```typescript
it('converts PartitionRef objects to strings', () => {
const apiResponse = { partition_ref: { str: 'test-partition' } } as unknown as PartitionSummary; // partial mock
const result = transformPartitionSummary(apiResponse);
expect(typeof result.partition_ref).toBe('string');
expect(result.partition_ref).toBe('test-partition');
});
```
4. Test transformation against real protobuf response shapes:
- Use actual OpenAPI generated types in tests
- Verify transformations work with current API schema
- Document transformation rationale for each field
**Success Criteria**:
- All transformation functions have >90% test coverage
- Edge cases and null handling verified
- Real API response shapes handled correctly
- Type guards prevent invalid data from reaching components
**Estimated Time**: 1 day
### Phase 4: Component Migration
**Goal**: Update all components to use dashboard types exclusively
**Tasks**:
1. Update component implementations to use dashboard types:
- Remove direct `.status_code`/`.status_name` access
- Use transformed string status values
- Handle null values explicitly where needed
2. Fix specific identified issues:
- Line 472: `status?.status` → use `status` directly
- Badge components: Ensure they receive strings
- Partition list: Use consistent partition type
3. Update component attribute interfaces to match dashboard types
4. Add runtime assertions where needed:
```typescript
if (!status) {
console.warn('Missing status in component');
return m('span', 'Unknown Status');
}
```
**Success Criteria**: All components compile and work with dashboard types
**Estimated Time**: 2-3 days
### Phase 4.5: Continuous Component Verification
**Goal**: Verify components work correctly with dashboard types throughout migration
**Tasks**:
1. After each component migration, run verification tests:
```typescript
// Component-specific tests
describe('BuildDetailComponent', () => {
it('renders status as string correctly', () => {
const dashboardBuild: DashboardBuild = {
status: 'COMPLETED', // Transformed string, not object
// ... other fields
};
const component = m(BuildDetailComponent, { build: dashboardBuild });
// Verify no runtime errors with .toLowerCase()
});
});
```
2. Test component attribute interfaces match usage:
- Verify TypeScript compilation passes for each component
- Check that vnode.attrs typing prevents invalid property access
- Test null handling in component rendering
3. Integration tests with real transformed data:
- Use actual service layer transformation outputs
- Verify components render correctly with dashboard types
- Test error states and missing data scenarios
**Success Criteria**:
- Each migrated component passes TypeScript compilation
- No runtime errors when using transformed dashboard types
- Components gracefully handle null/undefined dashboard fields
**Estimated Time**: 0.5 days (distributed across Phase 4)
### Phase 5: Schema Change Simulation & Integration Testing
**Goal**: Verify end-to-end compile-time correctness with simulated backend changes
**Tasks**:
1. **Automated Schema Change Testing**:
```bash
# Create test script: scripts/test-schema-changes.sh
# Test 1: Add new required field to protobuf
# - Modify databuild.proto temporarily
# - Regenerate Rust types and OpenAPI schema
# - Verify TypeScript compilation fails predictably
# - Document exact error messages
# Test 2: Remove existing field
# - Remove field from protobuf definition
# - Verify transformation functions catch missing fields
# - Confirm components fail compilation when accessing removed field
# Test 3: Change field type (string → object)
# - Modify status field structure in protobuf
# - Verify transformation layer prevents type mismatches
# - Confirm this catches issues like original status.toLowerCase() failure
```
2. **Full Build Cycle Verification**:
- Proto change → `bazel build //databuild:openapi_spec_generator`
- OpenAPI regeneration → `bazel build //databuild/client:typescript_client`
- TypeScript compilation → `bazel build //databuild/dashboard:*`
- Document each failure point and error messages
3. **End-to-End Type Safety Validation**:
```typescript
// Create comprehensive integration tests
describe('End-to-End Type Safety', () => {
it('prevents runtime failures from schema changes', async () => {
// Test actual API calls with transformed responses
const service = DashboardService.getInstance();
const activity = await service.getRecentActivity();
// Verify transformed types prevent original failures
activity.recentBuilds.forEach(build => {
expect(typeof build.status).toBe('string');
expect(() => build.status.toLowerCase()).not.toThrow();
});
});
});
```
4. **Regression Testing for Original Failures** (sketched after this list):
- Test status.toLowerCase() with transformed data
- Test status?.status access patterns
- Test partition.str access with transformed partition refs
- Verify null handling in timestamp fields
5. **Real Data Flow Testing**:
- New build creation → status updates → completion
- Partition status changes using dashboard types
- Job execution monitoring with transformed data
- Error states and edge cases
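A sketch of what the regression suite for task 4 could look like, assuming hypothetical `mockApiBuild`/`mockApiPartition` fixtures that produce current API response shapes:
```typescript
// Hypothetical fixtures: mockApiBuild/mockApiPartition return current API shapes.
describe('regression: original runtime failures', () => {
  it('status survives toLowerCase() after transformation', () => {
    const build = transformBuildDetail(mockApiBuild({ status_name: 'COMPLETED' }));
    expect(() => build.status.toLowerCase()).not.toThrow();
  });

  it('partition refs are plain strings, not { str } objects', () => {
    const partition = transformPartitionSummary(mockApiPartition());
    expect(typeof partition.partition_ref).toBe('string');
  });

  it('missing timestamps become explicit nulls, never undefined', () => {
    const build = transformBuildDetail(mockApiBuild({ started_at: undefined }));
    expect(build.started_at).toBe(null);
  });
});
```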
**Success Criteria**:
- Schema changes cause predictable TypeScript compilation failures
- Transformation layer prevents all identified runtime failures
- Full build cycle catches type mismatches at each stage
- Zero runtime type errors with dashboard types
- Original failure scenarios now impossible with strict types
**Estimated Time**: 2-3 days
### Phase 6: Documentation & Monitoring
**Goal**: Establish practices to maintain type safety over time
**Tasks**:
1. Document transformation patterns:
- When to create new dashboard types
- How to handle protobuf schema changes
- Service layer responsibilities
2. Add runtime monitoring (see the sketch after this list):
- Log transformation failures
- Track API response shape mismatches
- Monitor for unexpected null values
3. Create development guidelines:
- Never use generated types directly in components
- Always transform at service boundaries
- Handle nullability explicitly
4. Set up CI checks:
- Strict TypeScript compilation in build pipeline
- Automated schema change detection tests
- Integration test suite for type safety validation
- Pre-commit hooks for TypeScript compilation
5. **Create Ongoing Verification Tools**:
```bash
# CI script: scripts/verify-type-safety.sh
# - Run schema change simulation tests
# - Verify transformation tests pass
# - Check strict TypeScript compilation
# - Validate component integration tests
```
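For the runtime-monitoring task, a minimal wrapper along these lines (names illustrative) would log transformation failures instead of crashing the page:
```typescript
// Illustrative helper: logs transformation failures instead of throwing in the UI.
function safeTransform<A, D>(label: string, input: A, transform: (input: A) => D): D | null {
  try {
    return transform(input);
  } catch (err) {
    // Surfaces API response shape mismatches for monitoring/alerting.
    console.error(`[transform:${label}] unexpected response shape`, { input, err });
    return null;
  }
}

// Usage: const build = safeTransform('build-detail', raw, transformBuildDetail);
```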
**Success Criteria**:
- Team has clear practices for maintaining type safety
- CI pipeline catches type safety regressions automatically
- Schema change testing is automated and repeatable
- Documentation provides concrete examples and rationale
**Estimated Time**: 2 days
## Risk Mitigation
### High-Impact Risks
1. **Breaking Change Volume**: Strict TypeScript may reveal many existing issues
- *Mitigation*: Implement incrementally, fix issues in phases
- *Rollback*: Keep loose config as backup during transition
2. **Performance Impact**: Additional transformation layer overhead
- *Mitigation*: Profile transformation functions, optimize hot paths
- *Monitoring*: Track bundle size and runtime performance
3. **Developer Learning Curve**: Team needs to adapt to strict null checks
- *Mitigation*: Provide training on handling optional types
- *Support*: Create examples and best practices documentation
### Medium-Impact Risks
1. **API Response Changes**: Backend might return unexpected data shapes
- *Mitigation*: Add runtime validation in service layer
- *Detection*: Monitor for transformation failures
2. **Third-party Type Conflicts**: Generated types might conflict with other libraries
- *Mitigation*: Use type aliases and careful imports
- *Testing*: Verify integration with existing dependencies
## Success Metrics
### Compile-Time Safety
- [ ] Zero `any` types in dashboard code
- [ ] All protobuf optional fields handled explicitly
- [ ] TypeScript strict mode enabled and passing
- [ ] Component attribute interfaces match usage
### Runtime Reliability
- [ ] Zero "undefined is not a function" errors
- [ ] Zero "cannot read property of undefined" errors
- [ ] All API error states handled gracefully
- [ ] Consistent data shapes across all components
### Development Experience
- [ ] Backend schema changes cause predictable frontend compilation results
- [ ] Clear error messages when types don't match
- [ ] Consistent patterns for handling new data types
- [ ] Fast iteration cycle maintained
## Future Considerations
### Schema Evolution Strategy
- Plan for handling breaking vs non-breaking backend changes
- Consider versioning approach for dashboard types
- Establish deprecation process for old data shapes
### Tooling Enhancements
- Consider code generation for transformation functions
- Explore runtime schema validation libraries
- Investigate GraphQL for stronger API contracts
### Performance Optimization
- Profile transformation layer performance
- Consider caching strategies for transformed data
- Optimize bundle size impact of strict typing
---
## Implementation Notes
This plan prioritizes compile-time correctness while maintaining development velocity. The phased approach allows for incremental progress and risk mitigation, while the three-pronged strategy (Options 2+3+4) provides comprehensive type safety from protobuf definitions through to component rendering.
The key insight is that true compile-time correctness requires both accurate type definitions AND consistent data transformation patterns enforced by strict TypeScript configuration.

View file

@ -6,6 +6,12 @@
set -euo pipefail
# First make sure the build succeeds
bazel build //...
# Then make sure the core tests succeed
bazel test //...
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TESTS_DIR="$SCRIPT_DIR/tests/end_to_end"

View file

@ -80,18 +80,18 @@ echo "[INFO] Created build request: $BUILD_ID"
# Wait for build completion
for i in {1..60}; do
STATUS_RESPONSE=$(curl -s "http://127.0.0.1:$SERVICE_PORT/api/v1/builds/$BUILD_ID")
STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status' 2>/dev/null || echo "UNKNOWN")
STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status_name' 2>/dev/null || echo "UNKNOWN")
case "$STATUS" in
"completed"|"COMPLETED")
"completed"|"COMPLETED"|"BuildRequestCompleted")
echo "[INFO] Service build completed"
break
;;
"failed"|"FAILED")
"failed"|"FAILED"|"BuildRequestFailed")
echo "[ERROR] Service build failed: $STATUS_RESPONSE"
exit 1
;;
"running"|"RUNNING"|"pending"|"PENDING"|"planning"|"PLANNING"|"executing"|"EXECUTING")
"running"|"RUNNING"|"pending"|"PENDING"|"planning"|"PLANNING"|"executing"|"EXECUTING"|"BuildRequestPlanning"|"BuildRequestExecuting"|"BuildRequestReceived")
echo "[INFO] Build status: $STATUS"
sleep 2
;;

View file

@ -89,18 +89,18 @@ echo "[INFO] Created build request: $BUILD_ID"
# Wait for build completion
for i in {1..30}; do
STATUS_RESPONSE=$(curl -s "http://127.0.0.1:$SERVICE_PORT/api/v1/builds/$BUILD_ID")
STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status' 2>/dev/null || echo "UNKNOWN")
STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status_name' 2>/dev/null || echo "UNKNOWN")
case "$STATUS" in
"completed"|"COMPLETED")
"completed"|"COMPLETED"|"BuildRequestCompleted")
echo "[INFO] Service build completed"
break
;;
"failed"|"FAILED")
"failed"|"FAILED"|"BuildRequestFailed")
echo "[ERROR] Service build failed: $STATUS_RESPONSE"
exit 1
;;
"running"|"RUNNING"|"pending"|"PENDING"|"planning"|"PLANNING"|"executing"|"EXECUTING")
"running"|"RUNNING"|"pending"|"PENDING"|"planning"|"PLANNING"|"executing"|"EXECUTING"|"BuildRequestPlanning"|"BuildRequestExecuting"|"BuildRequestReceived")
echo "[INFO] Build status: $STATUS"
sleep 2
;;