Upgrade CLI
parent d5cdabdc43
commit 956bb463ff
13 changed files with 1707 additions and 116 deletions
@@ -38,11 +38,18 @@ rust_library(
"event_log/postgres.rs",
"event_log/sqlite.rs",
"event_log/stdout.rs",
"event_log/writer.rs",
"event_log/mock.rs",
"lib.rs",
"mermaid_utils.rs",
"orchestration/error.rs",
"orchestration/events.rs",
"orchestration/mod.rs",
"repositories/mod.rs",
"repositories/partitions/mod.rs",
"repositories/jobs/mod.rs",
"repositories/tasks/mod.rs",
"repositories/builds/mod.rs",
"service/handlers.rs",
"service/mod.rs",
":generate_databuild_rust",

@@ -17,6 +17,7 @@ rust_binary(
"//databuild:databuild",
"@crates//:clap",
"@crates//:log",
"@crates//:serde",
"@crates//:serde_json",
"@crates//:simple_logger",
"@crates//:thiserror",

@@ -20,6 +20,12 @@ pub enum CliError {

#[error("Invalid arguments: {0}")]
InvalidArguments(String),

#[error("Database error: {0}")]
Database(String),

#[error("Output formatting error: {0}")]
Output(String),
}

pub type Result<T> = std::result::Result<T, CliError>;
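The new variants slot into the existing thiserror-derived enum and surface through the shared Result alias. A minimal sketch of a call site (the helper name and argument are hypothetical, not part of this commit):

    fn parse_limit(raw: &str) -> Result<usize> {
        // InvalidArguments carries the user-facing description; Database and Output
        // are used the same way at the event-log and serialization boundaries.
        raw.parse::<usize>()
            .map_err(|e| CliError::InvalidArguments(format!("invalid --limit value '{}': {}", raw, e)))
    }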
@@ -1,11 +1,18 @@
use databuild::*;
use databuild::event_log::create_build_event_log;
use databuild::orchestration::{BuildOrchestrator, BuildResult};
use clap::{Arg, Command as ClapCommand};
use databuild::repositories::{
partitions::{PartitionsRepository, PartitionInfo, PartitionStatusEvent},
jobs::{JobsRepository, JobInfo, JobRunDetail},
tasks::{TasksRepository, TaskInfo, TaskEvent},
builds::{BuildsRepository, BuildInfo, BuildEvent as BuildRepositoryEvent}
};
use clap::{Arg, Command as ClapCommand, ArgMatches};
use log::info;
use simple_logger::SimpleLogger;
use std::env;
use std::process::{Command, Stdio};
use std::sync::Arc;
use uuid::Uuid;

mod error;
@@ -121,41 +128,7 @@ async fn run_execution(
Ok(BuildResult::Success { jobs_completed: job_graph.nodes.len() })
}

#[tokio::main]
async fn main() -> Result<()> {
// Initialize logger
SimpleLogger::new()
.with_level(log::LevelFilter::Info)
.init()
.map_err(|e| CliError::Environment(format!("Failed to initialize logger: {}", e)))?;

info!("Starting DataBuild CLI wrapper");

// Parse command line arguments
let matches = ClapCommand::new("databuild")
.version("1.0")
.about("DataBuild unified CLI")
.arg(
Arg::new("partitions")
.help("Partition references to build")
.required(true)
.num_args(1..)
.value_name("PARTITIONS")
)
.arg(
Arg::new("event-log")
.long("event-log")
.help("Event log URI (default: stdout)")
.value_name("URI")
)
.arg(
Arg::new("build-request-id")
.long("build-request-id")
.help("Build request ID (default: generate UUID)")
.value_name("ID")
)
.get_matches();

async fn handle_build_command(matches: &ArgMatches) -> Result<()> {
let partitions: Vec<String> = matches.get_many::<String>("partitions")
.unwrap()
.cloned()
@@ -203,5 +176,826 @@ async fn main() -> Result<()> {
orchestrator.complete_build(result).await?;

info!("DataBuild CLI completed successfully");
Ok(())
}

fn format_timestamp(timestamp_nanos: i64) -> String {
use std::time::{SystemTime, UNIX_EPOCH, Duration};

let timestamp_secs = timestamp_nanos / 1_000_000_000;
let system_time = UNIX_EPOCH + Duration::from_secs(timestamp_secs as u64);

// Measure the age relative to now; measuring from UNIX_EPOCH would always
// yield a multi-decade "ago" value.
match SystemTime::now().duration_since(system_time) {
Ok(duration) => {
let secs = duration.as_secs();
let days = secs / 86400;
let hours = (secs % 86400) / 3600;
let minutes = (secs % 3600) / 60;

if days > 0 {
format!("{}d {}h ago", days, hours)
} else if hours > 0 {
format!("{}h {}m ago", hours, minutes)
} else {
format!("{}m ago", minutes)
}
}
Err(_) => "unknown".to_string(),
}
}
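With the age measured from the current time (as adjusted above), a recent nanosecond timestamp renders as a short relative string. An illustrative check, assuming format_timestamp is in scope:

    let now_nanos = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .expect("clock before epoch")
        .as_nanos() as i64;
    // A timestamp five minutes in the past prints as "5m ago".
    assert_eq!(format_timestamp(now_nanos - 5 * 60 * 1_000_000_000), "5m ago");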

#[tokio::main]
async fn main() -> Result<()> {
// Initialize logger
SimpleLogger::new()
.with_level(log::LevelFilter::Info)
.init()
.map_err(|e| CliError::Environment(format!("Failed to initialize logger: {}", e)))?;

// Parse command line arguments
let matches = ClapCommand::new("databuild")
.version("1.0")
.about("DataBuild unified CLI")
.subcommand_required(false)
.arg_required_else_help(false)
.subcommand(
ClapCommand::new("build")
.about("Build partitions using the DataBuild execution engine")
.arg(
Arg::new("partitions")
.help("Partition references to build")
.required(true)
.num_args(1..)
.value_name("PARTITIONS")
)
.arg(
Arg::new("event-log")
.long("event-log")
.help("Event log URI (default: stdout)")
.value_name("URI")
)
.arg(
Arg::new("build-request-id")
.long("build-request-id")
.help("Build request ID (default: generate UUID)")
.value_name("ID")
)
)
.subcommand(
ClapCommand::new("partitions")
.about("Query and manage partitions")
.subcommand(
ClapCommand::new("list")
.about("List all partitions")
.arg(Arg::new("limit").long("limit").short('l').value_name("LIMIT").help("Maximum number of partitions to show"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("show")
.about("Show partition details")
.arg(Arg::new("partition_ref").required(true).help("Partition reference"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("invalidate")
.about("Invalidate a partition")
.arg(Arg::new("partition_ref").required(true).help("Partition reference"))
.arg(Arg::new("reason").long("reason").short('r').required(true).help("Reason for invalidation"))
.arg(Arg::new("build_request_id").long("build-request-id").short('b').required(true).help("Build request ID"))
)
)
.subcommand(
ClapCommand::new("jobs")
.about("Query job execution data")
.subcommand(
ClapCommand::new("list")
.about("List all jobs")
.arg(Arg::new("limit").long("limit").short('l').value_name("LIMIT").help("Maximum number of jobs to show"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("show")
.about("Show job details")
.arg(Arg::new("job_label").required(true).help("Job label"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
)
.subcommand(
ClapCommand::new("tasks")
.about("Query and manage tasks (job runs)")
.subcommand(
ClapCommand::new("list")
.about("List all tasks")
.arg(Arg::new("limit").long("limit").short('l').value_name("LIMIT").help("Maximum number of tasks to show"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("show")
..about("Show task details")
.arg(Arg::new("job_run_id").required(true).help("Job run ID"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("cancel")
.about("Cancel a task")
.arg(Arg::new("job_run_id").required(true).help("Job run ID"))
.arg(Arg::new("reason").long("reason").short('r').required(true).help("Reason for cancellation"))
.arg(Arg::new("build_request_id").long("build-request-id").short('b').required(true).help("Build request ID"))
)
)
.subcommand(
ClapCommand::new("builds")
.about("Query and manage build requests")
.subcommand(
ClapCommand::new("list")
.about("List all builds")
.arg(Arg::new("limit").long("limit").short('l').value_name("LIMIT").help("Maximum number of builds to show"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("show")
.about("Show build details")
.arg(Arg::new("build_request_id").required(true).help("Build request ID"))
.arg(Arg::new("format").long("format").short('f').value_name("FORMAT").help("Output format (table or json)").default_value("table"))
)
.subcommand(
ClapCommand::new("cancel")
.about("Cancel a build")
.arg(Arg::new("build_request_id").required(true).help("Build request ID"))
.arg(Arg::new("reason").long("reason").short('r').required(true).help("Reason for cancellation"))
)
)
.arg(
Arg::new("event-log")
.long("event-log")
.help("Event log URI (default: sqlite:databuild.db for repository commands)")
.value_name("URI")
.global(true)
)
.get_matches();

// Get global event log URI
let event_log_uri = matches.get_one::<String>("event-log")
.cloned()
.or_else(|| env::var("DATABUILD_BUILD_EVENT_LOG").ok())
.unwrap_or_else(|| "sqlite:databuild.db".to_string());

match matches.subcommand() {
Some(("build", sub_matches)) => {
handle_build_command(sub_matches).await?;
}
Some(("partitions", sub_matches)) => {
handle_partitions_command(sub_matches, &event_log_uri).await?;
}
Some(("jobs", sub_matches)) => {
handle_jobs_command(sub_matches, &event_log_uri).await?;
}
Some(("tasks", sub_matches)) => {
handle_tasks_command(sub_matches, &event_log_uri).await?;
}
Some(("builds", sub_matches)) => {
handle_builds_command(sub_matches, &event_log_uri).await?;
}
_ => {
// Show help if no subcommand provided
let mut cmd = ClapCommand::new("databuild")
.version("1.0")
.about("DataBuild unified CLI");
cmd.print_help().unwrap();
println!();
}
}

Ok(())
}
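Taken together, the subcommand tree above yields invocations of the form "databuild build <PARTITIONS...> [--event-log URI] [--build-request-id ID]", "databuild partitions list --limit 10 --format json", "databuild tasks cancel <JOB_RUN_ID> --reason <REASON> --build-request-id <ID>", and "databuild builds show <BUILD_REQUEST_ID>"; the concrete partition references and IDs are placeholders. The global --event-log flag can also be supplied through the DATABUILD_BUILD_EVENT_LOG environment variable read above, falling back to sqlite:databuild.db.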

async fn handle_partitions_command(matches: &ArgMatches, event_log_uri: &str) -> Result<()> {
let event_log = create_build_event_log(event_log_uri).await
.map_err(|e| CliError::Database(format!("Failed to connect to event log: {}", e)))?;

let repository = PartitionsRepository::new(Arc::from(event_log));

match matches.subcommand() {
Some(("list", sub_matches)) => {
let limit = sub_matches.get_one::<String>("limit").and_then(|s| s.parse().ok());
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let partitions = repository.list(limit).await
.map_err(|e| CliError::Database(format!("Failed to list partitions: {}", e)))?;

match format {
"json" => {
let json = serde_json::to_string_pretty(&partitions)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
if partitions.is_empty() {
println!("No partitions found");
return Ok(());
}

println!("Partitions ({} total):", partitions.len());
println!();
println!("{:<30} {:<15} {:<12} {:<12} {:<20}", "Partition", "Status", "Builds", "Invalidated", "Last Updated");
println!("{}", "-".repeat(90));

for partition in partitions {
let status_str = format!("{:?}", partition.current_status);
let last_updated = format_timestamp(partition.last_updated);

println!("{:<30} {:<15} {:<12} {:<12} {:<20}",
partition.partition_ref,
status_str,
partition.builds_count,
partition.invalidation_count,
last_updated
);
}
}
}
}
Some(("show", sub_matches)) => {
let partition_ref = sub_matches.get_one::<String>("partition_ref").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show(partition_ref).await
.map_err(|e| CliError::Database(format!("Failed to show partition: {}", e)))?;

match result {
Some((info, timeline)) => {
match format {
"json" => {
#[derive(serde::Serialize)]
struct PartitionDetail {
info: PartitionInfo,
timeline: Vec<PartitionStatusEvent>,
}
let detail = PartitionDetail { info, timeline };
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Partition: {}", info.partition_ref);
println!("Status: {:?}", info.current_status);
println!("Builds involved: {}", info.builds_count);
println!("Invalidation count: {}", info.invalidation_count);
println!("Last updated: {}", format_timestamp(info.last_updated));

if let Some(ref last_build) = info.last_successful_build {
println!("\nLast successful build: {}", last_build);
}

if !timeline.is_empty() {
println!("\nTimeline ({} events):", timeline.len());
for event in timeline {
let timestamp = format_timestamp(event.timestamp);
println!(" {} [{:?}] {}", timestamp, event.status, event.message);
if event.message.starts_with("Invalidated:") {
// Invalidation reason is in the message
}
}
}
}
}
}
None => {
match format {
"json" => {
println!("null");
}
_ => {
println!("Partition '{}' not found", partition_ref);
}
}
}
}
}
Some(("invalidate", sub_matches)) => {
let partition_ref = sub_matches.get_one::<String>("partition_ref").unwrap();
let reason = sub_matches.get_one::<String>("reason").unwrap();
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();

let partition_ref_obj = PartitionRef { str: partition_ref.clone() };

repository.invalidate(&partition_ref_obj.str, reason.clone(), build_request_id.clone()).await
.map_err(|e| CliError::Database(format!("Failed to invalidate partition: {}", e)))?;

println!("Successfully invalidated partition '{}' with reason: {}", partition_ref, reason);
}
_ => {
println!("Unknown partitions subcommand. Use 'list', 'show', or 'invalidate'.");
}
}

Ok(())
}

async fn handle_jobs_command(matches: &ArgMatches, event_log_uri: &str) -> Result<()> {
let event_log = create_build_event_log(event_log_uri).await
.map_err(|e| CliError::Database(format!("Failed to connect to event log: {}", e)))?;

let repository = JobsRepository::new(Arc::from(event_log));

match matches.subcommand() {
Some(("list", sub_matches)) => {
let limit = sub_matches.get_one::<String>("limit").and_then(|s| s.parse().ok());
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let jobs = repository.list(limit).await
.map_err(|e| CliError::Database(format!("Failed to list jobs: {}", e)))?;

match format {
"json" => {
let json = serde_json::to_string_pretty(&jobs)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
if jobs.is_empty() {
println!("No jobs found");
return Ok(());
}

println!("Jobs ({} total):", jobs.len());
println!();
println!("{:<40} {:<8} {:<8} {:<8} {:<8} {:<8} {:<20}", "Job Label", "Runs", "Success", "Failed", "Cancel", "Avg Parts", "Last Run");
println!("{}", "-".repeat(120));

for job in jobs {
let success_rate = if job.total_runs > 0 {
(job.successful_runs as f64 / job.total_runs as f64 * 100.0) as u32
} else {
0
};

let last_run = format_timestamp(job.last_run_timestamp);
let last_status = format!("{:?}", job.last_run_status);

println!("{:<40} {:<8} {:<8} {:<8} {:<8} {:<8.1} {:<20}",
job.job_label,
job.total_runs,
format!("{}({}%)", job.successful_runs, success_rate),
job.failed_runs,
job.cancelled_runs,
job.average_partitions_per_run,
format!("{} ({})", last_run, last_status)
);
}
}
}
}
Some(("show", sub_matches)) => {
let job_label = sub_matches.get_one::<String>("job_label").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show(job_label).await
.map_err(|e| CliError::Database(format!("Failed to show job: {}", e)))?;

match result {
Some((info, runs)) => {
match format {
"json" => {
#[derive(serde::Serialize)]
struct JobDetail {
info: JobInfo,
runs: Vec<JobRunDetail>,
}
let detail = JobDetail { info, runs };
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Job: {}", info.job_label);
println!("Total runs: {}", info.total_runs);
println!("Successful runs: {} ({:.1}%)", info.successful_runs,
if info.total_runs > 0 { info.successful_runs as f64 / info.total_runs as f64 * 100.0 } else { 0.0 });
println!("Failed runs: {}", info.failed_runs);
println!("Cancelled runs: {}", info.cancelled_runs);
println!("Average partitions per run: {:.1}", info.average_partitions_per_run);
println!("Last run: {} ({:?})", format_timestamp(info.last_run_timestamp), info.last_run_status);

if !info.recent_builds.is_empty() {
println!("\nRecent builds:");
for build_id in &info.recent_builds {
println!(" - {}", build_id);
}
}

if !runs.is_empty() {
println!("\nExecution history ({} runs):", runs.len());
println!("{:<25} {:<15} {:<15} {:<10} {:<30}", "Run ID", "Status", "Duration", "Parts", "Build Request");
println!("{}", "-".repeat(95));

for run in runs.iter().take(10) { // Show last 10 runs
let duration_str = if let Some(duration) = run.duration_ms {
if duration > 1000 {
format!("{:.1}s", duration as f64 / 1000.0)
} else {
format!("{}ms", duration)
}
} else {
"N/A".to_string()
};

println!("{:<25} {:<15} {:<15} {:<10} {:<30}",
run.job_run_id,
format!("{:?}", run.status),
duration_str,
run.target_partitions.len(),
run.build_request_id
);
}

if runs.len() > 10 {
println!("... and {} more runs", runs.len() - 10);
}
}
}
}
}
None => {
match format {
"json" => {
println!("null");
}
_ => {
println!("Job '{}' not found", job_label);
}
}
}
}
}
_ => {
println!("Unknown jobs subcommand. Use 'list' or 'show'.");
}
}

Ok(())
}

async fn handle_tasks_command(matches: &ArgMatches, event_log_uri: &str) -> Result<()> {
let event_log = create_build_event_log(event_log_uri).await
.map_err(|e| CliError::Database(format!("Failed to connect to event log: {}", e)))?;

let repository = TasksRepository::new(Arc::from(event_log));

match matches.subcommand() {
Some(("list", sub_matches)) => {
let limit = sub_matches.get_one::<String>("limit").and_then(|s| s.parse().ok());
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let tasks = repository.list(limit).await
.map_err(|e| CliError::Database(format!("Failed to list tasks: {}", e)))?;

match format {
"json" => {
let json = serde_json::to_string_pretty(&tasks)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
if tasks.is_empty() {
println!("No tasks found");
return Ok(());
}

println!("Tasks ({} total):", tasks.len());
println!();
println!("{:<25} {:<30} {:<15} {:<15} {:<10} {:<20}", "Job Run ID", "Job Label", "Status", "Duration", "Parts", "Scheduled");
println!("{}", "-".repeat(115));

for task in tasks {
let duration_str = if let Some(duration) = task.duration_ms {
if duration > 1000 {
format!("{:.1}s", duration as f64 / 1000.0)
} else {
format!("{}ms", duration)
}
} else {
"N/A".to_string()
};

let scheduled = format_timestamp(task.scheduled_at);
let status_str = if task.cancelled {
format!("{:?}*", task.status) // Add asterisk for cancelled tasks
} else {
format!("{:?}", task.status)
};

println!("{:<25} {:<30} {:<15} {:<15} {:<10} {:<20}",
task.job_run_id,
task.job_label,
status_str,
duration_str,
task.target_partitions.len(),
scheduled
);
}

println!("\n* = Cancelled task");
}
}
}
Some(("show", sub_matches)) => {
let job_run_id = sub_matches.get_one::<String>("job_run_id").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show(job_run_id).await
.map_err(|e| CliError::Database(format!("Failed to show task: {}", e)))?;

match result {
Some((info, timeline)) => {
match format {
"json" => {
#[derive(serde::Serialize)]
struct TaskDetail {
info: TaskInfo,
timeline: Vec<TaskEvent>,
}
let detail = TaskDetail { info, timeline };
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Task: {}", info.job_run_id);
println!("Job: {}", info.job_label);
println!("Build request: {}", info.build_request_id);
println!("Status: {:?}", info.status);
println!("Target partitions: {}", info.target_partitions.len());
println!("Scheduled: {}", format_timestamp(info.scheduled_at));

if let Some(started) = info.started_at {
println!("Started: {}", format_timestamp(started));
}

if let Some(completed) = info.completed_at {
println!("Completed: {}", format_timestamp(completed));
}

if let Some(duration) = info.duration_ms {
if duration > 1000 {
println!("Duration: {:.1}s", duration as f64 / 1000.0);
} else {
println!("Duration: {}ms", duration);
}
}

if info.cancelled {
println!("Cancelled: Yes");
if let Some(ref reason) = info.cancel_reason {
println!("Cancel reason: {}", reason);
}
}

if !info.message.is_empty() {
println!("Message: {}", info.message);
}

if !info.target_partitions.is_empty() {
println!("\nTarget partitions:");
for partition in &info.target_partitions {
println!(" - {}", partition.str);
}
}

if !timeline.is_empty() {
println!("\nTimeline ({} events):", timeline.len());
for event in timeline {
let timestamp = format_timestamp(event.timestamp);
let status_info = if let Some(status) = event.status {
format!(" -> {:?}", status)
} else {
String::new()
};

println!(" {} [{}]{} {}", timestamp, event.event_type, status_info, event.message);
if let Some(ref reason) = event.cancel_reason {
println!(" Reason: {}", reason);
}
}
}
}
}
}
None => {
match format {
"json" => {
println!("null");
}
_ => {
println!("Task '{}' not found", job_run_id);
}
}
}
}
}
Some(("cancel", sub_matches)) => {
let job_run_id = sub_matches.get_one::<String>("job_run_id").unwrap();
let reason = sub_matches.get_one::<String>("reason").unwrap();
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();

repository.cancel(job_run_id, reason.clone(), build_request_id.clone()).await
.map_err(|e| CliError::Database(format!("Failed to cancel task: {}", e)))?;

println!("Successfully cancelled task '{}' with reason: {}", job_run_id, reason);
}
_ => {
println!("Unknown tasks subcommand. Use 'list', 'show', or 'cancel'.");
}
}

Ok(())
}

async fn handle_builds_command(matches: &ArgMatches, event_log_uri: &str) -> Result<()> {
let event_log = create_build_event_log(event_log_uri).await
.map_err(|e| CliError::Database(format!("Failed to connect to event log: {}", e)))?;

let repository = BuildsRepository::new(Arc::from(event_log));

match matches.subcommand() {
Some(("list", sub_matches)) => {
let limit = sub_matches.get_one::<String>("limit").and_then(|s| s.parse().ok());
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let builds = repository.list(limit).await
.map_err(|e| CliError::Database(format!("Failed to list builds: {}", e)))?;

match format {
"json" => {
let json = serde_json::to_string_pretty(&builds)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
if builds.is_empty() {
println!("No builds found");
return Ok(());
}

println!("Builds ({} total):", builds.len());
println!();
println!("{:<40} {:<15} {:<15} {:<8} {:<8} {:<8} {:<20}", "Build Request ID", "Status", "Duration", "Parts", "Jobs", "Comp", "Requested");
println!("{}", "-".repeat(120));

for build in builds {
let duration_str = if let Some(duration) = build.duration_ms {
if duration > 60000 {
format!("{:.1}m", duration as f64 / 60000.0)
} else if duration > 1000 {
format!("{:.1}s", duration as f64 / 1000.0)
} else {
format!("{}ms", duration)
}
} else {
"N/A".to_string()
};

let requested = format_timestamp(build.requested_at);
let status_str = if build.cancelled {
format!("{:?}*", build.status) // Add asterisk for cancelled builds
} else {
format!("{:?}", build.status)
};

let completion_rate = if build.total_jobs > 0 {
format!("{}/{}", build.completed_jobs, build.total_jobs)
} else {
"0/0".to_string()
};

println!("{:<40} {:<15} {:<15} {:<8} {:<8} {:<8} {:<20}",
build.build_request_id,
status_str,
duration_str,
build.requested_partitions.len(),
build.total_jobs,
completion_rate,
requested
);
}

println!("\n* = Cancelled build");
}
}
}
Some(("show", sub_matches)) => {
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show(build_request_id).await
.map_err(|e| CliError::Database(format!("Failed to show build: {}", e)))?;

match result {
Some((info, timeline)) => {
match format {
"json" => {
#[derive(serde::Serialize)]
struct BuildDetail {
info: BuildInfo,
timeline: Vec<BuildRepositoryEvent>,
}
let detail = BuildDetail { info, timeline };
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Build: {}", info.build_request_id);
println!("Status: {:?}", info.status);
println!("Requested partitions: {}", info.requested_partitions.len());
println!("Total jobs: {}", info.total_jobs);
println!("Completed jobs: {}", info.completed_jobs);
println!("Failed jobs: {}", info.failed_jobs);
println!("Cancelled jobs: {}", info.cancelled_jobs);
println!("Requested: {}", format_timestamp(info.requested_at));

if let Some(started) = info.started_at {
println!("Started: {}", format_timestamp(started));
}

if let Some(completed) = info.completed_at {
println!("Completed: {}", format_timestamp(completed));
}

if let Some(duration) = info.duration_ms {
if duration > 60000 {
println!("Duration: {:.1}m", duration as f64 / 60000.0);
} else if duration > 1000 {
println!("Duration: {:.1}s", duration as f64 / 1000.0);
} else {
println!("Duration: {}ms", duration);
}
}

if info.cancelled {
println!("Cancelled: Yes");
if let Some(ref reason) = info.cancel_reason {
println!("Cancel reason: {}", reason);
}
}

if !info.requested_partitions.is_empty() {
println!("\nRequested partitions:");
for partition in &info.requested_partitions {
println!(" - {}", partition.str);
}
}

// Show job statistics
if info.total_jobs > 0 {
let success_rate = (info.completed_jobs as f64 / info.total_jobs as f64 * 100.0) as u32;
println!("\nJob statistics:");
println!(" Success rate: {}% ({}/{})", success_rate, info.completed_jobs, info.total_jobs);

if info.failed_jobs > 0 {
println!(" Failed: {}", info.failed_jobs);
}
if info.cancelled_jobs > 0 {
println!(" Cancelled: {}", info.cancelled_jobs);
}
}

if !timeline.is_empty() {
println!("\nTimeline ({} events):", timeline.len());
for event in timeline {
let timestamp = format_timestamp(event.timestamp);
let status_info = if let Some(status) = event.status {
format!(" -> {:?}", status)
} else {
String::new()
};

println!(" {} [{}]{} {}", timestamp, event.event_type, status_info, event.message);
if let Some(ref reason) = event.cancel_reason {
println!(" Reason: {}", reason);
}
}
}
}
}
}
None => {
match format {
"json" => {
println!("null");
}
_ => {
println!("Build '{}' not found", build_request_id);
}
}
}
}
}
Some(("cancel", sub_matches)) => {
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();
let reason = sub_matches.get_one::<String>("reason").unwrap();

repository.cancel(build_request_id, reason.clone()).await
.map_err(|e| CliError::Database(format!("Failed to cancel build: {}", e)))?;

println!("Successfully cancelled build '{}' with reason: {}", build_request_id, reason);
}
_ => {
println!("Unknown builds subcommand. Use 'list', 'show', or 'cancel'.");
}
}

Ok(())
}

@@ -232,6 +232,23 @@ message JobGraphEvent {
string message = 2; // Optional message
}

// Partition invalidation event
message PartitionInvalidationEvent {
PartitionRef partition_ref = 1; // Partition being invalidated
string reason = 2; // Reason for invalidation
}

// Task cancellation event
message TaskCancelEvent {
string job_run_id = 1; // UUID of the job run being cancelled
string reason = 2; // Reason for cancellation
}

// Build cancellation event
message BuildCancelEvent {
string reason = 1; // Reason for cancellation
}

// Individual build event
message BuildEvent {
// Event metadata

@@ -246,6 +263,9 @@ message BuildEvent {
JobEvent job_event = 12;
DelegationEvent delegation_event = 13;
JobGraphEvent job_graph_event = 14;
PartitionInvalidationEvent partition_invalidation_event = 15;
TaskCancelEvent task_cancel_event = 16;
BuildCancelEvent build_cancel_event = 17;
}
}
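On the Rust side these become additional event_type oneof variants, which the match arms in sqlite.rs and handlers.rs below rely on. A minimal construction sketch, assuming prost-generated types that implement Default and leaving the event metadata fields (names not shown in this hunk) to their defaults:

    let cancel = BuildEvent {
        // Only the oneof payload is shown; event id, timestamp, and build
        // request id metadata would be filled in by the event writer.
        event_type: Some(build_event::EventType::BuildCancelEvent(BuildCancelEvent {
            reason: "superseded by a newer build request".to_string(),
        })),
        ..Default::default()
    };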
@@ -6,6 +6,8 @@ use uuid::Uuid;
pub mod stdout;
pub mod sqlite;
pub mod postgres;
pub mod writer;
pub mod mock;

#[derive(Debug)]
pub enum BuildEventLogError {
@@ -168,6 +168,9 @@ impl BuildEventLog for SqliteBuildEventLog {
Some(crate::build_event::EventType::JobEvent(_)) => "job",
Some(crate::build_event::EventType::DelegationEvent(_)) => "delegation",
Some(crate::build_event::EventType::JobGraphEvent(_)) => "job_graph",
Some(crate::build_event::EventType::PartitionInvalidationEvent(_)) => "partition_invalidation",
Some(crate::build_event::EventType::TaskCancelEvent(_)) => "task_cancel",
Some(crate::build_event::EventType::BuildCancelEvent(_)) => "build_cancel",
None => "unknown",
}
],
@@ -252,6 +255,18 @@ impl BuildEventLog for SqliteBuildEventLog {
],
).map_err(|e| BuildEventLogError::DatabaseError(e.to_string()))?;
}
Some(crate::build_event::EventType::PartitionInvalidationEvent(_pi_event)) => {
// For now, we'll just store these in the main events table
// In a later phase, we could add a specific table for invalidation events
}
Some(crate::build_event::EventType::TaskCancelEvent(_tc_event)) => {
// For now, we'll just store these in the main events table
// In a later phase, we could add a specific table for task cancel events
}
Some(crate::build_event::EventType::BuildCancelEvent(_bc_event)) => {
// For now, we'll just store these in the main events table
// In a later phase, we could add a specific table for build cancel events
}
None => {}
}

@@ -391,15 +406,60 @@ impl BuildEventLog for SqliteBuildEventLog {

async fn get_events_in_range(
&self,
_start_time: i64,
_end_time: i64
start_time: i64,
end_time: i64
) -> Result<Vec<BuildEvent>> {
// This method is not implemented because it would require complex joins
// to reconstruct complete event data. Use get_build_request_events instead
// which properly reconstructs all event types for a build request.
Err(BuildEventLogError::QueryError(
"get_events_in_range is not implemented - use get_build_request_events to get complete event data".to_string()
))
let conn = self.connection.lock().unwrap();

// Use a UNION query to get all event types with their specific data in the time range
let query = "
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
bre.status, bre.requested_partitions, bre.message, NULL, NULL, NULL, NULL
FROM build_events be
LEFT JOIN build_request_events bre ON be.event_id = bre.event_id
WHERE be.timestamp >= ?1 AND be.timestamp <= ?2 AND be.event_type = 'build_request'
UNION ALL
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
pe.partition_ref, pe.status, pe.message, pe.job_run_id, NULL, NULL, NULL
FROM build_events be
LEFT JOIN partition_events pe ON be.event_id = pe.event_id
WHERE be.timestamp >= ?3 AND be.timestamp <= ?4 AND be.event_type = 'partition'
UNION ALL
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
je.job_run_id, je.job_label, je.target_partitions, je.status, je.message, je.config_json, je.manifests_json
FROM build_events be
LEFT JOIN job_events je ON be.event_id = je.event_id
WHERE be.timestamp >= ?5 AND be.timestamp <= ?6 AND be.event_type = 'job'
UNION ALL
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
de.partition_ref, de.delegated_to_build_request_id, de.message, NULL, NULL, NULL, NULL
FROM build_events be
LEFT JOIN delegation_events de ON be.event_id = de.event_id
WHERE be.timestamp >= ?7 AND be.timestamp <= ?8 AND be.event_type = 'delegation'
UNION ALL
SELECT be.event_id, be.timestamp, be.build_request_id, be.event_type,
jge.job_graph_json, jge.message, NULL, NULL, NULL, NULL, NULL
FROM build_events be
LEFT JOIN job_graph_events jge ON be.event_id = jge.event_id
WHERE be.timestamp >= ?9 AND be.timestamp <= ?10 AND be.event_type = 'job_graph'
ORDER BY timestamp ASC
";

let mut stmt = conn.prepare(query)
.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;

// We need 10 parameters: start_time and end_time for each of the 5 UNION queries
let rows = stmt.query_map(
params![start_time, end_time, start_time, end_time, start_time, end_time, start_time, end_time, start_time, end_time],
Self::row_to_build_event_from_join
).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?;

let mut events = Vec::new();
for row in rows {
events.push(row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?);
}

Ok(events)
}
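A caller only sees the trait method. A minimal usage sketch, assuming nanosecond timestamps (consistent with format_timestamp in the CLI) and an event_log value implementing BuildEventLog:

    use std::time::{SystemTime, UNIX_EPOCH};

    // Illustrative: fetch everything recorded in the last hour.
    let end = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos() as i64;
    let start = end - 3_600 * 1_000_000_000;
    let recent = event_log.get_events_in_range(start, end).await?;
    println!("{} events in the last hour", recent.len());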

async fn execute_query(&self, query: &str) -> Result<QueryResult> {
@@ -10,6 +10,9 @@ pub mod orchestration;
// Service module
pub mod service;

// Repository pattern implementations
pub mod repositories;

pub mod mermaid_utils;

// Re-export commonly used types from event_log
@@ -1,5 +1,5 @@
use crate::*;
use crate::event_log::BuildEventLog;
use crate::event_log::{BuildEventLog, writer::EventWriter};
use log::info;
use std::sync::Arc;

@@ -18,7 +18,7 @@ pub enum BuildResult {

/// Core orchestrator for managing build lifecycle and event emission
pub struct BuildOrchestrator {
event_log: Arc<dyn BuildEventLog>,
event_writer: EventWriter,
build_request_id: String,
requested_partitions: Vec<PartitionRef>,
}

@@ -31,7 +31,7 @@ impl BuildOrchestrator {
requested_partitions: Vec<PartitionRef>,
) -> Self {
Self {
event_log,
event_writer: EventWriter::new(event_log),
build_request_id,
requested_partitions,
}

@@ -51,12 +51,10 @@ impl BuildOrchestrator {
pub async fn start_build(&self) -> Result<()> {
info!("Starting build for request: {}", self.build_request_id);

let event = events::create_build_request_received_event(
self.event_writer.request_build(
self.build_request_id.clone(),
self.requested_partitions.clone(),
);

self.event_log.append_event(event).await
).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -66,11 +64,11 @@ impl BuildOrchestrator {
pub async fn start_planning(&self) -> Result<()> {
info!("Starting build planning for request: {}", self.build_request_id);

let event = events::create_build_planning_started_event(
self.event_writer.update_build_status(
self.build_request_id.clone(),
);

self.event_log.append_event(event).await
BuildRequestStatus::BuildRequestPlanning,
"Starting build planning".to_string(),
).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -80,11 +78,11 @@ impl BuildOrchestrator {
pub async fn start_execution(&self) -> Result<()> {
info!("Starting build execution for request: {}", self.build_request_id);

let event = events::create_build_execution_started_event(
self.event_writer.update_build_status(
self.build_request_id.clone(),
);

self.event_log.append_event(event).await
BuildRequestStatus::BuildRequestExecuting,
"Starting build execution".to_string(),
).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -95,12 +93,26 @@ impl BuildOrchestrator {
info!("Completing build for request: {} with result: {:?}",
self.build_request_id, result);

let event = events::create_build_completed_event(
self.build_request_id.clone(),
&result,
);
let (status, message) = match &result {
BuildResult::Success { jobs_completed } => {
(BuildRequestStatus::BuildRequestCompleted,
format!("Build completed successfully with {} jobs", jobs_completed))
}
BuildResult::Failed { jobs_completed, jobs_failed } => {
(BuildRequestStatus::BuildRequestFailed,
format!("Build failed: {} jobs completed, {} jobs failed", jobs_completed, jobs_failed))
}
BuildResult::FailFast { trigger_job } => {
(BuildRequestStatus::BuildRequestFailed,
format!("Build failed fast due to job: {}", trigger_job))
}
};

self.event_log.append_event(event).await
self.event_writer.update_build_status(
self.build_request_id.clone(),
status,
message,
).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -108,13 +120,12 @@ impl BuildOrchestrator {

/// Emit analysis completed event
pub async fn emit_analysis_completed(&self, task_count: usize) -> Result<()> {
let event = events::create_analysis_completed_event(
self.event_writer.update_build_status_with_partitions(
self.build_request_id.clone(),
BuildRequestStatus::BuildRequestAnalysisCompleted,
self.requested_partitions.clone(),
task_count,
);

self.event_log.append_event(event).await
format!("Analysis completed successfully, {} tasks planned", task_count),
).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -127,7 +138,7 @@ impl BuildOrchestrator {
job,
);

self.event_log.append_event(event).await
self.event_writer.event_log().append_event(event).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -140,7 +151,7 @@ impl BuildOrchestrator {
job,
);

self.event_log.append_event(event).await
self.event_writer.event_log().append_event(event).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -153,7 +164,7 @@ impl BuildOrchestrator {
partition,
);

self.event_log.append_event(event).await
self.event_writer.event_log().append_event(event).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -166,14 +177,14 @@ impl BuildOrchestrator {
target_build: &str,
message: &str,
) -> Result<()> {
let event = events::create_delegation_event(
self.build_request_id.clone(),
partition_ref,
target_build,
message,
);
let partition = PartitionRef { str: partition_ref.to_string() };

self.event_log.append_event(event).await
self.event_writer.record_delegation(
self.build_request_id.clone(),
partition,
target_build.to_string(),
message.to_string(),
).await
.map_err(OrchestrationError::EventLog)?;

Ok(())

@@ -181,7 +192,7 @@ impl BuildOrchestrator {

/// Get reference to the event log for direct access if needed
pub fn event_log(&self) -> &dyn BuildEventLog {
self.event_log.as_ref()
self.event_writer.event_log()
}
}
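Seen end to end, the orchestrator still drives the same lifecycle after the EventWriter refactor. A condensed sketch of one plausible ordering, using only the methods shown above and the values seen in the CLI's run path (constructor arguments abbreviated, error handling elided, job_graph assumed to come from planning):

    orchestrator.start_build().await?;
    orchestrator.start_planning().await?;
    orchestrator.emit_analysis_completed(job_graph.nodes.len()).await?;
    orchestrator.start_execution().await?;
    orchestrator.complete_build(BuildResult::Success { jobs_completed: job_graph.nodes.len() }).await?;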
@@ -329,23 +329,22 @@ pub async fn cancel_build_request(

#[derive(Deserialize, JsonSchema)]
pub struct PartitionStatusRequest {
#[serde(rename = "ref")]
pub ref_param: String,
pub partition_ref: String,
}

pub async fn get_partition_status(
State(service): State<ServiceState>,
Path(partition_ref): Path<PartitionStatusRequest>,
Path(PartitionStatusRequest { partition_ref }): Path<PartitionStatusRequest>,
) -> Result<Json<PartitionStatusResponse>, (StatusCode, Json<ErrorResponse>)> {
// Get latest partition status
let (status, last_updated) = match service.event_log.get_latest_partition_status(&partition_ref.ref_param).await {
let (status, last_updated) = match service.event_log.get_latest_partition_status(&partition_ref).await {
Ok(Some((status, timestamp))) => (status, Some(timestamp)),
Ok(None) => {
// No partition events found - this is a legitimate 404
return Err((
StatusCode::NOT_FOUND,
Json(ErrorResponse {
error: format!("Partition not found: {}", partition_ref.ref_param),
error: format!("Partition not found: {}", partition_ref),
}),
));
},

@@ -361,7 +360,7 @@ pub async fn get_partition_status(
};

// Get active builds for this partition
let build_requests = match service.event_log.get_active_builds_for_partition(&partition_ref.ref_param).await {
let build_requests = match service.event_log.get_active_builds_for_partition(&partition_ref).await {
Ok(builds) => builds,
Err(e) => {
error!("Failed to get active builds for partition: {}", e);

@@ -375,7 +374,7 @@ pub async fn get_partition_status(
};

Ok(Json(PartitionStatusResponse {
partition_ref: partition_ref.ref_param,
partition_ref,
status: BuildGraphService::partition_status_to_string(status),
last_updated,
build_requests,

@@ -384,15 +383,14 @@ pub async fn get_partition_status(

#[derive(Deserialize, JsonSchema)]
pub struct PartitionEventsRequest {
#[serde(rename = "ref")]
pub ref_param: String,
pub partition_ref: String,
}

pub async fn get_partition_events(
State(service): State<ServiceState>,
Path(request): Path<PartitionEventsRequest>,
Path(PartitionEventsRequest { partition_ref }): Path<PartitionEventsRequest>,
) -> Result<Json<PartitionEventsResponse>, (StatusCode, Json<ErrorResponse>)> {
let events = match service.event_log.get_partition_events(&partition_ref, None).await {
let events = match service.event_log.get_partition_events(&request.ref_param, None).await {
Ok(events) => events.into_iter().map(|e| {
let (job_label, partition_ref, delegated_build_id) = extract_navigation_data(&e.event_type);
BuildEventSummary {

@@ -418,7 +416,7 @@ pub async fn get_partition_events(
};

Ok(Json(PartitionEventsResponse {
partition_ref: request.ref_param,
partition_ref,
events,
}))
}
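Dropping the #[serde(rename = "ref")] attribute means the path-parameter name itself must now be partition_ref. The route definitions are not part of this diff, so the wiring below is an assumption rather than the project's actual router:

    // Hypothetical axum router wiring consistent with the extractors above.
    let app = Router::new()
        .route("/partitions/:partition_ref/status", get(get_partition_status))
        .route("/partitions/:partition_ref/events", get(get_partition_events))
        .with_state(service_state);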
@@ -633,6 +631,9 @@ fn event_type_to_string(event_type: &Option<crate::build_event::EventType>) -> S
Some(crate::build_event::EventType::JobEvent(_)) => "job".to_string(),
Some(crate::build_event::EventType::DelegationEvent(_)) => "delegation".to_string(),
Some(crate::build_event::EventType::JobGraphEvent(_)) => "job_graph".to_string(),
Some(crate::build_event::EventType::PartitionInvalidationEvent(_)) => "partition_invalidation".to_string(),
Some(crate::build_event::EventType::TaskCancelEvent(_)) => "task_cancel".to_string(),
Some(crate::build_event::EventType::BuildCancelEvent(_)) => "build_cancel".to_string(),
None => "INVALID_EVENT_TYPE".to_string(), // Make this obvious rather than hiding it
}
}

@@ -644,6 +645,9 @@ fn event_to_message(event_type: &Option<crate::build_event::EventType>) -> Strin
Some(crate::build_event::EventType::JobEvent(event)) => event.message.clone(),
Some(crate::build_event::EventType::DelegationEvent(event)) => event.message.clone(),
Some(crate::build_event::EventType::JobGraphEvent(event)) => event.message.clone(),
Some(crate::build_event::EventType::PartitionInvalidationEvent(event)) => event.reason.clone(),
Some(crate::build_event::EventType::TaskCancelEvent(event)) => event.reason.clone(),
Some(crate::build_event::EventType::BuildCancelEvent(event)) => event.reason.clone(),
None => "INVALID_EVENT_NO_MESSAGE".to_string(), // Make this obvious
}
}

@@ -670,6 +674,18 @@ fn extract_navigation_data(event_type: &Option<crate::build_event::EventType>) -
// Job graph events don't need navigation links
(None, None, None)
},
Some(crate::build_event::EventType::PartitionInvalidationEvent(event)) => {
let partition_ref = event.partition_ref.as_ref().map(|r| r.str.clone());
(None, partition_ref, None)
},
Some(crate::build_event::EventType::TaskCancelEvent(_event)) => {
// Task cancel events reference job run IDs, which we could potentially navigate to
(None, None, None)
},
Some(crate::build_event::EventType::BuildCancelEvent(_)) => {
// Build cancel events don't need navigation links
(None, None, None)
},
None => (None, None, None),
}
}

@@ -1139,4 +1155,488 @@ pub async fn get_job_metrics(
recent_runs,
daily_stats,
}))
}

// Repository-based handlers for the new shared core functionality
use crate::repositories::{
partitions::PartitionsRepository,
jobs::JobsRepository,
tasks::TasksRepository,
builds::BuildsRepository,
};

/// Request for partition detail endpoint
#[derive(Deserialize, JsonSchema)]
pub struct PartitionDetailRequest {
pub partition_ref: String,
}

/// Get detailed partition information with timeline
pub async fn get_partition_detail(
State(service): State<ServiceState>,
Path(PartitionDetailRequest { partition_ref }): Path<PartitionDetailRequest>,
) -> Result<Json<PartitionDetailResponse>, (StatusCode, Json<ErrorResponse>)> {
let repository = PartitionsRepository::new(service.event_log.clone());

match repository.show(&partition_ref).await {
Ok(Some((info, timeline))) => {
let timeline_events: Vec<PartitionTimelineEvent> = timeline.into_iter().map(|event| {
PartitionTimelineEvent {
timestamp: event.timestamp,
status: format!("{:?}", event.status),
message: event.message,
build_request_id: event.build_request_id,
job_run_id: event.job_run_id,
}
}).collect();

Ok(Json(PartitionDetailResponse {
partition_ref: info.partition_ref,
current_status: format!("{:?}", info.current_status),
last_updated: info.last_updated,
builds_count: info.builds_count,
last_successful_build: info.last_successful_build,
invalidation_count: info.invalidation_count,
timeline: timeline_events,
}))
}
Ok(None) => Err((
StatusCode::NOT_FOUND,
Json(ErrorResponse {
error: format!("Partition '{}' not found", partition_ref),
}),
)),
Err(e) => {
error!("Failed to get partition detail: {}", e);
Err((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: format!("Failed to get partition detail: {}", e),
}),
))
}
}
}

/// Invalidate a partition
#[derive(Deserialize, JsonSchema)]
pub struct InvalidatePartitionRequest {
pub reason: String,
pub build_request_id: String,
}

/// Request for partition invalidation endpoint path
#[derive(Deserialize, JsonSchema)]
pub struct PartitionInvalidatePathRequest {
pub partition_ref: String,
}

pub async fn invalidate_partition(
State(service): State<ServiceState>,
Path(PartitionInvalidatePathRequest { partition_ref }): Path<PartitionInvalidatePathRequest>,
Json(request): Json<InvalidatePartitionRequest>,
) -> Result<Json<serde_json::Value>, (StatusCode, Json<ErrorResponse>)> {
let repository = PartitionsRepository::new(service.event_log.clone());

match repository.invalidate(&partition_ref, request.reason.clone(), request.build_request_id).await {
Ok(()) => Ok(Json(serde_json::json!({
"invalidated": true,
"partition_ref": partition_ref,
"reason": request.reason
}))),
Err(e) => {
error!("Failed to invalidate partition: {}", e);
Err((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: format!("Failed to invalidate partition: {}", e),
}),
))
}
}
}
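For reference, the JSON shapes this handler exchanges, expressed with serde_json; the values are placeholders, not data from this commit:

    // Request body deserialized into InvalidatePartitionRequest above.
    let request_body = serde_json::json!({
        "reason": "upstream data was corrected",
        "build_request_id": "example-build-request-id"
    });
    // Success response mirrors the json! payload built in the handler.
    let expected_response = serde_json::json!({
        "invalidated": true,
        "partition_ref": "example/partition",
        "reason": "upstream data was corrected"
    });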
|
||||
|
||||
/// List jobs using repository
|
||||
pub async fn list_jobs_repository(
|
||||
State(service): State<ServiceState>,
|
||||
Query(params): Query<HashMap<String, String>>,
|
||||
) -> Result<Json<JobsRepositoryListResponse>, (StatusCode, Json<ErrorResponse>)> {
|
||||
let repository = JobsRepository::new(service.event_log.clone());
|
||||
let limit = params.get("limit").and_then(|s| s.parse().ok());
|
||||
|
||||
match repository.list(limit).await {
|
||||
Ok(jobs) => {
|
||||
let job_summaries: Vec<JobRepositorySummary> = jobs.into_iter().map(|job| {
|
||||
JobRepositorySummary {
|
||||
job_label: job.job_label,
|
||||
total_runs: job.total_runs,
|
||||
successful_runs: job.successful_runs,
|
||||
failed_runs: job.failed_runs,
|
||||
cancelled_runs: job.cancelled_runs,
|
||||
average_partitions_per_run: job.average_partitions_per_run,
|
||||
last_run_timestamp: job.last_run_timestamp,
|
||||
last_run_status: format!("{:?}", job.last_run_status),
|
||||
recent_builds: job.recent_builds,
|
||||
}
|
||||
}).collect();
|
||||
|
||||
let total_count = job_summaries.len() as u32;
|
||||
Ok(Json(JobsRepositoryListResponse {
|
||||
jobs: job_summaries,
|
||||
total_count,
|
||||
}))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to list jobs: {}", e);
|
||||
Err((
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
Json(ErrorResponse {
|
||||
error: format!("Failed to list jobs: {}", e),
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request for job detail endpoint
#[derive(Deserialize, JsonSchema)]
pub struct JobDetailRequest {
    pub label: String,
}

/// Get detailed job information
pub async fn get_job_detail(
    State(service): State<ServiceState>,
    Path(JobDetailRequest { label }): Path<JobDetailRequest>,
) -> Result<Json<JobDetailResponse>, (StatusCode, Json<ErrorResponse>)> {
    let job_label = label;
    let repository = JobsRepository::new(service.event_log.clone());

    match repository.show(&job_label).await {
        Ok(Some((info, runs))) => {
            let run_summaries: Vec<JobRunDetail> = runs.into_iter().map(|run| {
                JobRunDetail {
                    job_run_id: run.job_run_id,
                    build_request_id: run.build_request_id,
                    target_partitions: run.target_partitions.into_iter().map(|p| p.str).collect(),
                    status: format!("{:?}", run.status),
                    started_at: run.started_at,
                    completed_at: run.completed_at,
                    duration_ms: run.duration_ms,
                    message: run.message,
                }
            }).collect();

            Ok(Json(JobDetailResponse {
                job_label: info.job_label,
                total_runs: info.total_runs,
                successful_runs: info.successful_runs,
                failed_runs: info.failed_runs,
                cancelled_runs: info.cancelled_runs,
                average_partitions_per_run: info.average_partitions_per_run,
                last_run_timestamp: info.last_run_timestamp,
                last_run_status: format!("{:?}", info.last_run_status),
                recent_builds: info.recent_builds,
                runs: run_summaries,
            }))
        }
        Ok(None) => Err((
            StatusCode::NOT_FOUND,
            Json(ErrorResponse {
                error: format!("Job '{}' not found", job_label),
            }),
        )),
        Err(e) => {
            error!("Failed to get job detail: {}", e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to get job detail: {}", e),
                }),
            ))
        }
    }
}

/// List tasks using repository
pub async fn list_tasks(
    State(service): State<ServiceState>,
    Query(params): Query<HashMap<String, String>>,
) -> Result<Json<TasksListResponse>, (StatusCode, Json<ErrorResponse>)> {
    let repository = TasksRepository::new(service.event_log.clone());
    let limit = params.get("limit").and_then(|s| s.parse().ok());

    match repository.list(limit).await {
        Ok(tasks) => {
            let task_summaries: Vec<TaskSummary> = tasks.into_iter().map(|task| {
                TaskSummary {
                    job_run_id: task.job_run_id,
                    job_label: task.job_label,
                    build_request_id: task.build_request_id,
                    status: format!("{:?}", task.status),
                    target_partitions: task.target_partitions.into_iter().map(|p| p.str).collect(),
                    scheduled_at: task.scheduled_at,
                    started_at: task.started_at,
                    completed_at: task.completed_at,
                    duration_ms: task.duration_ms,
                    cancelled: task.cancelled,
                    message: task.message,
                }
            }).collect();

            let total_count = task_summaries.len() as u32;
            Ok(Json(TasksListResponse {
                tasks: task_summaries,
                total_count,
            }))
        }
        Err(e) => {
            error!("Failed to list tasks: {}", e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to list tasks: {}", e),
                }),
            ))
        }
    }
}

/// Request for task detail endpoint
#[derive(Deserialize, JsonSchema)]
pub struct TaskDetailRequest {
    pub job_run_id: String,
}

/// Get detailed task information
pub async fn get_task_detail(
    State(service): State<ServiceState>,
    Path(TaskDetailRequest { job_run_id }): Path<TaskDetailRequest>,
) -> Result<Json<TaskDetailResponse>, (StatusCode, Json<ErrorResponse>)> {
    let repository = TasksRepository::new(service.event_log.clone());

    match repository.show(&job_run_id).await {
        Ok(Some((info, timeline))) => {
            let timeline_events: Vec<TaskTimelineEvent> = timeline.into_iter().map(|event| {
                TaskTimelineEvent {
                    timestamp: event.timestamp,
                    status: event.status.map(|s| format!("{:?}", s)),
                    message: event.message,
                    event_type: event.event_type,
                    cancel_reason: event.cancel_reason,
                }
            }).collect();

            Ok(Json(TaskDetailResponse {
                job_run_id: info.job_run_id,
                job_label: info.job_label,
                build_request_id: info.build_request_id,
                status: format!("{:?}", info.status),
                target_partitions: info.target_partitions.into_iter().map(|p| p.str).collect(),
                scheduled_at: info.scheduled_at,
                started_at: info.started_at,
                completed_at: info.completed_at,
                duration_ms: info.duration_ms,
                cancelled: info.cancelled,
                cancel_reason: info.cancel_reason,
                message: info.message,
                timeline: timeline_events,
            }))
        }
        Ok(None) => Err((
            StatusCode::NOT_FOUND,
            Json(ErrorResponse {
                error: format!("Task '{}' not found", job_run_id),
            }),
        )),
        Err(e) => {
            error!("Failed to get task detail: {}", e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to get task detail: {}", e),
                }),
            ))
        }
    }
}

/// Cancel a task
#[derive(Deserialize, JsonSchema)]
pub struct CancelTaskRequest {
    pub reason: String,
    pub build_request_id: String,
}

/// Request for task cancel endpoint path
#[derive(Deserialize, JsonSchema)]
pub struct TaskCancelPathRequest {
    pub job_run_id: String,
}

pub async fn cancel_task(
    State(service): State<ServiceState>,
    Path(TaskCancelPathRequest { job_run_id }): Path<TaskCancelPathRequest>,
    Json(request): Json<CancelTaskRequest>,
) -> Result<Json<serde_json::Value>, (StatusCode, Json<ErrorResponse>)> {
    let repository = TasksRepository::new(service.event_log.clone());

    match repository.cancel(&job_run_id, request.reason.clone(), request.build_request_id).await {
        Ok(()) => Ok(Json(serde_json::json!({
            "cancelled": true,
            "job_run_id": job_run_id,
            "reason": request.reason
        }))),
        Err(e) => {
            error!("Failed to cancel task: {}", e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to cancel task: {}", e),
                }),
            ))
        }
    }
}

/// List builds using repository
pub async fn list_builds_repository(
    State(service): State<ServiceState>,
    Query(params): Query<HashMap<String, String>>,
) -> Result<Json<BuildsRepositoryListResponse>, (StatusCode, Json<ErrorResponse>)> {
    let repository = BuildsRepository::new(service.event_log.clone());
    let limit = params.get("limit").and_then(|s| s.parse().ok());

    match repository.list(limit).await {
        Ok(builds) => {
            let build_summaries: Vec<BuildRepositorySummary> = builds.into_iter().map(|build| {
                BuildRepositorySummary {
                    build_request_id: build.build_request_id,
                    status: format!("{:?}", build.status),
                    requested_partitions: build.requested_partitions.into_iter().map(|p| p.str).collect(),
                    total_jobs: build.total_jobs,
                    completed_jobs: build.completed_jobs,
                    failed_jobs: build.failed_jobs,
                    cancelled_jobs: build.cancelled_jobs,
                    requested_at: build.requested_at,
                    started_at: build.started_at,
                    completed_at: build.completed_at,
                    duration_ms: build.duration_ms,
                    cancelled: build.cancelled,
                }
            }).collect();

            let total_count = build_summaries.len() as u32;
            Ok(Json(BuildsRepositoryListResponse {
                builds: build_summaries,
                total_count,
            }))
        }
        Err(e) => {
            error!("Failed to list builds: {}", e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to list builds: {}", e),
                }),
            ))
        }
    }
}

/// Request for build detail endpoint
#[derive(Deserialize, JsonSchema)]
pub struct BuildDetailRequest {
    pub build_request_id: String,
}

/// Get detailed build information
pub async fn get_build_detail(
    State(service): State<ServiceState>,
    Path(BuildDetailRequest { build_request_id }): Path<BuildDetailRequest>,
) -> Result<Json<BuildDetailResponse>, (StatusCode, Json<ErrorResponse>)> {
    let repository = BuildsRepository::new(service.event_log.clone());

    match repository.show(&build_request_id).await {
        Ok(Some((info, timeline))) => {
            let timeline_events: Vec<BuildTimelineEvent> = timeline.into_iter().map(|event| {
                BuildTimelineEvent {
                    timestamp: event.timestamp,
                    status: event.status.map(|s| format!("{:?}", s)),
                    message: event.message,
                    event_type: event.event_type,
                    cancel_reason: event.cancel_reason,
                }
            }).collect();

            Ok(Json(BuildDetailResponse {
                build_request_id: info.build_request_id,
                status: format!("{:?}", info.status),
                requested_partitions: info.requested_partitions.into_iter().map(|p| p.str).collect(),
                total_jobs: info.total_jobs,
                completed_jobs: info.completed_jobs,
                failed_jobs: info.failed_jobs,
                cancelled_jobs: info.cancelled_jobs,
                requested_at: info.requested_at,
                started_at: info.started_at,
                completed_at: info.completed_at,
                duration_ms: info.duration_ms,
                cancelled: info.cancelled,
                cancel_reason: info.cancel_reason,
                timeline: timeline_events,
            }))
        }
        Ok(None) => Err((
            StatusCode::NOT_FOUND,
            Json(ErrorResponse {
                error: format!("Build '{}' not found", build_request_id),
            }),
        )),
        Err(e) => {
            error!("Failed to get build detail: {}", e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to get build detail: {}", e),
                }),
            ))
        }
    }
}

/// Request for build cancel endpoint path
#[derive(Deserialize, JsonSchema)]
pub struct BuildCancelPathRequest {
    pub build_request_id: String,
}

/// Cancel a build using repository
pub async fn cancel_build_repository(
    State(service): State<ServiceState>,
    Path(BuildCancelPathRequest { build_request_id }): Path<BuildCancelPathRequest>,
    Json(request): Json<CancelBuildRepositoryRequest>,
) -> Result<Json<serde_json::Value>, (StatusCode, Json<ErrorResponse>)> {
    let repository = BuildsRepository::new(service.event_log.clone());

    match repository.cancel(&build_request_id, request.reason.clone()).await {
        Ok(()) => Ok(Json(serde_json::json!({
            "cancelled": true,
            "build_request_id": build_request_id,
            "reason": request.reason
        }))),
        Err(e) => {
            error!("Failed to cancel build: {}", e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to cancel build: {}", e),
                }),
            ))
        }
    }
}

#[derive(Deserialize, JsonSchema)]
pub struct CancelBuildRepositoryRequest {
    pub reason: String,
}

@ -213,14 +213,20 @@ impl BuildGraphService {
        // Create API router with all routes to generate OpenAPI spec
        let _ = ApiRouter::new()
            .api_route("/api/v1/builds", post(handlers::submit_build_request))
            .api_route("/api/v1/builds", get(handlers::list_build_requests))
            .api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_status))
            .api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_request))
            .api_route("/api/v1/builds", get(handlers::list_builds_repository))
            .api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_detail))
            .api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_repository))
            .api_route("/api/v1/partitions", get(handlers::list_partitions))
            .api_route("/api/v1/partitions/:ref/status", get(handlers::get_partition_status))
            .api_route("/api/v1/partitions/:ref/events", get(handlers::get_partition_events))
            .api_route("/api/v1/jobs", get(handlers::list_jobs))
            .api_route("/api/v1/jobs/:label", get(handlers::get_job_metrics))
            .api_route("/api/v1/partitions/:partition_ref", get(handlers::get_partition_detail))
            .api_route("/api/v1/partitions/:partition_ref/status", get(handlers::get_partition_status))
            .api_route("/api/v1/partitions/:partition_ref/events", get(handlers::get_partition_events))
            .api_route("/api/v1/partitions/:partition_ref/invalidate", post(handlers::invalidate_partition))
            .api_route("/api/v1/jobs", get(handlers::list_jobs_repository))
            .api_route("/api/v1/jobs/:label", get(handlers::get_job_detail))
            .api_route("/api/v1/jobs/:label/metrics", get(handlers::get_job_metrics))
            .api_route("/api/v1/tasks", get(handlers::list_tasks))
            .api_route("/api/v1/tasks/:job_run_id", get(handlers::get_task_detail))
            .api_route("/api/v1/tasks/:job_run_id/cancel", post(handlers::cancel_task))
            .api_route("/api/v1/activity", get(handlers::get_activity_summary))
            .api_route("/api/v1/analyze", post(handlers::analyze_build_graph))
            .finish_api(&mut api);

@ -233,14 +239,20 @@ impl BuildGraphService {

        let api_router = ApiRouter::new()
            .api_route("/api/v1/builds", post(handlers::submit_build_request))
            .api_route("/api/v1/builds", get(handlers::list_build_requests))
            .api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_status))
            .api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_request))
            .api_route("/api/v1/builds", get(handlers::list_builds_repository))
            .api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_detail))
            .api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_repository))
            .api_route("/api/v1/partitions", get(handlers::list_partitions))
            .api_route("/api/v1/partitions/:ref/status", get(handlers::get_partition_status))
            .api_route("/api/v1/partitions/:ref/events", get(handlers::get_partition_events))
            .api_route("/api/v1/jobs", get(handlers::list_jobs))
            .api_route("/api/v1/jobs/:label", get(handlers::get_job_metrics))
            .api_route("/api/v1/partitions/:partition_ref", get(handlers::get_partition_detail))
            .api_route("/api/v1/partitions/:partition_ref/status", get(handlers::get_partition_status))
            .api_route("/api/v1/partitions/:partition_ref/events", get(handlers::get_partition_events))
            .api_route("/api/v1/partitions/:partition_ref/invalidate", post(handlers::invalidate_partition))
            .api_route("/api/v1/jobs", get(handlers::list_jobs_repository))
            .api_route("/api/v1/jobs/:label", get(handlers::get_job_detail))
            .api_route("/api/v1/jobs/:label/metrics", get(handlers::get_job_metrics))
            .api_route("/api/v1/tasks", get(handlers::list_tasks))
            .api_route("/api/v1/tasks/:job_run_id", get(handlers::get_task_detail))
            .api_route("/api/v1/tasks/:job_run_id/cancel", post(handlers::cancel_task))
            .api_route("/api/v1/activity", get(handlers::get_activity_summary))
            .api_route("/api/v1/analyze", post(handlers::analyze_build_graph))
            .route("/api/v1/openapi.json", get(Self::openapi_spec))

@ -371,4 +383,166 @@ impl BuildGraphService {
    }
}

pub type ServiceState = Arc<BuildGraphService>;

// Repository-based response types
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionDetailResponse {
    pub partition_ref: String,
    pub current_status: String,
    pub last_updated: i64,
    pub builds_count: usize,
    pub last_successful_build: Option<String>,
    pub invalidation_count: usize,
    pub timeline: Vec<PartitionTimelineEvent>,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionTimelineEvent {
    pub timestamp: i64,
    pub status: String,
    pub message: String,
    pub build_request_id: String,
    pub job_run_id: Option<String>,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobsRepositoryListResponse {
    pub jobs: Vec<JobRepositorySummary>,
    pub total_count: u32,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobRepositorySummary {
    pub job_label: String,
    pub total_runs: usize,
    pub successful_runs: usize,
    pub failed_runs: usize,
    pub cancelled_runs: usize,
    pub average_partitions_per_run: f64,
    pub last_run_timestamp: i64,
    pub last_run_status: String,
    pub recent_builds: Vec<String>,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobDetailResponse {
    pub job_label: String,
    pub total_runs: usize,
    pub successful_runs: usize,
    pub failed_runs: usize,
    pub cancelled_runs: usize,
    pub average_partitions_per_run: f64,
    pub last_run_timestamp: i64,
    pub last_run_status: String,
    pub recent_builds: Vec<String>,
    pub runs: Vec<JobRunDetail>,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobRunDetail {
    pub job_run_id: String,
    pub build_request_id: String,
    pub target_partitions: Vec<String>,
    pub status: String,
    pub started_at: Option<i64>,
    pub completed_at: Option<i64>,
    pub duration_ms: Option<i64>,
    pub message: String,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TasksListResponse {
    pub tasks: Vec<TaskSummary>,
    pub total_count: u32,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TaskSummary {
    pub job_run_id: String,
    pub job_label: String,
    pub build_request_id: String,
    pub status: String,
    pub target_partitions: Vec<String>,
    pub scheduled_at: i64,
    pub started_at: Option<i64>,
    pub completed_at: Option<i64>,
    pub duration_ms: Option<i64>,
    pub cancelled: bool,
    pub message: String,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TaskDetailResponse {
    pub job_run_id: String,
    pub job_label: String,
    pub build_request_id: String,
    pub status: String,
    pub target_partitions: Vec<String>,
    pub scheduled_at: i64,
    pub started_at: Option<i64>,
    pub completed_at: Option<i64>,
    pub duration_ms: Option<i64>,
    pub cancelled: bool,
    pub cancel_reason: Option<String>,
    pub message: String,
    pub timeline: Vec<TaskTimelineEvent>,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TaskTimelineEvent {
    pub timestamp: i64,
    pub status: Option<String>,
    pub message: String,
    pub event_type: String,
    pub cancel_reason: Option<String>,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildsRepositoryListResponse {
    pub builds: Vec<BuildRepositorySummary>,
    pub total_count: u32,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildRepositorySummary {
    pub build_request_id: String,
    pub status: String,
    pub requested_partitions: Vec<String>,
    pub total_jobs: usize,
    pub completed_jobs: usize,
    pub failed_jobs: usize,
    pub cancelled_jobs: usize,
    pub requested_at: i64,
    pub started_at: Option<i64>,
    pub completed_at: Option<i64>,
    pub duration_ms: Option<i64>,
    pub cancelled: bool,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildDetailResponse {
    pub build_request_id: String,
    pub status: String,
    pub requested_partitions: Vec<String>,
    pub total_jobs: usize,
    pub completed_jobs: usize,
    pub failed_jobs: usize,
    pub cancelled_jobs: usize,
    pub requested_at: i64,
    pub started_at: Option<i64>,
    pub completed_at: Option<i64>,
    pub duration_ms: Option<i64>,
    pub cancelled: bool,
    pub cancel_reason: Option<String>,
    pub timeline: Vec<BuildTimelineEvent>,
}

#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildTimelineEvent {
    pub timestamp: i64,
    pub status: Option<String>,
    pub message: String,
    pub event_type: String,
    pub cancel_reason: Option<String>,
}

@ -15,6 +15,14 @@
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.14.0/MODULE.bazel": "2b31ffcc9bdc8295b2167e07a757dbbc9ac8906e7028e5170a3708cecaac119f",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.14.0/source.json": "0cf1826853b0bef8b5cd19c0610d717500f5521aa2b38b72b2ec302ac5e7526c",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.7.2/MODULE.bazel": "780d1a6522b28f5edb7ea09630748720721dfe27690d65a2d33aa7509de77e07",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.7.7/MODULE.bazel": "491f8681205e31bb57892d67442ce448cda4f472a8e6b3dc062865e29a64f89c",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.9.3/MODULE.bazel": "66baf724dbae7aff4787bf2245cc188d50cb08e07789769730151c0943587c14",
"https://bcr.bazel.build/modules/aspect_rules_esbuild/0.21.0/MODULE.bazel": "77dc393c43ad79398b05865444c5200c6f1aae6765615544f2c7730b5858d533",
"https://bcr.bazel.build/modules/aspect_rules_esbuild/0.21.0/source.json": "062b1d3dba8adcfeb28fe60c185647f5a53ec0487ffe93cf0ae91566596e4b49",
"https://bcr.bazel.build/modules/aspect_rules_js/2.0.0/MODULE.bazel": "b45b507574aa60a92796e3e13c195cd5744b3b8aff516a9c0cb5ae6a048161c5",
"https://bcr.bazel.build/modules/aspect_rules_js/2.0.0/source.json": "a6b09288ab135225982a58ac0b5e2c032c331d88f80553d86596000e894e86b3",
"https://bcr.bazel.build/modules/aspect_rules_ts/3.6.3/MODULE.bazel": "d09db394970f076176ce7bab5b5fa7f0d560fd4f30b8432ea5e2c2570505b130",
"https://bcr.bazel.build/modules/aspect_rules_ts/3.6.3/source.json": "641e58c62e5090d52a0d3538451893acdb2d79a36e8b3d1d30a013c580bc2058",
"https://bcr.bazel.build/modules/bazel_features/1.1.1/MODULE.bazel": "27b8c79ef57efe08efccbd9dd6ef70d61b4798320b8d3c134fd571f78963dbcd",
"https://bcr.bazel.build/modules/bazel_features/1.10.0/MODULE.bazel": "f75e8807570484a99be90abcd52b5e1f390362c258bcb73106f4544957a48101",
"https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8",

@ -39,7 +47,8 @@
"https://bcr.bazel.build/modules/bazel_skylib/1.6.1/MODULE.bazel": "8fdee2dbaace6c252131c00e1de4b165dc65af02ea278476187765e1a617b917",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.0/MODULE.bazel": "0db596f4563de7938de764cc8deeabec291f55e8ec15299718b93c4423e9796d",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.1/source.json": "f121b43eeefc7c29efbd51b83d08631e2347297c95aac9764a701f2a6a2bb953",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.1/MODULE.bazel": "88ade7293becda963e0e3ea33e7d54d3425127e0a326e0d17da085a5f1f03ff6",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.1/source.json": "7ebaefba0b03efe59cac88ed5bbc67bcf59a3eff33af937345ede2a38b2d368a",
"https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84",
"https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8",
"https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb",

@ -114,6 +123,8 @@
"https://bcr.bazel.build/modules/rules_license/0.0.7/MODULE.bazel": "088fbeb0b6a419005b89cf93fe62d9517c0a2b8bb56af3244af65ecfe37e7d5d",
"https://bcr.bazel.build/modules/rules_license/1.0.0/MODULE.bazel": "a7fda60eefdf3d8c827262ba499957e4df06f659330bbe6cdbdb975b768bb65c",
"https://bcr.bazel.build/modules/rules_license/1.0.0/source.json": "a52c89e54cc311196e478f8382df91c15f7a2bfdf4c6cd0e2675cc2ff0b56efb",
"https://bcr.bazel.build/modules/rules_nodejs/6.2.0/MODULE.bazel": "ec27907f55eb34705adb4e8257952162a2d4c3ed0f0b3b4c3c1aad1fac7be35e",
"https://bcr.bazel.build/modules/rules_nodejs/6.2.0/source.json": "a77c307175a82982f0847fd6a8660db5b21440d8a9d073642cb4afa7a18612ff",
"https://bcr.bazel.build/modules/rules_oci/2.2.6/MODULE.bazel": "2ba6ddd679269e00aeffe9ca04faa2d0ca4129650982c9246d0d459fe2da47d9",
"https://bcr.bazel.build/modules/rules_oci/2.2.6/source.json": "94e7decb8f95d9465b0bbea71c65064cd16083be1350c7468f131818641dc4a5",
"https://bcr.bazel.build/modules/rules_pkg/0.7.0/MODULE.bazel": "df99f03fc7934a4737122518bb87e667e62d780b610910f0447665a7e2be62dc",

@ -121,6 +132,7 @@
"https://bcr.bazel.build/modules/rules_pkg/1.0.1/source.json": "bd82e5d7b9ce2d31e380dd9f50c111d678c3bdaca190cb76b0e1c71b05e1ba8a",
"https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06",
"https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7",
"https://bcr.bazel.build/modules/rules_proto/6.0.0/MODULE.bazel": "b531d7f09f58dce456cd61b4579ce8c86b38544da75184eadaf0a7cb7966453f",
"https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73",
"https://bcr.bazel.build/modules/rules_proto/7.0.2/MODULE.bazel": "bf81793bd6d2ad89a37a40693e56c61b0ee30f7a7fdbaf3eabbf5f39de47dea2",
"https://bcr.bazel.build/modules/rules_proto/7.0.2/source.json": "1e5e7260ae32ef4f2b52fd1d0de8d03b606a44c91b694d2f1afb1d3b28a48ce1",

@ -8,18 +8,19 @@ These core capabilities should be factored into explicit read vs write capabilit
# Plan
We should take a phased approach to executing this plan. After implementing the core functionality and unit tests for each phase, we should pause and write down any potential refactoring that would benefit the system before moving on to the next phase.

## Phase 1 - Implement `MockBuildEventLog`
## Phase 1 - Implement Common Event Write Component
Goal: create a single interface for writing events to the build event log.
- Should include all existing "write" functionality, like requesting a new build, etc.
- Migrate CLI to use new write component
- Migrate service to use new write component (a rough interface sketch follows below)

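A minimal sketch of what that single write interface could look like, assuming an `async_trait`-style trait object; every name in it (`BuildEventWriter`, `BuildEvent`, `EventLogError`, `request_build`) and the `async_trait` dependency are illustrative placeholders rather than the existing databuild API.

```rust
// Sketch only: placeholder types for the planned write-side interface.
use std::sync::Arc;

#[derive(Debug, Clone)]
pub struct BuildEvent {
    pub build_request_id: String,
    pub event_type: String,
    pub payload: serde_json::Value,
}

#[derive(Debug, thiserror::Error)]
pub enum EventLogError {
    #[error("failed to append event: {0}")]
    Append(String),
}

/// One write-side interface shared by the CLI and the service, so "request a new
/// build", "record job completion", etc. all funnel through the same code path.
#[async_trait::async_trait]
pub trait BuildEventWriter: Send + Sync {
    /// Append a single event to the build event log.
    async fn append(&self, event: BuildEvent) -> Result<(), EventLogError>;

    /// Convenience wrapper: emit the "build requested" event and return its ID.
    async fn request_build(&self, partitions: &[String]) -> Result<String, EventLogError>;
}

/// Callers hold a trait object, keeping the backing store (stdout, sqlite, postgres) pluggable.
pub type SharedEventWriter = Arc<dyn BuildEventWriter>;
```
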
## Phase 2 - Implement `MockBuildEventLog`
Goal: create a common testing tool that allows easy specification of testing conditions (e.g. BEL contents/events) to test system/graph behavior.
- Should use an in-memory sqlite database to ensure tests can be run in parallel
- Should make it very easy to specify test data (e.g. event constructors with random defaults that can be overwritten)
- Should include a trivial unit test that writes a valid event and verifies it is present via real code paths (see the sketch below)

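A rough sketch of such a mock is below; it assumes a `rusqlite` in-memory connection is acceptable for tests, and the table layout and `TestEvent` constructor are illustrative only, not the real event schema.

```rust
// Sketch only: placeholder mock backed by in-memory SQLite so tests stay isolated.
use rusqlite::Connection;
use uuid::Uuid;

pub struct MockBuildEventLog {
    conn: Connection,
}

/// Test-data constructor with random defaults that individual tests overwrite.
pub struct TestEvent {
    pub build_request_id: String,
    pub event_type: String,
    pub message: String,
}

impl Default for TestEvent {
    fn default() -> Self {
        Self {
            build_request_id: Uuid::new_v4().to_string(),
            event_type: "build_requested".to_string(),
            message: String::new(),
        }
    }
}

impl MockBuildEventLog {
    pub fn new() -> rusqlite::Result<Self> {
        // Each test gets its own in-memory database, so tests can run in parallel.
        let conn = Connection::open_in_memory()?;
        conn.execute(
            "CREATE TABLE events (build_request_id TEXT, event_type TEXT, message TEXT)",
            [],
        )?;
        Ok(Self { conn })
    }

    pub fn write(&self, event: &TestEvent) -> rusqlite::Result<()> {
        self.conn.execute(
            "INSERT INTO events VALUES (?1, ?2, ?3)",
            (&event.build_request_id, &event.event_type, &event.message),
        )?;
        Ok(())
    }

    pub fn count(&self) -> rusqlite::Result<i64> {
        self.conn
            .query_row("SELECT COUNT(*) FROM events", [], |row| row.get(0))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn writes_one_event_and_reads_it_back() {
        let log = MockBuildEventLog::new().unwrap();
        log.write(&TestEvent::default()).unwrap();
        assert_eq!(log.count().unwrap(), 1);
    }
}
```
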
## Phase 2 - Implement Common Event Write Component
Goal: create a single interface for writing events to the build event log.
- Should include all existing "write" functionality, like requesting a new build, etc.
- Migrate CLI to use new write component
- TODO - what's the execution model? Does it write the event, then start executing based on the ID? Does it start a service? And what does tailing builds look like?
- Migrate service to use new write component
- Design notes: shouldn't rewrite event write or repository read code; should focus on making test cases easy to describe, so that assertions can be made against repository-based queries.
- Event write and repositories should be pluggable, allowing the MockBuildEventLog to be provided per test in a way consistent with how BEL backing databases are specified normally.

## Phase 3 - Implement `partitions` Repository
- Create a new build event log event for partition invalidation (with reason field); a possible shape is sketched below

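One possible shape for that event, mirroring the `reason` and `build_request_id` fields already carried by `InvalidatePartitionRequest` in the service handlers; the struct and field names here are illustrative, not the final schema.

```rust
// Illustrative only: field names mirror the existing invalidate_partition handler inputs.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct PartitionInvalidatedEvent {
    /// Partition being invalidated.
    pub partition_ref: String,
    /// Operator-supplied reason, surfaced later in the partition timeline.
    pub reason: String,
    /// Build request that performed the invalidation.
    pub build_request_id: String,
    /// Event time in epoch milliseconds, matching the other timeline events.
    pub timestamp: i64,
}
```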