Code/name Service and CLI updated

Stuart Axelbrooke 2025-07-20 16:20:34 -07:00
parent 894bbc35bd
commit bf2678c992
6 changed files with 191 additions and 195 deletions

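Every file in this commit follows the same pattern: the CLI and HTTP handlers stop consuming the tuple-returning `show()` repository methods and instead use `show_protobuf()` detail structs, and the single `{:?}`-formatted `status` string becomes a paired `status_code` / `status_name`. A minimal sketch of that pairing, using hypothetical enum and struct names rather than the real databuild types:

```rust
// Sketch only: `BuildStatus` and `BuildDetail` are stand-ins for the generated
// service types; the real code derives these fields inside `show_protobuf()`.
#[derive(Clone, Copy)]
enum BuildStatus {
    Pending,
    Running,
    Succeeded,
    Failed,
}

impl BuildStatus {
    // Stable display name, analogous to `to_display_string()` in the diffs below.
    fn to_display_string(&self) -> String {
        match self {
            BuildStatus::Pending => "PENDING".to_string(),
            BuildStatus::Running => "RUNNING".to_string(),
            BuildStatus::Succeeded => "SUCCEEDED".to_string(),
            BuildStatus::Failed => "FAILED".to_string(),
        }
    }
}

// Detail struct shaped like the new `*DetailResponse` types: code and name travel together.
struct BuildDetail {
    build_request_id: String,
    status_code: i32,
    status_name: String,
}

fn main() {
    let status = BuildStatus::Running;
    let detail = BuildDetail {
        build_request_id: "build-123".to_string(), // made-up id for illustration
        status_code: status as i32,
        status_name: status.to_display_string(),
    };
    // Mirrors the new CLI output, e.g. "Status: RUNNING (1)".
    println!("Build {}: {} ({})", detail.build_request_id, detail.status_name, detail.status_code);
}
```

Carrying both fields lets clients branch on the stable numeric code while the CLI and web UI print the display name as-is, instead of relying on Rust's `{:?}` enum formatting.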
View file

@@ -2,10 +2,10 @@ use databuild::*;
use databuild::event_log::create_build_event_log;
use databuild::orchestration::{BuildOrchestrator, BuildResult};
use databuild::repositories::{
partitions::{PartitionsRepository, PartitionInfo, PartitionStatusEvent},
jobs::{JobsRepository, JobInfo, JobRunDetail},
tasks::{TasksRepository, TaskInfo, TaskEvent},
builds::{BuildsRepository, BuildInfo, BuildEvent as BuildRepositoryEvent}
partitions::PartitionsRepository,
jobs::JobsRepository,
tasks::TasksRepository,
builds::BuildsRepository
};
use clap::{Arg, Command as ClapCommand, ArgMatches};
use log::info;
@@ -453,39 +453,33 @@ async fn handle_partitions_command(matches: &ArgMatches, event_log_uri: &str) ->
Some(("show", sub_matches)) => {
let partition_ref = sub_matches.get_one::<String>("partition_ref").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show(partition_ref).await
let result = repository.show_protobuf(partition_ref).await
.map_err(|e| CliError::Database(format!("Failed to show partition: {}", e)))?;
match result {
Some((info, timeline)) => {
Some(detail) => {
match format {
"json" => {
#[derive(serde::Serialize)]
struct PartitionDetail {
info: PartitionInfo,
timeline: Vec<PartitionStatusEvent>,
}
let detail = PartitionDetail { info, timeline };
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Partition: {}", info.partition_ref);
println!("Status: {:?}", info.current_status);
println!("Builds involved: {}", info.builds_count);
println!("Invalidation count: {}", info.invalidation_count);
println!("Last updated: {}", format_timestamp(info.last_updated));
println!("Partition: {}", detail.partition_ref);
println!("Status: {} ({})", detail.status_name, detail.status_code);
println!("Builds involved: {}", detail.builds_count);
println!("Invalidation count: {}", detail.invalidation_count);
println!("Last updated: {}", format_timestamp(detail.last_updated));
if let Some(ref last_build) = info.last_successful_build {
if let Some(ref last_build) = detail.last_successful_build {
println!("\nLast successful build: {}", last_build);
}
if !timeline.is_empty() {
println!("\nTimeline ({} events):", timeline.len());
for event in timeline {
if !detail.timeline.is_empty() {
println!("\nTimeline ({} events):", detail.timeline.len());
for event in detail.timeline {
let timestamp = format_timestamp(event.timestamp);
println!(" {} [{:?}] {}", timestamp, event.status, event.message);
println!(" {} [{}] {}", timestamp, event.status_name, event.message);
if event.message.starts_with("Invalidated:") {
// Invalidation reason is in the message
}
@@ -582,46 +576,40 @@ async fn handle_jobs_command(matches: &ArgMatches, event_log_uri: &str) -> Resul
Some(("show", sub_matches)) => {
let job_label = sub_matches.get_one::<String>("job_label").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show(job_label).await
let result = repository.show_protobuf(job_label).await
.map_err(|e| CliError::Database(format!("Failed to show job: {}", e)))?;
match result {
Some((info, runs)) => {
Some(detail) => {
match format {
"json" => {
#[derive(serde::Serialize)]
struct JobDetail {
info: JobInfo,
runs: Vec<JobRunDetail>,
}
let detail = JobDetail { info, runs };
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Job: {}", info.job_label);
println!("Total runs: {}", info.total_runs);
println!("Successful runs: {} ({:.1}%)", info.successful_runs,
if info.total_runs > 0 { info.successful_runs as f64 / info.total_runs as f64 * 100.0 } else { 0.0 });
println!("Failed runs: {}", info.failed_runs);
println!("Cancelled runs: {}", info.cancelled_runs);
println!("Average partitions per run: {:.1}", info.average_partitions_per_run);
println!("Last run: {} ({:?})", format_timestamp(info.last_run_timestamp), info.last_run_status);
println!("Job: {}", detail.job_label);
println!("Total runs: {}", detail.total_runs);
println!("Successful runs: {} ({:.1}%)", detail.successful_runs,
if detail.total_runs > 0 { detail.successful_runs as f64 / detail.total_runs as f64 * 100.0 } else { 0.0 });
println!("Failed runs: {}", detail.failed_runs);
println!("Cancelled runs: {}", detail.cancelled_runs);
println!("Average partitions per run: {:.1}", detail.average_partitions_per_run);
println!("Last run: {} ({} - {})", format_timestamp(detail.last_run_timestamp), detail.last_run_status_name, detail.last_run_status_code);
if !info.recent_builds.is_empty() {
if !detail.recent_builds.is_empty() {
println!("\nRecent builds:");
for build_id in &info.recent_builds {
for build_id in &detail.recent_builds {
println!(" - {}", build_id);
}
}
if !runs.is_empty() {
println!("\nExecution history ({} runs):", runs.len());
if !detail.runs.is_empty() {
println!("\nExecution history ({} runs):", detail.runs.len());
println!("{:<25} {:<15} {:<15} {:<10} {:<30}", "Run ID", "Status", "Duration", "Parts", "Build Request");
println!("{}", "-".repeat(95));
for run in runs.iter().take(10) { // Show last 10 runs
for run in detail.runs.iter().take(10) { // Show last 10 runs
let duration_str = if let Some(duration) = run.duration_ms {
if duration > 1000 {
format!("{:.1}s", duration as f64 / 1000.0)
@@ -634,15 +622,15 @@ async fn handle_jobs_command(matches: &ArgMatches, event_log_uri: &str) -> Resul
println!("{:<25} {:<15} {:<15} {:<10} {:<30}",
run.job_run_id,
format!("{:?}", run.status),
run.status_name,
duration_str,
run.target_partitions.len(),
run.build_request_id
);
}
if runs.len() > 10 {
println!("... and {} more runs", runs.len() - 10);
if detail.runs.len() > 10 {
println!("... and {} more runs", detail.runs.len() - 10);
}
}
}
@@ -733,40 +721,34 @@ async fn handle_tasks_command(matches: &ArgMatches, event_log_uri: &str) -> Resu
Some(("show", sub_matches)) => {
let job_run_id = sub_matches.get_one::<String>("job_run_id").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show(job_run_id).await
let result = repository.show_protobuf(job_run_id).await
.map_err(|e| CliError::Database(format!("Failed to show task: {}", e)))?;
match result {
Some((info, timeline)) => {
Some(detail) => {
match format {
"json" => {
#[derive(serde::Serialize)]
struct TaskDetail {
info: TaskInfo,
timeline: Vec<TaskEvent>,
}
let detail = TaskDetail { info, timeline };
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Task: {}", info.job_run_id);
println!("Job: {}", info.job_label);
println!("Build request: {}", info.build_request_id);
println!("Status: {:?}", info.status);
println!("Target partitions: {}", info.target_partitions.len());
println!("Scheduled: {}", format_timestamp(info.scheduled_at));
println!("Task: {}", detail.job_run_id);
println!("Job: {}", detail.job_label);
println!("Build request: {}", detail.build_request_id);
println!("Status: {} ({})", detail.status_name, detail.status_code);
println!("Target partitions: {}", detail.target_partitions.len());
println!("Scheduled: {}", format_timestamp(detail.scheduled_at));
if let Some(started) = info.started_at {
if let Some(started) = detail.started_at {
println!("Started: {}", format_timestamp(started));
}
if let Some(completed) = info.completed_at {
if let Some(completed) = detail.completed_at {
println!("Completed: {}", format_timestamp(completed));
}
if let Some(duration) = info.duration_ms {
if let Some(duration) = detail.duration_ms {
if duration > 1000 {
println!("Duration: {:.1}s", duration as f64 / 1000.0);
} else {
@@ -774,30 +756,30 @@ async fn handle_tasks_command(matches: &ArgMatches, event_log_uri: &str) -> Resu
}
}
if info.cancelled {
if detail.cancelled {
println!("Cancelled: Yes");
if let Some(ref reason) = info.cancel_reason {
if let Some(ref reason) = detail.cancel_reason {
println!("Cancel reason: {}", reason);
}
}
if !info.message.is_empty() {
println!("Message: {}", info.message);
if !detail.message.is_empty() {
println!("Message: {}", detail.message);
}
if !info.target_partitions.is_empty() {
if !detail.target_partitions.is_empty() {
println!("\nTarget partitions:");
for partition in &info.target_partitions {
println!(" - {}", partition.str);
for partition in &detail.target_partitions {
println!(" - {}", partition);
}
}
if !timeline.is_empty() {
println!("\nTimeline ({} events):", timeline.len());
for event in timeline {
if !detail.timeline.is_empty() {
println!("\nTimeline ({} events):", detail.timeline.len());
for event in detail.timeline {
let timestamp = format_timestamp(event.timestamp);
let status_info = if let Some(status) = event.status {
format!(" -> {:?}", status)
let status_info = if let Some(ref status_name) = event.status_name {
format!(" -> {}", status_name)
} else {
String::new()
};
@@ -915,42 +897,36 @@ async fn handle_builds_command(matches: &ArgMatches, event_log_uri: &str) -> Res
Some(("show", sub_matches)) => {
let build_request_id = sub_matches.get_one::<String>("build_request_id").unwrap();
let format = sub_matches.get_one::<String>("format").map(|s| s.as_str()).unwrap_or("table");
let result = repository.show(build_request_id).await
let result = repository.show_protobuf(build_request_id).await
.map_err(|e| CliError::Database(format!("Failed to show build: {}", e)))?;
match result {
Some((info, timeline)) => {
Some(detail) => {
match format {
"json" => {
#[derive(serde::Serialize)]
struct BuildDetail {
info: BuildInfo,
timeline: Vec<BuildRepositoryEvent>,
}
let detail = BuildDetail { info, timeline };
let json = serde_json::to_string_pretty(&detail)
.map_err(|e| CliError::Output(format!("Failed to serialize to JSON: {}", e)))?;
println!("{}", json);
}
_ => {
println!("Build: {}", info.build_request_id);
println!("Status: {:?}", info.status);
println!("Requested partitions: {}", info.requested_partitions.len());
println!("Total jobs: {}", info.total_jobs);
println!("Completed jobs: {}", info.completed_jobs);
println!("Failed jobs: {}", info.failed_jobs);
println!("Cancelled jobs: {}", info.cancelled_jobs);
println!("Requested: {}", format_timestamp(info.requested_at));
println!("Build: {}", detail.build_request_id);
println!("Status: {} ({})", detail.status_name, detail.status_code);
println!("Requested partitions: {}", detail.requested_partitions.len());
println!("Total jobs: {}", detail.total_jobs);
println!("Completed jobs: {}", detail.completed_jobs);
println!("Failed jobs: {}", detail.failed_jobs);
println!("Cancelled jobs: {}", detail.cancelled_jobs);
println!("Requested: {}", format_timestamp(detail.requested_at));
if let Some(started) = info.started_at {
if let Some(started) = detail.started_at {
println!("Started: {}", format_timestamp(started));
}
if let Some(completed) = info.completed_at {
if let Some(completed) = detail.completed_at {
println!("Completed: {}", format_timestamp(completed));
}
if let Some(duration) = info.duration_ms {
if let Some(duration) = detail.duration_ms {
if duration > 60000 {
println!("Duration: {:.1}m", duration as f64 / 60000.0);
} else if duration > 1000 {
@@ -960,40 +936,40 @@ async fn handle_builds_command(matches: &ArgMatches, event_log_uri: &str) -> Res
}
}
if info.cancelled {
if detail.cancelled {
println!("Cancelled: Yes");
if let Some(ref reason) = info.cancel_reason {
if let Some(ref reason) = detail.cancel_reason {
println!("Cancel reason: {}", reason);
}
}
if !info.requested_partitions.is_empty() {
if !detail.requested_partitions.is_empty() {
println!("\nRequested partitions:");
for partition in &info.requested_partitions {
println!(" - {}", partition.str);
for partition in &detail.requested_partitions {
println!(" - {}", partition);
}
}
// Show job statistics
if info.total_jobs > 0 {
let success_rate = (info.completed_jobs as f64 / info.total_jobs as f64 * 100.0) as u32;
if detail.total_jobs > 0 {
let success_rate = (detail.completed_jobs as f64 / detail.total_jobs as f64 * 100.0) as u32;
println!("\nJob statistics:");
println!(" Success rate: {}% ({}/{})", success_rate, info.completed_jobs, info.total_jobs);
println!(" Success rate: {}% ({}/{})", success_rate, detail.completed_jobs, detail.total_jobs);
if info.failed_jobs > 0 {
println!(" Failed: {}", info.failed_jobs);
if detail.failed_jobs > 0 {
println!(" Failed: {}", detail.failed_jobs);
}
if info.cancelled_jobs > 0 {
println!(" Cancelled: {}", info.cancelled_jobs);
if detail.cancelled_jobs > 0 {
println!(" Cancelled: {}", detail.cancelled_jobs);
}
}
if !timeline.is_empty() {
println!("\nTimeline ({} events):", timeline.len());
for event in timeline {
if !detail.timeline.is_empty() {
println!("\nTimeline ({} events):", detail.timeline.len());
for event in detail.timeline {
let timestamp = format_timestamp(event.timestamp);
let status_info = if let Some(status) = event.status {
format!(" -> {:?}", status)
let status_info = if let Some(ref status_name) = event.status_name {
format!(" -> {}", status_name)
} else {
String::new()
};

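The `Duration` columns in the `show` output above are formatted from milliseconds with thresholds that are only partly visible in these hunks. A sketch of that formatting, assuming the cutoffs the builds path shows (minutes above 60 s, seconds above 1 s) and a plain-millisecond fallback, which is an assumption since the final else branch is cut off in the hunk:

```rust
// Assumed thresholds: the builds `show` path in this diff uses 60_000 and 1_000;
// the "{}ms" fallback is a guess for the truncated else branch.
fn format_duration(duration_ms: i64) -> String {
    if duration_ms > 60_000 {
        format!("{:.1}m", duration_ms as f64 / 60_000.0)
    } else if duration_ms > 1_000 {
        format!("{:.1}s", duration_ms as f64 / 1_000.0)
    } else {
        format!("{}ms", duration_ms)
    }
}

fn main() {
    assert_eq!(format_duration(500), "500ms");
    assert_eq!(format_duration(2_500), "2.5s");
    assert_eq!(format_duration(90_000), "1.5m");
    println!("Duration: {}", format_duration(90_000));
}
```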
View file

@@ -1,5 +1,6 @@
use crate::*;
use crate::event_log::{BuildEventLog, BuildEventLogError, Result};
use crate::service::{BuildDetailResponse, BuildTimelineEvent as ServiceBuildTimelineEvent};
use std::sync::Arc;
use std::collections::HashMap;
use serde::Serialize;
@@ -283,9 +284,9 @@ impl BuildsRepository {
// Get build info and timeline using existing show method
if let Some((build_info, timeline)) = self.show(build_request_id).await? {
// Convert timeline events to protobuf format
let protobuf_timeline: Vec<BuildTimelineEvent> = timeline
let protobuf_timeline: Vec<ServiceBuildTimelineEvent> = timeline
.into_iter()
.map(|event| BuildTimelineEvent {
.map(|event| ServiceBuildTimelineEvent {
timestamp: event.timestamp,
status_code: event.status.map(|s| s as i32),
status_name: event.status.map(|s| s.to_display_string()),
@@ -299,7 +300,7 @@ impl BuildsRepository {
build_request_id: build_info.build_request_id,
status_code: build_info.status as i32,
status_name: build_info.status.to_display_string(),
requested_partitions: build_info.requested_partitions,
requested_partitions: build_info.requested_partitions.into_iter().map(|p| p.str).collect(),
total_jobs: build_info.total_jobs as u32,
completed_jobs: build_info.completed_jobs as u32,
failed_jobs: build_info.failed_jobs as u32,

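The repository-side `show_protobuf()` methods all follow the same shape: reuse the existing `show()` result, then map the internal status enum to a `(status_code, status_name)` pair, going through `Option` for timeline events whose status may be absent. A reduced sketch with stand-in types (the real targets are the generated service structs imported at the top of each file):

```rust
// Stand-in types: `Status` plays the role of the internal status enums, and
// `ServiceEvent` the role of the service-module timeline event structs.
#[derive(Clone, Copy)]
enum Status {
    Queued,
    Done,
}

impl Status {
    fn to_display_string(&self) -> String {
        match self {
            Status::Queued => "QUEUED".to_string(),
            Status::Done => "DONE".to_string(),
        }
    }
}

struct InternalEvent {
    timestamp: i64,
    status: Option<Status>,
}

struct ServiceEvent {
    timestamp: i64,
    status_code: Option<i32>,
    status_name: Option<String>,
}

// Same mapping the repositories perform: numeric code via `as i32`,
// display name via `to_display_string()`, both through `Option::map`.
fn to_service_events(events: Vec<InternalEvent>) -> Vec<ServiceEvent> {
    events
        .into_iter()
        .map(|event| ServiceEvent {
            timestamp: event.timestamp,
            status_code: event.status.map(|s| s as i32),
            status_name: event.status.map(|s| s.to_display_string()),
        })
        .collect()
}

fn main() {
    let converted = to_service_events(vec![InternalEvent {
        timestamp: 42,
        status: Some(Status::Done),
    }]);
    println!(
        "{} -> {:?} / {:?}",
        converted[0].timestamp, converted[0].status_code, converted[0].status_name
    );
}
```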
View file

@@ -1,5 +1,6 @@
use crate::*;
use crate::event_log::{BuildEventLog, Result};
use crate::service::{JobDetailResponse, JobRunDetail as ServiceJobRunDetail};
use std::sync::Arc;
use std::collections::HashMap;
use serde::Serialize;
@@ -301,12 +302,12 @@ impl JobsRepository {
// Get job info and runs using existing show method
if let Some((job_info, job_runs)) = self.show(job_label).await? {
// Convert job runs to protobuf format
let protobuf_runs: Vec<crate::JobRunDetail> = job_runs
let protobuf_runs: Vec<ServiceJobRunDetail> = job_runs
.into_iter()
.map(|run| crate::JobRunDetail {
.map(|run| ServiceJobRunDetail {
job_run_id: run.job_run_id,
build_request_id: run.build_request_id,
target_partitions: run.target_partitions,
target_partitions: run.target_partitions.into_iter().map(|p| p.str).collect(),
status_code: run.status as i32,
status_name: run.status.to_display_string(),
started_at: run.started_at,

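Besides the status conversion, each repository flattens its partition references to plain strings for the service structs, as in `target_partitions.into_iter().map(|p| p.str).collect()`. A sketch with a hypothetical `PartitionRef` wrapper and made-up partition strings:

```rust
// Hypothetical wrapper; only the `str` field matters for this mapping.
struct PartitionRef {
    str: String,
}

// The flattening repeated in the builds, jobs, and tasks repositories.
fn flatten_refs(refs: Vec<PartitionRef>) -> Vec<String> {
    refs.into_iter().map(|p| p.str).collect()
}

fn main() {
    let refs = vec![
        PartitionRef { str: "events/date=2025-07-19".to_string() }, // example values
        PartitionRef { str: "events/date=2025-07-20".to_string() },
    ];
    println!("{:?}", flatten_refs(refs));
}
```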
View file

@@ -1,5 +1,6 @@
use crate::*;
use crate::event_log::{BuildEventLog, BuildEventLogError, Result};
use crate::service::{TaskDetailResponse, TaskTimelineEvent as ServiceTaskTimelineEvent};
use std::sync::Arc;
use std::collections::HashMap;
use serde::Serialize;
@@ -305,9 +306,9 @@ impl TasksRepository {
// Get task info and timeline using existing show method
if let Some((task_info, timeline)) = self.show(job_run_id).await? {
// Convert timeline events to protobuf format
let protobuf_timeline: Vec<TaskTimelineEvent> = timeline
let protobuf_timeline: Vec<ServiceTaskTimelineEvent> = timeline
.into_iter()
.map(|event| TaskTimelineEvent {
.map(|event| ServiceTaskTimelineEvent {
timestamp: event.timestamp,
status_code: event.status.map(|s| s as i32),
status_name: event.status.map(|s| s.to_display_string()),
@@ -323,7 +324,7 @@ impl TasksRepository {
build_request_id: task_info.build_request_id,
status_code: task_info.status as i32,
status_name: task_info.status.to_display_string(),
target_partitions: task_info.target_partitions,
target_partitions: task_info.target_partitions.into_iter().map(|p| p.str).collect(),
scheduled_at: task_info.scheduled_at,
started_at: task_info.started_at,
completed_at: task_info.completed_at,

View file

@@ -1221,12 +1221,13 @@ pub async fn get_partition_detail(
) -> Result<Json<PartitionDetailResponse>, (StatusCode, Json<ErrorResponse>)> {
let repository = PartitionsRepository::new(service.event_log.clone());
match repository.show(&partition_ref).await {
Ok(Some((info, timeline))) => {
let timeline_events: Vec<PartitionTimelineEvent> = timeline.into_iter().map(|event| {
match repository.show_protobuf(&partition_ref).await {
Ok(Some(protobuf_response)) => {
let timeline_events: Vec<PartitionTimelineEvent> = protobuf_response.timeline.into_iter().map(|event| {
PartitionTimelineEvent {
timestamp: event.timestamp,
status: format!("{:?}", event.status),
status_code: event.status_code,
status_name: event.status_name,
message: event.message,
build_request_id: event.build_request_id,
job_run_id: event.job_run_id,
@@ -1234,12 +1235,13 @@ pub async fn get_partition_detail(
}).collect();
Ok(Json(PartitionDetailResponse {
partition_ref: info.partition_ref,
current_status: format!("{:?}", info.current_status),
last_updated: info.last_updated,
builds_count: info.builds_count,
last_successful_build: info.last_successful_build,
invalidation_count: info.invalidation_count,
partition_ref: protobuf_response.partition_ref,
status_code: protobuf_response.status_code,
status_name: protobuf_response.status_name,
last_updated: protobuf_response.last_updated,
builds_count: protobuf_response.builds_count,
last_successful_build: protobuf_response.last_successful_build,
invalidation_count: protobuf_response.invalidation_count,
timeline: timeline_events,
}))
}
@@ -1355,14 +1357,15 @@ pub async fn get_job_detail(
let job_label = label;
let repository = JobsRepository::new(service.event_log.clone());
match repository.show(&job_label).await {
Ok(Some((info, runs))) => {
let run_summaries: Vec<JobRunDetail> = runs.into_iter().map(|run| {
match repository.show_protobuf(&job_label).await {
Ok(Some(protobuf_response)) => {
let run_summaries: Vec<JobRunDetail> = protobuf_response.runs.into_iter().map(|run| {
JobRunDetail {
job_run_id: run.job_run_id,
build_request_id: run.build_request_id,
target_partitions: run.target_partitions.into_iter().map(|p| p.str).collect(),
status: format!("{:?}", run.status),
target_partitions: run.target_partitions,
status_code: run.status_code,
status_name: run.status_name,
started_at: run.started_at,
completed_at: run.completed_at,
duration_ms: run.duration_ms,
@@ -1371,15 +1374,16 @@ pub async fn get_job_detail(
}).collect();
Ok(Json(JobDetailResponse {
job_label: info.job_label,
total_runs: info.total_runs,
successful_runs: info.successful_runs,
failed_runs: info.failed_runs,
cancelled_runs: info.cancelled_runs,
average_partitions_per_run: info.average_partitions_per_run,
last_run_timestamp: info.last_run_timestamp,
last_run_status: format!("{:?}", info.last_run_status),
recent_builds: info.recent_builds,
job_label: protobuf_response.job_label,
total_runs: protobuf_response.total_runs,
successful_runs: protobuf_response.successful_runs,
failed_runs: protobuf_response.failed_runs,
cancelled_runs: protobuf_response.cancelled_runs,
average_partitions_per_run: protobuf_response.average_partitions_per_run,
last_run_timestamp: protobuf_response.last_run_timestamp,
last_run_status_code: protobuf_response.last_run_status_code,
last_run_status_name: protobuf_response.last_run_status_name,
recent_builds: protobuf_response.recent_builds,
runs: run_summaries,
}))
}
@@ -1458,12 +1462,13 @@ pub async fn get_task_detail(
) -> Result<Json<TaskDetailResponse>, (StatusCode, Json<ErrorResponse>)> {
let repository = TasksRepository::new(service.event_log.clone());
match repository.show(&job_run_id).await {
Ok(Some((info, timeline))) => {
let timeline_events: Vec<TaskTimelineEvent> = timeline.into_iter().map(|event| {
match repository.show_protobuf(&job_run_id).await {
Ok(Some(protobuf_response)) => {
let timeline_events: Vec<TaskTimelineEvent> = protobuf_response.timeline.into_iter().map(|event| {
TaskTimelineEvent {
timestamp: event.timestamp,
status: event.status.map(|s| format!("{:?}", s)),
status_code: event.status_code,
status_name: event.status_name,
message: event.message,
event_type: event.event_type,
cancel_reason: event.cancel_reason,
@@ -1471,18 +1476,19 @@ pub async fn get_task_detail(
}).collect();
Ok(Json(TaskDetailResponse {
job_run_id: info.job_run_id,
job_label: info.job_label,
build_request_id: info.build_request_id,
status: format!("{:?}", info.status),
target_partitions: info.target_partitions.into_iter().map(|p| p.str).collect(),
scheduled_at: info.scheduled_at,
started_at: info.started_at,
completed_at: info.completed_at,
duration_ms: info.duration_ms,
cancelled: info.cancelled,
cancel_reason: info.cancel_reason,
message: info.message,
job_run_id: protobuf_response.job_run_id,
job_label: protobuf_response.job_label,
build_request_id: protobuf_response.build_request_id,
status_code: protobuf_response.status_code,
status_name: protobuf_response.status_name,
target_partitions: protobuf_response.target_partitions,
scheduled_at: protobuf_response.scheduled_at,
started_at: protobuf_response.started_at,
completed_at: protobuf_response.completed_at,
duration_ms: protobuf_response.duration_ms,
cancelled: protobuf_response.cancelled,
cancel_reason: protobuf_response.cancel_reason,
message: protobuf_response.message,
timeline: timeline_events,
}))
}
@@ -1600,12 +1606,14 @@ pub async fn get_build_detail(
) -> Result<Json<BuildDetailResponse>, (StatusCode, Json<ErrorResponse>)> {
let repository = BuildsRepository::new(service.event_log.clone());
match repository.show(&build_request_id).await {
Ok(Some((info, timeline))) => {
let timeline_events: Vec<BuildTimelineEvent> = timeline.into_iter().map(|event| {
match repository.show_protobuf(&build_request_id).await {
Ok(Some(protobuf_response)) => {
// Convert protobuf response to service response (with dual status fields)
let timeline_events: Vec<BuildTimelineEvent> = protobuf_response.timeline.into_iter().map(|event| {
BuildTimelineEvent {
timestamp: event.timestamp,
status: event.status.map(|s| format!("{:?}", s)),
status_code: event.status_code,
status_name: event.status_name,
message: event.message,
event_type: event.event_type,
cancel_reason: event.cancel_reason,
@@ -1613,19 +1621,20 @@ pub async fn get_build_detail(
}).collect();
Ok(Json(BuildDetailResponse {
build_request_id: info.build_request_id,
status: format!("{:?}", info.status),
requested_partitions: info.requested_partitions.into_iter().map(|p| p.str).collect(),
total_jobs: info.total_jobs,
completed_jobs: info.completed_jobs,
failed_jobs: info.failed_jobs,
cancelled_jobs: info.cancelled_jobs,
requested_at: info.requested_at,
started_at: info.started_at,
completed_at: info.completed_at,
duration_ms: info.duration_ms,
cancelled: info.cancelled,
cancel_reason: info.cancel_reason,
build_request_id: protobuf_response.build_request_id,
status_code: protobuf_response.status_code,
status_name: protobuf_response.status_name,
requested_partitions: protobuf_response.requested_partitions,
total_jobs: protobuf_response.total_jobs,
completed_jobs: protobuf_response.completed_jobs,
failed_jobs: protobuf_response.failed_jobs,
cancelled_jobs: protobuf_response.cancelled_jobs,
requested_at: protobuf_response.requested_at,
started_at: protobuf_response.started_at,
completed_at: protobuf_response.completed_at,
duration_ms: protobuf_response.duration_ms,
cancelled: protobuf_response.cancelled,
cancel_reason: protobuf_response.cancel_reason,
timeline: timeline_events,
}))
}

View file

@@ -423,18 +423,20 @@ pub type ServiceState = Arc<BuildGraphService>;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionDetailResponse {
pub partition_ref: String,
pub current_status: String,
pub status_code: i32,
pub status_name: String,
pub last_updated: i64,
pub builds_count: usize,
pub builds_count: u32,
pub last_successful_build: Option<String>,
pub invalidation_count: usize,
pub invalidation_count: u32,
pub timeline: Vec<PartitionTimelineEvent>,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionTimelineEvent {
pub timestamp: i64,
pub status: String,
pub status_code: i32,
pub status_name: String,
pub message: String,
pub build_request_id: String,
pub job_run_id: Option<String>,
@@ -462,13 +464,14 @@ pub struct JobRepositorySummary {
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobDetailResponse {
pub job_label: String,
pub total_runs: usize,
pub successful_runs: usize,
pub failed_runs: usize,
pub cancelled_runs: usize,
pub total_runs: u32,
pub successful_runs: u32,
pub failed_runs: u32,
pub cancelled_runs: u32,
pub average_partitions_per_run: f64,
pub last_run_timestamp: i64,
pub last_run_status: String,
pub last_run_status_code: i32,
pub last_run_status_name: String,
pub recent_builds: Vec<String>,
pub runs: Vec<JobRunDetail>,
}
@@ -478,7 +481,8 @@ pub struct JobRunDetail {
pub job_run_id: String,
pub build_request_id: String,
pub target_partitions: Vec<String>,
pub status: String,
pub status_code: i32,
pub status_name: String,
pub started_at: Option<i64>,
pub completed_at: Option<i64>,
pub duration_ms: Option<i64>,
@@ -511,7 +515,8 @@ pub struct TaskDetailResponse {
pub job_run_id: String,
pub job_label: String,
pub build_request_id: String,
pub status: String,
pub status_code: i32,
pub status_name: String,
pub target_partitions: Vec<String>,
pub scheduled_at: i64,
pub started_at: Option<i64>,
@@ -526,7 +531,8 @@ pub struct TaskTimelineEvent {
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TaskTimelineEvent {
pub timestamp: i64,
pub status: Option<String>,
pub status_code: Option<i32>,
pub status_name: Option<String>,
pub message: String,
pub event_type: String,
pub cancel_reason: Option<String>,
@@ -557,12 +563,13 @@ pub struct BuildRepositorySummary {
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildDetailResponse {
pub build_request_id: String,
pub status: String,
pub status_code: i32,
pub status_name: String,
pub requested_partitions: Vec<String>,
pub total_jobs: usize,
pub completed_jobs: usize,
pub failed_jobs: usize,
pub cancelled_jobs: usize,
pub total_jobs: u32,
pub completed_jobs: u32,
pub failed_jobs: u32,
pub cancelled_jobs: u32,
pub requested_at: i64,
pub started_at: Option<i64>,
pub completed_at: Option<i64>,
@@ -575,7 +582,8 @@ pub struct BuildDetailResponse {
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildTimelineEvent {
pub timestamp: i64,
pub status: Option<String>,
pub status_code: Option<i32>,
pub status_name: Option<String>,
pub message: String,
pub event_type: String,
pub cancel_reason: Option<String>,
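For API clients, the net effect of the new response structs is that every status arrives as a numeric code plus a display name. A sketch of deserializing one `BuildTimelineEvent` with serde; the field names match the struct above, but the values are invented and default serde naming is assumed:

```rust
use serde::Deserialize;

// Client-side mirror of the `BuildTimelineEvent` struct defined above.
#[derive(Debug, Deserialize)]
struct BuildTimelineEvent {
    timestamp: i64,
    status_code: Option<i32>,
    status_name: Option<String>,
    message: String,
    event_type: String,
    cancel_reason: Option<String>,
}

fn main() -> Result<(), serde_json::Error> {
    // Invented example payload; real values come from the databuild service.
    let body = r#"{
        "timestamp": 1753053634,
        "status_code": 2,
        "status_name": "COMPLETED",
        "message": "All jobs finished",
        "event_type": "status_change",
        "cancel_reason": null
    }"#;
    let event: BuildTimelineEvent = serde_json::from_str(body)?;
    // Branch on the code, display the name.
    println!(
        "{} [{}] {}",
        event.timestamp,
        event.status_name.unwrap_or_default(),
        event.message
    );
    Ok(())
}
```

Keying client logic on `status_code` keeps integrations stable even if the display names change.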