Pre-typescript mithril commit

This commit is contained in:
Stuart Axelbrooke 2025-07-20 17:54:32 -07:00
parent bf2678c992
commit 4f05192229
14 changed files with 455 additions and 467 deletions

View file

@ -11,6 +11,7 @@ rust_binary(
"@crates//:prost",
"@crates//:prost-build",
"@crates//:serde",
"@crates//:schemars",
"@crates//:tempfile",
],
)

View file

@ -32,6 +32,7 @@ genrule(
"typescript_generated/src/apis/DefaultApi.ts",
"typescript_generated/src/apis/index.ts",
"typescript_generated/src/models/index.ts",
"typescript_generated/src/models/ActivityApiResponse.ts",
"typescript_generated/src/models/ActivityResponse.ts",
"typescript_generated/src/models/AnalyzeRequest.ts",
"typescript_generated/src/models/AnalyzeResponse.ts",
@ -40,12 +41,12 @@ genrule(
"typescript_generated/src/models/BuildDetailRequest.ts",
"typescript_generated/src/models/BuildDetailResponse.ts",
"typescript_generated/src/models/BuildEventSummary.ts",
"typescript_generated/src/models/BuildRepositorySummary.ts",
"typescript_generated/src/models/BuildRequest.ts",
"typescript_generated/src/models/BuildRequestResponse.ts",
"typescript_generated/src/models/BuildSummary.ts",
"typescript_generated/src/models/BuildTimelineEvent.ts",
"typescript_generated/src/models/BuildsRepositoryListResponse.ts",
"typescript_generated/src/models/BuildsListApiResponse.ts",
"typescript_generated/src/models/BuildsListResponse.ts",
"typescript_generated/src/models/CancelBuildRepositoryRequest.ts",
"typescript_generated/src/models/CancelTaskRequest.ts",
"typescript_generated/src/models/InvalidatePartitionRequest.ts",
@ -54,20 +55,24 @@ genrule(
"typescript_generated/src/models/JobDetailResponse.ts",
"typescript_generated/src/models/JobMetricsRequest.ts",
"typescript_generated/src/models/JobMetricsResponse.ts",
"typescript_generated/src/models/JobRepositorySummary.ts",
"typescript_generated/src/models/JobRunDetail.ts",
"typescript_generated/src/models/JobRunSummary.ts",
"typescript_generated/src/models/JobsRepositoryListResponse.ts",
"typescript_generated/src/models/JobSummary.ts",
"typescript_generated/src/models/JobsListApiResponse.ts",
"typescript_generated/src/models/JobsListResponse.ts",
"typescript_generated/src/models/PaginationInfo.ts",
"typescript_generated/src/models/PartitionDetailRequest.ts",
"typescript_generated/src/models/PartitionDetailResponse.ts",
"typescript_generated/src/models/PartitionEventsRequest.ts",
"typescript_generated/src/models/PartitionEventsResponse.ts",
"typescript_generated/src/models/PartitionInvalidatePathRequest.ts",
"typescript_generated/src/models/PartitionInvalidateResponse.ts",
"typescript_generated/src/models/PartitionRef.ts",
"typescript_generated/src/models/PartitionStatusRequest.ts",
"typescript_generated/src/models/PartitionStatusResponse.ts",
"typescript_generated/src/models/PartitionSummary.ts",
"typescript_generated/src/models/PartitionTimelineEvent.ts",
"typescript_generated/src/models/PartitionsListApiResponse.ts",
"typescript_generated/src/models/PartitionsListResponse.ts",
"typescript_generated/src/models/TaskCancelPathRequest.ts",
"typescript_generated/src/models/TaskCancelResponse.ts",
@ -75,6 +80,7 @@ genrule(
"typescript_generated/src/models/TaskDetailResponse.ts",
"typescript_generated/src/models/TaskSummary.ts",
"typescript_generated/src/models/TaskTimelineEvent.ts",
"typescript_generated/src/models/TasksListApiResponse.ts",
"typescript_generated/src/models/TasksListResponse.ts",
"typescript_generated/src/runtime.ts",
"typescript_generated/src/index.ts",
@ -100,6 +106,7 @@ genrule(
cp $$TEMP_DIR/src/apis/DefaultApi.ts $(location typescript_generated/src/apis/DefaultApi.ts)
cp $$TEMP_DIR/src/apis/index.ts $(location typescript_generated/src/apis/index.ts)
cp $$TEMP_DIR/src/models/index.ts $(location typescript_generated/src/models/index.ts)
cp $$TEMP_DIR/src/models/ActivityApiResponse.ts $(location typescript_generated/src/models/ActivityApiResponse.ts)
cp $$TEMP_DIR/src/models/ActivityResponse.ts $(location typescript_generated/src/models/ActivityResponse.ts)
cp $$TEMP_DIR/src/models/AnalyzeRequest.ts $(location typescript_generated/src/models/AnalyzeRequest.ts)
cp $$TEMP_DIR/src/models/AnalyzeResponse.ts $(location typescript_generated/src/models/AnalyzeResponse.ts)
@ -108,12 +115,12 @@ genrule(
cp $$TEMP_DIR/src/models/BuildDetailRequest.ts $(location typescript_generated/src/models/BuildDetailRequest.ts)
cp $$TEMP_DIR/src/models/BuildDetailResponse.ts $(location typescript_generated/src/models/BuildDetailResponse.ts)
cp $$TEMP_DIR/src/models/BuildEventSummary.ts $(location typescript_generated/src/models/BuildEventSummary.ts)
cp $$TEMP_DIR/src/models/BuildRepositorySummary.ts $(location typescript_generated/src/models/BuildRepositorySummary.ts)
cp $$TEMP_DIR/src/models/BuildRequest.ts $(location typescript_generated/src/models/BuildRequest.ts)
cp $$TEMP_DIR/src/models/BuildRequestResponse.ts $(location typescript_generated/src/models/BuildRequestResponse.ts)
cp $$TEMP_DIR/src/models/BuildSummary.ts $(location typescript_generated/src/models/BuildSummary.ts)
cp $$TEMP_DIR/src/models/BuildTimelineEvent.ts $(location typescript_generated/src/models/BuildTimelineEvent.ts)
cp $$TEMP_DIR/src/models/BuildsRepositoryListResponse.ts $(location typescript_generated/src/models/BuildsRepositoryListResponse.ts)
cp $$TEMP_DIR/src/models/BuildsListApiResponse.ts $(location typescript_generated/src/models/BuildsListApiResponse.ts)
cp $$TEMP_DIR/src/models/BuildsListResponse.ts $(location typescript_generated/src/models/BuildsListResponse.ts)
cp $$TEMP_DIR/src/models/CancelBuildRepositoryRequest.ts $(location typescript_generated/src/models/CancelBuildRepositoryRequest.ts)
cp $$TEMP_DIR/src/models/CancelTaskRequest.ts $(location typescript_generated/src/models/CancelTaskRequest.ts)
cp $$TEMP_DIR/src/models/InvalidatePartitionRequest.ts $(location typescript_generated/src/models/InvalidatePartitionRequest.ts)
@ -122,20 +129,24 @@ genrule(
cp $$TEMP_DIR/src/models/JobDetailResponse.ts $(location typescript_generated/src/models/JobDetailResponse.ts)
cp $$TEMP_DIR/src/models/JobMetricsRequest.ts $(location typescript_generated/src/models/JobMetricsRequest.ts)
cp $$TEMP_DIR/src/models/JobMetricsResponse.ts $(location typescript_generated/src/models/JobMetricsResponse.ts)
cp $$TEMP_DIR/src/models/JobRepositorySummary.ts $(location typescript_generated/src/models/JobRepositorySummary.ts)
cp $$TEMP_DIR/src/models/JobRunDetail.ts $(location typescript_generated/src/models/JobRunDetail.ts)
cp $$TEMP_DIR/src/models/JobRunSummary.ts $(location typescript_generated/src/models/JobRunSummary.ts)
cp $$TEMP_DIR/src/models/JobsRepositoryListResponse.ts $(location typescript_generated/src/models/JobsRepositoryListResponse.ts)
cp $$TEMP_DIR/src/models/JobSummary.ts $(location typescript_generated/src/models/JobSummary.ts)
cp $$TEMP_DIR/src/models/JobsListApiResponse.ts $(location typescript_generated/src/models/JobsListApiResponse.ts)
cp $$TEMP_DIR/src/models/JobsListResponse.ts $(location typescript_generated/src/models/JobsListResponse.ts)
cp $$TEMP_DIR/src/models/PaginationInfo.ts $(location typescript_generated/src/models/PaginationInfo.ts)
cp $$TEMP_DIR/src/models/PartitionDetailRequest.ts $(location typescript_generated/src/models/PartitionDetailRequest.ts)
cp $$TEMP_DIR/src/models/PartitionDetailResponse.ts $(location typescript_generated/src/models/PartitionDetailResponse.ts)
cp $$TEMP_DIR/src/models/PartitionEventsRequest.ts $(location typescript_generated/src/models/PartitionEventsRequest.ts)
cp $$TEMP_DIR/src/models/PartitionEventsResponse.ts $(location typescript_generated/src/models/PartitionEventsResponse.ts)
cp $$TEMP_DIR/src/models/PartitionInvalidatePathRequest.ts $(location typescript_generated/src/models/PartitionInvalidatePathRequest.ts)
cp $$TEMP_DIR/src/models/PartitionInvalidateResponse.ts $(location typescript_generated/src/models/PartitionInvalidateResponse.ts)
cp $$TEMP_DIR/src/models/PartitionRef.ts $(location typescript_generated/src/models/PartitionRef.ts)
cp $$TEMP_DIR/src/models/PartitionStatusRequest.ts $(location typescript_generated/src/models/PartitionStatusRequest.ts)
cp $$TEMP_DIR/src/models/PartitionStatusResponse.ts $(location typescript_generated/src/models/PartitionStatusResponse.ts)
cp $$TEMP_DIR/src/models/PartitionSummary.ts $(location typescript_generated/src/models/PartitionSummary.ts)
cp $$TEMP_DIR/src/models/PartitionTimelineEvent.ts $(location typescript_generated/src/models/PartitionTimelineEvent.ts)
cp $$TEMP_DIR/src/models/PartitionsListApiResponse.ts $(location typescript_generated/src/models/PartitionsListApiResponse.ts)
cp $$TEMP_DIR/src/models/PartitionsListResponse.ts $(location typescript_generated/src/models/PartitionsListResponse.ts)
cp $$TEMP_DIR/src/models/TaskCancelPathRequest.ts $(location typescript_generated/src/models/TaskCancelPathRequest.ts)
cp $$TEMP_DIR/src/models/TaskCancelResponse.ts $(location typescript_generated/src/models/TaskCancelResponse.ts)
@ -143,6 +154,7 @@ genrule(
cp $$TEMP_DIR/src/models/TaskDetailResponse.ts $(location typescript_generated/src/models/TaskDetailResponse.ts)
cp $$TEMP_DIR/src/models/TaskSummary.ts $(location typescript_generated/src/models/TaskSummary.ts)
cp $$TEMP_DIR/src/models/TaskTimelineEvent.ts $(location typescript_generated/src/models/TaskTimelineEvent.ts)
cp $$TEMP_DIR/src/models/TasksListApiResponse.ts $(location typescript_generated/src/models/TasksListApiResponse.ts)
cp $$TEMP_DIR/src/models/TasksListResponse.ts $(location typescript_generated/src/models/TasksListResponse.ts)
cp $$TEMP_DIR/src/runtime.ts $(location typescript_generated/src/runtime.ts)
cp $$TEMP_DIR/src/index.ts $(location typescript_generated/src/index.ts)

View file

@ -1,5 +1,5 @@
// Import the generated TypeScript client
import { DefaultApi, Configuration, ActivityResponse, BuildSummary, PartitionSummary, JobsRepositoryListResponse, JobMetricsResponse, JobRepositorySummary, JobRunSummary, JobDailyStats } from '../client/typescript_generated/src/index';
import { DefaultApi, Configuration, ActivityApiResponse, ActivityResponse, BuildSummary, PartitionSummary, JobsListApiResponse, JobMetricsResponse, JobSummary, JobRunSummary, JobDailyStats } from '../client/typescript_generated/src/index';
// Configure the API client
const apiConfig = new Configuration({
@ -45,22 +45,24 @@ export class DashboardService {
async getRecentActivity(): Promise<RecentActivitySummary> {
try {
// Use the new activity endpoint that aggregates all the data we need
const activityResponse: ActivityResponse = await apiClient.apiV1ActivityGet();
console.info('Recent activity:', activityResponse);
const activityApiResponse: ActivityApiResponse = await apiClient.apiV1ActivityGet();
console.info('Recent activity:', activityApiResponse);
const activityResponse = activityApiResponse.data;
// Convert the API response to our dashboard format
const recentBuilds: BuildRequest[] = activityResponse.recent_builds.map((build: BuildSummary) => ({
buildRequestId: build.build_request_id,
status: build.status,
createdAt: build.created_at,
updatedAt: build.updated_at,
status: build.status_name, // Use human-readable status name
createdAt: build.requested_at,
updatedAt: build.started_at || build.requested_at,
}));
const recentPartitions: PartitionBuild[] = activityResponse.recent_partitions.map((partition: PartitionSummary) => ({
ref: partition.partition_ref,
status: partition.status,
updatedAt: partition.updated_at,
buildRequestId: partition.build_request_id || undefined
status: partition.status_name, // Use human-readable status name
updatedAt: partition.last_updated,
buildRequestId: partition.last_successful_build || undefined
}));
console.info("made", recentBuilds, recentPartitions);
return {
@ -86,7 +88,7 @@ export class DashboardService {
}
}
async getJobs(searchTerm?: string): Promise<JobRepositorySummary[]> {
async getJobs(searchTerm?: string): Promise<JobSummary[]> {
try {
// Build query parameters manually since the generated client may not support query params correctly
const queryParams = new URLSearchParams();
@ -99,8 +101,8 @@ export class DashboardService {
if (!response.ok) {
throw new Error(`HTTP ${response.status}: ${response.statusText}`);
}
const data: JobsRepositoryListResponse = await response.json();
return data.jobs;
const data: JobsListApiResponse = await response.json();
return data.data.jobs;
} catch (error) {
console.error('Failed to fetch jobs:', error);
return [];

View file

@ -390,6 +390,19 @@ message BuildSummary {
bool cancelled = 13;
}
//
// Activity Summary
//
message ActivityResponse {
uint32 active_builds_count = 1;
repeated BuildSummary recent_builds = 2;
repeated PartitionSummary recent_partitions = 3;
uint32 total_partitions_count = 4;
string system_status = 5;
string graph_name = 6;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Detail Operations (Unified CLI/Service Detail Responses)
///////////////////////////////////////////////////////////////////////////////////////////////

View file

@ -31,7 +31,7 @@ fn generate_prost_code(proto_file: &str, output_file: &str) -> Result<(), Box<dy
config.out_dir(temp_path);
// Configure derive traits - prost::Message provides Debug automatically
config.type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]");
config.type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize, schemars::JsonSchema)]");
// Try to find protoc in the environment (Bazel should provide this)
if let Ok(protoc_path) = env::var("PROTOC") {

View file

@ -360,6 +360,36 @@ impl BuildsRepository {
let event_writer = crate::event_log::writer::EventWriter::new(self.event_log.clone());
event_writer.cancel_build(build_request_id.to_string(), reason).await
}
/// List builds using protobuf response format with dual status fields
///
/// Returns BuildSummary protobuf messages with status_code and status_name.
pub async fn list_protobuf(&self, limit: Option<usize>) -> Result<Vec<crate::BuildSummary>> {
// Get build info using existing list method
let builds = self.list(limit).await?;
// Convert to protobuf format
let protobuf_builds: Vec<crate::BuildSummary> = builds
.into_iter()
.map(|build| crate::BuildSummary {
build_request_id: build.build_request_id,
status_code: build.status as i32,
status_name: build.status.to_display_string(),
requested_partitions: build.requested_partitions.into_iter().map(|p| crate::PartitionRef { str: p.str }).collect(),
total_jobs: build.total_jobs as u32,
completed_jobs: build.completed_jobs as u32,
failed_jobs: build.failed_jobs as u32,
cancelled_jobs: build.cancelled_jobs as u32,
requested_at: build.requested_at,
started_at: build.started_at,
completed_at: build.completed_at,
duration_ms: build.duration_ms,
cancelled: build.cancelled,
})
.collect();
Ok(protobuf_builds)
}
}
#[cfg(test)]

View file

@ -336,6 +336,39 @@ impl JobsRepository {
Ok(None)
}
}
/// List jobs using protobuf response format with dual status fields
///
/// Returns JobsListResponse protobuf message with JobSummary objects containing
/// last_run_status_code and last_run_status_name fields.
pub async fn list_protobuf(&self, request: JobsListRequest) -> Result<JobsListResponse> {
// Get job info using existing list method
let jobs = self.list(request.limit.map(|l| l as usize)).await?;
// Convert to protobuf format
let protobuf_jobs: Vec<crate::JobSummary> = jobs
.into_iter()
.map(|job| crate::JobSummary {
job_label: job.job_label,
total_runs: job.total_runs as u32,
successful_runs: job.successful_runs as u32,
failed_runs: job.failed_runs as u32,
cancelled_runs: job.cancelled_runs as u32,
average_partitions_per_run: job.average_partitions_per_run,
last_run_timestamp: job.last_run_timestamp,
last_run_status_code: job.last_run_status as i32,
last_run_status_name: job.last_run_status.to_display_string(),
recent_builds: job.recent_builds,
})
.collect();
let total_count = protobuf_jobs.len() as u32;
Ok(JobsListResponse {
jobs: protobuf_jobs,
total_count,
})
}
}
#[cfg(test)]

View file

@ -340,6 +340,41 @@ impl TasksRepository {
Ok(None)
}
}
/// List tasks using protobuf response format with dual status fields
///
/// Returns TasksListResponse protobuf message with TaskSummary objects containing
/// status_code and status_name fields.
pub async fn list_protobuf(&self, request: TasksListRequest) -> Result<TasksListResponse> {
// Get task info using existing list method
let tasks = self.list(request.limit.map(|l| l as usize)).await?;
// Convert to protobuf format
let protobuf_tasks: Vec<crate::TaskSummary> = tasks
.into_iter()
.map(|task| crate::TaskSummary {
job_run_id: task.job_run_id,
job_label: task.job_label,
build_request_id: task.build_request_id,
status_code: task.status as i32,
status_name: task.status.to_display_string(),
target_partitions: task.target_partitions.into_iter().map(|p| crate::PartitionRef { str: p.str }).collect(),
scheduled_at: task.scheduled_at,
started_at: task.started_at,
completed_at: task.completed_at,
duration_ms: task.duration_ms,
cancelled: task.cancelled,
message: task.message,
})
.collect();
let total_count = protobuf_tasks.len() as u32;
Ok(TasksListResponse {
tasks: protobuf_tasks,
total_count,
})
}
}
#[cfg(test)]

View file

@ -120,164 +120,31 @@ pub struct BuildStatusRequest {
pub async fn get_build_status(
State(service): State<ServiceState>,
Path(BuildStatusRequest { build_request_id }): Path<BuildStatusRequest>,
) -> Result<Json<BuildStatusResponse>, (StatusCode, Json<ErrorResponse>)> {
// Get events for this build request from the event log (source of truth)
let events = match service.event_log.get_build_request_events(&build_request_id, None).await {
Ok(events) => events,
Err(e) => {
error!("Failed to get build request events: {}", e);
return Err((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: format!("Failed to query build request events: {}", e),
}),
));
) -> Result<Json<BuildDetailResponse>, (StatusCode, Json<ErrorResponse>)> {
let repository = crate::repositories::builds::BuildsRepository::new(service.event_log.clone());
match repository.show_protobuf(&build_request_id).await {
Ok(Some(build_detail)) => {
Ok(Json(build_detail))
}
};
info!("Build request {}: Found {} events", build_request_id, events.len());
// Check if build request exists by looking for any events
if events.is_empty() {
return Err((
Ok(None) => {
Err((
StatusCode::NOT_FOUND,
Json(ErrorResponse {
error: "Build request not found".to_string(),
}),
));
))
}
// Reconstruct build state from events - fail if no valid build request events found
let mut status: Option<BuildRequestStatus> = None;
let mut requested_partitions = Vec::new();
let mut created_at = 0i64;
let mut updated_at = 0i64;
let mut partitions_set = false;
// Sort events by timestamp to process in chronological order
let mut sorted_events = events.clone();
sorted_events.sort_by_key(|e| e.timestamp);
for event in &sorted_events {
if event.timestamp > updated_at {
updated_at = event.timestamp;
}
if created_at == 0 || event.timestamp < created_at {
created_at = event.timestamp;
}
// Extract information from build request events
if let Some(crate::build_event::EventType::BuildRequestEvent(req_event)) = &event.event_type {
info!("Processing BuildRequestEvent: status={}, message='{}'", req_event.status_code, req_event.message);
// Update status with the latest event - convert from i32 to enum
status = Some(match req_event.status_code {
0 => BuildRequestStatus::BuildRequestUnknown, // Default protobuf value - should not happen in production
1 => BuildRequestStatus::BuildRequestReceived,
2 => BuildRequestStatus::BuildRequestPlanning,
7 => BuildRequestStatus::BuildRequestAnalysisCompleted,
3 => BuildRequestStatus::BuildRequestExecuting,
4 => BuildRequestStatus::BuildRequestCompleted,
5 => BuildRequestStatus::BuildRequestFailed,
6 => BuildRequestStatus::BuildRequestCancelled,
unknown_status => {
error!("Invalid BuildRequestStatus value: {} in build request {}", unknown_status, build_request_id);
return Err((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: format!("Invalid build request status value: {}", unknown_status),
}),
));
},
});
// Use partitions from the first event that has them (typically the "received" event)
if !partitions_set && !req_event.requested_partitions.is_empty() {
info!("Setting requested partitions from event: {:?}", req_event.requested_partitions);
requested_partitions = req_event.requested_partitions.iter()
.map(|p| p.str.clone())
.collect();
partitions_set = true;
}
} else {
info!("Event is not a BuildRequestEvent: {:?}", event.event_type.as_ref().map(|t| std::mem::discriminant(t)));
}
}
// Ensure we found at least one valid BuildRequestEvent
let final_status = status.ok_or_else(|| {
error!("No valid BuildRequestEvent found in {} events for build request {}", events.len(), build_request_id);
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: format!("No valid build request events found - data corruption detected"),
}),
)
})?;
// Clone events for later use in mermaid generation
let events_for_mermaid = events.clone();
// Convert events to summary format for response
let event_summaries: Vec<BuildEventSummary> = events.into_iter().map(|e| {
let (job_label, partition_ref, delegated_build_id) = extract_navigation_data(&e.event_type);
BuildEventSummary {
event_id: e.event_id,
timestamp: e.timestamp,
event_type: event_type_to_string(&e.event_type),
message: event_to_message(&e.event_type),
build_request_id: e.build_request_id,
job_label,
partition_ref,
delegated_build_id,
}
}).collect();
let final_status_string = BuildGraphService::status_to_string(final_status);
info!("Build request {}: Final status={}, partitions={:?}", build_request_id, final_status_string, requested_partitions);
// Extract the job graph from events (find the most recent JobGraphEvent)
let (job_graph_json, mermaid_diagram) = {
let mut job_graph: Option<JobGraph> = None;
// Find the most recent JobGraphEvent in the events
for event in &events_for_mermaid {
if let Some(crate::build_event::EventType::JobGraphEvent(graph_event)) = &event.event_type {
if let Some(ref graph) = graph_event.job_graph {
job_graph = Some(graph.clone());
}
}
}
if let Some(ref graph) = job_graph {
// Convert job graph to JSON
let graph_json = match serde_json::to_value(graph) {
Ok(json) => Some(json),
Err(e) => {
error!("Failed to serialize job graph: {}", e);
None
error!("Failed to get build status: {}", e);
Err((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: format!("Failed to get build status: {}", e),
}),
))
}
};
// Generate mermaid diagram with current status
let mermaid = mermaid_utils::generate_mermaid_with_status(graph, &events_for_mermaid);
(graph_json, Some(mermaid))
} else {
(None, None)
}
};
Ok(Json(BuildStatusResponse {
build_request_id,
status: final_status_string,
requested_partitions,
created_at: created_at,
updated_at: updated_at,
events: event_summaries,
job_graph: job_graph_json,
mermaid_diagram,
}))
}
#[derive(Deserialize, JsonSchema)]
@ -376,7 +243,8 @@ pub async fn get_partition_status(
Ok(Json(PartitionStatusResponse {
partition_ref,
status: BuildGraphService::partition_status_to_string(status),
status_code: status as i32,
status_name: status.to_display_string(),
last_updated,
build_requests,
}))
@ -698,45 +566,24 @@ use std::collections::HashMap;
pub async fn list_build_requests(
State(service): State<ServiceState>,
Query(params): Query<HashMap<String, String>>,
) -> Result<Json<BuildsListResponse>, (StatusCode, Json<ErrorResponse>)> {
) -> Result<Json<crate::BuildsListResponse>, (StatusCode, Json<ErrorResponse>)> {
let limit = params.get("limit")
.and_then(|s| s.parse::<u32>().ok())
.unwrap_or(20)
.min(100); // Cap at 100
let offset = params.get("offset")
.and_then(|s| s.parse::<u32>().ok())
.unwrap_or(0);
let status_filter = params.get("status")
.and_then(|s| match s.as_str() {
"received" => Some(BuildRequestStatus::BuildRequestReceived),
"planning" => Some(BuildRequestStatus::BuildRequestPlanning),
"executing" => Some(BuildRequestStatus::BuildRequestExecuting),
"completed" => Some(BuildRequestStatus::BuildRequestCompleted),
"failed" => Some(BuildRequestStatus::BuildRequestFailed),
"cancelled" => Some(BuildRequestStatus::BuildRequestCancelled),
_ => None,
});
match service.event_log.list_build_requests(limit, offset, status_filter).await {
Ok((summaries, total_count)) => {
let builds: Vec<BuildSummary> = summaries.into_iter().map(|s| BuildSummary {
build_request_id: s.build_request_id,
status: format!("{:?}", s.status),
requested_partitions: s.requested_partitions,
created_at: s.created_at,
updated_at: s.updated_at,
}).collect();
let has_more = (offset + limit) < total_count;
Ok(Json(BuildsListResponse {
// Use repository with protobuf format
let builds_repo = BuildsRepository::new(service.event_log.clone());
match builds_repo.list_protobuf(Some(limit as usize)).await {
Ok(builds) => {
let total_count = builds.len() as u32;
let response = crate::BuildsListResponse {
builds,
total_count,
has_more,
}))
}
total_count, // TODO: implement proper total count with pagination
has_more: false, // TODO: implement proper pagination
};
Ok(Json(response))
},
Err(e) => {
error!("Failed to list build requests: {}", e);
Err((
@ -752,44 +599,24 @@ pub async fn list_build_requests(
pub async fn list_partitions(
State(service): State<ServiceState>,
Query(params): Query<HashMap<String, String>>,
) -> Result<Json<PartitionsListResponse>, (StatusCode, Json<ErrorResponse>)> {
) -> Result<Json<crate::PartitionsListResponse>, (StatusCode, Json<ErrorResponse>)> {
let limit = params.get("limit")
.and_then(|s| s.parse::<u32>().ok())
.unwrap_or(20)
.min(100); // Cap at 100
let offset = params.get("offset")
.and_then(|s| s.parse::<u32>().ok())
.unwrap_or(0);
// Use repository with protobuf format
let partitions_repo = PartitionsRepository::new(service.event_log.clone());
let request = PartitionsListRequest {
limit: Some(limit),
offset: None,
status_filter: None,
};
let status_filter = params.get("status")
.and_then(|s| match s.as_str() {
"requested" => Some(PartitionStatus::PartitionRequested),
"analyzed" => Some(PartitionStatus::PartitionAnalyzed),
"building" => Some(PartitionStatus::PartitionBuilding),
"available" => Some(PartitionStatus::PartitionAvailable),
"failed" => Some(PartitionStatus::PartitionFailed),
"delegated" => Some(PartitionStatus::PartitionDelegated),
_ => None,
});
match service.event_log.list_recent_partitions(limit, offset, status_filter).await {
Ok((summaries, total_count)) => {
let partitions: Vec<PartitionSummary> = summaries.into_iter().map(|s| PartitionSummary {
partition_ref: s.partition_ref,
status: format!("{:?}", s.status),
updated_at: s.updated_at,
build_request_id: s.build_request_id,
}).collect();
let has_more = (offset + limit) < total_count;
Ok(Json(PartitionsListResponse {
partitions,
total_count,
has_more,
}))
}
match partitions_repo.list_protobuf(request).await {
Ok(response) => {
Ok(Json(response))
},
Err(e) => {
error!("Failed to list partitions: {}", e);
Err((
@ -846,23 +673,34 @@ pub async fn list_partitions_unified(
pub async fn get_activity_summary(
State(service): State<ServiceState>,
) -> Result<Json<ActivityResponse>, (StatusCode, Json<ErrorResponse>)> {
match service.event_log.get_activity_summary().await {
Ok(summary) => {
let recent_builds: Vec<BuildSummary> = summary.recent_builds.into_iter().map(|s| BuildSummary {
build_request_id: s.build_request_id,
status: format!("{:?}", s.status),
requested_partitions: s.requested_partitions,
created_at: s.created_at,
updated_at: s.updated_at,
}).collect();
) -> Result<Json<ActivityApiResponse>, (StatusCode, Json<ErrorResponse>)> {
// Build activity response using repositories to get dual status fields
let builds_repo = BuildsRepository::new(service.event_log.clone());
let partitions_repo = PartitionsRepository::new(service.event_log.clone());
let recent_partitions: Vec<PartitionSummary> = summary.recent_partitions.into_iter().map(|s| PartitionSummary {
partition_ref: s.partition_ref,
status: format!("{:?}", s.status),
updated_at: s.updated_at,
build_request_id: s.build_request_id,
}).collect();
// Get recent builds and partitions with dual status fields
let recent_builds = builds_repo.list_protobuf(Some(5)).await.unwrap_or_else(|_| vec![]);
let recent_partitions_request = PartitionsListRequest {
limit: Some(10),
offset: None,
status_filter: None
};
let recent_partitions_response = partitions_repo.list_protobuf(recent_partitions_request).await
.unwrap_or_else(|_| crate::PartitionsListResponse {
partitions: vec![],
total_count: 0,
has_more: false
});
// Get activity counts (fallback to event log method for now)
let summary = service.event_log.get_activity_summary().await.unwrap_or_else(|_| {
crate::event_log::ActivitySummary {
active_builds_count: 0,
recent_builds: vec![],
recent_partitions: vec![],
total_partitions_count: 0,
}
});
// Simple system status logic
let system_status = if summary.active_builds_count > 10 {
@ -871,25 +709,21 @@ pub async fn get_activity_summary(
"healthy".to_string()
};
Ok(Json(ActivityResponse {
// Build protobuf activity response with dual status fields
let protobuf_response = crate::ActivityResponse {
active_builds_count: summary.active_builds_count,
recent_builds,
recent_partitions,
recent_partitions: recent_partitions_response.partitions,
total_partitions_count: summary.total_partitions_count,
system_status,
graph_name: service.graph_label.clone(),
}))
}
Err(e) => {
error!("Failed to get activity summary: {}", e);
Err((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: format!("Failed to get activity summary: {}", e),
}),
))
}
}
};
let api_response = ActivityApiResponse {
data: protobuf_response,
request_id: None,
};
Ok(Json(api_response))
}
#[derive(Deserialize, JsonSchema)]
@ -900,106 +734,25 @@ pub struct JobMetricsRequest {
pub async fn list_jobs(
State(service): State<ServiceState>,
Query(params): Query<HashMap<String, String>>,
) -> Result<Json<JobsListResponse>, (StatusCode, Json<ErrorResponse>)> {
let search_term = params.get("search").map(|s| s.to_lowercase());
) -> Result<Json<crate::JobsListResponse>, (StatusCode, Json<ErrorResponse>)> {
let limit = params.get("limit")
.and_then(|s| s.parse::<u32>().ok())
.unwrap_or(20)
.min(100); // Cap at 100
// Debug: Let's see what's actually in the database
let debug_query = "
SELECT
je.job_label,
je.status,
COUNT(*) as count_for_this_status
FROM job_events je
JOIN build_events be ON je.event_id = be.event_id
WHERE je.job_label != ''
GROUP BY je.job_label, je.status
ORDER BY je.job_label, je.status";
let search = params.get("search").map(|s| s.to_string());
// Log the debug results first
if let Ok(debug_result) = service.event_log.execute_query(debug_query).await {
for row in &debug_result.rows {
if row.len() >= 3 {
log::info!("Debug: job_label={}, status={}, count={}", row[0], row[1], row[2]);
}
}
}
// Original query but let's see all statuses
let query = "
WITH job_durations AS (
SELECT
je.job_label,
be.build_request_id,
(MAX(be.timestamp) - MIN(be.timestamp)) / 1000000 as duration_ms
FROM job_events je
JOIN build_events be ON je.event_id = be.event_id
GROUP BY je.job_label, be.build_request_id
HAVING MAX(CASE WHEN je.status IN ('3', '4', '5', '6') THEN 1 ELSE 0 END) = 1
)
SELECT
je.job_label,
COUNT(CASE WHEN je.status IN ('3', '6') THEN 1 END) as completed_count,
COUNT(CASE WHEN je.status IN ('4', '5') THEN 1 END) as failed_count,
COUNT(CASE WHEN je.status IN ('3', '4', '5', '6') THEN 1 END) as total_count,
COALESCE(AVG(jd.duration_ms), 0) as avg_duration_ms,
MAX(be.timestamp) as last_run,
GROUP_CONCAT(DISTINCT je.status) as all_statuses
FROM job_events je
JOIN build_events be ON je.event_id = be.event_id
LEFT JOIN job_durations jd ON je.job_label = jd.job_label
WHERE je.job_label != ''
GROUP BY je.job_label
ORDER BY last_run DESC";
match service.event_log.execute_query(query).await {
Ok(result) => {
let mut jobs = Vec::new();
for row in result.rows {
if row.len() >= 7 {
let job_label = &row[0];
// Apply search filter if provided
if let Some(ref search) = search_term {
if !job_label.to_lowercase().contains(search) {
continue;
}
}
let completed_count: u32 = row[1].parse().unwrap_or(0);
let failed_count: u32 = row[2].parse().unwrap_or(0);
let total_count: u32 = row[3].parse().unwrap_or(0);
let avg_duration_ms: Option<i64> = row[4].parse::<f64>().ok().map(|f| f as i64);
let last_run: Option<i64> = row[5].parse().ok();
let all_statuses = &row[6];
// Log additional debug info
log::info!("Job: {}, completed: {}, failed: {}, total: {}, statuses: {}",
job_label, completed_count, failed_count, total_count, all_statuses);
let success_rate = if total_count > 0 {
completed_count as f64 / total_count as f64
} else {
0.0
// Use repository with protobuf format
let jobs_repo = JobsRepository::new(service.event_log.clone());
let request = JobsListRequest {
limit: Some(limit),
search,
};
jobs.push(JobSummary {
job_label: job_label.clone(),
success_rate,
avg_duration_ms,
recent_runs: total_count.min(50), // Limit to recent runs
last_run,
});
}
}
let total_count = jobs.len() as u32;
Ok(Json(JobsListResponse {
jobs,
total_count,
}))
}
match jobs_repo.list_protobuf(request).await {
Ok(response) => {
Ok(Json(response))
},
Err(e) => {
error!("Failed to list jobs: {}", e);
Err((
@ -1116,20 +869,21 @@ pub async fn get_job_metrics(
})
.collect();
let status = match status_code.as_str() {
"1" => "scheduled",
"2" => "running",
"3" => "completed",
"4" => "failed",
"5" => "cancelled",
"6" => "skipped",
_ => "unknown",
let (status_code_int, status_name) = match status_code.as_str() {
"1" => (1, "scheduled"),
"2" => (2, "running"),
"3" => (3, "completed"),
"4" => (4, "failed"),
"5" => (5, "cancelled"),
"6" => (6, "skipped"),
_ => (0, "unknown"),
};
JobRunSummary {
build_request_id,
partitions,
status: status.to_string(),
status_code: status_code_int,
status_name: status_name.to_string(),
duration_ms,
started_at,
}
@ -1301,36 +1055,117 @@ pub async fn invalidate_partition(
}
}
/// List partitions via the repository-backed protobuf path.
///
/// Reads an optional `limit` query parameter (unparseable values are
/// silently ignored) and wraps the repository's protobuf response in the
/// API envelope together with pagination metadata.
pub async fn list_partitions_repository(
    State(service): State<ServiceState>,
    Query(params): Query<HashMap<String, String>>,
) -> Result<Json<PartitionsListApiResponse>, (StatusCode, Json<ErrorResponse>)> {
    let repository = PartitionsRepository::new(service.event_log.clone());
    // Optional ?limit=N; anything that fails to parse is treated as absent.
    let limit = params.get("limit").and_then(|raw| raw.parse().ok());
    let request = PartitionsListRequest {
        limit,
        offset: None,
        status_filter: None,
    };
    // Bail out early with a 500 on repository failure.
    let data = match repository.list_protobuf(request).await {
        Ok(data) => data,
        Err(e) => {
            error!("Failed to list partitions: {}", e);
            return Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to list partitions: {}", e),
                }),
            ));
        }
    };
    // Pagination echoes the counts reported by the repository response.
    let pagination = PaginationInfo {
        total_count: data.total_count,
        has_more: data.has_more,
        limit: limit.map(|l| l as u32),
        offset: None,
    };
    Ok(Json(PartitionsListApiResponse {
        data,
        request_id: None, // TODO: add request ID tracking
        pagination: Some(pagination),
    }))
}
/// List tasks via the repository-backed protobuf path.
///
/// Reads an optional `limit` query parameter (unparseable values are
/// silently ignored) and wraps the repository's protobuf response in the
/// API envelope together with pagination metadata.
pub async fn list_tasks_repository(
    State(service): State<ServiceState>,
    Query(params): Query<HashMap<String, String>>,
) -> Result<Json<TasksListApiResponse>, (StatusCode, Json<ErrorResponse>)> {
    let repository = TasksRepository::new(service.event_log.clone());
    // Optional ?limit=N; anything that fails to parse is treated as absent.
    let limit = params.get("limit").and_then(|raw| raw.parse().ok());
    let request = TasksListRequest { limit };
    // Bail out early with a 500 on repository failure.
    let data = match repository.list_protobuf(request).await {
        Ok(data) => data,
        Err(e) => {
            error!("Failed to list tasks: {}", e);
            return Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("Failed to list tasks: {}", e),
                }),
            ));
        }
    };
    let pagination = PaginationInfo {
        total_count: data.total_count,
        has_more: false, // Tasks list doesn't implement has_more yet
        limit: limit.map(|l| l as u32),
        offset: None,
    };
    Ok(Json(TasksListApiResponse {
        data,
        request_id: None, // TODO: add request ID tracking
        pagination: Some(pagination),
    }))
}
/// List jobs using repository
pub async fn list_jobs_repository(
State(service): State<ServiceState>,
Query(params): Query<HashMap<String, String>>,
) -> Result<Json<JobsRepositoryListResponse>, (StatusCode, Json<ErrorResponse>)> {
) -> Result<Json<JobsListApiResponse>, (StatusCode, Json<ErrorResponse>)> {
let repository = JobsRepository::new(service.event_log.clone());
let limit = params.get("limit").and_then(|s| s.parse().ok());
let search = params.get("search").map(|s| s.to_string());
match repository.list(limit).await {
Ok(jobs) => {
let job_summaries: Vec<JobRepositorySummary> = jobs.into_iter().map(|job| {
JobRepositorySummary {
job_label: job.job_label,
total_runs: job.total_runs,
successful_runs: job.successful_runs,
failed_runs: job.failed_runs,
cancelled_runs: job.cancelled_runs,
average_partitions_per_run: job.average_partitions_per_run,
last_run_timestamp: job.last_run_timestamp,
last_run_status: format!("{:?}", job.last_run_status),
recent_builds: job.recent_builds,
}
}).collect();
let request = JobsListRequest {
limit,
search,
};
let total_count = job_summaries.len() as u32;
Ok(Json(JobsRepositoryListResponse {
jobs: job_summaries,
match repository.list_protobuf(request).await {
Ok(protobuf_response) => {
let total_count = protobuf_response.total_count;
let api_response = JobsListApiResponse {
data: protobuf_response,
request_id: None, // TODO: add request ID tracking
pagination: Some(PaginationInfo {
total_count,
}))
}
has_more: false, // Jobs list doesn't implement has_more yet
limit: limit.map(|l| l as u32),
offset: None,
}),
};
Ok(Json(api_response))
},
Err(e) => {
error!("Failed to list jobs: {}", e);
Err((
@ -1409,33 +1244,15 @@ pub async fn get_job_detail(
pub async fn list_tasks(
State(service): State<ServiceState>,
Query(params): Query<HashMap<String, String>>,
) -> Result<Json<TasksListResponse>, (StatusCode, Json<ErrorResponse>)> {
) -> Result<Json<crate::TasksListResponse>, (StatusCode, Json<ErrorResponse>)> {
let repository = TasksRepository::new(service.event_log.clone());
let limit = params.get("limit").and_then(|s| s.parse().ok());
match repository.list(limit).await {
Ok(tasks) => {
let task_summaries: Vec<TaskSummary> = tasks.into_iter().map(|task| {
TaskSummary {
job_run_id: task.job_run_id,
job_label: task.job_label,
build_request_id: task.build_request_id,
status: format!("{:?}", task.status),
target_partitions: task.target_partitions.into_iter().map(|p| p.str).collect(),
scheduled_at: task.scheduled_at,
started_at: task.started_at,
completed_at: task.completed_at,
duration_ms: task.duration_ms,
cancelled: task.cancelled,
message: task.message,
}
}).collect();
let request = TasksListRequest { limit };
let total_count = task_summaries.len() as u32;
Ok(Json(TasksListResponse {
tasks: task_summaries,
total_count,
}))
match repository.list_protobuf(request).await {
Ok(response) => {
Ok(Json(response))
}
Err(e) => {
error!("Failed to list tasks: {}", e);
@ -1552,35 +1369,31 @@ pub async fn cancel_task(
pub async fn list_builds_repository(
State(service): State<ServiceState>,
Query(params): Query<HashMap<String, String>>,
) -> Result<Json<BuildsRepositoryListResponse>, (StatusCode, Json<ErrorResponse>)> {
) -> Result<Json<BuildsListApiResponse>, (StatusCode, Json<ErrorResponse>)> {
let repository = BuildsRepository::new(service.event_log.clone());
let limit = params.get("limit").and_then(|s| s.parse().ok());
match repository.list(limit).await {
match repository.list_protobuf(limit).await {
Ok(builds) => {
let build_summaries: Vec<BuildRepositorySummary> = builds.into_iter().map(|build| {
BuildRepositorySummary {
build_request_id: build.build_request_id,
status: format!("{:?}", build.status),
requested_partitions: build.requested_partitions.into_iter().map(|p| p.str).collect(),
total_jobs: build.total_jobs,
completed_jobs: build.completed_jobs,
failed_jobs: build.failed_jobs,
cancelled_jobs: build.cancelled_jobs,
requested_at: build.requested_at,
started_at: build.started_at,
completed_at: build.completed_at,
duration_ms: build.duration_ms,
cancelled: build.cancelled,
}
}).collect();
let total_count = build_summaries.len() as u32;
Ok(Json(BuildsRepositoryListResponse {
builds: build_summaries,
let total_count = builds.len() as u32;
let protobuf_response = crate::BuildsListResponse {
builds,
total_count,
}))
}
has_more: false, // TODO: implement proper pagination
};
let api_response = BuildsListApiResponse {
data: protobuf_response,
request_id: None, // TODO: add request ID tracking
pagination: Some(PaginationInfo {
total_count,
has_more: false,
limit: limit.map(|l| l as u32),
offset: None,
}),
};
Ok(Json(api_response))
},
Err(e) => {
error!("Failed to list builds: {}", e);
Err((

View file

@ -75,7 +75,8 @@ pub struct BuildEventSummary {
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionStatusResponse {
pub partition_ref: String,
pub status: String,
pub status_code: i32,
pub status_name: String,
pub last_updated: Option<i64>,
pub build_requests: Vec<String>,
}
@ -143,6 +144,50 @@ pub struct BuildsListResponse {
pub has_more: bool,
}
// Wrapper structs for API responses that contain protobuf data + service metadata
/// API envelope for the builds list endpoint: the protobuf-derived
/// payload plus service-level metadata (request id, pagination).
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildsListApiResponse {
    /// Protobuf-derived builds list payload.
    pub data: crate::BuildsListResponse,
    /// Correlation id for the request; currently always `None` (tracking not implemented).
    pub request_id: Option<String>,
    /// Pagination metadata, when the endpoint supplies it.
    pub pagination: Option<PaginationInfo>,
}
/// API envelope for the partitions list endpoint: the protobuf-derived
/// payload plus service-level metadata (request id, pagination).
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PartitionsListApiResponse {
    /// Protobuf-derived partitions list payload.
    pub data: crate::PartitionsListResponse,
    /// Correlation id for the request; currently always `None` (tracking not implemented).
    pub request_id: Option<String>,
    /// Pagination metadata, when the endpoint supplies it.
    pub pagination: Option<PaginationInfo>,
}
/// API envelope for the jobs list endpoint: the protobuf-derived
/// payload plus service-level metadata (request id, pagination).
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct JobsListApiResponse {
    /// Protobuf-derived jobs list payload.
    pub data: crate::JobsListResponse,
    /// Correlation id for the request; currently always `None` (tracking not implemented).
    pub request_id: Option<String>,
    /// Pagination metadata, when the endpoint supplies it.
    pub pagination: Option<PaginationInfo>,
}
/// API envelope for the tasks list endpoint: the protobuf-derived
/// payload plus service-level metadata (request id, pagination).
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TasksListApiResponse {
    /// Protobuf-derived tasks list payload.
    pub data: crate::TasksListResponse,
    /// Correlation id for the request; currently always `None` (tracking not implemented).
    pub request_id: Option<String>,
    /// Pagination metadata, when the endpoint supplies it.
    pub pagination: Option<PaginationInfo>,
}
/// API envelope for the activity endpoint: the protobuf-derived payload
/// plus a request id. No pagination — activity is a summary, not a list.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct ActivityApiResponse {
    /// Protobuf-derived activity payload.
    pub data: crate::ActivityResponse,
    /// Correlation id for the request; currently always `None` (tracking not implemented).
    pub request_id: Option<String>,
}
/// Pagination metadata attached to list-endpoint envelopes.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PaginationInfo {
    /// Number of items reported by the underlying repository response.
    pub total_count: u32,
    /// Whether more items exist beyond this page (endpoints that don't
    /// support paging hard-code this to `false`).
    pub has_more: bool,
    /// The `limit` the caller requested, if any.
    pub limit: Option<u32>,
    /// The `offset` the caller requested, if any (not yet wired up by callers).
    pub offset: Option<u32>,
}
// Legacy types kept for backward compatibility (will be removed eventually)
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct BuildSummary {
pub build_request_id: String,
@ -209,7 +254,8 @@ pub struct JobMetricsResponse {
pub struct JobRunSummary {
pub build_request_id: String,
pub partitions: Vec<String>,
pub status: String,
pub status_code: i32,
pub status_name: String,
pub duration_ms: Option<i64>,
pub started_at: i64,
}
@ -250,7 +296,7 @@ impl BuildGraphService {
.api_route("/api/v1/builds", get(handlers::list_builds_repository))
.api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_detail))
.api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_repository))
.api_route("/api/v1/partitions", get(handlers::list_partitions))
.api_route("/api/v1/partitions", get(handlers::list_partitions_repository))
.api_route("/api/v1/partitions/:partition_ref", get(handlers::get_partition_detail))
.api_route("/api/v1/partitions/:partition_ref/status", get(handlers::get_partition_status))
.api_route("/api/v1/partitions/:partition_ref/events", get(handlers::get_partition_events))
@ -258,7 +304,7 @@ impl BuildGraphService {
.api_route("/api/v1/jobs", get(handlers::list_jobs_repository))
.api_route("/api/v1/jobs/:label", get(handlers::get_job_detail))
.api_route("/api/v1/jobs/:label/metrics", get(handlers::get_job_metrics))
.api_route("/api/v1/tasks", get(handlers::list_tasks))
.api_route("/api/v1/tasks", get(handlers::list_tasks_repository))
.api_route("/api/v1/tasks/:job_run_id", get(handlers::get_task_detail))
.api_route("/api/v1/tasks/:job_run_id/cancel", post(handlers::cancel_task))
.api_route("/api/v1/activity", get(handlers::get_activity_summary))
@ -276,7 +322,7 @@ impl BuildGraphService {
.api_route("/api/v1/builds", get(handlers::list_builds_repository))
.api_route("/api/v1/builds/:build_request_id", get(handlers::get_build_detail))
.api_route("/api/v1/builds/:build_request_id", delete(handlers::cancel_build_repository))
.api_route("/api/v1/partitions", get(handlers::list_partitions))
.api_route("/api/v1/partitions", get(handlers::list_partitions_repository))
.api_route("/api/v1/partitions/:partition_ref", get(handlers::get_partition_detail))
.api_route("/api/v1/partitions/:partition_ref/status", get(handlers::get_partition_status))
.api_route("/api/v1/partitions/:partition_ref/events", get(handlers::get_partition_events))
@ -284,7 +330,7 @@ impl BuildGraphService {
.api_route("/api/v1/jobs", get(handlers::list_jobs_repository))
.api_route("/api/v1/jobs/:label", get(handlers::get_job_detail))
.api_route("/api/v1/jobs/:label/metrics", get(handlers::get_job_metrics))
.api_route("/api/v1/tasks", get(handlers::list_tasks))
.api_route("/api/v1/tasks", get(handlers::list_tasks_repository))
.api_route("/api/v1/tasks/:job_run_id", get(handlers::get_task_detail))
.api_route("/api/v1/tasks/:job_run_id/cancel", post(handlers::cancel_task))
.api_route("/api/v1/activity", get(handlers::get_activity_summary))

View file

@ -11,6 +11,7 @@ rust_test(
edition = "2021",
deps = [
"@crates//:prost",
"@crates//:schemars",
"@crates//:serde",
"@crates//:serde_json",
],
@ -45,6 +46,7 @@ rust_test(
edition = "2021",
deps = [
"@crates//:prost",
"@crates//:schemars",
"@crates//:serde",
"@crates//:serde_json",
],

View file

@ -1,5 +1,6 @@
- Remove manual references to enum values, e.g. [here](../databuild/repositories/builds/mod.rs:85)
- Type-safe mithril [claude link](https://claude.ai/share/f33f8605-472a-4db4-9211-5a1e52087316)
- Status indicator for page selection
- On build request detail page, show aggregated job results
- Use path-based navigation instead of hashbang?

View file

@ -80,7 +80,7 @@ echo "[INFO] Created build request: $BUILD_ID"
# Wait for build completion
for i in {1..60}; do
STATUS_RESPONSE=$(curl -s "http://127.0.0.1:$SERVICE_PORT/api/v1/builds/$BUILD_ID")
STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status' 2>/dev/null || echo "UNKNOWN")
STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status_name' 2>/dev/null || echo "UNKNOWN")
case "$STATUS" in
"completed"|"COMPLETED"|"BuildRequestCompleted")

View file

@ -89,7 +89,7 @@ echo "[INFO] Created build request: $BUILD_ID"
# Wait for build completion
for i in {1..30}; do
STATUS_RESPONSE=$(curl -s "http://127.0.0.1:$SERVICE_PORT/api/v1/builds/$BUILD_ID")
STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status' 2>/dev/null || echo "UNKNOWN")
STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status_name' 2>/dev/null || echo "UNKNOWN")
case "$STATUS" in
"completed"|"COMPLETED"|"BuildRequestCompleted")