fix up want lineage view
Some checks failed
/ setup (push) Has been cancelled

This commit is contained in:
Stuart Axelbrooke 2025-12-01 03:54:29 +08:00
parent e221cd8502
commit 8176a8261e
14 changed files with 586 additions and 207 deletions

View file

@ -350,11 +350,13 @@ impl Clone for BuildEventLog<MemoryBELStorage> {
}
}
#[cfg(test)]
mod tests {
mod sqlite_bel_storage {
use crate::build_event_log::{BELStorage, BuildEventLog, SqliteBELStorage};
use crate::build_state::BuildState;
use crate::data_build_event::Event;
use crate::util::test_scenarios::default_originating_lifetime;
use crate::{PartitionRef, WantCreateEventV1};
use uuid::Uuid;
@ -387,6 +389,7 @@ mod tests {
e.partitions = vec![PartitionRef {
r#ref: "sqlite_partition_1234".to_string(),
}];
e.lifetime = Some(default_originating_lifetime());
let event_id = log
.append_event(&Event::WantCreateV1(e))
.expect("append_event failed");
@ -430,14 +433,17 @@ mod tests {
let mut e2 = WantCreateEventV1::default();
e2.want_id = Uuid::new_v4().into();
e2.lifetime = Some(default_originating_lifetime());
log.append_event(&Event::WantCreateV1(e2))
.expect("append_event failed");
let mut e3 = WantCreateEventV1::default();
e3.want_id = Uuid::new_v4().into();
e3.lifetime = Some(default_originating_lifetime());
log.append_event(&Event::WantCreateV1(e3))
.expect("append_event failed");
let mut e4 = WantCreateEventV1::default();
e4.want_id = Uuid::new_v4().into();
e4.lifetime = Some(default_originating_lifetime());
log.append_event(&Event::WantCreateV1(e4))
.expect("append_event failed");

View file

@ -4,12 +4,12 @@
//! returning derivative events to be appended to the BEL.
use crate::data_build_event::Event;
use crate::data_deps::{WantTimestamps, missing_deps_to_want_events};
use crate::event_source::Source as EventSourceVariant;
use crate::data_deps::missing_deps_to_want_events;
use crate::job_run_state::{JobRun, JobRunWithState, QueuedState as JobQueuedState};
use crate::partition_state::{BuildingPartitionRef, Partition};
use crate::util::current_timestamp;
use crate::want_state::{NewState as WantNewState, Want, WantWithState};
use crate::want_create_event_v1::Lifetime;
use crate::want_state::{NewState as WantNewState, Want, WantLifetime, WantWithState};
use crate::{
JobRunBufferEventV1, JobRunCancelEventV1, JobRunFailureEventV1, JobRunHeartbeatEventV1,
JobRunMissingDepsEventV1, JobRunSuccessEventV1, PartitionRef, TaintCancelEventV1,
@ -45,26 +45,22 @@ impl BuildState {
// Create want in New state from event
let want_new: WantWithState<WantNewState> = event.clone().into();
// Log creation with derivative vs user-created distinction
let is_derivative = if let Some(source) = &event.source {
if let Some(EventSourceVariant::JobTriggered(job_triggered)) = &source.source {
tracing::info!(
want_id = %event.want_id,
partitions = ?event.partitions.iter().map(|p| &p.r#ref).collect::<Vec<_>>(),
source_job_run_id = %job_triggered.job_run_id,
"Want created (derivative - auto-created due to missing dependency)"
);
true
} else {
false
}
// Log creation with derivative vs user-created distinction based on lifetime
let is_derivative = matches!(&event.lifetime, Some(Lifetime::Ephemeral(_)));
if let Some(Lifetime::Ephemeral(eph)) = &event.lifetime {
tracing::info!(
want_id = %event.want_id,
partitions = ?event.partitions.iter().map(|p| &p.r#ref).collect::<Vec<_>>(),
source_job_run_id = %eph.job_run_id,
"Want created (ephemeral - auto-created due to missing dependency)"
);
} else {
tracing::info!(
want_id = %event.want_id,
partitions = ?event.partitions.iter().map(|p| &p.r#ref).collect::<Vec<_>>(),
"Want created (user-requested)"
"Want created (originating - user-requested)"
);
false
};
// Register this want with all its partitions (via inverted index)
@ -175,17 +171,22 @@ impl BuildState {
self.wants.insert(event.want_id.clone(), final_want);
// If this is a derivative want (triggered by a job's dep miss), transition impacted wants to UpstreamBuilding
if is_derivative {
if let Some(source) = &event.source {
if let Some(EventSourceVariant::JobTriggered(job_triggered)) = &source.source {
self.handle_derivative_want_creation(
&event.want_id,
&event.partitions,
&job_triggered.job_run_id,
);
// If this is an ephemeral want (triggered by a job's dep miss):
// 1. Record the derivative want ID on the source job run
// 2. Transition impacted wants to UpstreamBuilding
if let Some(Lifetime::Ephemeral(eph)) = &event.lifetime {
// Add this want as a derivative of the source job run
if let Some(job_run) = self.job_runs.get_mut(&eph.job_run_id) {
if let JobRun::DepMiss(dep_miss) = job_run {
dep_miss.add_derivative_want_id(&event.want_id);
}
}
self.handle_derivative_want_creation(
&event.want_id,
&event.partitions,
&eph.job_run_id,
);
}
vec![]
@ -242,7 +243,7 @@ impl BuildState {
// Create job run in Queued state
let queued: JobRunWithState<JobQueuedState> = event.clone().into();
// Transition wants to Building
// Transition wants to Building and track this job run on each want
// Valid states when job buffer event arrives:
// - Idle: First job starting for this want (normal case)
// - Building: Another job already started for this want (multiple jobs can service same want)
@ -255,7 +256,7 @@ impl BuildState {
wap.want_id
));
let transitioned = match want {
let mut transitioned = match want {
Want::New(new_want) => {
// Want was just created and hasn't fully sensed yet - transition to Building
// This can happen if want creation and job buffer happen in quick succession
@ -287,6 +288,9 @@ impl BuildState {
}
};
// Track this job run on the want for lineage
transitioned.add_job_run_id(&event.job_run_id);
self.wants.insert(wap.want_id.clone(), transitioned);
}
@ -575,15 +579,6 @@ impl BuildState {
}
};
// Infer data/SLA timestamps from servicing wants
let want_timestamps: WantTimestamps = dep_miss
.info
.servicing_wants
.iter()
.flat_map(|wap| self.get_want(&wap.want_id).map(|w| w.into()))
.reduce(|a: WantTimestamps, b: WantTimestamps| a.merge(b))
.expect("BUG: No servicing wants found");
// Collect all missing deps into a flat list of partition refs
let all_missing_deps: Vec<PartitionRef> = event
.missing_deps
@ -595,13 +590,11 @@ impl BuildState {
let building_refs_to_reset = dep_miss.get_building_partitions_to_reset();
self.transition_partitions_to_upstream_building(&building_refs_to_reset, all_missing_deps);
// Generate WantCreateV1 events for the missing dependencies
// Generate ephemeral WantCreateV1 events for the missing dependencies
// These events will be returned and appended to the BEL by BuildEventLog.append_event()
let want_events = missing_deps_to_want_events(
dep_miss.get_missing_deps().to_vec(),
&event.job_run_id,
want_timestamps,
);
// Ephemeral wants delegate freshness decisions to their originating want via the job_run_id reference
let want_events =
missing_deps_to_want_events(dep_miss.get_missing_deps().to_vec(), &event.job_run_id);
// Store the job run in DepMiss state so we can access the missing_deps later
// When the derivative WantCreateV1 events get processed by handle_want_create(),
@ -706,13 +699,19 @@ mod tests {
mod want {
use super::*;
use crate::WantDetail;
use crate::want_create_event_v1::Lifetime;
use crate::{OriginatingLifetime, WantDetail};
#[test]
fn test_should_create_want() {
let mut e = WantCreateEventV1::default();
e.want_id = "1234".to_string();
e.partitions = vec!["mypart".into()];
e.lifetime = Some(Lifetime::Originating(OriginatingLifetime {
data_timestamp: 1000,
ttl_seconds: 3600,
sla_seconds: 7200,
}));
let mut state = BuildState::default();
state.handle_event(&e.clone().into());
@ -725,9 +724,12 @@ mod tests {
#[test]
fn test_should_cancel_want() {
use crate::util::test_scenarios::default_originating_lifetime;
let mut e = WantCreateEventV1::default();
e.want_id = "1234".to_string();
e.partitions = vec!["mypart".into()];
e.lifetime = Some(default_originating_lifetime());
let mut state = BuildState::default();
state.handle_event(&e.clone().into());
@ -776,6 +778,7 @@ mod tests {
/// This was the original bug that motivated the UUID refactor.
#[test]
fn test_concurrent_wants_same_partition() {
use crate::util::test_scenarios::default_originating_lifetime;
use crate::{
JobRunBufferEventV1, JobRunHeartbeatEventV1, PartitionRef,
WantAttributedPartitions, WantCreateEventV1,
@ -790,6 +793,7 @@ mod tests {
create_want_1.partitions = vec![PartitionRef {
r#ref: "data/beta".to_string(),
}];
create_want_1.lifetime = Some(default_originating_lifetime());
state.handle_event(&Event::WantCreateV1(create_want_1));
// Want 1 should be Idle (no partition exists yet)
@ -807,6 +811,7 @@ mod tests {
create_want_2.partitions = vec![PartitionRef {
r#ref: "data/beta".to_string(),
}];
create_want_2.lifetime = Some(default_originating_lifetime());
state.handle_event(&Event::WantCreateV1(create_want_2));
// Want 2 should also be Idle
@ -884,6 +889,7 @@ mod tests {
mod partition_lifecycle {
use super::*;
use crate::util::test_scenarios::default_originating_lifetime;
use crate::{
JobRunBufferEventV1, JobRunFailureEventV1, JobRunHeartbeatEventV1,
JobRunMissingDepsEventV1, JobRunSuccessEventV1, MissingDeps, PartitionRef,
@ -904,6 +910,7 @@ mod tests {
create_beta.partitions = vec![PartitionRef {
r#ref: "data/beta".to_string(),
}];
create_beta.lifetime = Some(default_originating_lifetime());
state.handle_event(&Event::WantCreateV1(create_beta));
// 2. Job buffers for beta
@ -1034,6 +1041,7 @@ mod tests {
create_beta.partitions = vec![PartitionRef {
r#ref: "data/beta".to_string(),
}];
create_beta.lifetime = Some(default_originating_lifetime());
state.handle_event(&Event::WantCreateV1(create_beta));
// 2. First job buffers for beta (creates uuid-1)
@ -1081,6 +1089,7 @@ mod tests {
create_alpha.partitions = vec![PartitionRef {
r#ref: "data/alpha".to_string(),
}];
create_alpha.lifetime = Some(default_originating_lifetime());
state.handle_event(&Event::WantCreateV1(create_alpha));
let alpha_job_id = "alpha-job".to_string();

View file

@ -15,7 +15,34 @@ use super::{BuildState, consts};
impl BuildState {
pub fn get_want(&self, want_id: &str) -> Option<WantDetail> {
    self.wants.get(want_id).map(|w| {
        let mut detail = w.to_detail();
        // Populate job_runs and compute derivative_want_ids by traversing job runs.
        //
        // derivative_want_ids is computed at query time rather than maintained during
        // event handling. The relationship flows: Want → JobRun → (dep-miss) → EphemeralWant
        //
        // - JobRun tracks which derivative wants it spawned (on DepMissState)
        // - Want only tracks which job runs serviced it (job_run_ids)
        // - At query time, we traverse: Want's job_run_ids → each JobRun's derivative_want_ids
        //
        // This keeps event handling simple (just update the job run) and keeps JobRun
        // as the source of truth for derivative want relationships.
        for job_run_id in &detail.job_run_ids {
            // A missing job run id is silently skipped — the want simply shows
            // less lineage rather than failing the whole query.
            if let Some(job_run) = self.job_runs.get(job_run_id) {
                let job_detail = job_run.to_detail();
                // Collect derivative want IDs, de-duplicated across job runs.
                for derivative_want_id in &job_detail.derivative_want_ids {
                    if !detail.derivative_want_ids.contains(derivative_want_id) {
                        detail.derivative_want_ids.push(derivative_want_id.clone());
                    }
                }
                // Attach the full job run details (for display in lineage tables).
                detail.job_runs.push(job_detail);
            }
        }
        detail
    })
}
pub fn get_taint(&self, taint_id: &str) -> Option<TaintDetail> {

View file

@ -1,7 +1,7 @@
use crate::data_build_event::Event;
use crate::want_create_event_v1::Lifetime;
use crate::{
JobRunMissingDeps, JobRunReadDeps, JobTriggeredEvent, MissingDeps, ReadDeps, WantCreateEventV1,
WantDetail,
EphemeralLifetime, JobRunMissingDeps, JobRunReadDeps, MissingDeps, ReadDeps, WantCreateEventV1,
};
use uuid::Uuid;
@ -82,53 +82,19 @@ fn line_matches<'a>(line: &'a str, prefix: &'a str) -> Option<&'a str> {
line.trim().strip_prefix(prefix)
}
// Freshness-related timestamps lifted from a `WantDetail`.
pub struct WantTimestamps {
data_timestamp: u64,
ttl_seconds: u64,
sla_seconds: u64,
}
// Field-for-field copy of the corresponding `WantDetail` values.
impl From<WantDetail> for WantTimestamps {
fn from(want_detail: WantDetail) -> Self {
WantTimestamps {
data_timestamp: want_detail.data_timestamp,
ttl_seconds: want_detail.ttl_seconds,
sla_seconds: want_detail.sla_seconds,
}
}
}
impl WantTimestamps {
// Combine two wants' timestamps into one set that covers both:
// earliest data_timestamp, longest ttl_seconds and sla_seconds.
pub fn merge(self, other: WantTimestamps) -> WantTimestamps {
// NOTE(review): min/max as the merge policy was flagged with a TODO by the
// original author — confirm against how callers consume the merged value.
WantTimestamps {
data_timestamp: self.data_timestamp.min(other.data_timestamp),
ttl_seconds: self.ttl_seconds.max(other.ttl_seconds),
sla_seconds: self.sla_seconds.max(other.sla_seconds),
}
}
}
pub fn missing_deps_to_want_events(
missing_deps: Vec<MissingDeps>,
job_run_id: &String,
want_timestamps: WantTimestamps,
) -> Vec<Event> {
/// Create ephemeral want events from missing dependencies.
/// Ephemeral wants are derivative wants created by the system when a job hits a dep-miss.
/// They delegate freshness decisions to their originating want.
pub fn missing_deps_to_want_events(missing_deps: Vec<MissingDeps>, job_run_id: &str) -> Vec<Event> {
missing_deps
.iter()
.map(|md| {
Event::WantCreateV1(WantCreateEventV1 {
want_id: Uuid::new_v4().into(),
partitions: md.missing.clone(),
data_timestamp: want_timestamps.data_timestamp,
ttl_seconds: want_timestamps.ttl_seconds,
sla_seconds: want_timestamps.sla_seconds,
source: Some(
JobTriggeredEvent {
job_run_id: job_run_id.clone(),
}
.into(),
),
lifetime: Some(Lifetime::Ephemeral(EphemeralLifetime {
job_run_id: job_run_id.to_string(),
})),
comment: Some("Missing data".to_string()),
})
})

View file

@ -62,6 +62,20 @@ message JobTriggeredEvent {
string job_run_id = 1;
}
// Want lifetime semantics
// Originating wants are user-created with explicit freshness requirements
message OriginatingLifetime {
// Point in time the requested data should represent (unix timestamp —
// presumably seconds; confirm against producers)
uint64 data_timestamp = 1;
// Freshness window, in seconds
uint64 ttl_seconds = 2;
// Service-level deadline, in seconds
uint64 sla_seconds = 3;
}
// Ephemeral wants are system-created (derivative) from dep-miss
// They delegate freshness decisions to their originating want
message EphemeralLifetime {
// The job run that hit dep-miss and created this derivative want
string job_run_id = 1;
}
message WantAttributedPartitions {
string want_id = 1;
repeated PartitionRef partitions = 2;
@ -136,12 +150,14 @@ message WantCreateEventV1 {
// The unique ID of this want
string want_id = 1;
repeated PartitionRef partitions = 2;
uint64 data_timestamp = 3;
uint64 ttl_seconds = 4;
uint64 sla_seconds = 5;
// The source of the want. Can be from job, API, CLI, web app...
EventSource source = 6;
optional string comment = 7;
// Lifetime semantics - exactly one must be set
oneof lifetime {
OriginatingLifetime originating = 3;
EphemeralLifetime ephemeral = 4;
}
optional string comment = 5;
}
message WantCancelEventV1 {
string want_id = 1;
@ -207,17 +223,22 @@ message WantDetail {
repeated PartitionRef partitions = 2;
// The upstream partitions, detected from a dep miss job run failure
repeated PartitionRef upstreams = 3;
uint64 data_timestamp = 4;
uint64 ttl_seconds = 5;
uint64 sla_seconds = 6;
EventSource source = 7;
optional string comment = 8;
WantStatus status = 9;
uint64 last_updated_timestamp = 10;
// Lineage: all job runs that have serviced this want
repeated string job_run_ids = 11;
// Lineage: derivative wants spawned by this want's job dep-misses
repeated string derivative_want_ids = 12;
// Lifetime semantics
oneof lifetime {
OriginatingLifetime originating = 4;
EphemeralLifetime ephemeral = 5;
}
optional string comment = 6;
WantStatus status = 7;
uint64 last_updated_timestamp = 8;
// Lineage: all job runs that have serviced this want (IDs for reference)
repeated string job_run_ids = 9;
// Lineage: derivative wants spawned by this want's job dep-misses (computed from job_run_ids)
repeated string derivative_want_ids = 10;
// Lineage: full details of job runs servicing this want (for display in tables)
repeated JobRunDetail job_runs = 11;
}
message PartitionDetail {
@ -307,6 +328,11 @@ message JobRunDetail {
map<string, string> wrote_partition_uuids = 8;
// Lineage: derivative wants spawned by this job's dep-miss (for DepMiss jobs)
repeated string derivative_want_ids = 9;
// Timestamps for tracking job lifecycle
optional uint64 queued_at = 10;
optional uint64 started_at = 11;
// The job label (e.g. "//path/to:job")
string job_label = 12;
}
@ -378,11 +404,11 @@ message ListJobRunsResponse {
message CreateWantRequest {
repeated PartitionRef partitions = 1;
// User-created wants are always originating (have explicit freshness requirements)
uint64 data_timestamp = 2;
uint64 ttl_seconds = 3;
uint64 sla_seconds = 4;
EventSource source = 5;
optional string comment = 6;
optional string comment = 5;
}
message CreateWantResponse {
WantDetail data = 1;

View file

@ -1,14 +1,15 @@
use crate::PartitionStatusCode::{PartitionFailed, PartitionLive};
use crate::data_build_event::Event;
use crate::job_run_state::{JobInfo, JobRunWithState, QueuedState};
use crate::job_run_state::{JobInfo, JobRunWithState, QueuedState, TimingInfo};
use crate::util::current_timestamp;
use crate::want_create_event_v1::Lifetime;
use crate::{
CancelWantRequest, CancelWantResponse, CreateTaintRequest, CreateTaintResponse,
CreateWantRequest, CreateWantResponse, EventSource, GetWantResponse, JobRunBufferEventV1,
JobRunDetail, JobRunStatus, JobRunStatusCode, JobTriggeredEvent, ManuallyTriggeredEvent,
PartitionDetail, PartitionRef, PartitionStatus, PartitionStatusCode, TaintCancelEventV1,
TaintCreateEventV1, TaintDetail, WantAttributedPartitions, WantCancelEventV1,
WantCreateEventV1, WantDetail, WantStatus, WantStatusCode, event_source,
OriginatingLifetime, PartitionDetail, PartitionRef, PartitionStatus, PartitionStatusCode,
TaintCancelEventV1, TaintCreateEventV1, TaintDetail, WantAttributedPartitions,
WantCancelEventV1, WantCreateEventV1, WantDetail, WantStatus, WantStatusCode, event_source,
};
use uuid::Uuid;
@ -19,19 +20,23 @@ impl From<&WantCreateEventV1> for WantDetail {
}
impl From<WantCreateEventV1> for WantDetail {
fn from(e: WantCreateEventV1) -> Self {
// Convert want_create_event_v1::Lifetime to want_detail::Lifetime
let lifetime = e.lifetime.map(|l| match l {
Lifetime::Originating(orig) => crate::want_detail::Lifetime::Originating(orig),
Lifetime::Ephemeral(eph) => crate::want_detail::Lifetime::Ephemeral(eph),
});
WantDetail {
want_id: e.want_id,
partitions: e.partitions,
upstreams: vec![],
data_timestamp: e.data_timestamp,
ttl_seconds: e.ttl_seconds,
sla_seconds: e.sla_seconds,
source: e.source,
lifetime,
comment: e.comment,
status: Some(WantStatusCode::WantIdle.into()),
last_updated_timestamp: current_timestamp(),
job_run_ids: vec![],
derivative_want_ids: vec![],
job_runs: vec![],
}
}
}
@ -79,6 +84,7 @@ impl From<JobRunBufferEventV1> for JobRunDetail {
use std::collections::HashMap;
Self {
id: value.job_run_id,
job_label: value.job_label,
status: Some(JobRunStatusCode::JobRunQueued.into()),
last_heartbeat_at: None,
building_partitions: value.building_partitions,
@ -87,21 +93,27 @@ impl From<JobRunBufferEventV1> for JobRunDetail {
read_partition_uuids: HashMap::new(),
wrote_partition_uuids: HashMap::new(),
derivative_want_ids: vec![],
queued_at: Some(current_timestamp()),
started_at: None,
}
}
}
impl From<JobRunBufferEventV1> for JobRunWithState<QueuedState> {
fn from(event: JobRunBufferEventV1) -> Self {
let queued_at = current_timestamp();
JobRunWithState {
info: JobInfo {
id: event.job_run_id,
job_label: event.job_label,
building_partitions: event.building_partitions,
servicing_wants: event.want_attributed_partitions,
},
state: QueuedState {
queued_at: current_timestamp(),
timing: TimingInfo {
queued_at,
started_at: None,
},
state: QueuedState { queued_at },
}
}
}
@ -197,13 +209,15 @@ impl From<&WantDetail> for WantAttributedPartitions {
impl From<CreateWantRequest> for WantCreateEventV1 {
fn from(value: CreateWantRequest) -> Self {
// User-created wants are always originating (have explicit freshness requirements)
WantCreateEventV1 {
want_id: Uuid::new_v4().into(),
partitions: value.partitions,
data_timestamp: value.data_timestamp,
ttl_seconds: value.ttl_seconds,
sla_seconds: value.sla_seconds,
source: value.source,
lifetime: Some(Lifetime::Originating(OriginatingLifetime {
data_timestamp: value.data_timestamp,
ttl_seconds: value.ttl_seconds,
sla_seconds: value.sla_seconds,
})),
comment: value.comment,
}
}

View file

@ -2,7 +2,7 @@ use crate::build_event_log::BELStorage;
use crate::build_state::BuildState;
use crate::commands::Command;
use crate::web::templates::{
BaseContext, HomePage, JobRunDetailPage, JobRunDetailView, JobRunsListPage,
BaseContext, DerivativeWantView, HomePage, JobRunDetailPage, JobRunDetailView, JobRunsListPage,
PartitionDetailPage, PartitionDetailView, PartitionsListPage, WantCreatePage, WantDetailPage,
WantDetailView, WantsListPage,
};
@ -260,9 +260,17 @@ async fn want_detail_page(
match build_state.get_want(&want_id) {
Some(want) => {
// Fetch derivative wants
let derivative_wants: Vec<_> = want
.derivative_want_ids
.iter()
.filter_map(|id| build_state.get_want(id))
.map(|w| DerivativeWantView::from(&w))
.collect();
let template = WantDetailPage {
base: BaseContext::default(),
want: WantDetailView::from(want),
want: WantDetailView::new(&want, derivative_wants),
};
match template.render() {
Ok(html) => Html(html).into_response(),

View file

@ -45,6 +45,8 @@ pub struct DepMissState {
pub detected_at: u64,
pub missing_deps: Vec<MissingDeps>,
pub read_deps: Vec<ReadDeps>,
/// Want IDs of ephemeral wants spawned by this dep-miss
pub derivative_want_ids: Vec<String>,
}
/// State: Job was explicitly canceled
@ -59,14 +61,25 @@ pub struct CanceledState {
#[derive(Debug, Clone)]
pub struct JobInfo {
pub id: String,
pub job_label: String,
pub building_partitions: Vec<PartitionRef>,
pub servicing_wants: Vec<WantAttributedPartitions>,
}
/// Timing information preserved across state transitions
#[derive(Debug, Clone)]
pub struct TimingInfo {
/// When the job was first queued (unix timestamp — presumably the same unit
/// as `current_timestamp()`; confirm)
pub queued_at: u64,
/// When the job started running (None if still queued or canceled before starting)
pub started_at: Option<u64>,
}
/// Generic job run struct parameterized by state
///
/// Typestate wrapper: transition methods (e.g. `start_running`, `fail`,
/// `dep_miss`) consume `self` and return a `JobRunWithState` with a different
/// `S`, so only transitions valid for the current state are callable.
#[derive(Debug, Clone)]
pub struct JobRunWithState<S> {
/// Identity and workload info shared by every state.
pub info: JobInfo,
/// Queue/start timestamps carried through transitions.
pub timing: TimingInfo,
/// State-specific payload (e.g. QueuedState, RunningState, DepMissState).
pub state: S,
}
@ -88,6 +101,10 @@ impl JobRunWithState<QueuedState> {
pub fn start_running(self, timestamp: u64) -> JobRunWithState<RunningState> {
JobRunWithState {
info: self.info,
timing: TimingInfo {
queued_at: self.timing.queued_at,
started_at: Some(timestamp),
},
state: RunningState {
started_at: timestamp,
last_heartbeat_at: timestamp, // Initialize to start time
@ -104,6 +121,7 @@ impl JobRunWithState<QueuedState> {
) -> JobRunWithState<CanceledState> {
JobRunWithState {
info: self.info,
timing: self.timing, // Preserve timing (started_at remains None)
state: CanceledState {
canceled_at: timestamp,
source,
@ -130,6 +148,7 @@ impl JobRunWithState<RunningState> {
) -> JobRunWithState<SucceededState> {
JobRunWithState {
info: self.info,
timing: self.timing,
state: SucceededState {
completed_at: timestamp,
read_deps,
@ -143,6 +162,7 @@ impl JobRunWithState<RunningState> {
pub fn fail(self, timestamp: u64, reason: String) -> JobRunWithState<FailedState> {
JobRunWithState {
info: self.info,
timing: self.timing,
state: FailedState {
failed_at: timestamp,
failure_reason: reason,
@ -158,11 +178,13 @@ impl JobRunWithState<RunningState> {
read_deps: Vec<ReadDeps>,
) -> JobRunWithState<DepMissState> {
JobRunWithState {
timing: self.timing,
info: self.info,
state: DepMissState {
detected_at: timestamp,
missing_deps,
read_deps,
derivative_want_ids: vec![], // Populated later when ephemeral wants are created
},
}
}
@ -176,6 +198,7 @@ impl JobRunWithState<RunningState> {
) -> JobRunWithState<CanceledState> {
JobRunWithState {
info: self.info,
timing: self.timing,
state: CanceledState {
canceled_at: timestamp,
source,
@ -305,6 +328,17 @@ impl JobRunWithState<DepMissState> {
pub fn get_read_deps(&self) -> &[ReadDeps] {
&self.state.read_deps
}
/// Add a derivative want ID (ephemeral want spawned by this dep-miss).
///
/// Idempotent: a want ID already present is not added again, so replayed
/// events cannot double-count a derivative want.
pub fn add_derivative_want_id(&mut self, want_id: &str) {
    // Compare as &str via `iter().any` — avoids allocating a temporary
    // String (as `contains(&want_id.to_string())` would) just to test
    // membership; we only allocate when actually inserting.
    if !self.state.derivative_want_ids.iter().any(|id| id == want_id) {
        self.state.derivative_want_ids.push(want_id.to_string());
    }
}
}
impl JobRunWithState<CanceledState> {
@ -447,6 +481,7 @@ impl JobRun {
match self {
JobRun::Queued(queued) => JobRunDetail {
id: queued.info.id.clone(),
job_label: queued.info.job_label.clone(),
status: Some(JobRunStatusCode::JobRunQueued.into()),
last_heartbeat_at: None,
building_partitions: queued.info.building_partitions.clone(),
@ -455,9 +490,12 @@ impl JobRun {
read_partition_uuids: HashMap::new(),
wrote_partition_uuids: HashMap::new(),
derivative_want_ids: vec![],
queued_at: Some(queued.timing.queued_at),
started_at: queued.timing.started_at,
},
JobRun::Running(running) => JobRunDetail {
id: running.info.id.clone(),
job_label: running.info.job_label.clone(),
status: Some(JobRunStatusCode::JobRunRunning.into()),
last_heartbeat_at: Some(running.state.last_heartbeat_at),
building_partitions: running.info.building_partitions.clone(),
@ -466,9 +504,12 @@ impl JobRun {
read_partition_uuids: HashMap::new(),
wrote_partition_uuids: HashMap::new(),
derivative_want_ids: vec![],
queued_at: Some(running.timing.queued_at),
started_at: running.timing.started_at,
},
JobRun::Succeeded(succeeded) => JobRunDetail {
id: succeeded.info.id.clone(),
job_label: succeeded.info.job_label.clone(),
status: Some(JobRunStatusCode::JobRunSucceeded.into()),
last_heartbeat_at: None,
building_partitions: succeeded.info.building_partitions.clone(),
@ -487,9 +528,12 @@ impl JobRun {
.into_iter()
.collect(),
derivative_want_ids: vec![],
queued_at: Some(succeeded.timing.queued_at),
started_at: succeeded.timing.started_at,
},
JobRun::Failed(failed) => JobRunDetail {
id: failed.info.id.clone(),
job_label: failed.info.job_label.clone(),
status: Some(JobRunStatusCode::JobRunFailed.into()),
last_heartbeat_at: None,
building_partitions: failed.info.building_partitions.clone(),
@ -498,9 +542,12 @@ impl JobRun {
read_partition_uuids: HashMap::new(),
wrote_partition_uuids: HashMap::new(),
derivative_want_ids: vec![],
queued_at: Some(failed.timing.queued_at),
started_at: failed.timing.started_at,
},
JobRun::DepMiss(dep_miss) => JobRunDetail {
id: dep_miss.info.id.clone(),
job_label: dep_miss.info.job_label.clone(),
status: Some(JobRunStatusCode::JobRunDepMiss.into()),
last_heartbeat_at: None,
building_partitions: dep_miss.info.building_partitions.clone(),
@ -508,12 +555,13 @@ impl JobRun {
read_deps: dep_miss.state.read_deps.clone(),
read_partition_uuids: HashMap::new(),
wrote_partition_uuids: HashMap::new(),
// Note: derivative_want_ids would need to be populated from BuildState
// since the job doesn't track which wants it spawned (BEL does)
derivative_want_ids: vec![],
derivative_want_ids: dep_miss.state.derivative_want_ids.clone(),
queued_at: Some(dep_miss.timing.queued_at),
started_at: dep_miss.timing.started_at,
},
JobRun::Canceled(canceled) => JobRunDetail {
id: canceled.info.id.clone(),
job_label: canceled.info.job_label.clone(),
status: Some(JobRunStatusCode::JobRunCanceled.into()),
last_heartbeat_at: None,
building_partitions: canceled.info.building_partitions.clone(),
@ -522,6 +570,8 @@ impl JobRun {
read_partition_uuids: HashMap::new(),
wrote_partition_uuids: HashMap::new(),
derivative_want_ids: vec![],
queued_at: Some(canceled.timing.queued_at),
started_at: canceled.timing.started_at,
},
}
}

View file

@ -459,12 +459,13 @@ impl<S: BELStorage + Debug> Orchestrator<S> {
#[cfg(test)]
mod tests {
use crate::WantCreateEventV1;
use crate::build_event_log::MemoryBELStorage;
use crate::job::JobConfiguration;
use crate::mock_job_run::MockJobRun;
use crate::orchestrator::{Orchestrator, OrchestratorConfig};
use crate::util::current_timestamp;
use crate::want_create_event_v1::Lifetime;
use crate::{OriginatingLifetime, WantCreateEventV1};
use uuid::Uuid;
fn build_orchestrator() -> Orchestrator<MemoryBELStorage> {
@ -477,10 +478,11 @@ mod tests {
Self {
want_id: Uuid::new_v4().to_string(),
partitions: vec![],
data_timestamp: current_timestamp(),
ttl_seconds: 1000,
sla_seconds: 1000,
source: None,
lifetime: Some(Lifetime::Originating(OriginatingLifetime {
data_timestamp: current_timestamp(),
ttl_seconds: 1000,
sla_seconds: 1000,
})),
comment: Some("test want".to_string()),
}
}
@ -1045,7 +1047,8 @@ echo 'Beta succeeded'
mod want_grouping {
use super::super::*;
use crate::build_event_log::MemoryBELStorage;
use crate::{PartitionRef, WantDetail};
use crate::want_detail::Lifetime;
use crate::{OriginatingLifetime, PartitionRef, WantDetail};
fn create_job_config(label: &str, pattern: &str) -> JobConfiguration {
JobConfiguration {
@ -1066,15 +1069,17 @@ echo 'Beta succeeded'
})
.collect(),
upstreams: vec![],
data_timestamp: 0,
ttl_seconds: 0,
sla_seconds: 0,
source: None,
lifetime: Some(Lifetime::Originating(OriginatingLifetime {
data_timestamp: 0,
ttl_seconds: 0,
sla_seconds: 0,
})),
comment: None,
status: None,
last_updated_timestamp: 0,
job_run_ids: vec![],
derivative_want_ids: vec![],
job_runs: vec![],
}
}

View file

@ -149,12 +149,23 @@ impl std::fmt::Display for DatabuildError {
#[cfg(test)]
pub mod test_scenarios {
use crate::data_build_event::Event;
use crate::want_create_event_v1::Lifetime;
use crate::{
JobRunBufferEventV1, JobRunHeartbeatEventV1, JobRunMissingDepsEventV1,
JobRunSuccessEventV1, MissingDeps, PartitionRef, ReadDeps, WantAttributedPartitions,
WantCreateEventV1,
EphemeralLifetime, JobRunBufferEventV1, JobRunHeartbeatEventV1, JobRunMissingDepsEventV1,
JobRunSuccessEventV1, MissingDeps, OriginatingLifetime, PartitionRef, ReadDeps,
WantAttributedPartitions, WantCreateEventV1,
};
/// Default `Originating` lifetime for use in tests.
///
/// `WantCreateEventV1` requires a lifetime to be set, so test fixtures use
/// this helper instead of repeating the literal everywhere.
pub fn default_originating_lifetime() -> Lifetime {
    let defaults = OriginatingLifetime {
        data_timestamp: 1000,
        ttl_seconds: 3600,
        sla_seconds: 7200,
    };
    Lifetime::Originating(defaults)
}
/// IDs used in the multihop scenario for easy reference in tests
pub struct MultihopIds {
pub beta_want_id: String,
@ -189,13 +200,18 @@ pub mod test_scenarios {
let ids = MultihopIds::default();
let mut events = vec![];
// 1. Create want for data/beta
// 1. Create originating want for data/beta (user-requested)
events.push(Event::WantCreateV1(WantCreateEventV1 {
want_id: ids.beta_want_id.clone(),
partitions: vec![PartitionRef {
r#ref: "data/beta".to_string(),
}],
..Default::default()
lifetime: Some(Lifetime::Originating(OriginatingLifetime {
data_timestamp: 1000,
ttl_seconds: 3600,
sla_seconds: 7200,
})),
comment: Some("User requested beta data".to_string()),
}));
// 2. Queue beta job (first attempt)
@ -234,13 +250,16 @@ pub mod test_scenarios {
..Default::default()
}));
// 5. Create derivative want for data/alpha
// 5. Create ephemeral want for data/alpha (derivative - created due to dep-miss)
events.push(Event::WantCreateV1(WantCreateEventV1 {
want_id: ids.alpha_want_id.clone(),
partitions: vec![PartitionRef {
r#ref: "data/alpha".to_string(),
}],
..Default::default()
lifetime: Some(Lifetime::Ephemeral(EphemeralLifetime {
job_run_id: ids.beta_job_1_id.clone(),
})),
comment: Some("Missing data".to_string()),
}));
// 6. Queue alpha job

View file

@ -1,8 +1,31 @@
use crate::partition_state::FailedPartitionRef;
use crate::util::{HasRelatedIds, RelatedIds, current_timestamp};
use crate::{EventSource, PartitionRef, WantCreateEventV1, WantDetail, WantStatusCode};
use crate::want_create_event_v1::Lifetime;
use crate::want_detail::Lifetime as WantDetailLifetime;
use crate::{
EphemeralLifetime, EventSource, OriginatingLifetime, PartitionRef, WantCreateEventV1,
WantDetail, WantStatusCode,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// Want lifetime semantics - determines how freshness/TTL is evaluated
#[derive(Debug, Clone)]
pub enum WantLifetime {
/// User/API-created wants with explicit freshness requirements.
/// These drive ongoing rebuilds when partitions get tainted.
Originating {
/// Point in time the data should represent (unix timestamp).
data_timestamp: u64,
/// Freshness window, in seconds.
ttl_seconds: u64,
/// Service-level deadline, in seconds.
sla_seconds: u64,
},
/// System-created (derivative) wants from dep-miss.
/// Delegate freshness decisions to their originating want.
/// Complete when partitions become Live, never trigger independent rebuilds.
Ephemeral {
/// The job run that hit dep-miss and created this derivative want
job_run_id: String,
},
}
/// State: Want has just been created, state not yet determined by sensing partition states
#[derive(Debug, Clone)]
@ -57,12 +80,11 @@ pub struct CanceledState {
pub struct WantInfo {
pub want_id: String,
pub partitions: Vec<PartitionRef>,
pub data_timestamp: u64,
pub ttl_seconds: u64,
pub sla_seconds: u64,
pub source: Option<EventSource>,
pub lifetime: WantLifetime,
pub comment: Option<String>,
pub last_updated_at: u64,
/// Job runs that have serviced this want (populated by handle_job_run_buffer)
pub job_run_ids: Vec<String>,
}
impl Default for WantInfo {
@ -70,12 +92,14 @@ impl Default for WantInfo {
Self {
want_id: uuid::Uuid::new_v4().to_string(),
partitions: vec![],
data_timestamp: 0,
ttl_seconds: 0,
sla_seconds: 0,
source: None,
lifetime: WantLifetime::Originating {
data_timestamp: 0,
ttl_seconds: 0,
sla_seconds: 0,
},
comment: None,
last_updated_at: 0,
job_run_ids: vec![],
}
}
}
@ -180,16 +204,26 @@ impl WantWithState<CanceledState> {
// From impl for creating want from event - creates in New state for sensing
impl From<WantCreateEventV1> for WantWithState<NewState> {
fn from(event: WantCreateEventV1) -> Self {
let lifetime = match event.lifetime {
Some(Lifetime::Originating(orig)) => WantLifetime::Originating {
data_timestamp: orig.data_timestamp,
ttl_seconds: orig.ttl_seconds,
sla_seconds: orig.sla_seconds,
},
Some(Lifetime::Ephemeral(eph)) => WantLifetime::Ephemeral {
job_run_id: eph.job_run_id,
},
None => panic!("Unexpectedly empty want lifetime"),
};
WantWithState {
want: WantInfo {
want_id: event.want_id,
partitions: event.partitions,
data_timestamp: event.data_timestamp,
ttl_seconds: event.ttl_seconds,
sla_seconds: event.sla_seconds,
source: event.source,
lifetime,
comment: event.comment,
last_updated_at: current_timestamp(),
job_run_ids: vec![],
},
state: NewState {},
}
@ -495,31 +529,103 @@ impl HasRelatedIds for Want {
// Helper methods on the Want enum
impl Want {
/// Create a new want in the Idle state
pub fn new(
/// Create a new originating want in the Idle state
pub fn new_originating(
want_id: String,
partitions: Vec<PartitionRef>,
data_timestamp: u64,
ttl_seconds: u64,
sla_seconds: u64,
source: Option<EventSource>,
comment: Option<String>,
) -> Self {
Want::Idle(WantWithState {
want: WantInfo {
want_id,
partitions,
data_timestamp,
ttl_seconds,
sla_seconds,
source,
lifetime: WantLifetime::Originating {
data_timestamp,
ttl_seconds,
sla_seconds,
},
comment,
last_updated_at: current_timestamp(),
job_run_ids: vec![],
},
state: IdleState {},
})
}
/// Construct an ephemeral (derivative) want in the Idle state.
///
/// Ephemeral wants are created by the system when a job run hits a
/// dep-miss; `job_run_id` records which run triggered the creation.
pub fn new_ephemeral(
    want_id: String,
    partitions: Vec<PartitionRef>,
    job_run_id: String,
    comment: Option<String>,
) -> Self {
    // Assemble the shared want payload first, then wrap it in the
    // Idle typestate.
    let want = WantInfo {
        want_id,
        partitions,
        lifetime: WantLifetime::Ephemeral { job_run_id },
        comment,
        last_updated_at: current_timestamp(),
        job_run_ids: Vec::new(),
    };
    Want::Idle(WantWithState {
        want,
        state: IdleState {},
    })
}
/// Borrow this want's lifetime semantics (Originating vs Ephemeral),
/// regardless of which state variant the want is currently in.
pub fn lifetime(&self) -> &WantLifetime {
    &self.want().lifetime
}
/// Record that `job_run_id` serviced this want.
///
/// Idempotent: an id already present in `job_run_ids` is not added again.
/// Works uniformly across all state variants.
pub fn add_job_run_id(&mut self, job_run_id: &str) {
    // Every variant wraps a `WantWithState` carrying a `WantInfo`; borrow
    // its `job_run_ids` once instead of duplicating the insert logic in
    // each of the eight match arms.
    let job_run_ids = match self {
        Want::New(w) => &mut w.want.job_run_ids,
        Want::Idle(w) => &mut w.want.job_run_ids,
        Want::Building(w) => &mut w.want.job_run_ids,
        Want::UpstreamBuilding(w) => &mut w.want.job_run_ids,
        Want::Successful(w) => &mut w.want.job_run_ids,
        Want::Failed(w) => &mut w.want.job_run_ids,
        Want::UpstreamFailed(w) => &mut w.want.job_run_ids,
        Want::Canceled(w) => &mut w.want.job_run_ids,
    };
    // `iter().any` compares &String against &str directly, avoiding the
    // per-call String allocation that `contains(&job_run_id.to_string())`
    // performed just to test membership.
    if !job_run_ids.iter().any(|id| id == job_run_id) {
        job_run_ids.push(job_run_id.to_string());
    }
}
/// Check if want is schedulable (Idle or UpstreamBuilding with satisfied upstreams)
pub fn is_schedulable(&self) -> bool {
match self {
@ -552,17 +658,31 @@ impl Want {
}
/// Convert to WantDetail for API responses and queries.
/// Note: job_run_ids and derivative_want_ids are empty here and will be
/// populated by BuildState from its inverted indexes.
/// job_run_ids are returned from the Want itself.
/// derivative_want_ids are computed by traversing job runs (done by BuildState).
pub fn to_detail(&self) -> WantDetail {
let lifetime = match &self.want().lifetime {
WantLifetime::Originating {
data_timestamp,
ttl_seconds,
sla_seconds,
} => Some(WantDetailLifetime::Originating(OriginatingLifetime {
data_timestamp: *data_timestamp,
ttl_seconds: *ttl_seconds,
sla_seconds: *sla_seconds,
})),
WantLifetime::Ephemeral { job_run_id } => {
Some(WantDetailLifetime::Ephemeral(EphemeralLifetime {
job_run_id: job_run_id.clone(),
}))
}
};
WantDetail {
want_id: self.want().want_id.clone(),
partitions: self.want().partitions.clone(),
upstreams: vec![], // Upstreams are tracked via want relationships, not stored here
data_timestamp: self.want().data_timestamp,
ttl_seconds: self.want().ttl_seconds,
sla_seconds: self.want().sla_seconds,
source: self.want().source.clone(),
lifetime,
comment: self.want().comment.clone(),
last_updated_timestamp: self.want().last_updated_at,
status: match self {
@ -575,8 +695,9 @@ impl Want {
Want::UpstreamFailed(_) => Some(WantStatusCode::WantUpstreamFailed.into()),
Want::Canceled(_) => Some(WantStatusCode::WantCanceled.into()),
},
job_run_ids: vec![], // Populated by BuildState
derivative_want_ids: vec![], // Populated by BuildState
job_run_ids: self.want().job_run_ids.clone(),
derivative_want_ids: vec![], // Computed by BuildState via job traversal
job_runs: vec![], // Populated by BuildState.get_want()
}
}
}

View file

@ -73,13 +73,50 @@ impl From<&JobRunStatus> for JobRunStatusView {
}
}
/// Simple view for derivative wants in the want detail page
pub struct DerivativeWantView {
    /// Identifier of the derivative want (used to link to its detail page).
    pub want_id: String,
    /// Partitions the derivative want covers, in template-ready form.
    pub partitions: Vec<PartitionRefView>,
    /// Current status, if one has been determined.
    pub status: Option<WantStatusView>,
}
impl From<&WantDetail> for DerivativeWantView {
    /// Project a full `WantDetail` down to the handful of fields the
    /// detail page's derivative-wants table renders.
    fn from(w: &WantDetail) -> Self {
        let partitions = w.partitions.iter().map(PartitionRefView::from).collect();
        let status = w.status.as_ref().map(WantStatusView::from);
        Self {
            want_id: w.want_id.clone(),
            partitions,
            status,
        }
    }
}
/// Enum representing the want lifetime type for templates
pub enum WantLifetimeView {
    /// User/API-created want with explicit freshness requirements.
    Originating {
        data_timestamp: u64,
        ttl_seconds: u64,
        sla_seconds: u64,
    },
    /// System-created derivative want; carries the job run that spawned it.
    Ephemeral {
        job_run_id: String,
    },
}
pub struct WantDetailView {
pub want_id: String,
pub partitions: Vec<PartitionRefView>,
pub upstreams: Vec<PartitionRefView>,
pub lifetime: Option<WantLifetimeView>,
/// Convenience accessor for originating wants - returns data_timestamp or 0
pub data_timestamp: u64,
/// Convenience accessor for originating wants - returns ttl_seconds or 0
pub ttl_seconds: u64,
/// Convenience accessor for originating wants - returns sla_seconds or 0
pub sla_seconds: u64,
/// True if this is an ephemeral (derivative) want
pub is_ephemeral: bool,
/// Job run that created this ephemeral want (if ephemeral)
pub source_job_run_id: Option<String>,
pub comment: Option<String>,
pub comment_display: String,
pub status: Option<WantStatusView>,
@ -87,30 +124,69 @@ pub struct WantDetailView {
// Lineage fields
pub job_run_ids: Vec<String>,
pub derivative_want_ids: Vec<String>,
pub job_runs: Vec<JobRunDetailView>,
pub derivative_wants: Vec<DerivativeWantView>,
}
impl From<&WantDetail> for WantDetailView {
fn from(w: &WantDetail) -> Self {
impl WantDetailView {
/// Create a WantDetailView with derivative wants populated.
/// Use this for the detail page where derivative wants need to be shown.
pub fn new(w: &WantDetail, derivative_wants: Vec<DerivativeWantView>) -> Self {
use crate::want_detail::Lifetime;
let (lifetime, data_timestamp, ttl_seconds, sla_seconds, is_ephemeral, source_job_run_id) =
match &w.lifetime {
Some(Lifetime::Originating(orig)) => (
Some(WantLifetimeView::Originating {
data_timestamp: orig.data_timestamp,
ttl_seconds: orig.ttl_seconds,
sla_seconds: orig.sla_seconds,
}),
orig.data_timestamp,
orig.ttl_seconds,
orig.sla_seconds,
false,
None,
),
Some(Lifetime::Ephemeral(eph)) => (
Some(WantLifetimeView::Ephemeral {
job_run_id: eph.job_run_id.clone(),
}),
0,
0,
0,
true,
Some(eph.job_run_id.clone()),
),
None => (None, 0, 0, 0, false, None),
};
Self {
want_id: w.want_id.clone(),
partitions: w.partitions.iter().map(PartitionRefView::from).collect(),
upstreams: w.upstreams.iter().map(PartitionRefView::from).collect(),
data_timestamp: w.data_timestamp,
ttl_seconds: w.ttl_seconds,
sla_seconds: w.sla_seconds,
lifetime,
data_timestamp,
ttl_seconds,
sla_seconds,
is_ephemeral,
source_job_run_id,
comment: w.comment.clone(),
comment_display: w.comment.as_deref().unwrap_or("-").to_string(),
status: w.status.as_ref().map(WantStatusView::from),
last_updated_timestamp: w.last_updated_timestamp,
job_run_ids: w.job_run_ids.clone(),
derivative_want_ids: w.derivative_want_ids.clone(),
job_runs: w.job_runs.iter().map(JobRunDetailView::from).collect(),
derivative_wants,
}
}
}
/// For list pages where derivative wants aren't needed
impl From<WantDetail> for WantDetailView {
fn from(w: WantDetail) -> Self {
Self::from(&w)
Self::new(&w, vec![])
}
}
@ -199,8 +275,11 @@ pub struct PartitionRefWithUuidView {
pub struct JobRunDetailView {
pub id: String,
pub job_label: String,
pub status: Option<JobRunStatusView>,
pub last_heartbeat_at: Option<u64>,
pub queued_at: Option<u64>,
pub started_at: Option<u64>,
pub building_partitions: Vec<PartitionRefView>,
pub servicing_wants: Vec<WantAttributedPartitionsView>,
// Lineage fields (populated for Succeeded/DepMiss states)
@ -236,8 +315,11 @@ impl From<&JobRunDetail> for JobRunDetailView {
Self {
id: jr.id.clone(),
job_label: jr.job_label.clone(),
status: jr.status.as_ref().map(JobRunStatusView::from),
last_heartbeat_at: jr.last_heartbeat_at,
queued_at: jr.queued_at,
started_at: jr.started_at,
building_partitions: jr
.building_partitions
.iter()
@ -497,9 +579,18 @@ mod tests {
let want_detail = state
.get_want(&ids.beta_want_id)
.expect("beta want should exist");
// Fetch derivative wants (like the http_server does)
let derivative_wants: Vec<_> = want_detail
.derivative_want_ids
.iter()
.filter_map(|id| state.get_want(id))
.map(|w| DerivativeWantView::from(&w))
.collect();
let template = WantDetailPage {
base: BaseContext::default(),
want: WantDetailView::from(want_detail),
want: WantDetailView::new(&want_detail, derivative_wants),
};
let html = template.render().expect("template should render");

View file

@ -65,25 +65,55 @@
</div>
{% endif %}
{% if !want.job_run_ids.is_empty() %}
{% if !want.job_runs.is_empty() %}
<div class="detail-section">
<h2>Fulfillment - Job Runs ({{ want.job_run_ids.len() }})</h2>
<ul class="partition-list">
{% for id in want.job_run_ids %}
<li><a href="/job_runs/{{ id }}">{{ id }}</a></li>
{% endfor %}
</ul>
<h2>Fulfillment - Job Runs ({{ want.job_runs.len() }})</h2>
<table class="data-table">
<thead>
<tr>
<th>ID</th>
<th>Job Label</th>
<th>Started</th>
<th>Duration</th>
<th>Status</th>
</tr>
</thead>
<tbody>
{% for jr in want.job_runs %}
<tr>
<td><a href="/job_runs/{{ jr.id }}">{{ jr.id }}</a></td>
<td>{{ jr.job_label }}</td>
<td>{% match jr.started_at %}{% when Some with (ts) %}{{ ts }}{% when None %}-{% endmatch %}</td>
<td>{% match jr.started_at %}{% when Some with (started) %}{% match jr.queued_at %}{% when Some with (queued) %}{{ started - queued }}ms{% when None %}-{% endmatch %}{% when None %}-{% endmatch %}</td>
<td>{% match jr.status %}{% when Some with (s) %}<span class="status status-{{ s.name_lowercase }}">{{ s.name }}</span>{% when None %}-{% endmatch %}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}
{% if !want.derivative_want_ids.is_empty() %}
{% if !want.derivative_wants.is_empty() %}
<div class="detail-section">
<h2>Fulfillment - Derivative Wants ({{ want.derivative_want_ids.len() }})</h2>
<ul class="partition-list">
{% for id in want.derivative_want_ids %}
<li><a href="/wants/{{ id }}">{{ id }}</a></li>
{% endfor %}
</ul>
<h2>Fulfillment - Derivative Wants ({{ want.derivative_wants.len() }})</h2>
<table class="data-table">
<thead>
<tr>
<th>ID</th>
<th>Partitions</th>
<th>Status</th>
</tr>
</thead>
<tbody>
{% for dw in want.derivative_wants %}
<tr>
<td><a href="/wants/{{ dw.want_id }}">{{ dw.want_id }}</a></td>
<td>{% for p in dw.partitions %}{{ p.partition_ref }}{% if !loop.last %}, {% endif %}{% endfor %}</td>
<td>{% match dw.status %}{% when Some with (s) %}<span class="status status-{{ s.name|lower }}">{{ s.name }}</span>{% when None %}-{% endmatch %}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}

View file

@ -10,8 +10,9 @@
"https://bcr.bazel.build/modules/abseil-cpp/20230802.1/MODULE.bazel": "fa92e2eb41a04df73cdabeec37107316f7e5272650f81d6cc096418fe647b915",
"https://bcr.bazel.build/modules/abseil-cpp/20240116.1/MODULE.bazel": "37bcdb4440fbb61df6a1c296ae01b327f19e9bb521f9b8e26ec854b6f97309ed",
"https://bcr.bazel.build/modules/abseil-cpp/20240116.1/source.json": "9be551b8d4e3ef76875c0d744b5d6a504a27e3ae67bc6b28f46415fd2d2957da",
"https://bcr.bazel.build/modules/apple_support/1.17.1/MODULE.bazel": "655c922ab1209978a94ef6ca7d9d43e940cd97d9c172fb55f94d91ac53f8610b",
"https://bcr.bazel.build/modules/apple_support/1.17.1/source.json": "6b2b8c74d14e8d485528a938e44bdb72a5ba17632b9e14ef6e68a5ee96c8347f",
"https://bcr.bazel.build/modules/apple_support/1.23.1/MODULE.bazel": "53763fed456a968cf919b3240427cf3a9d5481ec5466abc9d5dc51bc70087442",
"https://bcr.bazel.build/modules/apple_support/1.24.1/MODULE.bazel": "f46e8ddad60aef170ee92b2f3d00ef66c147ceafea68b6877cb45bd91737f5f8",
"https://bcr.bazel.build/modules/apple_support/1.24.1/source.json": "cf725267cbacc5f028ef13bb77e7f2c2e0066923a4dab1025e4a0511b1ed258a",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.14.0/MODULE.bazel": "2b31ffcc9bdc8295b2167e07a757dbbc9ac8906e7028e5170a3708cecaac119f",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.14.0/source.json": "0cf1826853b0bef8b5cd19c0610d717500f5521aa2b38b72b2ec302ac5e7526c",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.7.2/MODULE.bazel": "780d1a6522b28f5edb7ea09630748720721dfe27690d65a2d33aa7509de77e07",
@ -31,8 +32,11 @@
"https://bcr.bazel.build/modules/bazel_features/1.18.0/MODULE.bazel": "1be0ae2557ab3a72a57aeb31b29be347bcdc5d2b1eb1e70f39e3851a7e97041a",
"https://bcr.bazel.build/modules/bazel_features/1.19.0/MODULE.bazel": "59adcdf28230d220f0067b1f435b8537dd033bfff8db21335ef9217919c7fb58",
"https://bcr.bazel.build/modules/bazel_features/1.21.0/MODULE.bazel": "675642261665d8eea09989aa3b8afb5c37627f1be178382c320d1b46afba5e3b",
"https://bcr.bazel.build/modules/bazel_features/1.27.0/MODULE.bazel": "621eeee06c4458a9121d1f104efb80f39d34deff4984e778359c60eaf1a8cb65",
"https://bcr.bazel.build/modules/bazel_features/1.28.0/MODULE.bazel": "4b4200e6cbf8fa335b2c3f43e1d6ef3e240319c33d43d60cc0fbd4b87ece299d",
"https://bcr.bazel.build/modules/bazel_features/1.30.0/MODULE.bazel": "a14b62d05969a293b80257e72e597c2da7f717e1e69fa8b339703ed6731bec87",
"https://bcr.bazel.build/modules/bazel_features/1.30.0/source.json": "b07e17f067fe4f69f90b03b36ef1e08fe0d1f3cac254c1241a1818773e3423bc",
"https://bcr.bazel.build/modules/bazel_features/1.32.0/MODULE.bazel": "095d67022a58cb20f7e20e1aefecfa65257a222c18a938e2914fd257b5f1ccdc",
"https://bcr.bazel.build/modules/bazel_features/1.32.0/source.json": "2546c766986a6541f0bacd3e8542a1f621e2b14a80ea9e88c6f89f7eedf64ae1",
"https://bcr.bazel.build/modules/bazel_features/1.4.1/MODULE.bazel": "e45b6bb2350aff3e442ae1111c555e27eac1d915e77775f6fdc4b351b758b5d7",
"https://bcr.bazel.build/modules/bazel_features/1.9.0/MODULE.bazel": "885151d58d90d8d9c811eb75e3288c11f850e1d6b481a8c9f766adee4712358b",
"https://bcr.bazel.build/modules/bazel_features/1.9.1/MODULE.bazel": "8f679097876a9b609ad1f60249c49d68bfab783dd9be012faf9d82547b14815a",
@ -48,7 +52,8 @@
"https://bcr.bazel.build/modules/bazel_skylib/1.7.0/MODULE.bazel": "0db596f4563de7938de764cc8deeabec291f55e8ec15299718b93c4423e9796d",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.1/MODULE.bazel": "88ade7293becda963e0e3ea33e7d54d3425127e0a326e0d17da085a5f1f03ff6",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.1/source.json": "7ebaefba0b03efe59cac88ed5bbc67bcf59a3eff33af937345ede2a38b2d368a",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.2/MODULE.bazel": "69ad6927098316848b34a9142bcc975e018ba27f08c4ff403f50c1b6e646ca67",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.2/source.json": "34a3c8bcf233b835eb74be9d628899bb32999d3e0eadef1947a0a562a2b16ffb",
"https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84",
"https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8",
"https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb",
@ -61,13 +66,14 @@
"https://bcr.bazel.build/modules/libpfm/4.11.0/MODULE.bazel": "45061ff025b301940f1e30d2c16bea596c25b176c8b6b3087e92615adbd52902",
"https://bcr.bazel.build/modules/platforms/0.0.10/MODULE.bazel": "8cb8efaf200bdeb2150d93e162c40f388529a25852b332cec879373771e48ed5",
"https://bcr.bazel.build/modules/platforms/0.0.11/MODULE.bazel": "0daefc49732e227caa8bfa834d65dc52e8cc18a2faf80df25e8caea151a9413f",
"https://bcr.bazel.build/modules/platforms/0.0.11/source.json": "f7e188b79ebedebfe75e9e1d098b8845226c7992b307e28e1496f23112e8fc29",
"https://bcr.bazel.build/modules/platforms/0.0.4/MODULE.bazel": "9b328e31ee156f53f3c416a64f8491f7eb731742655a47c9eec4703a71644aee",
"https://bcr.bazel.build/modules/platforms/0.0.5/MODULE.bazel": "5733b54ea419d5eaf7997054bb55f6a1d0b5ff8aedf0176fef9eea44f3acda37",
"https://bcr.bazel.build/modules/platforms/0.0.6/MODULE.bazel": "ad6eeef431dc52aefd2d77ed20a4b353f8ebf0f4ecdd26a807d2da5aa8cd0615",
"https://bcr.bazel.build/modules/platforms/0.0.7/MODULE.bazel": "72fd4a0ede9ee5c021f6a8dd92b503e089f46c227ba2813ff183b71616034814",
"https://bcr.bazel.build/modules/platforms/0.0.8/MODULE.bazel": "9f142c03e348f6d263719f5074b21ef3adf0b139ee4c5133e2aa35664da9eb2d",
"https://bcr.bazel.build/modules/platforms/0.0.9/MODULE.bazel": "4a87a60c927b56ddd67db50c89acaa62f4ce2a1d2149ccb63ffd871d5ce29ebc",
"https://bcr.bazel.build/modules/platforms/1.0.0/MODULE.bazel": "f05feb42b48f1b3c225e4ccf351f367be0371411a803198ec34a389fb22aa580",
"https://bcr.bazel.build/modules/platforms/1.0.0/source.json": "f4ff1fd412e0246fd38c82328eb209130ead81d62dcd5a9e40910f867f733d96",
"https://bcr.bazel.build/modules/protobuf/21.7/MODULE.bazel": "a5a29bb89544f9b97edce05642fac225a808b5b7be74038ea3640fae2f8e66a7",
"https://bcr.bazel.build/modules/protobuf/27.0/MODULE.bazel": "7873b60be88844a0a1d8f80b9d5d20cfbd8495a689b8763e76c6372998d3f64c",
"https://bcr.bazel.build/modules/protobuf/27.1/MODULE.bazel": "703a7b614728bb06647f965264967a8ef1c39e09e8f167b3ca0bb1fd80449c0d",
@ -93,7 +99,9 @@
"https://bcr.bazel.build/modules/rules_cc/0.0.8/MODULE.bazel": "964c85c82cfeb6f3855e6a07054fdb159aced38e99a5eecf7bce9d53990afa3e",
"https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel": "836e76439f354b89afe6a911a7adf59a6b2518fafb174483ad78a2a2fde7b1c5",
"https://bcr.bazel.build/modules/rules_cc/0.1.1/MODULE.bazel": "2f0222a6f229f0bf44cd711dc13c858dad98c62d52bd51d8fc3a764a83125513",
"https://bcr.bazel.build/modules/rules_cc/0.1.1/source.json": "d61627377bd7dd1da4652063e368d9366fc9a73920bfa396798ad92172cf645c",
"https://bcr.bazel.build/modules/rules_cc/0.2.4/MODULE.bazel": "1ff1223dfd24f3ecf8f028446d4a27608aa43c3f41e346d22838a4223980b8cc",
"https://bcr.bazel.build/modules/rules_cc/0.2.8/MODULE.bazel": "f1df20f0bf22c28192a794f29b501ee2018fa37a3862a1a2132ae2940a23a642",
"https://bcr.bazel.build/modules/rules_cc/0.2.8/source.json": "85087982aca15f31307bd52698316b28faa31bd2c3095a41f456afec0131344c",
"https://bcr.bazel.build/modules/rules_foreign_cc/0.9.0/MODULE.bazel": "c9e8c682bf75b0e7c704166d79b599f93b72cfca5ad7477df596947891feeef6",
"https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/MODULE.bazel": "40c97d1144356f52905566c55811f13b299453a14ac7769dfba2ac38192337a8",
"https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/source.json": "c8b1e2c717646f1702290959a3302a178fb639d987ab61d548105019f11e527e",
@ -108,8 +116,8 @@
"https://bcr.bazel.build/modules/rules_java/7.2.0/MODULE.bazel": "06c0334c9be61e6cef2c8c84a7800cef502063269a5af25ceb100b192453d4ab",
"https://bcr.bazel.build/modules/rules_java/7.3.2/MODULE.bazel": "50dece891cfdf1741ea230d001aa9c14398062f2b7c066470accace78e412bc2",
"https://bcr.bazel.build/modules/rules_java/7.6.1/MODULE.bazel": "2f14b7e8a1aa2f67ae92bc69d1ec0fa8d9f827c4e17ff5e5f02e91caa3b2d0fe",
"https://bcr.bazel.build/modules/rules_java/8.12.0/MODULE.bazel": "8e6590b961f2defdfc2811c089c75716cb2f06c8a4edeb9a8d85eaa64ee2a761",
"https://bcr.bazel.build/modules/rules_java/8.12.0/source.json": "cbd5d55d9d38d4008a7d00bee5b5a5a4b6031fcd4a56515c9accbcd42c7be2ba",
"https://bcr.bazel.build/modules/rules_java/8.14.0/MODULE.bazel": "717717ed40cc69994596a45aec6ea78135ea434b8402fb91b009b9151dd65615",
"https://bcr.bazel.build/modules/rules_java/8.14.0/source.json": "8a88c4ca9e8759da53cddc88123880565c520503321e2566b4e33d0287a3d4bc",
"https://bcr.bazel.build/modules/rules_java/8.3.2/MODULE.bazel": "7336d5511ad5af0b8615fdc7477535a2e4e723a357b6713af439fe8cf0195017",
"https://bcr.bazel.build/modules/rules_java/8.5.1/MODULE.bazel": "d8a9e38cc5228881f7055a6079f6f7821a073df3744d441978e7a43e20226939",
"https://bcr.bazel.build/modules/rules_jvm_external/4.4.2/MODULE.bazel": "a56b85e418c83eb1839819f0b515c431010160383306d13ec21959ac412d2fe7",
@ -149,12 +157,11 @@
"https://bcr.bazel.build/modules/rules_python/1.3.0/MODULE.bazel": "8361d57eafb67c09b75bf4bbe6be360e1b8f4f18118ab48037f2bd50aa2ccb13",
"https://bcr.bazel.build/modules/rules_python/1.5.1/MODULE.bazel": "acfe65880942d44a69129d4c5c3122d57baaf3edf58ae5a6bd4edea114906bf5",
"https://bcr.bazel.build/modules/rules_python/1.5.1/source.json": "aa903e1bcbdfa1580f2b8e2d55100b7c18bc92d779ebb507fec896c75635f7bd",
"https://bcr.bazel.build/modules/rules_rust/0.61.0/MODULE.bazel": "0318a95777b9114c8740f34b60d6d68f9cfef61e2f4b52424ca626213d33787b",
"https://bcr.bazel.build/modules/rules_rust/0.61.0/source.json": "d1bc743b5fa2e2abb35c436df7126a53dab0c3f35890ae6841592b2253786a63",
"https://bcr.bazel.build/modules/rules_rust/0.67.0/MODULE.bazel": "87c3816c4321352dcfd9e9e26b58e84efc5b21351ae3ef8fb5d0d57bde7237f5",
"https://bcr.bazel.build/modules/rules_rust/0.67.0/source.json": "a8ef4d3be30eb98e060cad9e5875a55b603195487f76e01b619b51a1df4641cc",
"https://bcr.bazel.build/modules/rules_shell/0.2.0/MODULE.bazel": "fda8a652ab3c7d8fee214de05e7a9916d8b28082234e8d2c0094505c5268ed3c",
"https://bcr.bazel.build/modules/rules_shell/0.3.0/MODULE.bazel": "de4402cd12f4cc8fda2354fce179fdb068c0b9ca1ec2d2b17b3e21b24c1a937b",
"https://bcr.bazel.build/modules/rules_shell/0.4.0/MODULE.bazel": "0f8f11bb3cd11755f0b48c1de0bbcf62b4b34421023aa41a2fc74ef68d9584f0",
"https://bcr.bazel.build/modules/rules_shell/0.4.0/source.json": "1d7fa7f941cd41dc2704ba5b4edc2e2230eea1cc600d80bd2b65838204c50b95",
"https://bcr.bazel.build/modules/rules_shell/0.6.1/MODULE.bazel": "72e76b0eea4e81611ef5452aa82b3da34caca0c8b7b5c0c9584338aa93bae26b",
"https://bcr.bazel.build/modules/rules_shell/0.6.1/source.json": "20ec05cd5e592055e214b2da8ccb283c7f2a421ea0dc2acbf1aa792e11c03d0c",
"https://bcr.bazel.build/modules/stardoc/0.5.1/MODULE.bazel": "1a05d92974d0c122f5ccf09291442580317cdd859f07a8655f1db9a60374f9f8",
"https://bcr.bazel.build/modules/stardoc/0.5.3/MODULE.bazel": "c7f6948dae6999bf0db32c1858ae345f112cacf98f174c7a8bb707e41b974f1c",
"https://bcr.bazel.build/modules/stardoc/0.5.4/MODULE.bazel": "6569966df04610b8520957cb8e97cf2e9faac2c0309657c537ab51c16c18a2a4",