commit eb44350865 (parent 8e8ff33ef8)
11 changed files with 381 additions and 221 deletions
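At a glance, this commit does two things: it applies rustfmt-style formatting across the codebase (multi-line imports, wrapped signatures, alphabetized mod declarations), and it introduces typestate-backed newtype wrappers — MissingPartitionRef, BuildingPartitionRef, LivePartitionRef, FailedPartitionRef, and TaintedPartitionRef for partitions, plus IdleWantId through CanceledWantId for wants — threading them through BuildState so transition helpers state in their signatures which state their inputs must already be in.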
@@ -1,7 +1,13 @@
 use crate::build_state::BuildState;
 use crate::data_build_event::Event;
-use crate::util::{current_timestamp, DatabuildError};
-use crate::{CancelWantRequest, CancelWantResponse, CreateTaintRequest, CreateTaintResponse, CreateWantRequest, CreateWantResponse, DataBuildEvent, GetTaintRequest, GetTaintResponse, GetWantRequest, GetWantResponse, ListJobRunsRequest, ListJobRunsResponse, ListPartitionsRequest, ListPartitionsResponse, ListTaintsRequest, ListTaintsResponse, ListWantsRequest, ListWantsResponse, TaintCreateEventV1, WantCancelEventV1, WantCreateEventV1};
+use crate::util::{DatabuildError, current_timestamp};
+use crate::{
+    CancelWantRequest, CancelWantResponse, CreateTaintRequest, CreateTaintResponse,
+    CreateWantRequest, CreateWantResponse, DataBuildEvent, GetTaintRequest, GetTaintResponse,
+    GetWantRequest, GetWantResponse, ListJobRunsRequest, ListJobRunsResponse,
+    ListPartitionsRequest, ListPartitionsResponse, ListTaintsRequest, ListTaintsResponse,
+    ListWantsRequest, ListWantsResponse, TaintCreateEventV1, WantCancelEventV1, WantCreateEventV1,
+};
 use prost::Message;
 use rusqlite::Connection;
 use std::fmt::Debug;

@@ -173,7 +179,6 @@ impl<S: BELStorage + Debug> BuildEventLog<S> {
         Ok(idx)
     }
 
-
     // API methods
     pub fn api_handle_list_wants(&self, req: ListWantsRequest) -> ListWantsResponse {
         self.state.list_wants(&req)

@@ -191,7 +196,10 @@ impl<S: BELStorage + Debug> BuildEventLog<S> {
         self.state.list_job_runs(&req)
     }
 
-    pub fn api_handle_want_create(&mut self, req: CreateWantRequest) -> Result<CreateWantResponse, DatabuildError> {
+    pub fn api_handle_want_create(
+        &mut self,
+        req: CreateWantRequest,
+    ) -> Result<CreateWantResponse, DatabuildError> {
         let ev: WantCreateEventV1 = req.into();
         self.append_event(&ev.clone().into())?;
         Ok(self.state.get_want(&ev.want_id).into())

@@ -201,13 +209,19 @@ impl<S: BELStorage + Debug> BuildEventLog<S> {
         self.state.get_want(&req.want_id).into()
     }
 
-    pub fn api_handle_want_cancel(&mut self, req: CancelWantRequest) -> Result<CancelWantResponse, DatabuildError> {
+    pub fn api_handle_want_cancel(
+        &mut self,
+        req: CancelWantRequest,
+    ) -> Result<CancelWantResponse, DatabuildError> {
         let ev: WantCancelEventV1 = req.into();
         self.append_event(&ev.clone().into())?;
         Ok(self.state.get_want(&ev.want_id).into())
     }
 
-    pub fn api_handle_taint_create(&mut self, req: CreateTaintRequest) -> Result<CreateTaintResponse, DatabuildError> {
+    pub fn api_handle_taint_create(
+        &mut self,
+        req: CreateTaintRequest,
+    ) -> Result<CreateTaintResponse, DatabuildError> {
         // TODO Need to do this hierarchically? A taint will impact downstream partitions also
         todo!();
         let ev: TaintCreateEventV1 = req.into();

@@ -264,7 +278,9 @@ mod tests {
         // Append an event
         let mut e = WantCreateEventV1::default();
         e.want_id = want_id.clone();
-        e.partitions = vec!(PartitionRef { r#ref: "sqlite_partition_1234".to_string() });
+        e.partitions = vec![PartitionRef {
+            r#ref: "sqlite_partition_1234".to_string(),
+        }];
         let event_id = log
             .append_event(&Event::WantCreateV1(e))
             .expect("append_event failed");

@@ -298,7 +314,8 @@ mod tests {
             "want_id not found in state"
         );
         assert_eq!(
-            log.state.get_want(&want_id)
+            log.state
+                .get_want(&want_id)
                 .map(|want| want.want_id.clone())
                 .expect("state.wants want_id not found"),
             want_id,

@@ -307,13 +324,16 @@ mod tests {
 
         let mut e2 = WantCreateEventV1::default();
         e2.want_id = Uuid::new_v4().into();
-        log.append_event(&Event::WantCreateV1(e2)).expect("append_event failed");
+        log.append_event(&Event::WantCreateV1(e2))
+            .expect("append_event failed");
         let mut e3 = WantCreateEventV1::default();
         e3.want_id = Uuid::new_v4().into();
-        log.append_event(&Event::WantCreateV1(e3)).expect("append_event failed");
+        log.append_event(&Event::WantCreateV1(e3))
+            .expect("append_event failed");
         let mut e4 = WantCreateEventV1::default();
         e4.want_id = Uuid::new_v4().into();
-        log.append_event(&Event::WantCreateV1(e4)).expect("append_event failed");
+        log.append_event(&Event::WantCreateV1(e4))
+            .expect("append_event failed");
 
         let events = log
             .storage

@@ -1,8 +1,13 @@
 use crate::data_build_event::Event;
 use crate::data_deps::{WantTimestamps, missing_deps_to_want_events};
-use crate::partition_state::Partition;
+use crate::partition_state::{
+    FailedPartitionRef, LivePartitionRef, MissingPartitionRef, MissingState, Partition,
+    PartitionWithState, TaintedPartitionRef,
+};
 use crate::util::current_timestamp;
-use crate::want_state::{IdleState as WantIdleState, Want, WantWithState};
+use crate::want_state::{
+    FailedWantId, IdleState as WantIdleState, SuccessfulWantId, Want, WantWithState,
+};
 use crate::{
     JobRunBufferEventV1, JobRunCancelEventV1, JobRunDetail, JobRunFailureEventV1,
     JobRunHeartbeatEventV1, JobRunMissingDepsEventV1, JobRunStatusCode, JobRunSuccessEventV1,

@@ -120,14 +125,14 @@ impl BuildState {
     /// Used when a job run successfully completes
     fn transition_partitions_to_live(
         &mut self,
-        partition_refs: &[PartitionRef],
+        partition_refs: &[LivePartitionRef],
         job_run_id: &str,
         timestamp: u64,
     ) {
         for pref in partition_refs {
-            let partition = self.partitions.remove(&pref.r#ref).expect(&format!(
+            let partition = self.partitions.remove(&pref.0.r#ref).expect(&format!(
                 "BUG: Partition {} must exist and be in Building state before completion",
-                pref.r#ref
+                pref.0.r#ref
             ));
 
             // ONLY valid transition: Building -> Live

@@ -139,11 +144,11 @@ impl BuildState {
                 _ => {
                     panic!(
                         "BUG: Invalid state - partition {} must be Building to transition to Live, found {:?}",
-                        pref.r#ref, partition
+                        pref.0.r#ref, partition
                     )
                 }
             };
-            self.partitions.insert(pref.r#ref.clone(), transitioned);
+            self.partitions.insert(pref.0.r#ref.clone(), transitioned);
         }
     }
 

@@ -151,14 +156,14 @@ impl BuildState {
     /// Used when a job run fails
     fn transition_partitions_to_failed(
         &mut self,
-        partition_refs: &[PartitionRef],
+        partition_refs: &[FailedPartitionRef],
         job_run_id: &str,
         timestamp: u64,
     ) {
         for pref in partition_refs {
-            let partition = self.partitions.remove(&pref.r#ref).expect(&format!(
+            let partition = self.partitions.remove(&pref.0.r#ref).expect(&format!(
                 "BUG: Partition {} must exist and be in Building state before failure",
-                pref.r#ref
+                pref.0.r#ref
             ));
 
             // ONLY valid transition: Building -> Failed

@@ -170,20 +175,17 @@ impl BuildState {
                 _ => {
                     panic!(
                         "BUG: Invalid state - partition {} must be Building to transition to Failed, found {:?}",
-                        pref.r#ref, partition
+                        pref.0.r#ref, partition
                     )
                 }
             };
-            self.partitions.insert(pref.r#ref.clone(), transitioned);
+            self.partitions.insert(pref.0.r#ref.clone(), transitioned);
         }
     }
 
     /// Reset partitions from Building back to Missing state
     /// Used when a job run encounters missing dependencies and cannot proceed
-    fn reset_partitions_to_missing(
-        &mut self,
-        partition_refs: &[PartitionRef],
-    ) {
+    fn reset_partitions_to_missing(&mut self, partition_refs: &[PartitionRef]) {
         for pref in partition_refs {
             let partition = self.partitions.remove(&pref.r#ref).expect(&format!(
                 "BUG: Partition {} must exist and be in Building state during dep_miss",

@@ -209,16 +211,16 @@ impl BuildState {
     /// Transitions Building → Successful, returns list of newly successful want IDs
     fn complete_successful_wants(
         &mut self,
-        newly_live_partitions: &[PartitionRef],
+        newly_live_partitions: &[LivePartitionRef],
         job_run_id: &str,
         timestamp: u64,
-    ) -> Vec<String> {
-        let mut newly_successful_wants: Vec<String> = Vec::new();
+    ) -> Vec<SuccessfulWantId> {
+        let mut newly_successful_wants: Vec<SuccessfulWantId> = Vec::new();
 
         for pref in newly_live_partitions {
             let want_ids = self
                 .partitions
-                .get(&pref.r#ref)
+                .get(&pref.0.r#ref)
                 .map(|p| p.want_ids().clone())
                 .unwrap_or_default();
 

@@ -239,10 +241,10 @@ impl BuildState {
                 });
 
                 if all_partitions_live {
-                    newly_successful_wants.push(want_id.clone());
-                    Want::Successful(
-                        building.complete(job_run_id.to_string(), timestamp),
-                    )
+                    let successful_want =
+                        building.complete(job_run_id.to_string(), timestamp);
+                    newly_successful_wants.push(successful_want.get_id());
+                    Want::Successful(successful_want)
                 } else {
                     Want::Building(building) // Still building other partitions
                 }

@@ -250,7 +252,7 @@ impl BuildState {
                 _ => {
                     panic!(
                         "BUG: Want {} in invalid state {:?} when partition {} became Live. Should be Building.",
-                        want_id, want, pref.r#ref
+                        want_id, want, pref.0.r#ref
                     );
                 }
             };

@@ -267,14 +269,14 @@ impl BuildState {
     /// Returns list of newly failed want IDs for downstream cascade
     fn fail_directly_affected_wants(
         &mut self,
-        failed_partitions: &[PartitionRef],
-    ) -> Vec<String> {
-        let mut newly_failed_wants: Vec<String> = Vec::new();
+        failed_partitions: &[FailedPartitionRef],
+    ) -> Vec<FailedWantId> {
+        let mut newly_failed_wants: Vec<FailedWantId> = Vec::new();
 
         for pref in failed_partitions {
             let want_ids = self
                 .partitions
-                .get(&pref.r#ref)
+                .get(&pref.0.r#ref)
                 .map(|p| p.want_ids().clone())
                 .unwrap_or_default();
 

@@ -286,10 +288,10 @@ impl BuildState {
 
             let transitioned = match want {
                 Want::Building(building) => {
-                    newly_failed_wants.push(want_id.clone());
-                    Want::Failed(
-                        building.fail(vec![pref.clone()], "Partition build failed".to_string()),
-                    )
+                    let failed = building
+                        .fail(vec![pref.0.clone()], "Partition build failed".to_string());
+                    newly_failed_wants.push(failed.get_id());
+                    Want::Failed(failed)
                 }
                 // Failed → Failed: add new failed partition to existing failed state
                 Want::Failed(failed) => {

@@ -298,7 +300,7 @@ impl BuildState {
                 _ => {
                     panic!(
                         "BUG: Want {} in invalid state {:?} when partition {} failed. Should be Building or Failed.",
-                        want_id, want, pref.r#ref
+                        want_id, want, pref.0.r#ref
                     );
                 }
             };

@@ -314,7 +316,7 @@ impl BuildState {
     /// Transitions UpstreamBuilding → Idle (when ready) or Building (when partitions already building)
     fn unblock_downstream_wants(
         &mut self,
-        newly_successful_wants: &[String],
+        newly_successful_wants: &[SuccessfulWantId],
         job_run_id: &str,
         timestamp: u64,
     ) {

@@ -327,11 +329,10 @@ impl BuildState {
                 match want {
                     Want::UpstreamBuilding(downstream_want) => {
                         // Is this downstream want waiting for any of the newly successful wants?
-                        let is_affected = downstream_want
-                            .state
-                            .upstream_want_ids
-                            .iter()
-                            .any(|up_id| newly_successful_wants.contains(up_id));
+                        let is_affected =
+                            downstream_want.state.upstream_want_ids.iter().any(|up_id| {
+                                newly_successful_wants.iter().any(|swid| &swid.0 == up_id)
+                            });
                         if is_affected { Some(id.clone()) } else { None }
                     }
                     _ => None,

@@ -374,10 +375,8 @@ impl BuildState {
                     if any_partition_building {
                         // Some partitions still being built, continue in Building state
                         Want::Building(
-                            downstream_want.continue_building(
-                                job_run_id.to_string(),
-                                timestamp,
-                            ),
+                            downstream_want
+                                .continue_building(job_run_id.to_string(), timestamp),
                         )
                     } else {
                         // No partitions being built, become schedulable again

@@ -401,7 +400,7 @@ impl BuildState {
     /// Transitions UpstreamBuilding → UpstreamFailed
     fn cascade_failures_to_downstream_wants(
         &mut self,
-        newly_failed_wants: &[String],
+        newly_failed_wants: &[FailedWantId],
         timestamp: u64,
     ) {
         // Find downstream wants that are waiting for any of the newly failed wants

@@ -413,11 +412,10 @@ impl BuildState {
                 match want {
                     Want::UpstreamBuilding(downstream_want) => {
                         // Is this downstream want waiting for any of the newly failed wants?
-                        let is_affected = downstream_want
-                            .state
-                            .upstream_want_ids
-                            .iter()
-                            .any(|up_id| newly_failed_wants.contains(up_id));
+                        let is_affected =
+                            downstream_want.state.upstream_want_ids.iter().any(|up_id| {
+                                newly_failed_wants.iter().any(|fwid| &fwid.0 == up_id)
+                            });
                         if is_affected { Some(id.clone()) } else { None }
                     }
                     _ => None,

@@ -433,8 +431,13 @@ impl BuildState {
 
             let transitioned = match want {
                 Want::UpstreamBuilding(downstream_want) => Want::UpstreamFailed(
-                    downstream_want
-                        .upstream_failed(newly_failed_wants.to_vec(), timestamp),
+                    downstream_want.upstream_failed(
+                        newly_failed_wants
+                            .iter()
+                            .map(|fwid| fwid.0.clone())
+                            .collect(),
+                        timestamp,
+                    ),
                 ),
                 _ => {
                     panic!("BUG: Want {} should be UpstreamBuilding here", want_id);

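Design note: the cascade still hands plain String IDs to upstream_failed — the FailedWantId wrapper is peeled off with .map(|fwid| fwid.0.clone()) at the call boundary, so the typed proof of failure stays inside BuildState while the downstream state structs keep their existing untyped fields.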
@@ -570,10 +573,7 @@ impl BuildState {
         }
     }
 
-    fn handle_want_create(
-        &mut self,
-        event: &WantCreateEventV1,
-    ) -> Vec<Event> {
+    fn handle_want_create(&mut self, event: &WantCreateEventV1) -> Vec<Event> {
         // Use From impl to create want in Idle state
         let want_idle: WantWithState<WantIdleState> = event.clone().into();
         self.wants

@@ -587,10 +587,7 @@ impl BuildState {
         vec![]
     }
 
-    fn handle_want_cancel(
-        &mut self,
-        event: &WantCancelEventV1,
-    ) -> Vec<Event> {
+    fn handle_want_cancel(&mut self, event: &WantCancelEventV1) -> Vec<Event> {
         // TODO actually cancel in-progress job runs that no longer have a sponsoring want
 
         // Type-safe transition (API layer should prevent canceling terminal wants)

@@ -626,10 +623,7 @@ impl BuildState {
         vec![]
     }
 
-    fn handle_job_run_buffer(
-        &mut self,
-        event: &JobRunBufferEventV1,
-    ) -> Vec<Event> {
+    fn handle_job_run_buffer(&mut self, event: &JobRunBufferEventV1) -> Vec<Event> {
         // No job run should exist - if it does, that's a BUG in the orchestrator
         if self.job_runs.get(&event.job_run_id).is_some() {
             panic!(

@@ -683,11 +677,7 @@ impl BuildState {
         vec![]
     }
 
-    fn update_job_run_status(
-        &mut self,
-        job_run_id: &str,
-        status: JobRunStatusCode,
-    ) {
+    fn update_job_run_status(&mut self, job_run_id: &str, status: JobRunStatusCode) {
         let job_run = self.job_runs.get_mut(job_run_id).expect(&format!(
             "BUG: Job run ID {} must exist to update status",
             job_run_id

@@ -696,52 +686,69 @@ impl BuildState {
         job_run.status = Some(status.into());
     }
 
-    fn handle_job_run_heartbeat(
-        &mut self,
-        event: &JobRunHeartbeatEventV1,
-    ) -> Vec<Event> {
+    fn handle_job_run_heartbeat(&mut self, event: &JobRunHeartbeatEventV1) -> Vec<Event> {
         self.update_job_run_status(&event.job_run_id, JobRunStatusCode::JobRunRunning);
         vec![]
     }
 
-    fn handle_job_run_success(
-        &mut self,
-        event: &JobRunSuccessEventV1,
-    ) -> Vec<Event> {
+    fn handle_job_run_success(&mut self, event: &JobRunSuccessEventV1) -> Vec<Event> {
         println!("Job run success event: {:?}", event);
         self.update_job_run_status(&event.job_run_id, JobRunStatusCode::JobRunSucceeded);
         let job_run = self.get_job_run(&event.job_run_id).unwrap();
 
         // Clone building_partitions before we use it multiple times
-        let newly_live_partitions: Vec<PartitionRef> = job_run.building_partitions.clone();
+        // TODO correct this explicit upcasting of partition ref type
+        let newly_live_partitions: Vec<LivePartitionRef> = job_run
+            .building_partitions
+            .iter()
+            .map(|pref| LivePartitionRef(pref.clone()))
+            .collect();
 
         // Update partitions being built by this job (strict type-safe transitions)
-        self.transition_partitions_to_live(&newly_live_partitions, &event.job_run_id, current_timestamp());
+        self.transition_partitions_to_live(
+            &newly_live_partitions,
+            &event.job_run_id,
+            current_timestamp(),
+        );
 
         // Building → Successful (when all partitions Live)
-        let newly_successful_wants = self.complete_successful_wants(&newly_live_partitions, &event.job_run_id, current_timestamp());
+        let newly_successful_wants: Vec<SuccessfulWantId> = self.complete_successful_wants(
+            &newly_live_partitions,
+            &event.job_run_id,
+            current_timestamp(),
+        );
 
         // UpstreamBuilding → Idle/Building (for downstream wants waiting on newly successful wants)
-        self.unblock_downstream_wants(&newly_successful_wants, &event.job_run_id, current_timestamp());
+        self.unblock_downstream_wants(
+            &newly_successful_wants,
+            &event.job_run_id,
+            current_timestamp(),
+        );
 
         vec![]
     }
 
-    fn handle_job_run_failure(
-        &mut self,
-        event: &JobRunFailureEventV1,
-    ) -> Vec<Event> {
+    fn handle_job_run_failure(&mut self, event: &JobRunFailureEventV1) -> Vec<Event> {
         self.update_job_run_status(&event.job_run_id, JobRunStatusCode::JobRunFailed);
         let job_run = self.get_job_run(&event.job_run_id).unwrap();
 
         // Clone building_partitions before we use it multiple times
-        let failed_partitions: Vec<PartitionRef> = job_run.building_partitions.clone();
+        let failed_partitions: Vec<FailedPartitionRef> = job_run
+            .building_partitions
+            .iter()
+            .map(|pref| FailedPartitionRef(pref.clone()))
+            .collect();
 
         // Transition partitions using strict type-safe methods
-        self.transition_partitions_to_failed(&failed_partitions, &event.job_run_id, current_timestamp());
+        self.transition_partitions_to_failed(
+            &failed_partitions,
+            &event.job_run_id,
+            current_timestamp(),
+        );
 
         // Building → Failed (for wants directly building failed partitions)
-        let newly_failed_wants = self.fail_directly_affected_wants(&failed_partitions);
+        let newly_failed_wants: Vec<FailedWantId> =
+            self.fail_directly_affected_wants(&failed_partitions);
 
         // UpstreamBuilding → UpstreamFailed (for downstream wants waiting on newly failed wants)
         self.cascade_failures_to_downstream_wants(&newly_failed_wants, current_timestamp());

@@ -749,17 +756,11 @@ impl BuildState {
         vec![]
     }
 
-    fn handle_job_run_cancel(
-        &mut self,
-        _event: &JobRunCancelEventV1,
-    ) -> Vec<Event> {
+    fn handle_job_run_cancel(&mut self, _event: &JobRunCancelEventV1) -> Vec<Event> {
         todo!("should update already inserted job run, partition status, want status")
     }
 
-    pub fn handle_job_run_dep_miss(
-        &mut self,
-        event: &JobRunMissingDepsEventV1,
-    ) -> Vec<Event> {
+    pub fn handle_job_run_dep_miss(&mut self, event: &JobRunMissingDepsEventV1) -> Vec<Event> {
         let job_run_detail = self.get_job_run(&event.job_run_id).expect(&format!(
             "BUG: Unable to find job run with id `{}`",
             event.job_run_id

@@ -801,17 +802,11 @@ impl BuildState {
         want_events
     }
 
-    fn handle_taint_create(
-        &mut self,
-        _event: &TaintCreateEventV1,
-    ) -> Vec<Event> {
+    fn handle_taint_create(&mut self, _event: &TaintCreateEventV1) -> Vec<Event> {
         todo!("...?")
     }
 
-    fn handle_taint_delete(
-        &mut self,
-        _event: &TaintCancelEventV1,
-    ) -> Vec<Event> {
+    fn handle_taint_delete(&mut self, _event: &TaintCancelEventV1) -> Vec<Event> {
         todo!("...?")
     }
 

@@ -913,22 +908,25 @@ impl BuildState {
     */
     pub fn want_schedulability(&self, want: &WantDetail) -> WantSchedulability {
         // Use type-safe partition checks from partitions
-        let mut live: Vec<PartitionRef> = Vec::new();
-        let mut tainted: Vec<PartitionRef> = Vec::new();
-        let mut missing: Vec<PartitionRef> = Vec::new();
+        let mut live: Vec<LivePartitionRef> = Vec::new();
+        let mut tainted: Vec<TaintedPartitionRef> = Vec::new();
+        let mut missing: Vec<MissingPartitionRef> = Vec::new();
 
         for upstream_ref in &want.upstreams {
             match self.partitions.get(&upstream_ref.r#ref) {
                 Some(partition) => {
-                    if partition.is_live() {
-                        live.push(upstream_ref.clone());
-                    } else if matches!(partition, Partition::Tainted(_)) {
-                        tainted.push(upstream_ref.clone());
+                    match partition {
+                        Partition::Live(p) => live.push(p.get_ref()),
+                        Partition::Tainted(p) => tainted.push(p.get_ref()),
+                        Partition::Missing(p) => missing.push(p.get_ref()),
+                        _ => (), // Other states (Building, Failed) don't add to any list
                     }
-                    // Other states (Missing, Building, Failed) don't add to any list
                 }
                 None => {
-                    missing.push(upstream_ref.clone());
+                    // TODO this definitely feels dirty, but we can't take a mutable ref of self to
+                    // insert the missing partition here, and it feels a little over the top to
+                    // create a more elaborate way to mint a missing ref.
+                    missing.push(MissingPartitionRef(upstream_ref.clone()));
                 }
             }
         }

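The shape of that rewrite is worth calling out: instead of boolean probes (is_live(), matches!) that let any branch push any ref, each match arm can only mint the wrapper type its state justifies. A minimal, library-style sketch of the idea in Rust — simplified, hypothetical names, not the actual databuild types:

#[derive(Debug, Clone)]
pub struct PartitionRef {
    pub r#ref: String,
}

// Newtype "proof" wrappers: holding one implies the state it names.
pub struct LiveRef(pub PartitionRef);
pub struct MissingRef(pub PartitionRef);

pub enum Partition {
    Live(PartitionRef),
    Missing(PartitionRef),
    Building(PartitionRef),
}

// Classification by exhaustive match: each arm can only produce the
// wrapper for the state it just matched, so `live` can never
// accidentally contain a building partition.
pub fn classify(parts: &[Partition]) -> (Vec<LiveRef>, Vec<MissingRef>) {
    let (mut live, mut missing) = (Vec::new(), Vec::new());
    for p in parts {
        match p {
            Partition::Live(r) => live.push(LiveRef(r.clone())),
            Partition::Missing(r) => missing.push(MissingRef(r.clone())),
            Partition::Building(_) => {} // building adds to neither list
        }
    }
    (live, missing)
}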
@@ -958,9 +956,9 @@ impl BuildState {
 /// The status of partitions required by a want to build (sensed from dep miss job run)
 #[derive(Debug, Clone, Serialize, Deserialize, Default)]
 pub struct WantUpstreamStatus {
-    pub live: Vec<PartitionRef>,
-    pub tainted: Vec<PartitionRef>,
-    pub missing: Vec<PartitionRef>,
+    pub live: Vec<LivePartitionRef>,
+    pub tainted: Vec<TaintedPartitionRef>,
+    pub missing: Vec<MissingPartitionRef>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize, Default)]

@@ -1,8 +1,15 @@
-use uuid::Uuid;
+use crate::PartitionStatusCode::{PartitionFailed, PartitionLive};
 use crate::data_build_event::Event;
 use crate::util::current_timestamp;
-use crate::{event_source, CancelWantRequest, CancelWantResponse, CreateTaintRequest, CreateTaintResponse, CreateWantRequest, CreateWantResponse, EventSource, GetWantResponse, JobRunBufferEventV1, JobRunDetail, JobRunStatus, JobRunStatusCode, JobTriggeredEvent, ManuallyTriggeredEvent, PartitionDetail, PartitionRef, PartitionStatus, PartitionStatusCode, TaintCancelEventV1, TaintCreateEventV1, TaintDetail, WantAttributedPartitions, WantCancelEventV1, WantCreateEventV1, WantDetail, WantStatus, WantStatusCode};
-use crate::PartitionStatusCode::{PartitionFailed, PartitionLive};
+use crate::{
+    CancelWantRequest, CancelWantResponse, CreateTaintRequest, CreateTaintResponse,
+    CreateWantRequest, CreateWantResponse, EventSource, GetWantResponse, JobRunBufferEventV1,
+    JobRunDetail, JobRunStatus, JobRunStatusCode, JobTriggeredEvent, ManuallyTriggeredEvent,
+    PartitionDetail, PartitionRef, PartitionStatus, PartitionStatusCode, TaintCancelEventV1,
+    TaintCreateEventV1, TaintDetail, WantAttributedPartitions, WantCancelEventV1,
+    WantCreateEventV1, WantDetail, WantStatus, WantStatusCode, event_source,
+};
+use uuid::Uuid;
 
 impl From<&WantCreateEventV1> for WantDetail {
     fn from(e: &WantCreateEventV1) -> Self {

@@ -76,25 +83,31 @@ impl From<JobRunBufferEventV1> for JobRunDetail {
     }
 }
 
-pub fn want_status_matches_any(pds: &Vec<Option<PartitionDetail>>, status: PartitionStatusCode) -> bool {
-    pds.iter()
-        .any(|pd| pd.clone()
+pub fn want_status_matches_any(
+    pds: &Vec<Option<PartitionDetail>>,
+    status: PartitionStatusCode,
+) -> bool {
+    pds.iter().any(|pd| {
+        pd.clone()
             .map(|pd| pd.status == Some(status.into()))
-            .unwrap_or(false))
+            .unwrap_or(false)
+    })
 }
 
-pub fn want_status_matches_all(pds: &Vec<Option<PartitionDetail>>, status: PartitionStatusCode) -> bool {
-    pds.iter()
-        .all(|pd| pd.clone()
+pub fn want_status_matches_all(
+    pds: &Vec<Option<PartitionDetail>>,
+    status: PartitionStatusCode,
+) -> bool {
+    pds.iter().all(|pd| {
+        pd.clone()
             .map(|pd| pd.status == Some(status.into()))
-            .unwrap_or(false))
+            .unwrap_or(false)
+    })
 }
 
 /// Merges a list of partition details into a single status code.
 /// Takes the lowest state as the want status.
 impl Into<WantStatusCode> for Vec<Option<PartitionDetail>> {
 
     fn into(self) -> WantStatusCode {
         if want_status_matches_any(&self, PartitionFailed) {
             WantStatusCode::WantFailed

@@ -175,17 +188,13 @@ impl From<CreateWantRequest> for WantCreateEventV1 {
 
 impl Into<CreateWantResponse> for Option<WantDetail> {
     fn into(self) -> CreateWantResponse {
-        CreateWantResponse {
-            data: self,
-        }
+        CreateWantResponse { data: self }
     }
 }
 
 impl Into<GetWantResponse> for Option<WantDetail> {
     fn into(self) -> GetWantResponse {
-        GetWantResponse {
-            data: self,
-        }
+        GetWantResponse { data: self }
     }
 }
 

@@ -201,9 +210,7 @@ impl From<CancelWantRequest> for WantCancelEventV1 {
 
 impl Into<CancelWantResponse> for Option<WantDetail> {
     fn into(self) -> CancelWantResponse {
-        CancelWantResponse {
-            data: self,
-        }
+        CancelWantResponse { data: self }
     }
 }
 

@@ -219,4 +226,4 @@ impl Into<CreateTaintResponse> for Option<TaintDetail> {
             // TODO
         }
     }
 }

@@ -1,7 +1,7 @@
 use crate::job_run::{JobRun, SubProcessBackend};
+use crate::util::DatabuildError;
 use crate::{JobConfig, PartitionRef, WantDetail};
 use regex::Regex;
-use crate::util::DatabuildError;
 
 #[derive(Debug, Clone)]
 pub struct JobConfiguration {

@@ -12,17 +12,21 @@ pub struct JobConfiguration {
 
 impl JobConfiguration {
     /** Launch job to build the partitions specified by the provided wants. */
-    pub fn spawn(&self, wants: Vec<WantDetail>) -> Result<JobRun<SubProcessBackend>, std::io::Error> {
-        let wanted_refs: Vec<PartitionRef> =
-            wants.iter().flat_map(|want| want.partitions.clone()).collect();
+    pub fn spawn(
+        &self,
+        wants: Vec<WantDetail>,
+    ) -> Result<JobRun<SubProcessBackend>, std::io::Error> {
+        let wanted_refs: Vec<PartitionRef> = wants
+            .iter()
+            .flat_map(|want| want.partitions.clone())
+            .collect();
         let args: Vec<String> = wanted_refs.iter().map(|pref| pref.r#ref.clone()).collect();
         Ok(JobRun::spawn(self.entry_point.clone(), args))
     }
 
     pub fn matches(&self, refs: &PartitionRef) -> bool {
         self.patterns.iter().any(|pattern| {
-            let regex =
-                Regex::new(&pattern).expect(&format!("Invalid regex pattern: {}", pattern));
+            let regex = Regex::new(&pattern).expect(&format!("Invalid regex pattern: {}", pattern));
             regex.is_match(&refs.r#ref)
         })
     }

@@ -417,7 +417,7 @@ impl ToEvent for SubProcessDepMiss {
 mod tests {
     use crate::data_build_event::Event;
     use crate::data_deps::DATABUILD_MISSING_DEPS_JSON;
-    use crate::job_run::{JobRun, JobRunBackend, VisitResult, SubProcessBackend};
+    use crate::job_run::{JobRun, JobRunBackend, SubProcessBackend, VisitResult};
     use crate::mock_job_run::MockJobRun;
     use crate::{JobRunMissingDeps, MissingDeps};
 

@@ -1,13 +1,13 @@
 mod build_event_log;
-mod orchestrator;
-mod job_run;
+mod build_state;
+mod data_deps;
+mod event_transforms;
 mod job;
+mod job_run;
+mod mock_job_run;
+mod orchestrator;
 mod partition_state;
 mod util;
-mod build_state;
-mod event_transforms;
-mod data_deps;
-mod mock_job_run;
 mod want_state;
 
 // Include generated protobuf code

@@ -1,6 +1,6 @@
-use std::collections::HashMap;
 use crate::data_deps::DataDepLogLine;
 use crate::{JobRunMissingDeps, MissingDeps};
+use std::collections::HashMap;
 
 pub struct MockJobRun {
     sleep_ms: u64,

@@ -54,13 +54,13 @@ impl MockJobRun {
     }
 
     pub fn dep_miss(self, missing_deps: Vec<MissingDeps>) -> Self {
-        self.exit_code(1)
-            .stdout_msg(
-                &DataDepLogLine::DepMiss(JobRunMissingDeps {
-                    version: "1".to_string(),
-                    missing_deps,
-                }).into()
+        self.exit_code(1).stdout_msg(
+            &DataDepLogLine::DepMiss(JobRunMissingDeps {
+                version: "1".to_string(),
+                missing_deps,
+            })
+            .into(),
         )
     }
 
     pub fn to_env(&self) -> HashMap<String, String> {

@@ -97,7 +97,6 @@ struct WantGroup {
     wants: Vec<WantDetail>,
 }
 
-
 #[derive(Debug, Clone)]
 struct GroupedWants {
     want_groups: Vec<WantGroup>,

@@ -151,32 +150,30 @@ impl<S: BELStorage + Debug> Orchestrator<S> {
         let mut new_jobs = Vec::new();
         for job in self.job_runs.drain(..) {
             let transitioned = match job {
-                JobRun::Running(running) => {
-                    match running.visit()? {
-                        VisitResult::StillRunning(still_running) => {
-                            println!("Still running job: {:?}", still_running.job_run_id);
-                            JobRun::Running(still_running)
-                        }
-                        VisitResult::Completed(completed) => {
-                            println!("Completed job: {:?}", completed.job_run_id);
-                            let event = completed.state.to_event(&completed.job_run_id);
-                            self.bel.append_event(&event)?;
-                            JobRun::Completed(completed)
-                        }
-                        VisitResult::Failed(failed) => {
-                            println!("Failed job: {:?}", failed.job_run_id);
-                            let event = failed.state.to_event(&failed.job_run_id);
-                            self.bel.append_event(&event)?;
-                            JobRun::Failed(failed)
-                        }
-                        VisitResult::DepMiss(dep_miss) => {
-                            println!("Dep miss job: {:?}", dep_miss.job_run_id);
-                            let event = dep_miss.state.to_event(&dep_miss.job_run_id);
-                            self.bel.append_event(&event)?;
-                            JobRun::DepMiss(dep_miss)
-                        }
-                    }
-                }
+                JobRun::Running(running) => match running.visit()? {
+                    VisitResult::StillRunning(still_running) => {
+                        println!("Still running job: {:?}", still_running.job_run_id);
+                        JobRun::Running(still_running)
+                    }
+                    VisitResult::Completed(completed) => {
+                        println!("Completed job: {:?}", completed.job_run_id);
+                        let event = completed.state.to_event(&completed.job_run_id);
+                        self.bel.append_event(&event)?;
+                        JobRun::Completed(completed)
+                    }
+                    VisitResult::Failed(failed) => {
+                        println!("Failed job: {:?}", failed.job_run_id);
+                        let event = failed.state.to_event(&failed.job_run_id);
+                        self.bel.append_event(&event)?;
+                        JobRun::Failed(failed)
+                    }
+                    VisitResult::DepMiss(dep_miss) => {
+                        println!("Dep miss job: {:?}", dep_miss.job_run_id);
+                        let event = dep_miss.state.to_event(&dep_miss.job_run_id);
+                        self.bel.append_event(&event)?;
+                        JobRun::DepMiss(dep_miss)
+                    }
+                },
                 other => other, // Pass through all non-running states unchanged
             };
             new_jobs.push(transitioned);

@@ -231,8 +228,11 @@ impl<S: BELStorage + Debug> Orchestrator<S> {
         use crate::job_run::JobRun;
 
         // Compute args from wants the same way JobConfiguration::spawn() does
-        let wanted_refs: Vec<crate::PartitionRef> =
-            wg.wants.iter().flat_map(|want| want.partitions.clone()).collect();
+        let wanted_refs: Vec<crate::PartitionRef> = wg
+            .wants
+            .iter()
+            .flat_map(|want| want.partitions.clone())
+            .collect();
         let args: Vec<String> = wanted_refs.iter().map(|pref| pref.r#ref.clone()).collect();
         let job_run = JobRun::spawn(wg.job.entry_point.clone(), args);
 

@@ -264,7 +264,10 @@ impl<S: BELStorage + Debug> Orchestrator<S> {
     #[cfg(test)]
     fn count_running_jobs(&self) -> usize {
         use crate::job_run::JobRun;
-        self.job_runs.iter().filter(|j| matches!(j, JobRun::Running(_))).count()
+        self.job_runs
+            .iter()
+            .filter(|j| matches!(j, JobRun::Running(_)))
+            .count()
     }
 
     #[cfg(test)]

@@ -275,19 +278,28 @@ impl<S: BELStorage + Debug> Orchestrator<S> {
     #[cfg(test)]
     fn count_not_started_jobs(&self) -> usize {
        use crate::job_run::JobRun;
-        self.job_runs.iter().filter(|j| matches!(j, JobRun::NotStarted(_))).count()
+        self.job_runs
+            .iter()
+            .filter(|j| matches!(j, JobRun::NotStarted(_)))
+            .count()
     }
 
     #[cfg(test)]
     fn count_dep_miss_jobs(&self) -> usize {
         use crate::job_run::JobRun;
-        self.job_runs.iter().filter(|j| matches!(j, JobRun::DepMiss(_))).count()
+        self.job_runs
+            .iter()
+            .filter(|j| matches!(j, JobRun::DepMiss(_)))
+            .count()
     }
 
     #[cfg(test)]
     fn count_completed_jobs(&self) -> usize {
         use crate::job_run::JobRun;
-        self.job_runs.iter().filter(|j| matches!(j, JobRun::Completed(_))).count()
+        self.job_runs
+            .iter()
+            .filter(|j| matches!(j, JobRun::Completed(_)))
+            .count()
     }
 
     /** Entrypoint for running jobs */

@@ -433,9 +445,17 @@ mod tests {
         assert_eq!(orchestrator.count_not_started_jobs(), 1);
         // Verify the job has the right args by checking the first NotStarted job
         use crate::job_run::JobRun;
-        let not_started_job = orchestrator.job_runs.iter().find(|j| matches!(j, JobRun::NotStarted(_))).unwrap();
+        let not_started_job = orchestrator
+            .job_runs
+            .iter()
+            .find(|j| matches!(j, JobRun::NotStarted(_)))
+            .unwrap();
         if let JobRun::NotStarted(job) = not_started_job {
-            assert_eq!(job.state.args, vec!["data/alpha"], "should have scheduled alpha job");
+            assert_eq!(
+                job.state.args,
+                vec!["data/alpha"],
+                "should have scheduled alpha job"
+            );
         }
         assert_eq!(orchestrator.bel.state.count_job_runs(), 1);
     }

@@ -599,9 +619,7 @@ mod tests {
 
         thread::sleep(Duration::from_millis(1));
         // Should still be running after 1ms
-        orchestrator
-            .step()
-            .expect("should still be running");
+        orchestrator.step().expect("should still be running");
         assert_eq!(orchestrator.count_running_jobs(), 1);
         assert_eq!(orchestrator.bel.state.count_job_runs(), 1);
         println!("STATE: {:?}", orchestrator.bel.state);

@@ -773,7 +791,8 @@ echo 'Beta succeeded'
             .collect();
 
         assert!(
-            beta_wants.iter().any(|w| w.status.as_ref().map(|s| s.code) == Some(WantStatusCode::WantUpstreamBuilding as i32)),
+            beta_wants.iter().any(|w| w.status.as_ref().map(|s| s.code)
+                == Some(WantStatusCode::WantUpstreamBuilding as i32)),
             "At least one beta want should be in UpstreamBuilding state, found: {:?}",
             beta_wants.iter().map(|w| &w.status).collect::<Vec<_>>()
         );

@@ -819,7 +838,11 @@ echo 'Beta succeeded'
 
         // Step 7: Beta is rescheduled and started (want -> running_jobs)
         orchestrator.step().expect("step 7");
-        assert_eq!(orchestrator.count_running_jobs(), 1, "beta should be running");
+        assert_eq!(
+            orchestrator.count_running_jobs(),
+            1,
+            "beta should be running"
+        );
 
         // Step 8: Beta completes successfully
         wait_for_jobs_to_complete(&mut orchestrator, 10).expect("beta job should complete");

@@ -1,4 +1,5 @@
-use crate::{PartitionRef, PartitionDetail, PartitionStatus, PartitionStatusCode};
+use crate::{PartitionDetail, PartitionRef, PartitionStatus, PartitionStatusCode};
+use serde::{Deserialize, Serialize};
 
 /// State: Partition has been referenced but not yet built
 #[derive(Debug, Clone)]

@@ -49,6 +50,50 @@ pub enum Partition {
     Tainted(PartitionWithState<TaintedState>),
 }
 
+/// Type-safe partition reference wrappers that encode state expectations in function signatures. It
+/// is critical that these be treated with respect, not just summoned because it's convenient.
+/// These should be created ephemerally from typestate objects via .get_ref() and used
+/// immediately — never stored long-term, as partition state can change.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct MissingPartitionRef(pub PartitionRef);
+impl PartitionWithState<MissingState> {
+    pub fn get_ref(&self) -> MissingPartitionRef {
+        MissingPartitionRef(self.partition_ref.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct BuildingPartitionRef(pub PartitionRef);
+impl PartitionWithState<BuildingState> {
+    pub fn get_ref(&self) -> BuildingPartitionRef {
+        BuildingPartitionRef(self.partition_ref.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct LivePartitionRef(pub PartitionRef);
+impl PartitionWithState<LiveState> {
+    pub fn get_ref(&self) -> LivePartitionRef {
+        LivePartitionRef(self.partition_ref.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct FailedPartitionRef(pub PartitionRef);
+impl PartitionWithState<FailedState> {
+    pub fn get_ref(&self) -> FailedPartitionRef {
+        FailedPartitionRef(self.partition_ref.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct TaintedPartitionRef(pub PartitionRef);
+impl PartitionWithState<TaintedState> {
+    pub fn get_ref(&self) -> TaintedPartitionRef {
+        TaintedPartitionRef(self.partition_ref.clone())
+    }
+}
+
 // Type-safe transition methods for MissingState
 impl PartitionWithState<MissingState> {
     /// Transition from Missing to Building when a job starts building this partition

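To see what the wrappers buy, consider a consumer in the style of transition_partitions_to_live. The sketch below uses simplified, hypothetical types (not the real BuildState): the only way to obtain a LivePartitionRef is from a partition that is actually in the Live typestate, so a caller cannot hand the function a partition it merely hopes is live.

use std::marker::PhantomData;

#[derive(Debug, Clone)]
pub struct PartitionRef {
    pub r#ref: String,
}

pub struct LiveState;
pub struct BuildingState;

// Typestate carrier: the state is a type parameter, not a runtime field.
pub struct PartitionWithState<S> {
    pub partition_ref: PartitionRef,
    _state: PhantomData<S>,
}

pub struct LivePartitionRef(pub PartitionRef);

impl PartitionWithState<LiveState> {
    // Only the Live typestate can mint a LivePartitionRef.
    pub fn get_ref(&self) -> LivePartitionRef {
        LivePartitionRef(self.partition_ref.clone())
    }
}

// A consumer that demands proof of liveness in its signature.
fn record_live(refs: &[LivePartitionRef]) {
    for r in refs {
        println!("live: {}", r.0.r#ref);
    }
}

fn main() {
    let live = PartitionWithState::<LiveState> {
        partition_ref: PartitionRef { r#ref: "data/alpha".into() },
        _state: PhantomData,
    };
    record_live(&[live.get_ref()]);
    // A PartitionWithState<BuildingState> has no get_ref() -> LivePartitionRef,
    // so passing a building partition here simply does not compile.
}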
@@ -1,5 +1,5 @@
-use std::time::{SystemTime, UNIX_EPOCH};
 use std::backtrace::Backtrace;
+use std::time::{SystemTime, UNIX_EPOCH};
 
 pub fn current_timestamp() -> u64 {
     let now = SystemTime::now();

@@ -27,7 +27,7 @@ impl DatabuildError {
         Self {
             msg: msg.into(),
             source: None,
-            backtrace: maybe_backtrace()
+            backtrace: maybe_backtrace(),
         }
     }
 }

@@ -37,7 +37,7 @@ impl From<std::io::Error> for DatabuildError {
         Self {
             msg: err.to_string(),
             source: Some(Box::new(err)),
-            backtrace: maybe_backtrace()
+            backtrace: maybe_backtrace(),
         }
     }
 }

@@ -47,7 +47,7 @@ impl From<rusqlite::Error> for DatabuildError {
         Self {
             msg: err.to_string(),
             source: Some(Box::new(err)),
-            backtrace: maybe_backtrace()
+            backtrace: maybe_backtrace(),
         }
     }
 }

@@ -57,7 +57,7 @@ impl From<prost::EncodeError> for DatabuildError {
         Self {
             msg: err.to_string(),
             source: Some(Box::new(err)),
-            backtrace: maybe_backtrace()
+            backtrace: maybe_backtrace(),
         }
     }
 }

@@ -67,7 +67,7 @@ impl From<serde_json::Error> for DatabuildError {
         Self {
             msg: err.to_string(),
             source: Some(Box::new(err)),
-            backtrace: maybe_backtrace()
+            backtrace: maybe_backtrace(),
        }
     }
 }

@@ -1,5 +1,8 @@
+use crate::partition_state::FailedPartitionRef;
 use crate::util::current_timestamp;
 use crate::{EventSource, PartitionRef, WantCreateEventV1, WantDetail, WantStatusCode};
+use serde::{Deserialize, Serialize};
+use uuid::Uuid;
 
 /// State: Want has been created and is ready to be scheduled
 #[derive(Debug, Clone)]

@@ -101,6 +104,66 @@ pub enum Want {
     Canceled(WantWithState<CanceledState>),
 }
 
+/// Type-safe want ID wrappers that encode state expectations in function signatures. It
+/// is critical that these be treated with respect, not just summoned because it's convenient.
+/// These should be created ephemerally from typestate objects via .get_id() and used
+/// immediately — never stored long-term, as want state can change.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct IdleWantId(pub String);
+impl WantWithState<IdleState> {
+    pub fn get_id(&self) -> IdleWantId {
+        IdleWantId(self.want.want_id.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct BuildingWantId(pub String);
+impl WantWithState<BuildingState> {
+    pub fn get_id(&self) -> BuildingWantId {
+        BuildingWantId(self.want.want_id.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct UpstreamBuildingWantId(pub String);
+impl WantWithState<UpstreamBuildingState> {
+    pub fn get_id(&self) -> UpstreamBuildingWantId {
+        UpstreamBuildingWantId(self.want.want_id.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct SuccessfulWantId(pub String);
+impl WantWithState<SuccessfulState> {
+    pub fn get_id(&self) -> SuccessfulWantId {
+        SuccessfulWantId(self.want.want_id.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct FailedWantId(pub String);
+impl WantWithState<FailedState> {
+    pub fn get_id(&self) -> FailedWantId {
+        FailedWantId(self.want.want_id.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct UpstreamFailedWantId(pub String);
+impl WantWithState<UpstreamFailedState> {
+    pub fn get_id(&self) -> UpstreamFailedWantId {
+        UpstreamFailedWantId(self.want.want_id.clone())
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct CanceledWantId(pub String);
+impl WantWithState<CanceledState> {
+    pub fn get_id(&self) -> CanceledWantId {
+        CanceledWantId(self.want.want_id.clone())
+    }
+}
+
 // From impl for creating want from event
 impl From<WantCreateEventV1> for WantWithState<IdleState> {
     fn from(event: WantCreateEventV1) -> Self {

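Taken together with the partition wrappers above, these IDs make the two cascades in BuildState mutually incompatible at compile time: unblock_downstream_wants accepts only &[SuccessfulWantId] and cascade_failures_to_downstream_wants only &[FailedWantId], so swapping the success and failure lists at a call site is a type error rather than a silent bug.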
@@ -207,20 +270,20 @@ impl WantWithState<BuildingState> {
 // Type-safe transition methods for FailedState
 impl WantWithState<FailedState> {
     /// Add more failed partitions to an already-failed want (self-transition)
-    pub fn add_failed_partitions(mut self, partition_refs: Vec<PartitionRef>) -> Self {
+    pub fn add_failed_partitions(mut self, partition_refs: Vec<FailedPartitionRef>) -> Self {
         for partition_ref in partition_refs {
             if self
                 .state
                 .failed_partition_refs
                 .iter()
-                .any(|p| p.r#ref == partition_ref.r#ref)
+                .any(|p| p.r#ref == partition_ref.0.r#ref)
             {
                 panic!(
                     "BUG: Attempted to add failed partition {} that already exists in want {}",
-                    partition_ref.r#ref, self.want.want_id
+                    partition_ref.0.r#ref, self.want.want_id
                 );
             }
-            self.state.failed_partition_refs.push(partition_ref);
+            self.state.failed_partition_refs.push(partition_ref.0);
         }
 
         WantWithState {