add state comments in protobuf

Stuart Axelbrooke 2025-11-24 21:49:02 +08:00
parent 5a768e9270
commit c556fec218


@@ -176,13 +176,27 @@ message WantStatus {
   string name = 2;
 }
 enum WantStatusCode {
-  WantIdle = 0;
-  WantBuilding = 1;
-  WantFailed = 2;
-  WantSuccessful = 3;
-  WantCanceled = 4;
-  WantUpstreamBuilding = 5;
-  WantUpstreamFailed = 6;
+  // Wants are created in this state, and they should immediately transition to another state based on the current state
+  // of partitions they reference.
+  WantNew = 0;
+  // The want is not building, but not blocked from building either - it is schedulable.
+  WantIdle = 1;
+  // No referenced partitions are failed, and at least one referenced partition is building.
+  WantBuilding = 2;
+  // At least 1 referenced partition is failed.
+  WantFailed = 3;
+  // All referenced partitions are live.
+  WantSuccessful = 4;
+  // The want itself has been canceled. It should no longer influence job scheduling, and any existing jobs not building
+  // partitions requested by other active wants should be canceled.
+  WantCanceled = 5;
+  // A referenced partition's building job failed with a dep miss, and a derivative want is now building the missed
+  // partitions. This want is waiting for missed partitions to be live before going back to Idle and becoming
+  // schedulable again.
+  WantUpstreamBuilding = 6;
+  // After entering WantUpstreamBuilding state, one of the derivative want's triggered jobs has failed, meaning this
+  // want will not be able to succeed.
+  WantUpstreamFailed = 7;
 }
 message WantDetail {
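Note: the new comments effectively describe a small state machine in which a want's partition-driven states are derived from the statuses of the partitions it references. As a rough illustration only (not code from this repository; the enums and the derive_want_status helper below are hypothetical, and Canceled plus the Upstream* states are driven by other events rather than derived here), that derivation could be sketched as:

from enum import Enum

class PartitionStatusCode(Enum):
    BUILDING = 0
    LIVE = 1
    FAILED = 2
    TAINTED = 3

class WantStatusCode(Enum):
    NEW = 0
    IDLE = 1
    BUILDING = 2
    FAILED = 3
    SUCCESSFUL = 4

def derive_want_status(partitions):
    """Map referenced partition statuses to a want's partition-driven status."""
    if any(p is PartitionStatusCode.FAILED for p in partitions):
        return WantStatusCode.FAILED        # at least one referenced partition failed
    if partitions and all(p is PartitionStatusCode.LIVE for p in partitions):
        return WantStatusCode.SUCCESSFUL    # all referenced partitions are live
    if any(p is PartitionStatusCode.BUILDING for p in partitions):
        return WantStatusCode.BUILDING      # none failed, at least one building
    return WantStatusCode.IDLE              # otherwise schedulable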
@@ -219,13 +233,18 @@ message PartitionStatus {
   string name = 2;
 }
 enum PartitionStatusCode {
-  // TODO how do we avoid copying job states here? This is essentially a union of job states and taints?
-  PartitionUnknown = 0;
-  PartitionWanted = 1;
-  PartitionBuilding = 2;
-  PartitionLive = 3;
-  PartitionFailed = 4;
-  PartitionTainted = 5;
+  // Work is in progress to produce the partition. This state acts as a leasing mechanism: the orchestrator will not
+  // schedule other jobs to produce this partition while it is in Building; e.g., a dep miss may have occurred when
+  // trying to build the partition, and jobs for the upstreams may be in progress, and this state enables us to signal
+  // that we shouldn't reschedule
+  PartitionBuilding = 0;
+  // The partition has been produced and is currently valid.
+  PartitionLive = 1;
+  // Building of the partition has failed in a way that is not retryable.
+  PartitionFailed = 2;
+  // The partition has been marked as tainted. It shouldn't be read, and if any active wants reference it, a job to
+  // build it should be scheduled.
+  PartitionTainted = 3;
 }
 message TaintDetail {
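Note: the comment on PartitionBuilding frames it as a lease that keeps the orchestrator from scheduling duplicate build jobs. A minimal sketch of that scheduling guard, assuming a hypothetical should_schedule_build helper and reusing the PartitionStatusCode enum from the previous sketch:

def should_schedule_build(status, referenced_by_active_want):
    """Decide whether to schedule a job to (re)build a partition.

    Hypothetical guard reflecting the comments above; PartitionStatusCode is
    the enum defined in the earlier sketch.
    """
    if status is PartitionStatusCode.BUILDING:
        return False  # lease held: a job is already producing this partition
    if status is PartitionStatusCode.LIVE:
        return False  # already produced and currently valid
    if status is PartitionStatusCode.FAILED:
        return False  # failed in a way that is not retryable
    # Tainted: rebuild only if an active want still references the partition.
    return referenced_by_active_want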
@@ -237,11 +256,17 @@ message JobRunStatus {
   string name = 2;
 }
 enum JobRunStatusCode {
+  // The job run has been queued, and will be run at some point in the future (e.g. pool slot opens, etc).
   JobRunQueued = 0;
+  // The job run is now running.
   JobRunRunning = 1;
+  // The job run has failed for a non-recoverable reason.
   JobRunFailed = 2;
+  // The job run has been canceled.
   JobRunCanceled = 3;
+  // The job run succeeded.
   JobRunSucceeded = 4;
+  // The job run failed due to specific missing deps, emitting a JobRunMissingDeps.
   JobRunDepMiss = 5;
 }
 message JobRunDetail {
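Note: per the comments, Queued and Running are in-flight states, while the rest are terminal; JobRunDepMiss additionally carries the missing-dependency information (via JobRunMissingDeps) that feeds the WantUpstreamBuilding path above. A small hypothetical classification helper (names assumed, not from this repository):

from enum import Enum

class JobRunStatusCode(Enum):
    QUEUED = 0
    RUNNING = 1
    FAILED = 2
    CANCELED = 3
    SUCCEEDED = 4
    DEP_MISS = 5

# Terminal states: the run has finished and its status will not change again.
TERMINAL_STATES = {
    JobRunStatusCode.FAILED,
    JobRunStatusCode.CANCELED,
    JobRunStatusCode.SUCCEEDED,
    JobRunStatusCode.DEP_MISS,
}

def is_terminal(status):
    return status in TERMINAL_STATES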