// Build Event Log (BEL) schema for databuild.
syntax = "proto3";
package databuild.v1;

// Core Build Event Log (BEL)

// A reference to a single data partition.
message PartitionRef {
  // Opaque partition identifier. NOTE(review): the ref format/convention is
  // not defined in this file — confirm with producers before parsing it.
  string ref = 1;
}

// The base event for all events written to the BEL.
message DataBuildEvent {
  // Time the event was recorded. NOTE(review): units (seconds vs. millis
  // since epoch) are not specified anywhere in this file — confirm with
  // writers, or consider google.protobuf.Timestamp.
  uint64 timestamp = 1;

  // Identifier of this event. NOTE(review): uniqueness scope (global vs.
  // per-log) and ordering guarantees are not established here — confirm.
  uint64 event_id = 2;

  // Exactly one concrete event payload per BEL entry.
  oneof event {
    // Job run events.
    JobRunBufferEventV1 job_run_buffer_v1 = 10;
    JobRunQueueEventV1 job_run_queue_v1 = 11;
    JobRunHeartbeatEventV1 job_run_heartbeat_v1 = 12;
    JobRunSuccessEventV1 job_run_success_v1 = 13;
    JobRunFailureEventV1 job_run_failure_v1 = 14;
    JobRunCancelEventV1 job_run_cancel_v1 = 15;
    JobRunMissingDepsEventV1 job_run_missing_deps_v1 = 16;

    // Want events.
    WantCreateEventV1 want_create_v1 = 17;
    WantCancelEventV1 want_cancel_v1 = 18;

    // Taint events.
    TaintCreateEventV1 taint_create_v1 = 19;
    TaintDeleteEventV1 taint_delete_v1 = 20;
  }
}

// Source metadata for user-driven events (who or what initiated the event).
message EventSource {
  // Revisit - how do we model this? See this chat: https://claude.ai/share/76622c1c-7489-496e-be81-a64fef24e636
  // Classification of the initiating source (manual / automated / propagated).
  EventSourceType source_type = 1;

  // Human-readable name of the source (e.g. a user or system identifier).
  // NOTE(review): exact format is not established in this file — confirm.
  string source_name = 2;
}

// Pairs a machine-readable source code with a display name.
message EventSourceType {
  // Machine-readable classification of the event source.
  EventSourceCode code = 1;

  // Display name for the code. NOTE(review): appears to duplicate the enum
  // value's name — confirm whether it can ever differ from `code`.
  string name = 2;
}

// How a user-driven event was initiated.
//
// NOTE(review): values were renamed to the proto3 convention
// (TYPE_PREFIX + UPPER_SNAKE_CASE) and renumbered to free 0 for UNSPECIFIED;
// the original `Manual = 0` gave the default value business meaning. The
// renumbering is only safe if this schema has not yet been serialized in
// production — confirm before merging.
enum EventSourceCode {
  // Required proto3 zero value; never a meaningful source.
  EVENT_SOURCE_CODE_UNSPECIFIED = 0;

  // A human triggered the event directly.
  EVENT_SOURCE_CODE_MANUAL = 1;

  // An automated system triggered the event.
  EVENT_SOURCE_CODE_AUTOMATED = 2;

  // The event was propagated from another event.
  EVENT_SOURCE_CODE_PROPAGATED = 3;
}

// Indicates buffer state for job.
message JobRunBufferEventV1 {
  // Identifier of the job run entering the buffered state.
  string job_run_id = 1;

  // Label of the job definition this run belongs to.
  string job_label = 2;

  // IDs of the wants this run is servicing.
  repeated string servicing_want_ids = 3;

  // Refs of the partitions this run will produce.
  repeated string producing_partition_refs = 4;

  // TODO how do we handle buffer definition? Start simple, noop until we want something here?
}

// Just indicates that job has entered queue.
message JobRunQueueEventV1 {
  // Identifier of the queued job run.
  string job_run_id = 1;
}

// Emitted immediately on job spawn, and periodically by job to indicate job
// health when heartbeating is required. In future it will also be used to
// enable job re-entrance.
message JobRunHeartbeatEventV1 {
  // Identifier of the job run emitting the heartbeat.
  string job_run_id = 1;

  // TODO reentrance?
}

// Simply indicates that the job has succeeded.
message JobRunSuccessEventV1 {
  // Identifier of the successful job run.
  string job_run_id = 1;
}

// Simply indicates that the job has failed. Depending on retry logic defined
// in job, it may retry.
message JobRunFailureEventV1 {
  // Identifier of the failed job run.
  string job_run_id = 1;
}

// Job was explicitly canceled.
message JobRunCancelEventV1 {
  // Identifier of the canceled job run.
  string job_run_id = 1;

  // Who/what requested the cancellation.
  EventSource source = 2;

  // Optional free-form reason supplied by the canceller.
  optional string comment = 3;
}

// Job indicating that required deps are missing, listing upstreams ->
// impacted outputs so that wants can be propagated.
message JobRunMissingDepsEventV1 {
  // Identifier of the job run reporting missing dependencies.
  string job_run_id = 1;

  // One entry per group of missing upstreams and the outputs they block.
  repeated MissingDeps missing_deps = 2;
}

// A group of missing upstream partitions and the outputs they block.
message MissingDeps {
  // The list of partition refs that are prevented from building by these
  // missing deps (can be just 1).
  repeated PartitionRef impacted = 1;

  // The upstream partitions that are missing.
  repeated PartitionRef missing = 2;
}

// Creates a want: a request that a set of partitions be built.
message WantCreateEventV1 {
  // Identifier of this want.
  string want_id = 1;

  // Identifier of the root want in this propagation chain.
  string root_want_id = 2;

  // Identifier of the immediate parent want. NOTE(review): presumably empty
  // for a root want — confirm, and consider marking `optional` to make the
  // absent case explicit.
  string parent_want_id = 3;

  // Partitions being requested.
  repeated PartitionRef partitions = 4;

  // NOTE(review): units and epoch semantics of data_timestamp are not
  // specified in this file — confirm with writers.
  uint64 data_timestamp = 5;

  // Time-to-live for the want, in seconds.
  uint64 ttl_seconds = 6;

  // Service-level agreement window for fulfilling the want, in seconds.
  uint64 sla_seconds = 7;

  // Who/what created the want.
  EventSource source = 8;

  // Optional free-form note from the creator.
  optional string comment = 9;
}

// Cancels a previously created want.
message WantCancelEventV1 {
  // Identifier of the want being canceled.
  string want_id = 1;

  // Who/what requested the cancellation.
  EventSource source = 2;

  // Optional free-form reason supplied by the canceller.
  optional string comment = 3;
}

// Creates a taint: marks a set of partitions as suspect/invalid.
message TaintCreateEventV1 {
  // Identifier of this taint.
  string taint_id = 1;

  // Identifier of the root taint in this propagation chain.
  string root_taint_id = 2;

  // Identifier of the immediate parent taint. NOTE(review): presumably empty
  // for a root taint — confirm, and consider marking `optional`.
  string parent_taint_id = 3;

  // Partitions being tainted.
  repeated PartitionRef partitions = 4;

  // Who/what created the taint.
  EventSource source = 5;

  // Optional free-form note from the creator.
  optional string comment = 6;
}

// Deletes a previously created taint.
message TaintDeleteEventV1 {
  // Identifier of the taint being deleted.
  string taint_id = 1;

  // Who/what requested the deletion.
  EventSource source = 2;

  // Optional free-form reason supplied by the deleter.
  optional string comment = 3;
}

// Build State

// Represents the whole state of the system.
message BuildState {
  // All known wants, keyed by want_id.
  map<string, WantDetail> wants = 1;

  // All known partitions, keyed by partition ref.
  map<string, PartitionDetail> partitions = 2;

  // All known taints, keyed by taint_id.
  map<string, TaintDetail> taints = 3;

  // All known job runs, keyed by job_run_id.
  map<string, JobRunDetail> job_runs = 4;
}

// Materialized state of a single want.
message WantDetail {
  // Identifier of the want (also the BuildState.wants map key).
  string want_id = 1;

  // Partitions requested by this want.
  repeated PartitionRef refs = 2;

  // TODO
}

// Materialized state of a single partition.
message PartitionDetail {
  // The partition reference.
  PartitionRef ref = 1;

  // The partition's current status.
  PartitionStatus status = 2;

  // The latest update to the partition's status. NOTE(review): timestamp
  // units are not specified in this file — confirm with writers.
  optional uint64 last_updated_at = 3;

  // IDs that associate the partition with other objects.
  repeated string job_run_ids = 4;
  repeated string want_ids = 5;
  repeated string taint_ids = 6;
}

// Pairs a machine-readable partition status code with a display name.
message PartitionStatus {
  // Machine-readable status of the partition.
  PartitionStatusCode code = 1;

  // Display name for the code. NOTE(review): appears to duplicate the enum
  // value's name — confirm whether it can ever differ from `code`.
  string name = 2;
}

// Lifecycle status of a partition.
//
// NOTE(review): values renamed to the proto3 convention (TYPE_PREFIX +
// UPPER_SNAKE_CASE); numbers are unchanged, so the rename is wire-compatible
// but source-breaking for generated code.
enum PartitionStatusCode {
  // TODO how do we avoid copying job states here?

  // Required proto3 zero value; status not known (was `Unknown`).
  PARTITION_STATUS_CODE_UNSPECIFIED = 0;

  // A want exists for the partition but no build has started.
  PARTITION_STATUS_CODE_WANTED = 1;

  // A job run is building the partition.
  PARTITION_STATUS_CODE_BUILDING = 2;

  // The partition is built and usable.
  PARTITION_STATUS_CODE_LIVE = 3;

  // The build for the partition failed.
  PARTITION_STATUS_CODE_FAILED = 4;

  // The partition is marked suspect/invalid by a taint.
  PARTITION_STATUS_CODE_TAINTED = 5;
}

// Materialized state of a single taint. Placeholder; fields TBD.
message TaintDetail {
  // TODO
}

// Materialized state of a single job run. Placeholder; fields TBD.
message JobRunDetail {
  // TODO
}

// Filter describing which BEL events a reader is interested in.
message EventFilter {
  // IDs of wants to get relevant events for.
  repeated string want_ids = 1;
}