From 5af7e751ae715ef9fdd8a6cfb467f442dfb3a606 Mon Sep 17 00:00:00 2001 From: soaxelbrooke Date: Sat, 12 Jul 2025 09:59:27 -0700 Subject: [PATCH] Fetch from the actual service --- databuild/client/BUILD.bazel | 74 +++++++- databuild/client/tsconfig.json | 18 ++ databuild/dashboard/BUILD.bazel | 3 +- databuild/dashboard/pages.ts | 292 ++++++++++++++++++++++++++++++-- databuild/dashboard/services.ts | 192 +++++++++++++++++++++ databuild/event_log/mod.rs | 45 +++++ databuild/event_log/postgres.rs | 28 +++ databuild/event_log/sqlite.rs | 206 ++++++++++++++++++++++ databuild/event_log/stdout.rs | 31 ++++ databuild/service/handlers.rs | 158 +++++++++++++++++ databuild/service/mod.rs | 47 +++++ 11 files changed, 1068 insertions(+), 26 deletions(-) create mode 100644 databuild/client/tsconfig.json create mode 100644 databuild/dashboard/services.ts diff --git a/databuild/client/BUILD.bazel b/databuild/client/BUILD.bazel index e1376b9..0c1fbad 100644 --- a/databuild/client/BUILD.bazel +++ b/databuild/client/BUILD.bazel @@ -1,3 +1,5 @@ +load("@aspect_rules_ts//ts:defs.bzl", "ts_project", "ts_config") + # Extract OpenAPI spec from the dedicated spec generator binary genrule( name = "extract_openapi_spec", @@ -28,7 +30,25 @@ genrule( ], outs = [ "typescript_generated/src/apis/DefaultApi.ts", + "typescript_generated/src/apis/index.ts", "typescript_generated/src/models/index.ts", + "typescript_generated/src/models/ActivityResponse.ts", + "typescript_generated/src/models/AnalyzeRequest.ts", + "typescript_generated/src/models/AnalyzeResponse.ts", + "typescript_generated/src/models/BuildEventSummary.ts", + "typescript_generated/src/models/BuildRequest.ts", + "typescript_generated/src/models/BuildRequestResponse.ts", + "typescript_generated/src/models/BuildStatusRequest.ts", + "typescript_generated/src/models/BuildStatusResponse.ts", + "typescript_generated/src/models/BuildSummary.ts", + "typescript_generated/src/models/BuildsListResponse.ts", + 
"typescript_generated/src/models/CancelBuildRequest.ts", + "typescript_generated/src/models/PartitionEventsRequest.ts", + "typescript_generated/src/models/PartitionEventsResponse.ts", + "typescript_generated/src/models/PartitionStatusRequest.ts", + "typescript_generated/src/models/PartitionStatusResponse.ts", + "typescript_generated/src/models/PartitionSummary.ts", + "typescript_generated/src/models/PartitionsListResponse.ts", "typescript_generated/src/runtime.ts", "typescript_generated/src/index.ts", ], @@ -39,26 +59,62 @@ genrule( curl -L -o $$OPENAPI_JAR https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/7.2.0/openapi-generator-cli-7.2.0.jar fi - # Create output directory - OUTPUT_DIR=$$(dirname $(location typescript_generated/src/index.ts)) - OUTPUT_PARENT=$$(dirname $$OUTPUT_DIR) + # Create temporary directory for generation + TEMP_DIR=$$(mktemp -d) - # Generate TypeScript client + # Generate TypeScript client to temp directory java -jar $$OPENAPI_JAR generate \ -i $(location :extract_openapi_spec) \ -g typescript-fetch \ -c $(location :typescript_generator_config) \ - -o $$OUTPUT_PARENT + -o $$TEMP_DIR # Copy generated files to expected output locations - [ -f $$OUTPUT_PARENT/src/apis/DefaultApi.ts ] && cp $$OUTPUT_PARENT/src/apis/DefaultApi.ts $(location typescript_generated/src/apis/DefaultApi.ts) || touch $(location typescript_generated/src/apis/DefaultApi.ts) - [ -f $$OUTPUT_PARENT/src/models/index.ts ] && cp $$OUTPUT_PARENT/src/models/index.ts $(location typescript_generated/src/models/index.ts) || touch $(location typescript_generated/src/models/index.ts) - [ -f $$OUTPUT_PARENT/src/runtime.ts ] && cp $$OUTPUT_PARENT/src/runtime.ts $(location typescript_generated/src/runtime.ts) || touch $(location typescript_generated/src/runtime.ts) - [ -f $$OUTPUT_PARENT/src/index.ts ] && cp $$OUTPUT_PARENT/src/index.ts $(location typescript_generated/src/index.ts) || touch $(location typescript_generated/src/index.ts) + cp 
$$TEMP_DIR/src/apis/DefaultApi.ts $(location typescript_generated/src/apis/DefaultApi.ts) + cp $$TEMP_DIR/src/apis/index.ts $(location typescript_generated/src/apis/index.ts) + cp $$TEMP_DIR/src/models/index.ts $(location typescript_generated/src/models/index.ts) + cp $$TEMP_DIR/src/models/ActivityResponse.ts $(location typescript_generated/src/models/ActivityResponse.ts) + cp $$TEMP_DIR/src/models/AnalyzeRequest.ts $(location typescript_generated/src/models/AnalyzeRequest.ts) + cp $$TEMP_DIR/src/models/AnalyzeResponse.ts $(location typescript_generated/src/models/AnalyzeResponse.ts) + cp $$TEMP_DIR/src/models/BuildEventSummary.ts $(location typescript_generated/src/models/BuildEventSummary.ts) + cp $$TEMP_DIR/src/models/BuildRequest.ts $(location typescript_generated/src/models/BuildRequest.ts) + cp $$TEMP_DIR/src/models/BuildRequestResponse.ts $(location typescript_generated/src/models/BuildRequestResponse.ts) + cp $$TEMP_DIR/src/models/BuildStatusRequest.ts $(location typescript_generated/src/models/BuildStatusRequest.ts) + cp $$TEMP_DIR/src/models/BuildStatusResponse.ts $(location typescript_generated/src/models/BuildStatusResponse.ts) + cp $$TEMP_DIR/src/models/BuildSummary.ts $(location typescript_generated/src/models/BuildSummary.ts) + cp $$TEMP_DIR/src/models/BuildsListResponse.ts $(location typescript_generated/src/models/BuildsListResponse.ts) + cp $$TEMP_DIR/src/models/CancelBuildRequest.ts $(location typescript_generated/src/models/CancelBuildRequest.ts) + cp $$TEMP_DIR/src/models/PartitionEventsRequest.ts $(location typescript_generated/src/models/PartitionEventsRequest.ts) + cp $$TEMP_DIR/src/models/PartitionEventsResponse.ts $(location typescript_generated/src/models/PartitionEventsResponse.ts) + cp $$TEMP_DIR/src/models/PartitionStatusRequest.ts $(location typescript_generated/src/models/PartitionStatusRequest.ts) + cp $$TEMP_DIR/src/models/PartitionStatusResponse.ts $(location typescript_generated/src/models/PartitionStatusResponse.ts) + cp 
$$TEMP_DIR/src/models/PartitionSummary.ts $(location typescript_generated/src/models/PartitionSummary.ts) + cp $$TEMP_DIR/src/models/PartitionsListResponse.ts $(location typescript_generated/src/models/PartitionsListResponse.ts) + cp $$TEMP_DIR/src/runtime.ts $(location typescript_generated/src/runtime.ts) + cp $$TEMP_DIR/src/index.ts $(location typescript_generated/src/index.ts) """, visibility = ["//visibility:public"], ) +# TypeScript configuration for the client +ts_config( + name = "ts_config", + src = "tsconfig.json", + visibility = ["//visibility:public"], +) + +# Create a proper TypeScript project from the generated files +ts_project( + name = "typescript_lib", + srcs = [":typescript_client"], + allow_js = True, + declaration = True, + resolve_json_module = True, + transpiler = "tsc", + tsconfig = ":ts_config", + visibility = ["//visibility:public"], +) + # Main TypeScript client target filegroup( name = "typescript", diff --git a/databuild/client/tsconfig.json b/databuild/client/tsconfig.json new file mode 100644 index 0000000..547ac45 --- /dev/null +++ b/databuild/client/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "target": "ES2020", + "module": "ESNext", + "moduleResolution": "node", + "allowJs": true, + "declaration": true, + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": false + }, + "include": ["**/*"], + "exclude": ["node_modules", "**/*.test.ts"] +} \ No newline at end of file diff --git a/databuild/dashboard/BUILD.bazel b/databuild/dashboard/BUILD.bazel index e5045c4..3a67477 100644 --- a/databuild/dashboard/BUILD.bazel +++ b/databuild/dashboard/BUILD.bazel @@ -63,6 +63,7 @@ ts_project( "index.ts", "layout.ts", "pages.ts", + "services.ts", "utils.ts", ], allow_js = True, @@ -74,7 +75,7 @@ ts_project( ":node_modules/@types/node", ":node_modules/mithril", ":node_modules/whatwg-fetch", - 
"//databuild/client:typescript_client", + "//databuild/client:typescript_lib", ], ) diff --git a/databuild/dashboard/pages.ts b/databuild/dashboard/pages.ts index a7689b2..3b0e1bd 100644 --- a/databuild/dashboard/pages.ts +++ b/databuild/dashboard/pages.ts @@ -1,26 +1,286 @@ import m from 'mithril'; +import { DashboardService, pollingManager, formatTime, RecentActivitySummary } from './services'; +import { encodePartitionRef } from './utils'; // Page scaffold components export const RecentActivity = { - view: () => m('div.container.mx-auto.p-4', [ - m('h1.text-3xl.font-bold.mb-4', 'Recent Activity'), - m('div.card.bg-base-100.shadow-xl', [ - m('div.card-body', [ - m('h2.card-title', 'Dashboard Home'), - m('p', 'Recent build requests and system activity will be displayed here.'), - m('div.stats.shadow', [ - m('div.stat', [ - m('div.stat-title', 'Active Builds'), - m('div.stat-value', '0'), - ]), - m('div.stat', [ - m('div.stat-title', 'Recent Partitions'), - m('div.stat-value', '0'), + data: null as RecentActivitySummary | null, + loading: true, + error: null as string | null, + pollInterval: null as NodeJS.Timeout | null, + + loadData() { + console.log('RecentActivity: Starting loadData, loading=', this.loading); + this.loading = true; + this.error = null; + + const service = DashboardService.getInstance(); + console.log('RecentActivity: Got service instance, calling getRecentActivity'); + + return service.getRecentActivity() + .then(data => { + console.log('RecentActivity: Got data successfully', data); + this.data = data; + this.loading = false; + console.log('RecentActivity: Data loaded, loading=', this.loading, 'data=', !!this.data); + }) + .catch(error => { + console.error('RecentActivity: Error in loadData:', error); + this.error = error instanceof Error ? 
error.message : 'Failed to load data'; + this.loading = false; + }); + }, + + oninit() { + // Load initial data - Mithril will automatically redraw after promise resolves + this.loadData(); + + // Set up polling for real-time updates (5 second interval) + if (pollingManager.isVisible()) { + pollingManager.startPolling('recent-activity', () => { + this.loadData(); + }, 5000); + } + }, + + onremove() { + // Clean up polling when component is removed + pollingManager.stopPolling('recent-activity'); + }, + + view: function() { + console.log('RecentActivity: view() called, loading=', this.loading, 'data=', !!this.data, 'error=', this.error); + + if (this.loading && !this.data) { + return m('div.container.mx-auto.p-4', [ + m('div.flex.flex-col.justify-center.items-center.min-h-96', [ + m('span.loading.loading-spinner.loading-lg'), + m('span.ml-4.text-lg.mb-4', 'Loading dashboard...'), + m('button.btn.btn-sm.btn-outline', { + onclick: () => this.loadData() + }, 'Retry Load') + ]) + ]); + } + + if (this.error) { + return m('div.container.mx-auto.p-4', [ + m('div.alert.alert-error', [ + m('svg.stroke-current.shrink-0.h-6.w-6', { + fill: 'none', + viewBox: '0 0 24 24' + }, [ + m('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + 'stroke-width': '2', + d: 'M10 14l2-2m0 0l2-2m-2 2l-2-2m2 2l2 2m7-2a9 9 0 11-18 0 9 9 0 0118 0z' + }) ]), + m('span', this.error), + m('div', [ + m('button.btn.btn-sm.btn-outline', { + onclick: () => this.loadData() + }, 'Retry') + ]) + ]) + ]); + } + + const data = this.data; + if (!data) return m('div'); + + return m('div.container.mx-auto.p-4', [ + // Dashboard Header + m('div.dashboard-header.mb-6', [ + m('div.flex.justify-between.items-center.mb-4', [ + m('h1.text-3xl.font-bold', 'DataBuild Dashboard'), + m('div.badge.badge-success.badge-lg', `System: ${data.systemStatus}`) ]), + + // Statistics + m('div.stats.shadow.w-full.bg-base-100', [ + m('div.stat', [ + m('div.stat-figure.text-primary', [ + m('svg.w-8.h-8', { + fill: 
'none', + stroke: 'currentColor', + viewBox: '0 0 24 24' + }, [ + m('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + 'stroke-width': '2', + d: 'M13 10V3L4 14h7v7l9-11h-7z' + }) + ]) + ]), + m('div.stat-title', 'Active Builds'), + m('div.stat-value.text-primary', data.activeBuilds), + m('div.stat-desc', 'Currently running') + ]), + m('div.stat', [ + m('div.stat-figure.text-secondary', [ + m('svg.w-8.h-8', { + fill: 'none', + stroke: 'currentColor', + viewBox: '0 0 24 24' + }, [ + m('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + 'stroke-width': '2', + d: 'M9 12l2 2 4-4M7.835 4.697a3.42 3.42 0 001.946-.806 3.42 3.42 0 014.438 0 3.42 3.42 0 001.946.806 3.42 3.42 0 013.138 3.138 3.42 3.42 0 00.806 1.946 3.42 3.42 0 010 4.438 3.42 3.42 0 00-.806 1.946 3.42 3.42 0 01-3.138 3.138 3.42 3.42 0 00-1.946.806 3.42 3.42 0 01-4.438 0 3.42 3.42 0 00-1.946-.806 3.42 3.42 0 01-3.138-3.138 3.42 3.42 0 00-.806-1.946 3.42 3.42 0 010-4.438 3.42 3.42 0 00.806-1.946 3.42 3.42 0 013.138-3.138z' + }) + ]) + ]), + m('div.stat-title', 'Recent Builds'), + m('div.stat-value.text-secondary', data.recentBuilds.length), + m('div.stat-desc', 'In the last hour') + ]), + m('div.stat', [ + m('div.stat-figure.text-accent', [ + m('svg.w-8.h-8', { + fill: 'none', + stroke: 'currentColor', + viewBox: '0 0 24 24' + }, [ + m('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + 'stroke-width': '2', + d: 'M20 7l-8-4-8 4m16 0l-8 4m8-4v10l-8 4m0-10L4 7m8 4v10M9 5l8 4' + }) + ]) + ]), + m('div.stat-title', 'Total Partitions'), + m('div.stat-value.text-accent', data.totalPartitions), + m('div.stat-desc', 'Managed partitions') + ]) + ]) ]), - ]), - ]) + + // Dashboard Content Grid + m('div.dashboard-content.grid.grid-cols-1.lg:grid-cols-2.gap-6', [ + // Recent Build Requests + m('div.recent-builds.card.bg-base-100.shadow-xl', [ + m('div.card-body', [ + m('h2.card-title.text-xl.mb-4', [ + m('svg.w-6.h-6.mr-2', { + fill: 'none', + stroke: 
'currentColor', + viewBox: '0 0 24 24' + }, [ + m('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + 'stroke-width': '2', + d: 'M13 10V3L4 14h7v7l9-11h-7z' + }) + ]), + 'Recent Build Requests' + ]), + data.recentBuilds.length === 0 + ? m('div.text-center.py-8.text-base-content.opacity-60', 'No recent builds') + : m('div.overflow-x-auto', [ + m('table.table.table-sm', [ + m('thead', [ + m('tr', [ + m('th', 'Build ID'), + m('th', 'Status'), + m('th', 'Created'), + ]) + ]), + m('tbody', + data.recentBuilds.map(build => + m('tr.hover', [ + m('td', [ + m('a.link.link-primary.font-mono.text-sm', { + href: `/builds/${build.id}`, + onclick: (e: Event) => { + e.preventDefault(); + m.route.set(`/builds/${build.id}`); + } + }, build.id) + ]), + m('td', [ + m(`span.badge.badge-sm.${ + build.status === 'completed' ? 'badge-success' : + build.status === 'running' ? 'badge-warning' : + build.status === 'failed' ? 'badge-error' : + 'badge-neutral' + }`, build.status) + ]), + m('td.text-sm.opacity-70', formatTime(build.createdAt)), + ]) + ) + ) + ]) + ]) + ]) + ]), + + // Recent Partition Builds + m('div.recent-partitions.card.bg-base-100.shadow-xl', [ + m('div.card-body', [ + m('h2.card-title.text-xl.mb-4', [ + m('svg.w-6.h-6.mr-2', { + fill: 'none', + stroke: 'currentColor', + viewBox: '0 0 24 24' + }, [ + m('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + 'stroke-width': '2', + d: 'M20 7l-8-4-8 4m16 0l-8 4m8-4v10l-8 4m0-10L4 7m8 4v10M9 5l8 4' + }) + ]), + 'Recent Partition Builds' + ]), + data.recentPartitions.length === 0 + ? 
m('div.text-center.py-8.text-base-content.opacity-60', 'No recent partitions') + : m('div.overflow-x-auto', [ + m('table.table.table-sm', [ + m('thead', [ + m('tr', [ + m('th', 'Partition Reference'), + m('th', 'Status'), + m('th', 'Updated'), + ]) + ]), + m('tbody', + data.recentPartitions.map(partition => + m('tr.hover', [ + m('td', [ + m('a.link.link-primary.font-mono.text-sm.break-all', { + href: `/partitions/${encodePartitionRef(partition.ref)}`, + onclick: (e: Event) => { + e.preventDefault(); + m.route.set(`/partitions/${encodePartitionRef(partition.ref)}`); + }, + title: partition.ref + }, partition.ref) + ]), + m('td', [ + m(`span.badge.badge-sm.${ + partition.status === 'available' ? 'badge-success' : + partition.status === 'building' ? 'badge-warning' : + partition.status === 'failed' ? 'badge-error' : + 'badge-neutral' + }`, partition.status) + ]), + m('td.text-sm.opacity-70', formatTime(partition.updatedAt)), + ]) + ) + ) + ]) + ]) + ]) + ]) + ]) + ]); + } }; export const BuildStatus = { diff --git a/databuild/dashboard/services.ts b/databuild/dashboard/services.ts new file mode 100644 index 0000000..026b01e --- /dev/null +++ b/databuild/dashboard/services.ts @@ -0,0 +1,192 @@ +// Import the generated TypeScript client +import { DefaultApi, Configuration, ActivityResponse, BuildSummary, PartitionSummary } from '../client/typescript_generated/src/index'; + +// Base API configuration +const API_BASE = '/api/v1'; + +// Configure the API client +const apiConfig = new Configuration({ + basePath: '', // Use relative paths since we're on the same host +}); +const apiClient = new DefaultApi(apiConfig); + +// Types for dashboard data - using the generated API types +export interface BuildRequest { + id: string; + status: string; + createdAt: string; + updatedAt: string; +} + +export interface PartitionBuild { + ref: string; + status: string; + updatedAt: string; + buildRequestId?: string; +} + +export interface RecentActivitySummary { + activeBuilds: number; + 
recentBuilds: BuildRequest[]; + recentPartitions: PartitionBuild[]; + totalPartitions: number; + systemStatus: string; +} + +// API Service for fetching recent activity data +export class DashboardService { + private static instance: DashboardService; + + static getInstance(): DashboardService { + if (!DashboardService.instance) { + DashboardService.instance = new DashboardService(); + } + return DashboardService.instance; + } + + async getRecentActivity(): Promise { + try { + console.log('DashboardService: Fetching real data from API...'); + + // Use the new activity endpoint that aggregates all the data we need + const activityResponse: ActivityResponse = await apiClient.apiV1ActivityGet(); + + console.log('DashboardService: Got activity response:', activityResponse); + + // Convert the API response to our dashboard format + const recentBuilds: BuildRequest[] = activityResponse.recentBuilds.map((build: BuildSummary) => ({ + id: build.buildRequestId, + status: build.status, + createdAt: new Date(build.createdAt * 1000).toISOString(), // Convert from Unix timestamp + updatedAt: new Date(build.updatedAt * 1000).toISOString() + })); + + const recentPartitions: PartitionBuild[] = activityResponse.recentPartitions.map((partition: PartitionSummary) => ({ + ref: partition.partitionRef, + status: partition.status, + updatedAt: new Date(partition.updatedAt * 1000).toISOString(), + buildRequestId: partition.buildRequestId || undefined + })); + + return { + activeBuilds: activityResponse.activeBuildsCount, + recentBuilds, + recentPartitions, + totalPartitions: activityResponse.totalPartitionsCount, + systemStatus: activityResponse.systemStatus + }; + } catch (error) { + console.error('Failed to fetch recent activity:', error); + + // Fall back to mock data if API call fails + console.log('DashboardService: Falling back to mock data due to API error'); + return { + activeBuilds: 0, + recentBuilds: [], + recentPartitions: [], + totalPartitions: 0, + systemStatus: 'error' + }; 
+ } + } +} + +// Polling manager with Page Visibility API integration +export class PollingManager { + private intervals: Map = new Map(); + private isTabVisible: boolean = true; + private visibilityChangeHandler: () => void; + + constructor() { + this.visibilityChangeHandler = () => { + this.isTabVisible = !document.hidden; + + // Pause or resume polling based on tab visibility + if (this.isTabVisible) { + this.resumePolling(); + } else { + this.pausePolling(); + } + }; + + // Set up Page Visibility API listener only in browser environment + if (typeof document !== 'undefined') { + document.addEventListener('visibilitychange', this.visibilityChangeHandler); + } + } + + startPolling(key: string, callback: () => void, intervalMs: number): void { + // Clear existing interval if any + this.stopPolling(key); + + // Only start polling if tab is visible + if (this.isTabVisible) { + const interval = setInterval(callback, intervalMs); + this.intervals.set(key, interval); + } + } + + stopPolling(key: string): void { + const interval = this.intervals.get(key); + if (interval) { + clearInterval(interval); + this.intervals.delete(key); + } + } + + private pausePolling(): void { + // Store current intervals but clear them + for (const [key, interval] of this.intervals) { + clearInterval(interval); + } + } + + private resumePolling(): void { + // This is a simplified approach - in practice you'd want to store the callback + // and interval info to properly resume. For now, components will handle this + // by checking visibility state when setting up polling. 
+ } + + cleanup(): void { + // Clean up all intervals + for (const interval of this.intervals.values()) { + clearInterval(interval); + } + this.intervals.clear(); + + // Remove event listener only in browser environment + if (typeof document !== 'undefined') { + document.removeEventListener('visibilitychange', this.visibilityChangeHandler); + } + } + + isVisible(): boolean { + return this.isTabVisible; + } +} + +// Export singleton instance +export const pollingManager = new PollingManager(); + +// Utility functions for time formatting +export function formatTime(isoString: string): string { + const date = new Date(isoString); + const now = new Date(); + const diffMs = now.getTime() - date.getTime(); + + if (diffMs < 60000) { // Less than 1 minute + return 'just now'; + } else if (diffMs < 3600000) { // Less than 1 hour + const minutes = Math.floor(diffMs / 60000); + return `${minutes}m ago`; + } else if (diffMs < 86400000) { // Less than 1 day + const hours = Math.floor(diffMs / 3600000); + return `${hours}h ago`; + } else { + return date.toLocaleDateString(); + } +} + +export function formatDateTime(isoString: string): string { + return new Date(isoString).toLocaleString(); +} \ No newline at end of file diff --git a/databuild/event_log/mod.rs b/databuild/event_log/mod.rs index 716f507..48120aa 100644 --- a/databuild/event_log/mod.rs +++ b/databuild/event_log/mod.rs @@ -36,6 +36,32 @@ pub struct QueryResult { pub rows: Vec>, } +// Summary types for list endpoints +#[derive(Debug, Clone)] +pub struct BuildRequestSummary { + pub build_request_id: String, + pub status: BuildRequestStatus, + pub requested_partitions: Vec, + pub created_at: i64, + pub updated_at: i64, +} + +#[derive(Debug, Clone)] +pub struct PartitionSummary { + pub partition_ref: String, + pub status: PartitionStatus, + pub updated_at: i64, + pub build_request_id: Option, +} + +#[derive(Debug, Clone)] +pub struct ActivitySummary { + pub active_builds_count: u32, + pub recent_builds: Vec, + pub 
recent_partitions: Vec, + pub total_partitions_count: u32, +} + #[async_trait] pub trait BuildEventLog: Send + Sync { // Append new event to the log @@ -85,6 +111,25 @@ pub trait BuildEventLog: Send + Sync { // Initialize/setup the storage backend async fn initialize(&self) -> Result<()>; + + // List recent build requests with pagination and filtering + async fn list_build_requests( + &self, + limit: u32, + offset: u32, + status_filter: Option, + ) -> Result<(Vec, u32)>; + + // List recent partitions with pagination and filtering + async fn list_recent_partitions( + &self, + limit: u32, + offset: u32, + status_filter: Option, + ) -> Result<(Vec, u32)>; + + // Get aggregated activity summary for dashboard + async fn get_activity_summary(&self) -> Result; } // Helper function to generate event ID diff --git a/databuild/event_log/postgres.rs b/databuild/event_log/postgres.rs index e637a99..372853e 100644 --- a/databuild/event_log/postgres.rs +++ b/databuild/event_log/postgres.rs @@ -92,4 +92,32 @@ impl BuildEventLog for PostgresBuildEventLog { "PostgreSQL implementation not yet available".to_string() )) } + + async fn list_build_requests( + &self, + _limit: u32, + _offset: u32, + _status_filter: Option, + ) -> Result<(Vec, u32)> { + Err(BuildEventLogError::DatabaseError( + "PostgreSQL implementation not yet available".to_string() + )) + } + + async fn list_recent_partitions( + &self, + _limit: u32, + _offset: u32, + _status_filter: Option, + ) -> Result<(Vec, u32)> { + Err(BuildEventLogError::DatabaseError( + "PostgreSQL implementation not yet available".to_string() + )) + } + + async fn get_activity_summary(&self) -> Result { + Err(BuildEventLogError::DatabaseError( + "PostgreSQL implementation not yet available".to_string() + )) + } } \ No newline at end of file diff --git a/databuild/event_log/sqlite.rs b/databuild/event_log/sqlite.rs index 68b8a63..9775b15 100644 --- a/databuild/event_log/sqlite.rs +++ b/databuild/event_log/sqlite.rs @@ -4,6 +4,31 @@ use 
rusqlite::{params, Connection, Row}; use serde_json; use std::sync::{Arc, Mutex}; +// Helper functions to convert strings back to enum values +fn string_to_build_request_status(s: &str) -> BuildRequestStatus { + match s { + "BuildRequestReceived" => BuildRequestStatus::BuildRequestReceived, + "BuildRequestPlanning" => BuildRequestStatus::BuildRequestPlanning, + "BuildRequestExecuting" => BuildRequestStatus::BuildRequestExecuting, + "BuildRequestCompleted" => BuildRequestStatus::BuildRequestCompleted, + "BuildRequestFailed" => BuildRequestStatus::BuildRequestFailed, + "BuildRequestCancelled" => BuildRequestStatus::BuildRequestCancelled, + _ => BuildRequestStatus::BuildRequestUnknown, + } +} + +fn string_to_partition_status(s: &str) -> PartitionStatus { + match s { + "PartitionRequested" => PartitionStatus::PartitionRequested, + "PartitionScheduled" => PartitionStatus::PartitionScheduled, + "PartitionBuilding" => PartitionStatus::PartitionBuilding, + "PartitionAvailable" => PartitionStatus::PartitionAvailable, + "PartitionFailed" => PartitionStatus::PartitionFailed, + "PartitionDelegated" => PartitionStatus::PartitionDelegated, + _ => PartitionStatus::PartitionUnknown, + } +} + pub struct SqliteBuildEventLog { connection: Arc>, } @@ -393,6 +418,187 @@ impl BuildEventLog for SqliteBuildEventLog { Ok(build_request_ids) } + async fn list_build_requests( + &self, + limit: u32, + offset: u32, + status_filter: Option, + ) -> Result<(Vec, u32)> { + let conn = self.connection.lock().unwrap(); + + // Build query based on status filter + let (where_clause, count_where_clause) = match status_filter { + Some(_) => (" WHERE bre.status = ?1", " WHERE bre.status = ?1"), + None => ("", ""), + }; + + let query = format!( + "SELECT DISTINCT be.build_request_id, bre.status, bre.requested_partitions, + MIN(be.timestamp) as created_at, MAX(be.timestamp) as updated_at + FROM build_events be + JOIN build_request_events bre ON be.event_id = bre.event_id{} + GROUP BY be.build_request_id + 
ORDER BY created_at DESC + LIMIT {} OFFSET {}", + where_clause, limit, offset + ); + + let count_query = format!( + "SELECT COUNT(DISTINCT be.build_request_id) + FROM build_events be + JOIN build_request_events bre ON be.event_id = bre.event_id{}", + count_where_clause + ); + + // Execute count query first + let total_count: u32 = if let Some(status) = status_filter { + let status_str = format!("{:?}", status); + conn.query_row(&count_query, params![status_str], |row| row.get(0)) + .map_err(|e| BuildEventLogError::QueryError(e.to_string()))? + } else { + conn.query_row(&count_query, [], |row| row.get(0)) + .map_err(|e| BuildEventLogError::QueryError(e.to_string()))? + }; + + // Execute main query + let mut stmt = conn.prepare(&query) + .map_err(|e| BuildEventLogError::QueryError(e.to_string()))?; + + let build_row_mapper = |row: &Row| -> rusqlite::Result { + Ok(BuildRequestSummary { + build_request_id: row.get(0)?, + status: string_to_build_request_status(&row.get::<_, String>(1)?), + requested_partitions: serde_json::from_str(&row.get::<_, String>(2)?).unwrap_or_default(), + created_at: row.get(3)?, + updated_at: row.get(4)?, + }) + }; + + let rows = if let Some(status) = status_filter { + let status_str = format!("{:?}", status); + stmt.query_map(params![status_str], build_row_mapper) + } else { + stmt.query_map([], build_row_mapper) + }.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?; + + let mut summaries = Vec::new(); + for row in rows { + summaries.push(row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?); + } + + Ok((summaries, total_count)) + } + + async fn list_recent_partitions( + &self, + limit: u32, + offset: u32, + status_filter: Option, + ) -> Result<(Vec, u32)> { + let conn = self.connection.lock().unwrap(); + + // Build query based on status filter + let (where_clause, count_where_clause) = match status_filter { + Some(_) => (" WHERE pe.status = ?1", " WHERE pe.status = ?1"), + None => ("", ""), + }; + + let query = format!( 
+ "SELECT pe.partition_ref, pe.status, MAX(be.timestamp) as updated_at, be.build_request_id + FROM build_events be + JOIN partition_events pe ON be.event_id = pe.event_id{} + GROUP BY pe.partition_ref + ORDER BY updated_at DESC + LIMIT {} OFFSET {}", + where_clause, limit, offset + ); + + let count_query = format!( + "SELECT COUNT(DISTINCT pe.partition_ref) + FROM build_events be + JOIN partition_events pe ON be.event_id = pe.event_id{}", + count_where_clause + ); + + // Execute count query first + let total_count: u32 = if let Some(status) = status_filter { + let status_str = format!("{:?}", status); + conn.query_row(&count_query, params![status_str], |row| row.get(0)) + .map_err(|e| BuildEventLogError::QueryError(e.to_string()))? + } else { + conn.query_row(&count_query, [], |row| row.get(0)) + .map_err(|e| BuildEventLogError::QueryError(e.to_string()))? + }; + + // Execute main query + let mut stmt = conn.prepare(&query) + .map_err(|e| BuildEventLogError::QueryError(e.to_string()))?; + + let row_mapper = |row: &Row| -> rusqlite::Result { + Ok(PartitionSummary { + partition_ref: row.get(0)?, + status: string_to_partition_status(&row.get::<_, String>(1)?), + updated_at: row.get(2)?, + build_request_id: Some(row.get(3)?), + }) + }; + + let rows = if let Some(status) = status_filter { + let status_str = format!("{:?}", status); + stmt.query_map(params![status_str], row_mapper) + } else { + stmt.query_map([], row_mapper) + }.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?; + + let mut summaries = Vec::new(); + for row in rows { + summaries.push(row.map_err(|e| BuildEventLogError::QueryError(e.to_string()))?); + } + + Ok((summaries, total_count)) + } + + async fn get_activity_summary(&self) -> Result { + // First get the simple counts without holding the lock across awaits + let (active_builds_count, total_partitions_count) = { + let conn = self.connection.lock().unwrap(); + + // Get active builds count (builds that are not completed, failed, or cancelled) 
+ let active_builds_count: u32 = conn.query_row( + "SELECT COUNT(DISTINCT be.build_request_id) + FROM build_events be + JOIN build_request_events bre ON be.event_id = bre.event_id + WHERE bre.status IN ('BuildRequestReceived', 'BuildRequestPlanning', 'BuildRequestExecuting')", + [], + |row| row.get(0) + ).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?; + + // Get total partitions count + let total_partitions_count: u32 = conn.query_row( + "SELECT COUNT(DISTINCT pe.partition_ref) + FROM partition_events pe + JOIN build_events be ON pe.event_id = be.event_id", + [], + |row| row.get(0) + ).map_err(|e| BuildEventLogError::QueryError(e.to_string()))?; + + (active_builds_count, total_partitions_count) + }; + + // Get recent builds (limit to 5 for summary) + let (recent_builds, _) = self.list_build_requests(5, 0, None).await?; + + // Get recent partitions (limit to 5 for summary) + let (recent_partitions, _) = self.list_recent_partitions(5, 0, None).await?; + + Ok(ActivitySummary { + active_builds_count, + recent_builds, + recent_partitions, + total_partitions_count, + }) + } + async fn initialize(&self) -> Result<()> { let conn = self.connection.lock().unwrap(); diff --git a/databuild/event_log/stdout.rs b/databuild/event_log/stdout.rs index 2f239eb..abbdaa8 100644 --- a/databuild/event_log/stdout.rs +++ b/databuild/event_log/stdout.rs @@ -95,4 +95,35 @@ impl BuildEventLog for StdoutBuildEventLog { // No initialization needed for stdout Ok(()) } + + async fn list_build_requests( + &self, + _limit: u32, + _offset: u32, + _status_filter: Option, + ) -> Result<(Vec, u32)> { + // Stdout implementation doesn't support querying + Err(BuildEventLogError::QueryError( + "Stdout build event log does not support querying".to_string() + )) + } + + async fn list_recent_partitions( + &self, + _limit: u32, + _offset: u32, + _status_filter: Option, + ) -> Result<(Vec, u32)> { + // Stdout implementation doesn't support querying + Err(BuildEventLogError::QueryError( + "Stdout 
build event log does not support querying".to_string() + )) + } + + async fn get_activity_summary(&self) -> Result { + // Stdout implementation doesn't support querying + Err(BuildEventLogError::QueryError( + "Stdout build event log does not support querying".to_string() + )) + } } \ No newline at end of file diff --git a/databuild/service/handlers.rs b/databuild/service/handlers.rs index 0e7ba59..888388a 100644 --- a/databuild/service/handlers.rs +++ b/databuild/service/handlers.rs @@ -496,4 +496,162 @@ fn event_to_message(event_type: &Option) -> Strin Some(crate::build_event::EventType::DelegationEvent(event)) => event.message.clone(), None => "Unknown event".to_string(), } +} + +// New handlers for list endpoints +use axum::extract::Query; +use std::collections::HashMap; + +pub async fn list_build_requests( + State(service): State, + Query(params): Query>, +) -> Result, (StatusCode, Json)> { + let limit = params.get("limit") + .and_then(|s| s.parse::().ok()) + .unwrap_or(20) + .min(100); // Cap at 100 + + let offset = params.get("offset") + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + + let status_filter = params.get("status") + .and_then(|s| match s.as_str() { + "received" => Some(BuildRequestStatus::BuildRequestReceived), + "planning" => Some(BuildRequestStatus::BuildRequestPlanning), + "executing" => Some(BuildRequestStatus::BuildRequestExecuting), + "completed" => Some(BuildRequestStatus::BuildRequestCompleted), + "failed" => Some(BuildRequestStatus::BuildRequestFailed), + "cancelled" => Some(BuildRequestStatus::BuildRequestCancelled), + _ => None, + }); + + match service.event_log.list_build_requests(limit, offset, status_filter).await { + Ok((summaries, total_count)) => { + let builds: Vec = summaries.into_iter().map(|s| BuildSummary { + build_request_id: s.build_request_id, + status: format!("{:?}", s.status), + requested_partitions: s.requested_partitions, + created_at: s.created_at, + updated_at: s.updated_at, + }).collect(); + + let has_more = 
(offset + limit) < total_count; + + Ok(Json(BuildsListResponse { + builds, + total_count, + has_more, + })) + } + Err(e) => { + error!("Failed to list build requests: {}", e); + Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: format!("Failed to list build requests: {}", e), + }), + )) + } + } +} + +pub async fn list_partitions( + State(service): State, + Query(params): Query>, +) -> Result, (StatusCode, Json)> { + let limit = params.get("limit") + .and_then(|s| s.parse::().ok()) + .unwrap_or(20) + .min(100); // Cap at 100 + + let offset = params.get("offset") + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + + let status_filter = params.get("status") + .and_then(|s| match s.as_str() { + "requested" => Some(PartitionStatus::PartitionRequested), + "scheduled" => Some(PartitionStatus::PartitionScheduled), + "building" => Some(PartitionStatus::PartitionBuilding), + "available" => Some(PartitionStatus::PartitionAvailable), + "failed" => Some(PartitionStatus::PartitionFailed), + "delegated" => Some(PartitionStatus::PartitionDelegated), + _ => None, + }); + + match service.event_log.list_recent_partitions(limit, offset, status_filter).await { + Ok((summaries, total_count)) => { + let partitions: Vec = summaries.into_iter().map(|s| PartitionSummary { + partition_ref: s.partition_ref, + status: format!("{:?}", s.status), + updated_at: s.updated_at, + build_request_id: s.build_request_id, + }).collect(); + + let has_more = (offset + limit) < total_count; + + Ok(Json(PartitionsListResponse { + partitions, + total_count, + has_more, + })) + } + Err(e) => { + error!("Failed to list partitions: {}", e); + Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: format!("Failed to list partitions: {}", e), + }), + )) + } + } +} + +pub async fn get_activity_summary( + State(service): State, +) -> Result, (StatusCode, Json)> { + match service.event_log.get_activity_summary().await { + Ok(summary) => { + let recent_builds: Vec = 
summary.recent_builds.into_iter().map(|s| BuildSummary { + build_request_id: s.build_request_id, + status: format!("{:?}", s.status), + requested_partitions: s.requested_partitions, + created_at: s.created_at, + updated_at: s.updated_at, + }).collect(); + + let recent_partitions: Vec = summary.recent_partitions.into_iter().map(|s| PartitionSummary { + partition_ref: s.partition_ref, + status: format!("{:?}", s.status), + updated_at: s.updated_at, + build_request_id: s.build_request_id, + }).collect(); + + // Simple system status logic + let system_status = if summary.active_builds_count > 10 { + "degraded".to_string() + } else { + "healthy".to_string() + }; + + Ok(Json(ActivityResponse { + active_builds_count: summary.active_builds_count, + recent_builds, + recent_partitions, + total_partitions_count: summary.total_partitions_count, + system_status, + })) + } + Err(e) => { + error!("Failed to get activity summary: {}", e); + Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: format!("Failed to get activity summary: {}", e), + }), + )) + } + } } \ No newline at end of file diff --git a/databuild/service/mod.rs b/databuild/service/mod.rs index e5ef6fe..be30475 100644 --- a/databuild/service/mod.rs +++ b/databuild/service/mod.rs @@ -94,6 +94,47 @@ pub struct ErrorResponse { pub error: String, } +// List endpoints request/response types +#[derive(Debug, Serialize, Deserialize, JsonSchema)] +pub struct BuildsListResponse { + pub builds: Vec, + pub total_count: u32, + pub has_more: bool, +} + +#[derive(Debug, Serialize, Deserialize, JsonSchema)] +pub struct BuildSummary { + pub build_request_id: String, + pub status: String, + pub requested_partitions: Vec, + pub created_at: i64, + pub updated_at: i64, +} + +#[derive(Debug, Serialize, Deserialize, JsonSchema)] +pub struct PartitionsListResponse { + pub partitions: Vec, + pub total_count: u32, + pub has_more: bool, +} + +#[derive(Debug, Serialize, Deserialize, JsonSchema)] +pub struct 
PartitionSummary { + pub partition_ref: String, + pub status: String, + pub updated_at: i64, + pub build_request_id: Option<String>, +} + +#[derive(Debug, Serialize, Deserialize, JsonSchema)] +pub struct ActivityResponse { + pub active_builds_count: u32, + pub recent_builds: Vec<BuildSummary>, + pub recent_partitions: Vec<PartitionSummary>, + pub total_partitions_count: u32, + pub system_status: String, +}
post(handlers::analyze_build_graph)) .route("/api/v1/openapi.json", get(Self::openapi_spec)) .with_state(Arc::new(self))