Tracking

Monitor Job Progress

Track job execution status, monitor progress, and retrieve detailed execution logs.

Get Job Execution Logs

GET /api/{tenantId}/{projectId}/execution/job/{jobId}/logs

Retrieves detailed execution logs for a specific job, including progress updates, error messages, and performance metrics.

Parameters

Parameter   Type   Location   Description
---------   ----   --------   -----------
tenantId    GUID   Path       The tenant identifier
projectId   GUID   Path       The project identifier
jobId       GUID   Path       The job identifier

Query Parameters

Parameter   Type       Description
---------   ----       -----------
level       string     Filter by log level: DEBUG, INFO, WARN, ERROR (default: INFO)
fromTime    datetime   Get logs from this timestamp
toTime      datetime   Get logs until this timestamp
limit       integer    Maximum number of log entries (default: 1000, max: 10000)
format      string     Response format: structured, raw (default: structured)

Response

{
  "jobId": "cc0e8400-e29b-41d4-a716-446655440000",
  "jobName": "Customer Journey Analysis",
  "jobStatus": "Running",
  "logsSummary": {
    "totalEntries": 247,
    "debugEntries": 89,
    "infoEntries": 145,
    "warnEntries": 11,
    "errorEntries": 2,
    "timeRange": {
      "startTime": "2024-01-20T10:30:00Z",
      "endTime": "2024-01-20T10:45:00Z"
    }
  },
  "logs": [
    {
      "timestamp": "2024-01-20T10:30:15Z",
      "level": "INFO",
      "component": "DataLoader",
      "stage": "Initialization",
      "message": "Starting data load for dataset 880e8400-e29b-41d4-a716-446655440000",
      "details": {
        "datasetSize": "45.7 MB",
        "expectedRecords": 192850,
        "format": "CSV"
      }
    },
    {
      "timestamp": "2024-01-20T10:32:45Z",
      "level": "INFO",
      "component": "ProcessMiner",
      "stage": "Data Processing",
      "message": "Processing batch 1 of 15",
      "details": {
        "batchSize": 12856,
        "progress": 6.7,
        "recordsPerSecond": 1247
      }
    },
    {
      "timestamp": "2024-01-20T10:38:22Z",
      "level": "WARN",
      "component": "DataValidator",
      "stage": "Data Processing",
      "message": "Found 125 records with missing timestamps",
      "details": {
        "affectedRecords": 125,
        "action": "Timestamp inferred from surrounding events",
        "impactOnAnalysis": "Minimal"
      }
    },
    {
      "timestamp": "2024-01-20T10:41:10Z",
      "level": "ERROR",
      "component": "AnalyticsEngine",
      "stage": "Analysis",
      "message": "Memory limit exceeded during bottleneck analysis",
      "details": {
        "memoryUsage": "3.8 GB",
        "memoryLimit": "4.0 GB",
        "action": "Switching to disk-based processing",
        "retry": true
      }
    },
    {
      "timestamp": "2024-01-20T10:45:33Z",
      "level": "INFO",
      "component": "ReportGenerator",
      "stage": "Output Generation",
      "message": "Generating process map visualization",
      "details": {
        "activitiesCount": 47,
        "pathsCount": 156,
        "formatRequested": "SVG"
      }
    }
  ],
  "executionMetrics": {
    "currentStage": "Output Generation",
    "stageProgress": 78,
    "overallProgress": 85,
    "processingRate": "1250 records/second",
    "memoryUsage": "2.3 GB",
    "cpuUsage": 67,
    "estimatedCompletion": "2024-01-20T10:52:00Z"
  }
}
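
Filtering is usually more useful than pulling the full log: the level, fromTime, and toTime query parameters narrow the result to the entries that matter. A minimal sketch follows; the instance URL, identifiers, and token are placeholders, and it assumes the level filter restricts results to that severity.

import requests

BASE = "https://your-mindzie-instance.com"  # placeholder instance URL
TENANT, PROJECT = "tenant-guid", "project-guid"  # placeholder identifiers
HEADERS = {"Authorization": "Bearer your-auth-token"}  # placeholder token

def get_error_logs(job_id, from_time, to_time):
    """Fetch only ERROR-level entries within a time window."""
    url = f"{BASE}/api/{TENANT}/{PROJECT}/execution/job/{job_id}/logs"
    params = {"level": "ERROR", "fromTime": from_time, "toTime": to_time, "limit": 500}
    response = requests.get(url, params=params, headers=HEADERS)
    response.raise_for_status()
    return response.json()

logs = get_error_logs("cc0e8400-e29b-41d4-a716-446655440000",
                      "2024-01-20T10:30:00Z", "2024-01-20T10:45:00Z")
for entry in logs["logs"]:
    print(f"[{entry['timestamp']}] {entry['component']}: {entry['message']}")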

Track Job Progress

GET /api/{tenantId}/{projectId}/execution/job/{jobId}/progress

Retrieves real-time progress information for a running job, including stage-by-stage completion and performance metrics.

Response

{
  "jobId": "cc0e8400-e29b-41d4-a716-446655440000",
  "jobName": "Customer Journey Analysis",
  "status": "Running",
  "overallProgress": {
    "percentage": 85,
    "startTime": "2024-01-20T10:30:00Z",
    "elapsedTime": "15 minutes 33 seconds",
    "estimatedRemaining": "2 minutes 27 seconds",
    "estimatedCompletion": "2024-01-20T10:52:00Z"
  },
  "stages": [
    {
      "stageName": "Data Loading",
      "stageOrder": 1,
      "status": "Completed",
      "progress": 100,
      "startTime": "2024-01-20T10:30:00Z",
      "endTime": "2024-01-20T10:32:15Z",
      "duration": "2 minutes 15 seconds",
      "recordsProcessed": 192850,
      "metrics": {
        "throughput": "1427 records/second",
        "dataValidated": true,
        "errorsFound": 0
      }
    },
    {
      "stageName": "Process Discovery",
      "stageOrder": 2,
      "status": "Completed",
      "progress": 100,
      "startTime": "2024-01-20T10:32:15Z",
      "endTime": "2024-01-20T10:41:30Z",
      "duration": "9 minutes 15 seconds",
      "recordsProcessed": 192850,
      "metrics": {
        "activitiesDiscovered": 47,
        "variantsFound": 234,
        "pathsIdentified": 156
      }
    },
    {
      "stageName": "Performance Analysis",
      "stageOrder": 3,
      "status": "Running",
      "progress": 78,
      "startTime": "2024-01-20T10:41:30Z",
      "estimatedEndTime": "2024-01-20T10:48:00Z",
      "recordsProcessed": 150243,
      "totalRecords": 192850,
      "metrics": {
        "bottlenecksIdentified": 8,
        "waitTimeCalculated": 150243,
        "cycleTimeCalculated": 150243
      }
    },
    {
      "stageName": "Report Generation",
      "stageOrder": 4,
      "status": "Pending",
      "progress": 0,
      "estimatedStartTime": "2024-01-20T10:48:00Z",
      "estimatedEndTime": "2024-01-20T10:52:00Z"
    }
  ],
  "currentActivity": {
    "component": "PerformanceAnalyzer",
    "operation": "Calculating resource utilization metrics",
    "details": "Processing activity transitions for efficiency analysis"
  },
  "resourceUsage": {
    "memoryUsage": "2.3 GB",
    "memoryLimit": "4.0 GB",
    "cpuUsage": 67,
    "diskUsage": "890 MB",
    "networkIO": "12 MB",
    "processingRate": "1250 records/second"
  },
  "qualityMetrics": {
    "dataQualityScore": 94.8,
    "validationsPassed": 15,
    "validationsFailed": 1,
    "warningsGenerated": 11,
    "errorsEncountered": 2
  }
}
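
Note that stages carry different fields depending on their state: completed stages include endTime and duration, the running stage includes totalRecords and estimatedEndTime, and pending stages only carry estimates. A renderer therefore has to branch on status; a small helper, sketched against the response shape above:

def summarize_stages(progress):
    """Print one line per stage from a parsed /progress response."""
    for stage in progress["stages"]:
        line = f"{stage['stageOrder']}. {stage['stageName']}: {stage['status']} ({stage['progress']}%)"
        if stage["status"] == "Completed":
            line += f", took {stage['duration']}"
        elif stage["status"] == "Running":
            line += f", {stage['recordsProcessed']}/{stage['totalRecords']} records"
        print(line)

# With the response above, this prints:
# 1. Data Loading: Completed (100%), took 2 minutes 15 seconds
# 2. Process Discovery: Completed (100%), took 9 minutes 15 seconds
# 3. Performance Analysis: Running (78%), 150243/192850 records
# 4. Report Generation: Pending (0%)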

Get Job Execution Timeline

GET /api/{tenantId}/{projectId}/execution/job/{jobId}/timeline

Retrieves a detailed timeline of job execution events, including stage transitions, resource allocation changes, and significant milestones.

Query Parameters

Parameter          Type      Description
---------          ----      -----------
includeSubEvents   boolean   Include detailed sub-events (default: false)
granularity        string    Timeline granularity: seconds, minutes, major_events (default: minutes)

Response

{
  "jobId": "cc0e8400-e29b-41d4-a716-446655440000",
  "jobName": "Customer Journey Analysis",
  "timelineScope": {
    "startTime": "2024-01-20T10:30:00Z",
    "currentTime": "2024-01-20T10:45:33Z",
    "endTime": null,
    "granularity": "minutes"
  },
  "timeline": [
    {
      "timestamp": "2024-01-20T10:30:00Z",
      "eventType": "JobStarted",
      "description": "Job execution initiated",
      "details": {
        "submittedBy": "user123",
        "priority": "High",
        "estimatedDuration": "20-25 minutes",
        "resourcesAllocated": {
          "cpuUnits": 2,
          "memoryGB": 4,
          "workerNode": "worker-node-02"
        }
      }
    },
    {
      "timestamp": "2024-01-20T10:30:15Z",
      "eventType": "StageStarted",
      "description": "Data Loading stage initiated",
      "details": {
        "stageName": "Data Loading",
        "expectedDuration": "2-3 minutes",
        "datasetSize": "45.7 MB",
        "recordCount": 192850
      }
    },
    {
      "timestamp": "2024-01-20T10:32:15Z",
      "eventType": "StageCompleted",
      "description": "Data Loading stage completed successfully",
      "details": {
        "stageName": "Data Loading",
        "actualDuration": "2 minutes 15 seconds",
        "recordsLoaded": 192850,
        "dataQualityScore": 98.2,
        "errorsFound": 0
      }
    },
    {
      "timestamp": "2024-01-20T10:32:15Z",
      "eventType": "StageStarted",
      "description": "Process Discovery stage initiated",
      "details": {
        "stageName": "Process Discovery",
        "expectedDuration": "8-12 minutes",
        "algorithm": "Alpha Miner Enhanced"
      }
    },
    {
      "timestamp": "2024-01-20T10:35:30Z",
      "eventType": "Milestone",
      "description": "Process model discovered",
      "details": {
        "activitiesFound": 47,
        "uniqueActivities": 47,
        "processComplexity": "Medium"
      }
    },
    {
      "timestamp": "2024-01-20T10:38:22Z",
      "eventType": "Warning",
      "description": "Data quality issue detected",
      "details": {
        "issue": "Missing timestamps",
        "affectedRecords": 125,
        "resolution": "Timestamps inferred",
        "impact": "Minimal"
      }
    },
    {
      "timestamp": "2024-01-20T10:41:10Z",
      "eventType": "Error",
      "description": "Memory limit approached",
      "details": {
        "memoryUsage": "3.8 GB",
        "memoryLimit": "4.0 GB",
        "action": "Switched to disk-based processing",
        "performanceImpact": "15% slower processing"
      }
    },
    {
      "timestamp": "2024-01-20T10:41:30Z",
      "eventType": "StageCompleted",
      "description": "Process Discovery stage completed",
      "details": {
        "stageName": "Process Discovery",
        "actualDuration": "9 minutes 15 seconds",
        "processVariants": 234,
        "pathsDiscovered": 156
      }
    },
    {
      "timestamp": "2024-01-20T10:41:30Z",
      "eventType": "StageStarted",
      "description": "Performance Analysis stage initiated",
      "details": {
        "stageName": "Performance Analysis",
        "expectedDuration": "6-8 minutes",
        "analysisTypes": ["Bottleneck", "Resource Utilization", "Cycle Time"]
      }
    },
    {
      "timestamp": "2024-01-20T10:45:33Z",
      "eventType": "Progress",
      "description": "Performance Analysis 78% complete",
      "details": {
        "stageName": "Performance Analysis",
        "progress": 78,
        "currentOperation": "Resource utilization analysis",
        "recordsProcessed": 150243,
        "remainingRecords": 42607
      }
    }
  ],
  "upcomingEvents": [
    {
      "estimatedTime": "2024-01-20T10:48:00Z",
      "eventType": "StageCompletion",
      "description": "Performance Analysis stage completion expected"
    },
    {
      "estimatedTime": "2024-01-20T10:48:00Z",
      "eventType": "StageStart",
      "description": "Report Generation stage start expected"
    },
    {
      "estimatedTime": "2024-01-20T10:52:00Z",
      "eventType": "JobCompletion",
      "description": "Job completion expected"
    }
  ]
}
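
For a coarse overview, granularity=major_events reduces the timeline to stage transitions and notable incidents. A sketch that flags warnings and errors, reusing the placeholder BASE, TENANT, PROJECT, and HEADERS from the logs sketch earlier:

import requests

def get_major_events(job_id):
    """Fetch a coarse-grained timeline and flag warnings and errors."""
    url = f"{BASE}/api/{TENANT}/{PROJECT}/execution/job/{job_id}/timeline"
    params = {"granularity": "major_events"}
    response = requests.get(url, params=params, headers=HEADERS)
    response.raise_for_status()
    for event in response.json()["timeline"]:
        marker = "!!" if event["eventType"] in ("Warning", "Error") else "  "
        print(f"{marker} [{event['timestamp']}] {event['eventType']}: {event['description']}")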

Get Job Performance Metrics

GET /api/{tenantId}/{projectId}/execution/job/{jobId}/metrics

Retrieves detailed performance metrics for a job execution, including resource utilization, throughput, and efficiency measurements.

Query Parameters

Parameter        Type      Description
---------        ----      -----------
interval         string    Metrics collection interval: 1m, 5m, 15m (default: 5m)
metrics          string    Comma-separated metrics: cpu, memory, disk, network, throughput
includeHistory   boolean   Include historical metrics data (default: false)

Response

{
  "jobId": "cc0e8400-e29b-41d4-a716-446655440000",
  "metricsCollectionTime": "2024-01-20T10:45:33Z",
  "currentMetrics": {
    "resourceUtilization": {
      "cpu": {
        "usage": 67,
        "cores": 2,
        "efficiency": 89.2
      },
      "memory": {
        "used": "2.3 GB",
        "allocated": "4.0 GB",
        "peak": "3.8 GB",
        "efficiency": 87.5
      },
      "disk": {
        "reads": "450 MB",
        "writes": "89 MB",
        "iops": 145,
        "latency": "12ms"
      },
      "network": {
        "bytesIn": "67 MB",
        "bytesOut": "12 MB",
        "connections": 8
      }
    },
    "processing": {
      "recordsPerSecond": 1250,
      "recordsProcessed": 150243,
      "totalRecords": 192850,
      "processingEfficiency": 78.3,
      "errorRate": 0.001,
      "retryRate": 0.015
    },
    "stages": {
      "completed": 2,
      "running": 1,
      "pending": 1,
      "averageStageTime": "5.75 minutes",
      "stageEfficiency": 91.2
    }
  },
  "historicalMetrics": [
    {
      "timestamp": "2024-01-20T10:30:00Z",
      "cpu": 15,
      "memory": 0.8,
      "recordsPerSecond": 0,
      "stage": "Initialization"
    },
    {
      "timestamp": "2024-01-20T10:35:00Z",
      "cpu": 85,
      "memory": 1.9,
      "recordsPerSecond": 1427,
      "stage": "Data Loading"
    },
    {
      "timestamp": "2024-01-20T10:40:00Z",
      "cpu": 72,
      "memory": 3.2,
      "recordsPerSecond": 1156,
      "stage": "Process Discovery"
    },
    {
      "timestamp": "2024-01-20T10:45:00Z",
      "cpu": 67,
      "memory": 2.3,
      "recordsPerSecond": 1250,
      "stage": "Performance Analysis"
    }
  ],
  "performanceTrends": {
    "cpuTrend": "Stable",
    "memoryTrend": "Declining",
    "throughputTrend": "Improving",
    "overallEfficiency": "Good",
    "predictionAccuracy": 94.2
  },
  "benchmarks": {
    "jobType": "ProcessMining",
    "averageJobDuration": "18.5 minutes",
    "averageThroughput": "1180 records/second",
    "currentPerformanceRank": "85th percentile",
    "similarJobsComparison": {
      "fasterThan": 85,
      "similarTo": 12,
      "slowerThan": 3
    }
  }
}
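
With includeHistory=true the response carries one sample per collection interval, which is enough to compute simple aggregates client-side. A sketch that derives peak CPU and memory from historicalMetrics (placeholders as in the logs sketch; memory values are in GB, matching the samples above):

import requests

def peak_resource_usage(job_id):
    """Return peak CPU (%) and memory (GB) across the historical samples."""
    url = f"{BASE}/api/{TENANT}/{PROJECT}/execution/job/{job_id}/metrics"
    params = {"interval": "5m", "includeHistory": "true"}
    response = requests.get(url, params=params, headers=HEADERS)
    response.raise_for_status()
    history = response.json().get("historicalMetrics", [])
    if not history:
        return None
    return {
        "peakCpuPercent": max(sample["cpu"] for sample in history),
        "peakMemoryGB": max(sample["memory"] for sample in history),
    }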

Track Multiple Jobs

GET /api/{tenantId}/{projectId}/execution/tracking/batch

Retrieves tracking information for multiple jobs in a single request, which is useful for dashboards and batch monitoring.

Query Parameters

Parameter         Type      Description
---------         ----      -----------
jobIds            string    Comma-separated list of job IDs to track
status            string    Filter by status: Running, Queued, Completed, Failed
submittedBy       string    Filter by the user who submitted the jobs
includeMetrics    boolean   Include performance metrics for each job (default: false)
refreshInterval   integer   Auto-refresh interval in seconds for real-time tracking

Response

{
  "trackingTime": "2024-01-20T10:45:33Z",
  "jobCount": 5,
  "summary": {
    "running": 3,
    "queued": 1,
    "completed": 1,
    "failed": 0
  },
  "jobs": [
    {
      "jobId": "cc0e8400-e29b-41d4-a716-446655440000",
      "jobName": "Customer Journey Analysis",
      "status": "Running",
      "progress": 85,
      "startTime": "2024-01-20T10:30:00Z",
      "estimatedCompletion": "2024-01-20T10:52:00Z",
      "currentStage": "Performance Analysis",
      "submittedBy": "user123",
      "priority": "High",
      "resourceUsage": {
        "cpu": 67,
        "memory": "2.3 GB",
        "processingRate": "1250 records/second"
      }
    },
    {
      "jobId": "dd0e8400-e29b-41d4-a716-446655440000",
      "jobName": "Sales Data Enrichment",
      "status": "Running",
      "progress": 45,
      "startTime": "2024-01-20T10:35:00Z",
      "estimatedCompletion": "2024-01-20T11:05:00Z",
      "currentStage": "Data Enrichment",
      "submittedBy": "system",
      "priority": "Normal",
      "resourceUsage": {
        "cpu": 52,
        "memory": "1.8 GB",
        "processingRate": "890 records/second"
      }
    },
    {
      "jobId": "ee0e8400-e29b-41d4-a716-446655440000",
      "jobName": "Weekly Report Generation",
      "status": "Queued",
      "progress": 0,
      "queuePosition": 2,
      "estimatedStartTime": "2024-01-20T10:55:00Z",
      "estimatedCompletion": "2024-01-20T11:20:00Z",
      "submittedBy": "user456",
      "priority": "Normal"
    }
  ],
  "systemHealth": {
    "overallLoad": 73,
    "queueHealth": "Good",
    "resourceAvailability": "Medium",
    "estimatedCapacity": "6 additional jobs"
  }
}
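
For a lightweight dashboard widget you rarely need every job or its metrics; the status filter keeps the payload small. A sketch that lists only the jobs currently running (placeholders as in the logs sketch):

import requests

def running_jobs():
    """Return (name, progress, current stage) for running jobs only."""
    url = f"{BASE}/api/{TENANT}/{PROJECT}/execution/tracking/batch"
    params = {"status": "Running"}
    response = requests.get(url, params=params, headers=HEADERS)
    response.raise_for_status()
    return [(job["jobName"], job["progress"], job.get("currentStage"))
            for job in response.json()["jobs"]]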

Subscribe to Job Events

POST /api/{tenantId}/{projectId}/execution/job/{jobId}/subscribe

Establishes a real-time subscription to job events for live tracking. Supports WebSocket connections and webhook notifications.

Request Body

{
  "subscriptionType": "webhook",
  "webhookUrl": "https://your-app.com/webhooks/job-events",
  "events": [
    "stageStarted",
    "stageCompleted",
    "progressUpdate",
    "error",
    "warning",
    "jobCompleted"
  ],
  "filters": {
    "minProgressIncrement": 5,
    "includeDebugEvents": false,
    "notifyOnErrors": true
  },
  "authentication": {
    "type": "bearer",
    "token": "your-webhook-auth-token"
  }
}

Response

{
  "subscriptionId": "sub-123e8400-e29b-41d4-a716-446655440000",
  "jobId": "cc0e8400-e29b-41d4-a716-446655440000",
  "subscriptionType": "webhook",
  "status": "Active",
  "webhookUrl": "https://your-app.com/webhooks/job-events",
  "eventsSubscribed": [
    "stageStarted",
    "stageCompleted",
    "progressUpdate",
    "error",
    "warning",
    "jobCompleted"
  ],
  "createdAt": "2024-01-20T10:45:33Z",
  "expiresAt": "2024-01-20T18:45:33Z"
}
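
On the receiving end you need an HTTP endpoint that verifies the bearer token configured in the subscription's authentication block before trusting the event. The payload schema delivered to the webhook is not documented here, so the field names below are assumptions modeled on the timeline events; a minimal Flask sketch:

from flask import Flask, request, abort

app = Flask(__name__)
EXPECTED_TOKEN = "your-webhook-auth-token"  # must match authentication.token in the subscription

@app.route("/webhooks/job-events", methods=["POST"])
def job_events():
    # Reject calls that do not carry the token configured on the subscription
    if request.headers.get("Authorization", "") != f"Bearer {EXPECTED_TOKEN}":
        abort(401)
    event = request.get_json(force=True)
    # eventType/description are assumed field names; verify against actual deliveries
    print(f"{event.get('eventType')}: {event.get('description')}")
    return "", 204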

Get Job Dependencies

GET /api/{tenantId}/{projectId}/execution/job/{jobId}/dependencies

Retrieves information about job dependencies, including prerequisite jobs, dependent resources, and blocking conditions.

Response

{
  "jobId": "cc0e8400-e29b-41d4-a716-446655440000",
  "dependencies": {
    "prerequisiteJobs": [
      {
        "jobId": "aa0e8400-e29b-41d4-a716-446655440000",
        "jobName": "Data Preparation",
        "status": "Completed",
        "completedAt": "2024-01-20T10:25:00Z",
        "dependency": "Dataset must be validated before analysis"
      }
    ],
    "resourceDependencies": [
      {
        "resourceType": "Dataset",
        "resourceId": "880e8400-e29b-41d4-a716-446655440000",
        "resourceName": "Customer Journey Data",
        "status": "Available",
        "lastModified": "2024-01-20T10:25:00Z"
      },
      {
        "resourceType": "ComputeNode",
        "resourceId": "worker-node-02",
        "status": "Allocated",
        "allocatedAt": "2024-01-20T10:30:00Z"
      }
    ],
    "dependentJobs": [
      {
        "jobId": "ee0e8400-e29b-41d4-a716-446655440000",
        "jobName": "Weekly Report Generation",
        "status": "Queued",
        "waitingFor": "Customer Journey Analysis results"
      }
    ]
  },
  "blockingConditions": [
    {
      "condition": "Memory allocation below 2GB",
      "status": "Resolved",
      "resolvedAt": "2024-01-20T10:30:00Z",
      "resolution": "Additional memory allocated"
    }
  ],
  "dependencyGraph": {
    "nodes": [
      {
        "id": "aa0e8400-e29b-41d4-a716-446655440000",
        "type": "PrerequisiteJob",
        "status": "Completed"
      },
      {
        "id": "cc0e8400-e29b-41d4-a716-446655440000",
        "type": "CurrentJob",
        "status": "Running"
      },
      {
        "id": "ee0e8400-e29b-41d4-a716-446655440000",
        "type": "DependentJob",
        "status": "Queued"
      }
    ],
    "edges": [
      {
        "from": "aa0e8400-e29b-41d4-a716-446655440000",
        "to": "cc0e8400-e29b-41d4-a716-446655440000",
        "type": "prerequisite"
      },
      {
        "from": "cc0e8400-e29b-41d4-a716-446655440000",
        "to": "ee0e8400-e29b-41d4-a716-446655440000",
        "type": "dependency"
      }
    ]
  }
}
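
The dependencyGraph is a directed acyclic graph, so a topological sort of its nodes and edges yields a valid execution order with prerequisites first. A sketch using Kahn's algorithm against the node and edge shape above:

from collections import defaultdict, deque

def execution_order(graph):
    """Topologically sort a dependencyGraph (Kahn's algorithm): prerequisites first."""
    indegree = {node["id"]: 0 for node in graph["nodes"]}
    children = defaultdict(list)
    for edge in graph["edges"]:
        children[edge["from"]].append(edge["to"])
        indegree[edge["to"]] += 1
    queue = deque(nid for nid, deg in indegree.items() if deg == 0)
    order = []
    while queue:
        nid = queue.popleft()
        order.append(nid)
        for child in children[nid]:
            indegree[child] -= 1
            if indegree[child] == 0:
                queue.append(child)
    return order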

Example: Complete Job Tracking Workflow

This example demonstrates comprehensive job tracking and monitoring:

// Configuration placeholders: replace with your tenant, project, and token
const tenantId = 'your-tenant-guid';
const projectId = 'your-project-guid';
const token = 'your-auth-token';

// 1. Start tracking a job
const trackJob = async (jobId) => {
  // Get initial job status
  const progress = await getJobProgress(jobId);
  console.log(`Tracking job: ${progress.jobName}`);
  console.log(`Current progress: ${progress.overallProgress.percentage}%`);

  // Subscribe to real-time events
  await subscribeToJobEvents(jobId);

  return progress;
};

// 2. Get detailed job progress
const getJobProgress = async (jobId) => {
  const response = await fetch(`/api/${tenantId}/${projectId}/execution/job/${jobId}/progress`, {
    headers: {
      'Authorization': `Bearer ${token}`
    }
  });

  return await response.json();
};

// 3. Subscribe to real-time job events
const subscribeToJobEvents = async (jobId) => {
  const response = await fetch(`/api/${tenantId}/${projectId}/execution/job/${jobId}/subscribe`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${token}`
    },
    body: JSON.stringify({
      subscriptionType: 'webhook',
      webhookUrl: 'https://your-app.com/webhooks/job-events',
      events: [
        'stageStarted',
        'stageCompleted',
        'progressUpdate',
        'error',
        'warning',
        'jobCompleted'
      ],
      filters: {
        minProgressIncrement: 10,
        includeDebugEvents: false,
        notifyOnErrors: true
      }
    })
  });

  const subscription = await response.json();
  console.log(`Subscribed to job events: ${subscription.subscriptionId}`);

  return subscription;
};

// 4. Get job performance metrics
const getJobMetrics = async (jobId) => {
  const response = await fetch(`/api/${tenantId}/${projectId}/execution/job/${jobId}/metrics?interval=5m&includeHistory=true`, {
    headers: {
      'Authorization': `Bearer ${token}`
    }
  });

  const metrics = await response.json();
  console.log('Performance Metrics:');
  console.log(`  CPU Usage: ${metrics.currentMetrics.resourceUtilization.cpu.usage}%`);
  console.log(`  Memory Usage: ${metrics.currentMetrics.resourceUtilization.memory.used}`);
  console.log(`  Processing Rate: ${metrics.currentMetrics.processing.recordsPerSecond} records/sec`);

  return metrics;
};

// 5. Get job execution logs
const getJobLogs = async (jobId, level = 'INFO') => {
  const response = await fetch(`/api/${tenantId}/${projectId}/execution/job/${jobId}/logs?level=${level}&limit=100`, {
    headers: {
      'Authorization': `Bearer ${token}`
    }
  });

  const logs = await response.json();
  console.log(`Retrieved ${logs.logs.length} log entries`);

  // Display recent important logs
  logs.logs.filter(log => log.level !== 'DEBUG').forEach(log => {
    console.log(`[${log.timestamp}] ${log.level}: ${log.message}`);
  });

  return logs;
};

// 6. Track multiple jobs in a dashboard
const trackMultipleJobs = async (jobIds) => {
  const response = await fetch(`/api/${tenantId}/${projectId}/execution/tracking/batch?jobIds=${jobIds.join(',')}&includeMetrics=true`, {
    headers: {
      'Authorization': `Bearer ${token}`
    }
  });

  const tracking = await response.json();
  console.log(`Tracking ${tracking.jobCount} jobs:`);

  tracking.jobs.forEach(job => {
    console.log(`  ${job.jobName}: ${job.status} (${job.progress}%)`);
  });

  return tracking;
};

// 7. Monitor job timeline
const getJobTimeline = async (jobId) => {
  const response = await fetch(`/api/${tenantId}/${projectId}/execution/job/${jobId}/timeline?includeSubEvents=true&granularity=minutes`, {
    headers: {
      'Authorization': `Bearer ${token}`
    }
  });

  const timeline = await response.json();
  console.log('Job Timeline:');

  timeline.timeline.forEach(event => {
    console.log(`[${event.timestamp}] ${event.eventType}: ${event.description}`);
  });

  return timeline;
};

// Execute comprehensive tracking workflow
const runTrackingWorkflow = async (jobId) => {
  try {
    console.log('Starting comprehensive job tracking...');

    // Track job progress
    const progress = await trackJob(jobId);

    // Get performance metrics
    const metrics = await getJobMetrics(jobId);

    // Get execution logs
    const logs = await getJobLogs(jobId, 'WARN');

    // Get timeline
    const timeline = await getJobTimeline(jobId);

    // Monitor until completion
    const monitoring = setInterval(async () => {
      const currentProgress = await getJobProgress(jobId);

      if (currentProgress.status === 'Completed') {
        console.log('Job completed successfully!');
        clearInterval(monitoring);

        // Get final metrics
        const finalMetrics = await getJobMetrics(jobId);
        console.log(`Final efficiency: ${finalMetrics.performanceTrends.overallEfficiency}`);

      } else if (currentProgress.status === 'Failed') {
        console.log('Job failed!');
        clearInterval(monitoring);

        // Get error logs
        const errorLogs = await getJobLogs(jobId, 'ERROR');
        console.log('Error details:', errorLogs.logs);
      } else {
        console.log(`Progress update: ${currentProgress.overallProgress.percentage}%`);
      }
    }, 30000); // Check every 30 seconds

  } catch (error) {
    console.error('Tracking workflow failed:', error);
  }
};

// Start tracking
runTrackingWorkflow('cc0e8400-e29b-41d4-a716-446655440000');

Python Example

import requests
import time
from datetime import datetime, timedelta, timezone

class JobTracker:
    def __init__(self, base_url, tenant_id, project_id, token):
        self.base_url = base_url
        self.tenant_id = tenant_id
        self.project_id = project_id
        self.headers = {
            'Authorization': f'Bearer {token}',
            'Content-Type': 'application/json'
        }

    def get_job_progress(self, job_id):
        """Get current job progress"""
        url = f"{self.base_url}/api/{self.tenant_id}/{self.project_id}/execution/job/{job_id}/progress"
        response = requests.get(url, headers=self.headers)
        return response.json()

    def get_job_logs(self, job_id, level='INFO', limit=1000):
        """Get job execution logs"""
        url = f"{self.base_url}/api/{self.tenant_id}/{self.project_id}/execution/job/{job_id}/logs"
        params = {'level': level, 'limit': limit}
        response = requests.get(url, params=params, headers=self.headers)
        return response.json()

    def get_job_metrics(self, job_id, interval='5m', include_history=False):
        """Get job performance metrics"""
        url = f"{self.base_url}/api/{self.tenant_id}/{self.project_id}/execution/job/{job_id}/metrics"
        params = {
            'interval': interval,
            'includeHistory': str(include_history).lower()
        }
        response = requests.get(url, params=params, headers=self.headers)
        return response.json()

    def get_job_timeline(self, job_id, include_sub_events=False, granularity='minutes'):
        """Get job execution timeline"""
        url = f"{self.base_url}/api/{self.tenant_id}/{self.project_id}/execution/job/{job_id}/timeline"
        params = {
            'includeSubEvents': str(include_sub_events).lower(),
            'granularity': granularity
        }
        response = requests.get(url, params=params, headers=self.headers)
        return response.json()

    def track_multiple_jobs(self, job_ids, include_metrics=False):
        """Track multiple jobs simultaneously"""
        url = f"{self.base_url}/api/{self.tenant_id}/{self.project_id}/execution/tracking/batch"
        params = {
            'jobIds': ','.join(job_ids),
            'includeMetrics': str(include_metrics).lower()
        }
        response = requests.get(url, params=params, headers=self.headers)
        return response.json()

    def subscribe_to_job_events(self, job_id, webhook_url, events=None):
        """Subscribe to real-time job events"""
        url = f"{self.base_url}/api/{self.tenant_id}/{self.project_id}/execution/job/{job_id}/subscribe"
        payload = {
            'subscriptionType': 'webhook',
            'webhookUrl': webhook_url,
            'events': events or [
                'stageStarted', 'stageCompleted', 'progressUpdate',
                'error', 'warning', 'jobCompleted'
            ],
            'filters': {
                'minProgressIncrement': 10,
                'includeDebugEvents': False,
                'notifyOnErrors': True
            }
        }
        response = requests.post(url, json=payload, headers=self.headers)
        return response.json()

    def get_job_dependencies(self, job_id):
        """Get job dependencies and blocking conditions"""
        url = f"{self.base_url}/api/{self.tenant_id}/{self.project_id}/execution/job/{job_id}/dependencies"
        response = requests.get(url, headers=self.headers)
        return response.json()

    def monitor_job_until_completion(self, job_id, check_interval=30, timeout=3600):
        """Monitor job until completion with detailed tracking"""
        start_time = time.time()

        print(f"Starting monitoring for job {job_id}")

        # Get initial state
        progress = self.get_job_progress(job_id)
        print(f"Job: {progress['jobName']}")
        print(f"Initial progress: {progress['overallProgress']['percentage']}%")

        while time.time() - start_time < timeout:
            try:
                # Get current progress
                progress = self.get_job_progress(job_id)
                status = progress['status']
                percentage = progress['overallProgress']['percentage']

                print(f"Progress: {percentage}% - Status: {status}")

                if status == 'Completed':
                    print("Job completed successfully!")

                    # Get final metrics
                    metrics = self.get_job_metrics(job_id, include_history=True)
                    print(f"Final efficiency: {metrics['performanceTrends']['overallEfficiency']}")
                    print(f"Total duration: {progress['overallProgress']['elapsedTime']}")

                    return progress

                elif status == 'Failed':
                    print("Job failed!")

                    # Get error logs
                    logs = self.get_job_logs(job_id, level='ERROR')
                    print("Error logs:")
                    for log in logs['logs']:
                        print(f"  [{log['timestamp']}] {log['message']}")

                    return progress

                elif status == 'Running':
                    # Get performance metrics
                    metrics = self.get_job_metrics(job_id)
                    cpu_usage = metrics['currentMetrics']['resourceUtilization']['cpu']['usage']
                    memory_used = metrics['currentMetrics']['resourceUtilization']['memory']['used']
                    processing_rate = metrics['currentMetrics']['processing']['recordsPerSecond']

                    print(f"  CPU: {cpu_usage}%, Memory: {memory_used}, Rate: {processing_rate} rec/sec")

                    # Check for warnings raised in the last minute (timestamps are UTC ISO 8601)
                    recent_logs = self.get_job_logs(job_id, level='WARN')
                    recent_warnings = [
                        log for log in recent_logs['logs']
                        if datetime.fromisoformat(log['timestamp'].replace('Z', '+00:00'))
                        > datetime.now(timezone.utc) - timedelta(minutes=1)
                    ]

                    if recent_warnings:
                        for warning in recent_warnings:
                            print(f"  WARNING: {warning['message']}")

                time.sleep(check_interval)

            except Exception as e:
                print(f"Monitoring error: {e}")
                time.sleep(60)

        raise TimeoutError(f"Job {job_id} monitoring timed out after {timeout} seconds")

    def create_tracking_dashboard(self, job_ids):
        """Create a simple tracking dashboard for multiple jobs"""
        print("Job Tracking Dashboard")
        print("=" * 50)

        while True:
            try:
                tracking = self.track_multiple_jobs(job_ids, include_metrics=True)

                print(f"\nUpdate: {tracking['trackingTime']}")
                print(f"System Load: {tracking['systemHealth']['overallLoad']}%")
                print(f"Queue Health: {tracking['systemHealth']['queueHealth']}")
                print()

                for job in tracking['jobs']:
                    status_icon = "Done" if job['status'] == 'Completed' else "Running" if job['status'] == 'Running' else "Waiting"
                    print(f"{status_icon} {job['jobName']}: {job['progress']}% ({job['status']})")

                    if job['status'] == 'Running' and 'resourceUsage' in job:
                        print(f"    CPU: {job['resourceUsage']['cpu']}%, Memory: {job['resourceUsage']['memory']}")
                        print(f"    Rate: {job['resourceUsage']['processingRate']}")

                    if job['status'] == 'Queued':
                        print(f"    Queue position: {job.get('queuePosition', 'Unknown')}")
                        print(f"    Estimated start: {job.get('estimatedStartTime', 'Unknown')}")

                print("\n" + "=" * 50)

                # Check if all jobs are completed
                active_jobs = [job for job in tracking['jobs'] if job['status'] in ['Running', 'Queued']]
                if not active_jobs:
                    print("All jobs completed!")
                    break

                time.sleep(30)

            except KeyboardInterrupt:
                print("\nDashboard stopped by user")
                break
            except Exception as e:
                print(f"Dashboard error: {e}")
                time.sleep(60)

# Usage example
tracker = JobTracker(
    'https://your-mindzie-instance.com',
    'tenant-guid',
    'project-guid',
    'your-auth-token'
)

try:
    # Monitor a single job with comprehensive tracking
    job_id = 'cc0e8400-e29b-41d4-a716-446655440000'

    # Get initial job state
    progress = tracker.get_job_progress(job_id)
    print(f"Tracking job: {progress['jobName']}")
    print(f"Status: {progress['status']}")
    print(f"Progress: {progress['overallProgress']['percentage']}%")

    # Get job dependencies
    dependencies = tracker.get_job_dependencies(job_id)
    if dependencies['dependencies']['prerequisiteJobs']:
        print("Prerequisites:")
        for prereq in dependencies['dependencies']['prerequisiteJobs']:
            print(f"  - {prereq['jobName']}: {prereq['status']}")

    # Monitor until completion
    final_status = tracker.monitor_job_until_completion(job_id)

    # Get final timeline
    timeline = tracker.get_job_timeline(job_id, include_sub_events=True)
    print("\nExecution Timeline:")
    for event in timeline['timeline'][-5:]:  # Last 5 events
        print(f"  [{event['timestamp']}] {event['eventType']}: {event['description']}")

except Exception as e:
    print(f"Error in job tracking: {e}")