From 1b44692a3eccd52631ee6596408b0f6fafa3d44f Mon Sep 17 00:00:00 2001 From: Catherine Lee Date: Fri, 3 Jan 2025 11:21:10 -0800 Subject: [PATCH] tc ghstack-source-id: a0cf54e15981475763f40bb9f3aabbd8d3c90bbb ghstack-comment-id: 2569699515 Pull Request resolved: https://github.com/pytorch/test-infra/pull/6136 --- .../__sql/oss_ci_benchmark_branches.sql | 42 --- .../__sql/oss_ci_benchmark_llms.sql | 95 ------- .../__sql/oss_ci_benchmark_names.sql | 32 --- .../oss_ci_benchmark_branches.lambda.json | 31 --- .../oss_ci_benchmark_llms.lambda.json | 61 ----- .../oss_ci_benchmark_names.lambda.json | 31 --- .../commons/__sql/annotated_flaky_jobs.sql | 31 --- .../commons/__sql/commit_failed_jobs.sql | 40 --- .../commons/__sql/commit_jobs_query.sql | 168 ------------ .../rockset/commons/__sql/commit_query.sql | 8 - .../__sql/disabled_non_flaky_tests.sql | 25 -- .../commons/__sql/disabled_test_labels.sql | 22 -- .../rockset/commons/__sql/disabled_tests.sql | 68 ----- .../commons/__sql/failed_workflow_jobs.sql | 59 ----- .../commons/__sql/failure_samples_query.sql | 35 --- .../commons/__sql/filter_forced_merge_pr.sql | 78 ------ torchci/rockset/commons/__sql/flaky_tests.sql | 94 ------- .../commons/__sql/flaky_tests_across_jobs.sql | 103 -------- .../commons/__sql/flaky_workflows_jobs.sql | 137 ---------- .../commons/__sql/get_relevant_alerts.sql | 29 --- .../commons/__sql/get_workflow_jobs.sql | 11 - torchci/rockset/commons/__sql/hud_query.sql | 99 ------- torchci/rockset/commons/__sql/issue_query.sql | 13 - .../commons/__sql/master_commit_red_jobs.sql | 103 -------- .../commons/__sql/num_commits_master.sql | 9 - torchci/rockset/commons/__sql/pr_commits.sql | 43 --- .../__sql/recent_pr_workflows_query.sql | 83 ------ .../__sql/reverted_prs_with_reason.sql | 37 --- .../__sql/test_insights_latest_runs.sql | 26 -- .../commons/__sql/test_time_per_class.sql | 68 ----- .../test_time_per_class_periodic_jobs.sql | 80 ------ .../commons/__sql/test_time_per_file.sql | 63 ----- .../test_time_per_file_periodic_jobs.sql | 75 ------ .../rockset/commons/__sql/unclassified.sql | 17 -- .../__sql/weekly_force_merge_stats.sql | 218 ---------------- .../commons/annotated_flaky_jobs.lambda.json | 26 -- .../commons/commit_failed_jobs.lambda.json | 11 - .../commons/commit_jobs_query.lambda.json | 16 -- .../rockset/commons/commit_query.lambda.json | 11 - .../disabled_non_flaky_tests.lambda.json | 16 -- .../commons/disabled_test_labels.lambda.json | 16 -- .../commons/disabled_tests.lambda.json | 31 --- .../commons/failed_workflow_jobs.lambda.json | 31 --- .../commons/failure_samples_query.lambda.json | 21 -- .../filter_forced_merge_pr.lambda.json | 21 -- .../commons/flaky_test_query.lambda.json | 26 -- .../rockset/commons/flaky_tests.lambda.json | 26 -- .../flaky_tests_across_jobs.lambda.json | 21 -- .../commons/flaky_workflows_jobs.lambda.json | 41 --- .../commons/get_relevant_alerts.lambda.json | 16 -- .../commons/get_workflow_jobs.lambda.json | 16 -- torchci/rockset/commons/hud_query.lambda.json | 16 -- .../rockset/commons/issue_query.lambda.json | 11 - .../master_commit_red_jobs.lambda.json | 21 -- .../commons/num_commits_master.lambda.json | 16 -- .../rockset/commons/pr_commits.lambda.json | 21 -- .../recent_pr_workflows_query.lambda.json | 21 -- .../reverted_prs_with_reason.lambda.json | 16 -- .../commons/test_time_per_class.lambda.json | 5 - ...t_time_per_class_periodic_jobs.lambda.json | 5 - .../commons/test_time_per_file.lambda.json | 5 - ...st_time_per_file_periodic_jobs.lambda.json | 5 - 
.../rockset/commons/unclassified.lambda.json | 11 - .../weekly_force_merge_stats.lambda.json | 31 --- .../__sql/compilers_benchmark_performance.sql | 151 ----------- ...mpilers_benchmark_performance_branches.sql | 22 -- .../rockset/inductor/__sql/torchao_query.sql | 159 ------------ .../inductor/__sql/torchao_query_branches.sql | 21 -- ...ompilers_benchmark_performance.lambda.json | 71 ----- ...benchmark_performance_branches.lambda.json | 31 --- .../inductor/torchao_query.lambda.json | 71 ----- .../torchao_query_branches.lambda.json | 36 --- .../metrics/__sql/correlation_matrix.sql | 55 ---- .../__sql/disabled_test_historical.sql | 180 ------------- .../metrics/__sql/disabled_test_total.sql | 7 - .../__sql/external_contribution_stats.sql | 25 -- .../metrics/__sql/get_workers_on_period.sql | 16 -- .../metrics/__sql/job_duration_avg.sql | 32 --- .../metrics/__sql/job_duration_percentile.sql | 38 --- .../metrics/__sql/last_branch_push.sql | 13 - .../metrics/__sql/last_successful_jobs.sql | 40 --- .../__sql/last_successful_workflow.sql | 19 -- .../metrics/__sql/lf_rollover_health.sql | 143 ---------- .../metrics/__sql/lf_rollover_percentage.sql | 113 -------- .../metrics/__sql/log_captures_count.sql | 20 -- .../metrics/__sql/master_commit_red.sql | 88 ------- .../metrics/__sql/master_commit_red_avg.sql | 81 ------ .../__sql/master_commit_red_percent.sql | 115 -------- .../master_commit_red_percent_groups.sql | 77 ------ .../rockset/metrics/__sql/master_jobs_red.sql | 54 ---- .../metrics/__sql/master_jobs_red_avg.sql | 47 ---- .../metrics/__sql/number_of_force_pushes.sql | 11 - .../metrics/__sql/queue_times_historical.sql | 23 -- .../__sql/queue_times_historical_pct.sql | 23 -- torchci/rockset/metrics/__sql/queued_jobs.sql | 36 --- .../metrics/__sql/queued_jobs_by_label.sql | 57 ---- torchci/rockset/metrics/__sql/reverts.sql | 14 - .../rockset/metrics/__sql/strict_lag_sec.sql | 34 --- torchci/rockset/metrics/__sql/top_reds.sql | 75 ------ torchci/rockset/metrics/__sql/tts_avg.sql | 32 --- .../metrics/__sql/tts_duration_historical.sql | 32 --- .../tts_duration_historical_percentile.sql | 50 ---- .../rockset/metrics/__sql/tts_percentile.sql | 48 ---- .../metrics/__sql/workflow_duration_avg.sql | 21 -- .../__sql/workflow_duration_percentile.sql | 31 --- .../rockset/metrics/__sql/workflow_load.sql | 33 --- .../metrics/correlation_matrix.lambda.json | 11 - .../disabled_test_historical.lambda.json | 51 ---- .../metrics/disabled_test_total.lambda.json | 11 - .../external_contribution_stats.lambda.json | 21 -- .../metrics/get_workers_on_period.lambda.json | 21 -- .../metrics/job_duration_avg.lambda.json | 21 -- .../job_duration_percentile.lambda.json | 27 -- .../metrics/last_branch_push.lambda.json | 11 - .../metrics/last_successful_jobs.lambda.json | 11 - .../last_successful_workflow.lambda.json | 11 - .../metrics/lf_rollover_health.lambda.json | 16 -- .../lf_rollover_percentage.lambda.json | 16 -- .../metrics/log_captures_count.lambda.json | 16 -- .../metrics/master_commit_red.lambda.json | 26 -- .../metrics/master_commit_red_avg.lambda.json | 21 -- .../master_commit_red_percent.lambda.json | 26 -- ...ster_commit_red_percent_groups.lambda.json | 26 -- .../metrics/master_jobs_red.lambda.json | 26 -- .../metrics/master_jobs_red_avg.lambda.json | 16 -- .../number_of_force_pushes.lambda.json | 16 -- .../queue_times_historical.lambda.json | 26 -- .../queue_times_historical_pct.lambda.json | 31 --- .../rockset/metrics/queued_jobs.lambda.json | 5 - .../metrics/queued_jobs_by_label.lambda.json | 5 - 
torchci/rockset/metrics/reverts.lambda.json | 16 -- .../metrics/strict_lag_sec.lambda.json | 16 -- torchci/rockset/metrics/top_reds.lambda.json | 21 -- torchci/rockset/metrics/tts_avg.lambda.json | 21 -- .../tts_duration_historical.lambda.json | 31 --- ...duration_historical_percentile.lambda.json | 41 --- .../metrics/tts_percentile.lambda.json | 26 -- .../metrics/workflow_duration_avg.lambda.json | 21 -- .../workflow_duration_percentile.lambda.json | 26 -- .../rockset/metrics/workflow_load.lambda.json | 31 --- .../nightlies/__sql/docker_jobs_red.sql | 53 ---- .../__sql/docker_jobs_red_past_day.sql | 14 - .../nightlies/__sql/nightly_jobs_red.sql | 36 --- .../__sql/nightly_jobs_red_by_name.sql | 19 -- .../__sql/nightly_jobs_red_by_platform.sql | 37 --- .../__sql/nightly_jobs_red_past_day.sql | 19 -- .../nightlies/__sql/validation_jobs_red.sql | 55 ---- .../__sql/validation_jobs_red_past_day.sql | 16 -- .../nightlies/nightly_jobs_red.lambda.json | 31 --- .../nightly_jobs_red_by_name.lambda.json | 16 -- .../nightly_jobs_red_by_platform.lambda.json | 16 -- .../nightly_jobs_red_past_day.lambda.json | 11 - .../nightlies/validation_jobs_red.lambda.json | 31 --- .../validation_jobs_red_past_day.lambda.json | 11 - torchci/rockset/prodVersions.json | 105 -------- .../__sql/monthly_contribution_stats.sql | 24 -- .../__sql/num_reverts.sql | 107 -------- .../number_of_force_pushes_historical.sql | 16 -- .../__sql/strict_lag_historical.sql | 19 -- .../__sql/time_to_merge.sql | 38 --- .../__sql/time_to_review.sql | 48 ---- .../__sql/time_to_signal.sql | 39 --- .../__sql/ttrs_percentiles.sql | 245 ------------------ .../monthly_contribution_stats.lambda.json | 21 -- .../num_reverts.lambda.json | 21 -- ...ber_of_force_pushes_historical.lambda.json | 21 -- .../strict_lag_historical.lambda.json | 21 -- .../time_to_merge.lambda.json | 21 -- .../time_to_review.lambda.json | 16 -- .../time_to_signal.lambda.json | 11 - .../ttrs_percentiles.lambda.json | 31 --- .../__sql/torchbench_list_userbenchmarks.sql | 5 - .../torchbench_userbenchmark_list_commits.sql | 13 - ...torchbench_userbenchmark_query_metrics.sql | 3 - ...torchbench_list_userbenchmarks.lambda.json | 5 - ...nch_userbenchmark_list_commits.lambda.json | 11 - ...ch_userbenchmark_query_metrics.lambda.json | 16 -- .../utilization/__sql/runner_utilization.sql | 23 -- .../__sql/runner_utilization_by_activity.sql | 31 --- .../__sql/runner_utilization_by_repo.sql | 24 -- .../runner_utilization.lambda.json | 26 -- ...runner_utilization_by_activity.lambda.json | 36 --- .../runner_utilization_by_repo.lambda.json | 31 --- 183 files changed, 7020 deletions(-) delete mode 100644 torchci/rockset/benchmarks/__sql/oss_ci_benchmark_branches.sql delete mode 100644 torchci/rockset/benchmarks/__sql/oss_ci_benchmark_llms.sql delete mode 100644 torchci/rockset/benchmarks/__sql/oss_ci_benchmark_names.sql delete mode 100644 torchci/rockset/benchmarks/oss_ci_benchmark_branches.lambda.json delete mode 100644 torchci/rockset/benchmarks/oss_ci_benchmark_llms.lambda.json delete mode 100644 torchci/rockset/benchmarks/oss_ci_benchmark_names.lambda.json delete mode 100644 torchci/rockset/commons/__sql/annotated_flaky_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/commit_failed_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/commit_jobs_query.sql delete mode 100644 torchci/rockset/commons/__sql/commit_query.sql delete mode 100644 torchci/rockset/commons/__sql/disabled_non_flaky_tests.sql delete mode 100644 torchci/rockset/commons/__sql/disabled_test_labels.sql 
delete mode 100644 torchci/rockset/commons/__sql/disabled_tests.sql delete mode 100644 torchci/rockset/commons/__sql/failed_workflow_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/failure_samples_query.sql delete mode 100644 torchci/rockset/commons/__sql/filter_forced_merge_pr.sql delete mode 100644 torchci/rockset/commons/__sql/flaky_tests.sql delete mode 100644 torchci/rockset/commons/__sql/flaky_tests_across_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/flaky_workflows_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/get_relevant_alerts.sql delete mode 100644 torchci/rockset/commons/__sql/get_workflow_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/hud_query.sql delete mode 100644 torchci/rockset/commons/__sql/issue_query.sql delete mode 100644 torchci/rockset/commons/__sql/master_commit_red_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/num_commits_master.sql delete mode 100644 torchci/rockset/commons/__sql/pr_commits.sql delete mode 100644 torchci/rockset/commons/__sql/recent_pr_workflows_query.sql delete mode 100644 torchci/rockset/commons/__sql/reverted_prs_with_reason.sql delete mode 100644 torchci/rockset/commons/__sql/test_insights_latest_runs.sql delete mode 100644 torchci/rockset/commons/__sql/test_time_per_class.sql delete mode 100644 torchci/rockset/commons/__sql/test_time_per_class_periodic_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/test_time_per_file.sql delete mode 100644 torchci/rockset/commons/__sql/test_time_per_file_periodic_jobs.sql delete mode 100644 torchci/rockset/commons/__sql/unclassified.sql delete mode 100644 torchci/rockset/commons/__sql/weekly_force_merge_stats.sql delete mode 100644 torchci/rockset/commons/annotated_flaky_jobs.lambda.json delete mode 100644 torchci/rockset/commons/commit_failed_jobs.lambda.json delete mode 100644 torchci/rockset/commons/commit_jobs_query.lambda.json delete mode 100644 torchci/rockset/commons/commit_query.lambda.json delete mode 100644 torchci/rockset/commons/disabled_non_flaky_tests.lambda.json delete mode 100644 torchci/rockset/commons/disabled_test_labels.lambda.json delete mode 100644 torchci/rockset/commons/disabled_tests.lambda.json delete mode 100644 torchci/rockset/commons/failed_workflow_jobs.lambda.json delete mode 100644 torchci/rockset/commons/failure_samples_query.lambda.json delete mode 100644 torchci/rockset/commons/filter_forced_merge_pr.lambda.json delete mode 100644 torchci/rockset/commons/flaky_test_query.lambda.json delete mode 100644 torchci/rockset/commons/flaky_tests.lambda.json delete mode 100644 torchci/rockset/commons/flaky_tests_across_jobs.lambda.json delete mode 100644 torchci/rockset/commons/flaky_workflows_jobs.lambda.json delete mode 100644 torchci/rockset/commons/get_relevant_alerts.lambda.json delete mode 100644 torchci/rockset/commons/get_workflow_jobs.lambda.json delete mode 100644 torchci/rockset/commons/hud_query.lambda.json delete mode 100644 torchci/rockset/commons/issue_query.lambda.json delete mode 100644 torchci/rockset/commons/master_commit_red_jobs.lambda.json delete mode 100644 torchci/rockset/commons/num_commits_master.lambda.json delete mode 100644 torchci/rockset/commons/pr_commits.lambda.json delete mode 100644 torchci/rockset/commons/recent_pr_workflows_query.lambda.json delete mode 100644 torchci/rockset/commons/reverted_prs_with_reason.lambda.json delete mode 100644 torchci/rockset/commons/test_time_per_class.lambda.json delete mode 100644 
torchci/rockset/commons/test_time_per_class_periodic_jobs.lambda.json delete mode 100644 torchci/rockset/commons/test_time_per_file.lambda.json delete mode 100644 torchci/rockset/commons/test_time_per_file_periodic_jobs.lambda.json delete mode 100644 torchci/rockset/commons/unclassified.lambda.json delete mode 100644 torchci/rockset/commons/weekly_force_merge_stats.lambda.json delete mode 100644 torchci/rockset/inductor/__sql/compilers_benchmark_performance.sql delete mode 100644 torchci/rockset/inductor/__sql/compilers_benchmark_performance_branches.sql delete mode 100644 torchci/rockset/inductor/__sql/torchao_query.sql delete mode 100644 torchci/rockset/inductor/__sql/torchao_query_branches.sql delete mode 100644 torchci/rockset/inductor/compilers_benchmark_performance.lambda.json delete mode 100644 torchci/rockset/inductor/compilers_benchmark_performance_branches.lambda.json delete mode 100644 torchci/rockset/inductor/torchao_query.lambda.json delete mode 100644 torchci/rockset/inductor/torchao_query_branches.lambda.json delete mode 100644 torchci/rockset/metrics/__sql/correlation_matrix.sql delete mode 100644 torchci/rockset/metrics/__sql/disabled_test_historical.sql delete mode 100644 torchci/rockset/metrics/__sql/disabled_test_total.sql delete mode 100644 torchci/rockset/metrics/__sql/external_contribution_stats.sql delete mode 100644 torchci/rockset/metrics/__sql/get_workers_on_period.sql delete mode 100644 torchci/rockset/metrics/__sql/job_duration_avg.sql delete mode 100644 torchci/rockset/metrics/__sql/job_duration_percentile.sql delete mode 100644 torchci/rockset/metrics/__sql/last_branch_push.sql delete mode 100644 torchci/rockset/metrics/__sql/last_successful_jobs.sql delete mode 100644 torchci/rockset/metrics/__sql/last_successful_workflow.sql delete mode 100644 torchci/rockset/metrics/__sql/lf_rollover_health.sql delete mode 100644 torchci/rockset/metrics/__sql/lf_rollover_percentage.sql delete mode 100644 torchci/rockset/metrics/__sql/log_captures_count.sql delete mode 100644 torchci/rockset/metrics/__sql/master_commit_red.sql delete mode 100644 torchci/rockset/metrics/__sql/master_commit_red_avg.sql delete mode 100644 torchci/rockset/metrics/__sql/master_commit_red_percent.sql delete mode 100644 torchci/rockset/metrics/__sql/master_commit_red_percent_groups.sql delete mode 100644 torchci/rockset/metrics/__sql/master_jobs_red.sql delete mode 100644 torchci/rockset/metrics/__sql/master_jobs_red_avg.sql delete mode 100644 torchci/rockset/metrics/__sql/number_of_force_pushes.sql delete mode 100644 torchci/rockset/metrics/__sql/queue_times_historical.sql delete mode 100644 torchci/rockset/metrics/__sql/queue_times_historical_pct.sql delete mode 100644 torchci/rockset/metrics/__sql/queued_jobs.sql delete mode 100644 torchci/rockset/metrics/__sql/queued_jobs_by_label.sql delete mode 100644 torchci/rockset/metrics/__sql/reverts.sql delete mode 100644 torchci/rockset/metrics/__sql/strict_lag_sec.sql delete mode 100644 torchci/rockset/metrics/__sql/top_reds.sql delete mode 100644 torchci/rockset/metrics/__sql/tts_avg.sql delete mode 100644 torchci/rockset/metrics/__sql/tts_duration_historical.sql delete mode 100644 torchci/rockset/metrics/__sql/tts_duration_historical_percentile.sql delete mode 100644 torchci/rockset/metrics/__sql/tts_percentile.sql delete mode 100644 torchci/rockset/metrics/__sql/workflow_duration_avg.sql delete mode 100644 torchci/rockset/metrics/__sql/workflow_duration_percentile.sql delete mode 100644 torchci/rockset/metrics/__sql/workflow_load.sql delete mode 
100644 torchci/rockset/metrics/correlation_matrix.lambda.json delete mode 100644 torchci/rockset/metrics/disabled_test_historical.lambda.json delete mode 100644 torchci/rockset/metrics/disabled_test_total.lambda.json delete mode 100644 torchci/rockset/metrics/external_contribution_stats.lambda.json delete mode 100644 torchci/rockset/metrics/get_workers_on_period.lambda.json delete mode 100644 torchci/rockset/metrics/job_duration_avg.lambda.json delete mode 100644 torchci/rockset/metrics/job_duration_percentile.lambda.json delete mode 100644 torchci/rockset/metrics/last_branch_push.lambda.json delete mode 100644 torchci/rockset/metrics/last_successful_jobs.lambda.json delete mode 100644 torchci/rockset/metrics/last_successful_workflow.lambda.json delete mode 100644 torchci/rockset/metrics/lf_rollover_health.lambda.json delete mode 100644 torchci/rockset/metrics/lf_rollover_percentage.lambda.json delete mode 100644 torchci/rockset/metrics/log_captures_count.lambda.json delete mode 100644 torchci/rockset/metrics/master_commit_red.lambda.json delete mode 100644 torchci/rockset/metrics/master_commit_red_avg.lambda.json delete mode 100644 torchci/rockset/metrics/master_commit_red_percent.lambda.json delete mode 100644 torchci/rockset/metrics/master_commit_red_percent_groups.lambda.json delete mode 100644 torchci/rockset/metrics/master_jobs_red.lambda.json delete mode 100644 torchci/rockset/metrics/master_jobs_red_avg.lambda.json delete mode 100644 torchci/rockset/metrics/number_of_force_pushes.lambda.json delete mode 100644 torchci/rockset/metrics/queue_times_historical.lambda.json delete mode 100644 torchci/rockset/metrics/queue_times_historical_pct.lambda.json delete mode 100644 torchci/rockset/metrics/queued_jobs.lambda.json delete mode 100644 torchci/rockset/metrics/queued_jobs_by_label.lambda.json delete mode 100644 torchci/rockset/metrics/reverts.lambda.json delete mode 100644 torchci/rockset/metrics/strict_lag_sec.lambda.json delete mode 100644 torchci/rockset/metrics/top_reds.lambda.json delete mode 100644 torchci/rockset/metrics/tts_avg.lambda.json delete mode 100644 torchci/rockset/metrics/tts_duration_historical.lambda.json delete mode 100644 torchci/rockset/metrics/tts_duration_historical_percentile.lambda.json delete mode 100644 torchci/rockset/metrics/tts_percentile.lambda.json delete mode 100644 torchci/rockset/metrics/workflow_duration_avg.lambda.json delete mode 100644 torchci/rockset/metrics/workflow_duration_percentile.lambda.json delete mode 100644 torchci/rockset/metrics/workflow_load.lambda.json delete mode 100644 torchci/rockset/nightlies/__sql/docker_jobs_red.sql delete mode 100644 torchci/rockset/nightlies/__sql/docker_jobs_red_past_day.sql delete mode 100644 torchci/rockset/nightlies/__sql/nightly_jobs_red.sql delete mode 100644 torchci/rockset/nightlies/__sql/nightly_jobs_red_by_name.sql delete mode 100644 torchci/rockset/nightlies/__sql/nightly_jobs_red_by_platform.sql delete mode 100644 torchci/rockset/nightlies/__sql/nightly_jobs_red_past_day.sql delete mode 100644 torchci/rockset/nightlies/__sql/validation_jobs_red.sql delete mode 100644 torchci/rockset/nightlies/__sql/validation_jobs_red_past_day.sql delete mode 100644 torchci/rockset/nightlies/nightly_jobs_red.lambda.json delete mode 100644 torchci/rockset/nightlies/nightly_jobs_red_by_name.lambda.json delete mode 100644 torchci/rockset/nightlies/nightly_jobs_red_by_platform.lambda.json delete mode 100644 torchci/rockset/nightlies/nightly_jobs_red_past_day.lambda.json delete mode 100644 
torchci/rockset/nightlies/validation_jobs_red.lambda.json delete mode 100644 torchci/rockset/nightlies/validation_jobs_red_past_day.lambda.json delete mode 100644 torchci/rockset/prodVersions.json delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/__sql/monthly_contribution_stats.sql delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/__sql/num_reverts.sql delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/__sql/number_of_force_pushes_historical.sql delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/__sql/strict_lag_historical.sql delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_merge.sql delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_review.sql delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_signal.sql delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/__sql/ttrs_percentiles.sql delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/monthly_contribution_stats.lambda.json delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/num_reverts.lambda.json delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/number_of_force_pushes_historical.lambda.json delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/strict_lag_historical.lambda.json delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/time_to_merge.lambda.json delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/time_to_review.lambda.json delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/time_to_signal.lambda.json delete mode 100644 torchci/rockset/pytorch_dev_infra_kpis/ttrs_percentiles.lambda.json delete mode 100644 torchci/rockset/torchbench/__sql/torchbench_list_userbenchmarks.sql delete mode 100644 torchci/rockset/torchbench/__sql/torchbench_userbenchmark_list_commits.sql delete mode 100644 torchci/rockset/torchbench/__sql/torchbench_userbenchmark_query_metrics.sql delete mode 100644 torchci/rockset/torchbench/torchbench_list_userbenchmarks.lambda.json delete mode 100644 torchci/rockset/torchbench/torchbench_userbenchmark_list_commits.lambda.json delete mode 100644 torchci/rockset/torchbench/torchbench_userbenchmark_query_metrics.lambda.json delete mode 100644 torchci/rockset/utilization/__sql/runner_utilization.sql delete mode 100644 torchci/rockset/utilization/__sql/runner_utilization_by_activity.sql delete mode 100644 torchci/rockset/utilization/__sql/runner_utilization_by_repo.sql delete mode 100644 torchci/rockset/utilization/runner_utilization.lambda.json delete mode 100644 torchci/rockset/utilization/runner_utilization_by_activity.lambda.json delete mode 100644 torchci/rockset/utilization/runner_utilization_by_repo.lambda.json diff --git a/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_branches.sql b/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_branches.sql deleted file mode 100644 index 4303526c6a..0000000000 --- a/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_branches.sql +++ /dev/null @@ -1,42 +0,0 @@ ---- This query is used to get the list of branches and commits used by different ---- OSS CI benchmark experiments. 
This powers HUD benchmarks dashboards -SELECT - DISTINCT w.head_branch, - w.head_sha, - w.id, - FORMAT_ISO8601( - DATE_TRUNC('day', TIMESTAMP_MILLIS(o.timestamp)) - ) AS event_time, - o.filename -FROM - benchmarks.oss_ci_benchmark_v2 o - LEFT JOIN commons.workflow_run w ON o.workflow_id = w.id -WHERE - TIMESTAMP_MILLIS(o.timestamp) >= PARSE_DATETIME_ISO8601(: startTime) - AND TIMESTAMP_MILLIS(o.timestamp) < PARSE_DATETIME_ISO8601(: stopTime) - AND ( - ARRAY_CONTAINS( - SPLIT(: filenames, ','), - o.filename - ) - OR : filenames = '' - ) - -- NB: DEVICE (ARCH) is the display format used by HUD when grouping together these two fields - AND ( - FORMAT( - '{} ({})', - o.device, - IF( - o.arch IS NULL, 'NVIDIA A100-SXM4-40GB', - o.arch - ) - ) = : deviceArch - OR : deviceArch = '' - ) - AND o.metric IS NOT NULL - AND w.html_url LIKE CONCAT('%', : repo, '%') - AND o.dtype IS NOT NULL - AND o.device IS NOT NULL -ORDER BY - w.head_branch, - event_time DESC \ No newline at end of file diff --git a/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_llms.sql b/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_llms.sql deleted file mode 100644 index 492aee9e4b..0000000000 --- a/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_llms.sql +++ /dev/null @@ -1,95 +0,0 @@ ---- This query is used to get the LLMs benchmark results from different experiments. It ---- queries the TPS and memory bandwidth for each model / quantization combos. This powers ---- the LLMs benchmark dashboard -SELECT - DISTINCT o.workflow_id, - -- As the JSON response is pretty big, only return the field if it's needed - IF(: getJobId, o.job_id, NULL) AS job_id, - o.name, - o.metric, - IF( - o.actual IS NOT NULL, - CAST(o.actual AS FLOAT), - 0.0 - ) AS actual, - IF( - o.target IS NOT NULL, - CAST(o.target AS FLOAT), - 0.0 - ) AS target, - FORMAT_ISO8601( - DATE_TRUNC( - : granularity, - TIMESTAMP_MILLIS(o.timestamp) - ) - ) AS granularity_bucket, - o.dtype, - o.device, - -- NB: Default to NVIDIA A100-SXM4-40GB for old records without arch column - IF( - o.arch IS NULL, 'NVIDIA A100-SXM4-40GB', - o.arch - ) as arch, -FROM - benchmarks.oss_ci_benchmark_v2 o - LEFT JOIN commons.workflow_run w ON o.workflow_id = w.id -WHERE - TIMESTAMP_MILLIS(o.timestamp) >= PARSE_DATETIME_ISO8601(: startTime) - AND TIMESTAMP_MILLIS(o.timestamp) < PARSE_DATETIME_ISO8601(: stopTime) - AND ( - ARRAY_CONTAINS( - SPLIT(: branches, ','), - w.head_branch - ) - OR : branches = '' - ) - AND ( - ARRAY_CONTAINS( - SPLIT(: commits, ','), - w.head_sha - ) - OR : commits = '' - ) - AND ( - ARRAY_CONTAINS( - SPLIT(: filenames, ','), - o.filename - ) - OR : filenames = '' - ) - AND ( - ARRAY_CONTAINS( - SPLIT(: names, ','), - o.name - ) - OR : names = '' - ) - -- NB: DEVICE (ARCH) is the display format used by HUD when grouping together these two fields - AND ( - FORMAT( - '{} ({})', - o.device, - IF( - o.arch IS NULL, 'NVIDIA A100-SXM4-40GB', - o.arch - ) - ) = : deviceArch - OR : deviceArch = '' - ) - AND ( - ARRAY_CONTAINS( - SPLIT(: dtypes, ','), - o.dtype - ) - OR : dtypes = '' - ) - AND o.metric IS NOT NULL - AND o.dtype IS NOT NULL - AND o.device IS NOT NULL - AND w.html_url LIKE CONCAT('%', : repo, '%') -ORDER BY - granularity_bucket DESC, - workflow_id DESC, - name, - dtype, - device \ No newline at end of file diff --git a/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_names.sql b/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_names.sql deleted file mode 100644 index 27cfb39d1c..0000000000 --- a/torchci/rockset/benchmarks/__sql/oss_ci_benchmark_names.sql 
+++ /dev/null @@ -1,32 +0,0 @@ ---- This query is used by HUD benchmarks dashboards to get the list of experiment names -SELECT DISTINCT - o.filename, - o.name, - o.metric, - o.dtype, - o.device, - -- NB: Default to NVIDIA A100-SXM4-40GB for old records without arch column - IF(o.arch IS NULL, 'NVIDIA A100-SXM4-40GB', o.arch) as arch, -FROM - benchmarks.oss_ci_benchmark_v2 o - LEFT JOIN commons.workflow_run w ON o.workflow_id = w.id -WHERE - TIMESTAMP_MILLIS(o.timestamp) >= PARSE_DATETIME_ISO8601(: startTime) - AND TIMESTAMP_MILLIS(o.timestamp) < PARSE_DATETIME_ISO8601(: stopTime) - AND ( - ARRAY_CONTAINS( - SPLIT(: filenames, ','), - o.filename - ) - OR : filenames = '' - ) - AND o.metric IS NOT NULL - AND w.html_url LIKE CONCAT('%', : repo, '%') - AND o.dtype IS NOT NULL - AND o.device IS NOT NULL -ORDER BY - o.filename, - o.name, - o.metric, - o.dtype, - o.device \ No newline at end of file diff --git a/torchci/rockset/benchmarks/oss_ci_benchmark_branches.lambda.json b/torchci/rockset/benchmarks/oss_ci_benchmark_branches.lambda.json deleted file mode 100644 index 3959842839..0000000000 --- a/torchci/rockset/benchmarks/oss_ci_benchmark_branches.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/oss_ci_benchmark_branches.sql", - "default_parameters": [ - { - "name": "deviceArch", - "type": "string", - "value": "" - }, - { - "name": "filenames", - "type": "string", - "value": "" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "startTime", - "type": "string", - "value": "2024-05-01T00:00:00.00Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2024-08-01T00:00:00.00Z" - } - ], - "description": "Query branches and commits from OSS CI benchmarks" -} \ No newline at end of file diff --git a/torchci/rockset/benchmarks/oss_ci_benchmark_llms.lambda.json b/torchci/rockset/benchmarks/oss_ci_benchmark_llms.lambda.json deleted file mode 100644 index c86049c4e5..0000000000 --- a/torchci/rockset/benchmarks/oss_ci_benchmark_llms.lambda.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "sql_path": "__sql/oss_ci_benchmark_llms.sql", - "default_parameters": [ - { - "name": "branches", - "type": "string", - "value": "main" - }, - { - "name": "commits", - "type": "string", - "value": "" - }, - { - "name": "deviceArch", - "type": "string", - "value": "" - }, - { - "name": "dtypes", - "type": "string", - "value": "" - }, - { - "name": "filenames", - "type": "string", - "value": "" - }, - { - "name": "getJobId", - "type": "bool", - "value": "false" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "names", - "type": "string", - "value": "" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "startTime", - "type": "string", - "value": "2024-05-01T00:00:00.00Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2024-08-01T00:00:00.00Z" - } - ], - "description": "The query to power LLMs benchmark dashboard" -} \ No newline at end of file diff --git a/torchci/rockset/benchmarks/oss_ci_benchmark_names.lambda.json b/torchci/rockset/benchmarks/oss_ci_benchmark_names.lambda.json deleted file mode 100644 index 5d516d4bd4..0000000000 --- a/torchci/rockset/benchmarks/oss_ci_benchmark_names.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/oss_ci_benchmark_names.sql", - "default_parameters": [ - { - "name": "filenames", - "type": "string", - "value": "" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": 
"repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "startTime", - "type": "string", - "value": "2024-05-01T00:00:00.00Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2024-08-01T00:00:00.00Z" - } - ], - "description": "Query experiment names from OSS CI benchmarks" -} \ No newline at end of file diff --git a/torchci/rockset/commons/__sql/annotated_flaky_jobs.sql b/torchci/rockset/commons/__sql/annotated_flaky_jobs.sql deleted file mode 100644 index 3a1852eda8..0000000000 --- a/torchci/rockset/commons/__sql/annotated_flaky_jobs.sql +++ /dev/null @@ -1,31 +0,0 @@ -select - job.head_sha as sha, - CONCAT(w.name, ' / ', job.name) as jobName, - job.id, - job.conclusion, - job.html_url as htmlUrl, - CONCAT( - 'https://ossci-raw-job-status.s3.amazonaws.com/log/', - CAST(job.id as string) - ) as logUrl, - DATE_DIFF( - 'SECOND', - PARSE_TIMESTAMP_ISO8601(job.started_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) as durationS, - w.repository.full_name as repo, - ARRAY_CREATE(job.torchci_classification.line) as failureLines, - job.torchci_classification.captures as failureCaptures, - ARRAY_CREATE(job.torchci_classification.line_num) as failureLineNumbers, -from - commons.job_annotation a - join commons.workflow_job job on job.id = a.jobID - join commons.workflow_run w on w.id = job.run_id - and w.head_repository.full_name = a.repo and a.repo = :repo -where - a.annotation != 'BROKEN_TRUNK' - and w.head_branch = :branch - and job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - and job._event_time < PARSE_DATETIME_ISO8601(:stopTime) -order by - job._event_time diff --git a/torchci/rockset/commons/__sql/commit_failed_jobs.sql b/torchci/rockset/commons/__sql/commit_failed_jobs.sql deleted file mode 100644 index 023a05e0bf..0000000000 --- a/torchci/rockset/commons/__sql/commit_failed_jobs.sql +++ /dev/null @@ -1,40 +0,0 @@ --- This query is used by Dr.CI to get all the failed jobs from the base commit. They can then be --- used to decide if a failure is due to broken trunk -with relevant_pushes as ( - select - p.head_commit.timestamp, - p.after - from commons.push p - where - ARRAY_CONTAINS( - SPLIT(:shas, ','), p.after - ) -) -SELECT - j.id, - j.name AS jobName, - CONCAT(w.name, ' / ', j.name) AS name, - j.runner_name AS runnerName, - w.head_commit.author.email as authorEmail, - j.conclusion, - j.completed_at, - j.html_url, - j.head_sha, - p.timestamp AS head_sha_timestamp, - j.head_branch, - j.torchci_classification.captures AS failure_captures, - IF(j.torchci_classification.line IS NULL, null, ARRAY_CREATE(j.torchci_classification.line)) AS failure_lines, - j.torchci_classification.context AS failure_context, - j._event_time AS time, -FROM - commons.workflow_run w - JOIN commons.workflow_job j ON w.id = j.run_id HINT(join_broadcast = true) - -- Do a left join here because the push table won't have any information about - -- commits from forked repo - LEFT JOIN relevant_pushes p ON p.after = j.head_sha HINT(join_strategy = lookup) -WHERE - ARRAY_CONTAINS( - SPLIT(: shas, ','), - j.head_sha - ) - AND j.conclusion IN ('failure', 'cancelled') diff --git a/torchci/rockset/commons/__sql/commit_jobs_query.sql b/torchci/rockset/commons/__sql/commit_jobs_query.sql deleted file mode 100644 index 280e849ecb..0000000000 --- a/torchci/rockset/commons/__sql/commit_jobs_query.sql +++ /dev/null @@ -1,168 +0,0 @@ --- This query is used by HUD commit and pull request pages to get all jobs belong --- to specific commit hash. They can then be displayed on those pages. 
-WITH - job AS ( - SELECT - job._event_time AS time, - workflow.head_sha AS sha, - job.name AS job_name, - workflow.name AS workflow_name, - job.id, - workflow.id AS workflow_id, - workflow.artifacts_url AS github_artifact_url, - job.conclusion, - job.html_url, - IF( -: repo = 'pytorch/pytorch', - CONCAT( - 'https://ossci-raw-job-status.s3.amazonaws.com/log/', - CAST(job.id AS string) - ), - CONCAT( - 'https://ossci-raw-job-status.s3.amazonaws.com/log/', -: repo, - '/', - CAST(job.id AS string) - ) - ) AS log_url, - DATE_DIFF( - 'SECOND', - job._event_time, - PARSE_TIMESTAMP_ISO8601(job.started_at) - ) AS queue_time_s, - DATE_DIFF( - 'SECOND', - PARSE_TIMESTAMP_ISO8601(job.started_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) AS duration_s, - job.torchci_classification.line, - job.torchci_classification.captures, - job.torchci_classification.line_num, - job.torchci_classification.context, - job.runner_name AS runner_name, - workflow.head_commit.author.email AS authorEmail, - FROM - workflow_job job - INNER JOIN workflow_run workflow ON workflow.id = job.run_id - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND workflow.event != 'repository_dispatch' -- Filter out repository_dispatch-triggered jobs, which have nothing to do with the SHA - AND workflow.head_sha =: sha - AND job.head_sha =: sha - AND workflow.repository.full_name =: repo - UNION - -- Handle CircleCI - -- IMPORTANT: this needs to have the same order AS the query above - SELECT - job._event_time AS time, - job.pipeline.vcs.revision AS sha, - -- Swap workflow and job name for consistency with GHA naming style. - job.workflow.name AS job_name, - job.job.name AS workflow_name, - job.job.number AS id, - null AS workflow_id, - null AS github_artifact_id, - CASE - WHEN job.job.status = 'failed' THEN 'failure' - WHEN job.job.status = 'canceled' THEN 'cancelled' - ELSE job.job.status - END AS conclusion, - -- circleci doesn't provide a url, piece one together out of the info we have - CONCAT( - 'https://app.circleci.com/pipelines/github/', -: repo, - '/', - CAST(job.pipeline.number AS string), - '/workflows/', - job.workflow.id, - '/jobs/', - CAST(job.job.number AS string) - ) AS html_url, - -- logs aren't downloaded currently, just reuse html_url - html_url AS log_url, - null AS queue_time_s, - -- for circle ci, the event time comes after the end time, so it's not reliable for queueing - DATE_DIFF( - 'SECOND', - PARSE_TIMESTAMP_ISO8601(job.job.started_at), - PARSE_TIMESTAMP_ISO8601(job.job.stopped_at) - ) AS duration_s, - -- Classifications not yet supported - null, - null, - null, - null, - -- Don't care about runner name from CircleCI - null AS runner_name, - null AS authorEmail, - FROM - circleci.job job - WHERE - job.pipeline.vcs.revision =: sha - AND CONCAT(job.organization.name, '/', job.project.name) =: repo - UNION - SELECT - workflow._event_time AS time, - workflow.head_sha AS sha, - workflow.name AS job_name, - 'Workflow Startup Failure' AS workflow_name, - workflow.id, - null AS workflow_id, - workflow.artifacts_url AS github_artifact_url, - IF( - workflow.conclusion IS NULL and workflow.completed_at IS NULL and workflow.status = 'queued', - 'failure', - workflow.conclusion - ) as conclusion, - workflow.html_url, - null AS log_url, - DATE_DIFF( - 'SECOND', - workflow._event_time, - PARSE_TIMESTAMP_ISO8601(workflow.run_started_at) - ) AS queue_time_s, - null AS
duration_s, - null as line, - null as captures, - null as line_num, - null as context, - null AS runner_name, - workflow.head_commit.author.email AS authorEmail, - FROM - workflow_run workflow - WHERE - workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND workflow.event != 'repository_dispatch' -- Filter out repository_dispatch-triggered jobs, which have nothing to do with the SHA - AND workflow.head_sha =: sha - AND workflow.repository.full_name =: repo - ) -SELECT - sha, - workflow_name AS workflowName, - job_name AS jobName, - CONCAT(workflow_name, ' / ', job_name) AS name, - CAST(id AS string) AS id, - CAST(workflow_id AS string) AS workflowId, - github_artifact_url AS githubArtifactUrl, - CASE - WHEN conclusion IS NULL THEN 'pending' - ELSE conclusion - END AS conclusion, - html_url AS htmlUrl, - log_url AS logUrl, - duration_s AS durationS, - queue_time_s AS queueTimeS, - ARRAY_CREATE(line) AS failureLines, - ARRAY_CREATE(line_num) AS failureLineNumbers, - captures AS failureCaptures, - context AS failureContext, - runner_name AS runnerName, - authorEmail, - time, -FROM - job -ORDER BY - name, - time DESC diff --git a/torchci/rockset/commons/__sql/commit_query.sql b/torchci/rockset/commons/__sql/commit_query.sql deleted file mode 100644 index 5cdc940712..0000000000 --- a/torchci/rockset/commons/__sql/commit_query.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - workflow.head_commit as commit -FROM - workflow_run workflow -WHERE - workflow.head_commit.id = :sha -LIMIT - 1 diff --git a/torchci/rockset/commons/__sql/disabled_non_flaky_tests.sql b/torchci/rockset/commons/__sql/disabled_non_flaky_tests.sql deleted file mode 100644 index 0d19b67572..0000000000 --- a/torchci/rockset/commons/__sql/disabled_non_flaky_tests.sql +++ /dev/null @@ -1,25 +0,0 @@ -WITH aggregated_weekly_data AS ( - SELECT - name, - classname, - filename, - SUM(CASE WHEN flaky THEN 1 ELSE 0 END) > 0 AS flaky, - SUM(num_green) AS num_green, - SUM(num_red) as num_red - FROM - commons.rerun_disabled_tests - WHERE - _event_time > CURRENT_TIMESTAMP() - INTERVAL 7 DAY - GROUP BY - name, - classname, - filename -) -SELECT - * -FROM - aggregated_weekly_data -WHERE - flaky = false - AND num_green >= :min_num_green - AND num_red <= :max_num_red diff --git a/torchci/rockset/commons/__sql/disabled_test_labels.sql b/torchci/rockset/commons/__sql/disabled_test_labels.sql deleted file mode 100644 index 2313525472..0000000000 --- a/torchci/rockset/commons/__sql/disabled_test_labels.sql +++ /dev/null @@ -1,22 +0,0 @@ ---- This query returns the list of DISABLED test labels. This powers ---- the disabled tests dashboard label dropdown list -SELECT - DISTINCT labels.value.name AS label, -FROM - commons.issues i, - UNNEST (i.labels AS value) AS labels -WHERE - ( - ARRAY_CONTAINS( - SPLIT(: states, ','), - i.state - ) - OR : states = '' - ) - AND i.repository_url = CONCAT( - 'https://api.github.com/repos/', - : repo - ) - AND i.title LIKE '%DISABLED%' -ORDER BY - label ASC \ No newline at end of file diff --git a/torchci/rockset/commons/__sql/disabled_tests.sql b/torchci/rockset/commons/__sql/disabled_tests.sql deleted file mode 100644 index 44f733ea80..0000000000 --- a/torchci/rockset/commons/__sql/disabled_tests.sql +++ /dev/null @@ -1,68 +0,0 @@ ---- This query returns the list of DISABLED tests together with their labels. This powers ---- the disabled tests dashboard, attributing them to their owners.
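-- Disabled tests are tracked as GitHub issues whose titles contain DISABLED,
-- e.g. (hypothetical title): DISABLED test_conv2d (__main__.TestNN).
-- An issue body may carry an optional 'Platforms:' line narrowing the skip;
-- the :platform filter below matches against it, and issues with no
-- 'Platforms:' line count for every platform.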
-WITH issues_with_labels AS ( - SELECT - i.number, - i.title, - i.body, - ARRAY_AGG(labels.value.name) AS labels, - i.assignee.login AS assignee, - i.html_url, - i.updated_at, - FROM - commons.issues i, - UNNEST (i.labels AS value) AS labels - WHERE - ( - i.state = : state - OR : state = '' - ) - AND i.repository_url = CONCAT( - 'https://api.github.com/repos/', - : repo - ) - AND i.title LIKE '%DISABLED%' - AND ( - : platform = '' - OR i.body LIKE CONCAT('%', : platform, '%') - OR (NOT i.body LIKE '%Platforms: %') - ) - GROUP BY - i.number, - i.title, - i.body, - i.assignee.login, - i.html_url, - i.updated_at -) -SELECT - * -FROM - issues_with_labels -WHERE - ARRAY_CONTAINS( - issues_with_labels.labels, 'skipped' - ) - AND ( - : label = '' - OR ARRAY_CONTAINS( - issues_with_labels.labels, : label - ) - ) - AND ( - : triaged = '' - OR ( - : triaged = 'yes' - AND ARRAY_CONTAINS( - issues_with_labels.labels, 'triaged' - ) - ) - OR ( - : triaged = 'no' - AND NOT ARRAY_CONTAINS( - issues_with_labels.labels, 'triaged' - ) - ) - ) -ORDER BY - issues_with_labels.updated_at DESC \ No newline at end of file diff --git a/torchci/rockset/commons/__sql/failed_workflow_jobs.sql b/torchci/rockset/commons/__sql/failed_workflow_jobs.sql deleted file mode 100644 index d89273d993..0000000000 --- a/torchci/rockset/commons/__sql/failed_workflow_jobs.sql +++ /dev/null @@ -1,59 +0,0 @@ -WITH repeats AS ( - SELECT - array_agg(j.id) AS ids - FROM - workflow_run w - JOIN workflow_job j ON w.id = j.run_id HINT(join_strategy = lookup) - WHERE - j._event_time >= PARSE_DATETIME_ISO8601(: startTime) - AND j._event_time < PARSE_DATETIME_ISO8601(: stopTime) - AND w.head_repository.full_name = : repo - AND w.head_branch = : branch - AND w.event != 'workflow_run' - AND w.event != 'repository_dispatch' - GROUP BY - j.head_sha, - j.name, - w.name - HAVING - count(*) > : count - AND bool_or( - j.conclusion IN ( - 'failure', 'cancelled', 'time_out' - ) - ) -), -ids AS ( - SELECT - ids.id - FROM - repeats, - UNNEST(repeats.ids AS id) AS ids -) -SELECT - job.head_sha AS sha, - CONCAT(w.name, ' / ', job.name) AS jobName, - job.id, - job.conclusion, - job.html_url AS htmlUrl, - CONCAT( - 'https://ossci-raw-job-status.s3.amazonaws.com/log/', - CAST(job.id AS string) - ) AS logUrl, - DATE_DIFF( - 'SECOND', - PARSE_TIMESTAMP_ISO8601(job.started_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) AS durationS, - w.repository.full_name AS repo, - ARRAY_CREATE(job.torchci_classification.line) AS failureLines, - job.torchci_classification.captures AS failureCaptures, - ARRAY_CREATE(job.torchci_classification.line_num) AS failureLineNumbers, -FROM - ids - JOIN workflow_job job on job.id = ids.id - INNER JOIN workflow_run w on w.id = job.run_id -WHERE - job.conclusion IN ( - 'failure', 'cancelled', 'time_out' - ) \ No newline at end of file diff --git a/torchci/rockset/commons/__sql/failure_samples_query.sql b/torchci/rockset/commons/__sql/failure_samples_query.sql deleted file mode 100644 index 01dae553e9..0000000000 --- a/torchci/rockset/commons/__sql/failure_samples_query.sql +++ /dev/null @@ -1,35 +0,0 @@ -SELECT - job._event_time AS time, - w.name AS workflowName, - job.name AS jobName, - CONCAT(w.name, ' / ', job.name) AS name, - w.head_sha AS sha, - job.id AS id, - w.head_branch as branch, - CASE - WHEN job.conclusion IS NULL THEN 'pending' - ELSE job.conclusion - END AS conclusion, - job.html_url AS htmlUrl, - CONCAT( - 'https://ossci-raw-job-status.s3.amazonaws.com/log/', - CAST(job.id AS string) - ) AS logUrl, - DATE_DIFF( - 
'SECOND', - PARSE_TIMESTAMP_ISO8601(job.started_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) AS durationS, - ARRAY_CREATE(job.torchci_classification.line) AS failureLines, - ARRAY_CREATE(job.torchci_classification.line_num) AS failureLineNumbers, - job.torchci_classification.context AS failureContext, - job.torchci_classification.captures AS failureCaptures, -FROM - commons.workflow_job job - JOIN commons.workflow_run w HINT(access_path = column_scan) ON w.id = job.run_id -WHERE - w.head_branch LIKE :branch - AND w.head_repository.full_name = :repo - AND job.torchci_classification.line LIKE FORMAT('%{}%', REGEXP_REPLACE(:captures, ',', '%')) -ORDER BY - job.torchci_classification._event_time DESC diff --git a/torchci/rockset/commons/__sql/filter_forced_merge_pr.sql b/torchci/rockset/commons/__sql/filter_forced_merge_pr.sql deleted file mode 100644 index 97924cee06..0000000000 --- a/torchci/rockset/commons/__sql/filter_forced_merge_pr.sql +++ /dev/null @@ -1,78 +0,0 @@ --- This query is used by fetchHud to get the force merge status of the pull requests so that --- they can be highlighted on HUD. Specifically, force merges with failures are highlighted --- with a darker shade of orange while regular force merges due to impatience are marked with --- yellow. The logic needs to be in sync with weekly_force_merge_stats query. -WITH all_merges AS ( - SELECT - skip_mandatory_checks, - LENGTH(failed_checks) AS failed_checks_count, - LENGTH(ignore_current_checks) AS ignored_checks_count, - LENGTH(pending_checks) AS pending_checks_count, - ignore_current, - is_failed, - pr_num, - merge_commit_sha, - FROM - commons.merges - WHERE - owner = : owner - AND project = : project - AND ARRAY_CONTAINS( - SPLIT(: shas, ','), - merge_commit_sha - ) -), --- A legit force merge needs to satisfy one of the two conditions below: --- 1. skip_mandatory_checks is true (-f) and failed_checks_count > 0 (with failures) or pending_checks_count > 0 (impatience). --- Under this condition, if a force merge (-f) is done when there is no failure and all jobs have finished, it's arguably --- just a regular merge in disguise. --- 2. ignore_current is true (-i) and is_failed is false (indicating a successful merge) and ignored_checks_count > 0 (has failures). --- As -i still waits for all remaining jobs to finish, this shouldn't be counted toward force merge due to impatience. --- --- If none applies, the merge should be counted as a regular merge regardless of the use of -f or -i. We could track that --- (regular merges masquerading as force merges) to understand how devs use (or abuse) these flags, but that's arguably a --- different case altogether. 
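-- Worked example with hypothetical values: skip_mandatory_checks = true with
-- failed_checks_count = 2 and pending_checks_count = 0 satisfies condition 1,
-- so force_merge = 1 (and, since it has failures, force_merge_with_failures = 1
-- in the final SELECT); ignore_current = true with is_failed = false but
-- ignored_checks_count = 0 satisfies neither condition and is treated as a
-- regular merge.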
-force_merges_with_failed_checks AS ( - SELECT - IF( - ( - skip_mandatory_checks = true - AND ( - failed_checks_count > 0 - OR pending_checks_count > 0 - ) - ) - OR ( - ignore_current = true - AND is_failed = false - AND ignored_checks_count > 0 -- if no checks were ignored, it's not a force merge - ), - 1, - 0 - ) AS force_merge, - failed_checks_count, - ignored_checks_count, - pr_num, - merge_commit_sha, - FROM - all_merges -) -SELECT - pr_num, - merge_commit_sha, - force_merge, - IF( - ( - force_merge = 1 - AND ( - failed_checks_count > 0 - OR ignored_checks_count > 0 - ) - ), - 1, - 0 - ) AS force_merge_with_failures -FROM - force_merges_with_failed_checks -WHERE - force_merge = 1 diff --git a/torchci/rockset/commons/__sql/flaky_tests.sql b/torchci/rockset/commons/__sql/flaky_tests.sql deleted file mode 100644 index 925778055e..0000000000 --- a/torchci/rockset/commons/__sql/flaky_tests.sql +++ /dev/null @@ -1,94 +0,0 @@ - SELECT - test_run.name, - test_run.classname as suite, - test_run.file, - test_run.invoking_file, - SUM( - ELEMENT_AT( - JSON_PARSE( - REPLACE(test_run.skipped.message, 'True', 'true') - ), - 'num_green' - ) - ) as numGreen, - SUM( - ELEMENT_AT( - JSON_PARSE( - REPLACE(test_run.skipped.message, 'True', 'true') - ), - 'num_red' - ) - ) as numRed, - ARRAY_AGG(job.name) as jobNames, - ARRAY_AGG(job.id) as jobIds, - ARRAY_AGG(workflow.id) as workflowIds, - ARRAY_AGG(workflow.name) as workflowNames, - ARRAY_AGG(workflow.head_branch) as branches, - ARRAY_AGG(test_run.workflow_run_attempt) as runAttempts, - ARBITRARY( - if( - TYPEOF(test_run.rerun) = 'object', - test_run.rerun.text, - test_run.rerun[1].text - ) - ) as sampleTraceback -FROM - commons.workflow_job job - INNER JOIN commons.test_run_s3 test_run ON test_run.job_id = job.id HINT(join_strategy = lookup) - INNER JOIN commons.workflow_run workflow ON job.run_id = workflow.id -WHERE - test_run.skipped.message LIKE '{%"flaky": _rue%' - AND test_run._event_time > (CURRENT_TIMESTAMP() - HOURs(:numHours)) - AND test_run.name LIKE :name - AND test_run.classname LIKE :suite - AND test_run.file LIKE :file - AND job.name NOT LIKE '%rerun_disabled_tests%' -GROUP BY - name, - suite, - file, - invoking_file -UNION -select - test_run.name, - test_run.classname as suite, - test_run.file, - test_run.invoking_file, - COUNT(*) as numGreen, - SUM( - if( - TYPEOF(test_run.rerun) = 'object', - 1, - Length(test_run.rerun) - ) - ) as numRed, - ARRAY_AGG(job.name) as jobNames, - ARRAY_AGG(job.id) as jobIds, - ARRAY_AGG(workflow.id) as workflowIds, - ARRAY_AGG(workflow.name) as workflowNames, - ARRAY_AGG(workflow.head_branch) as branches, - ARRAY_AGG(test_run.workflow_run_attempt) as runAttempts, - ARBITRARY( - if( - TYPEOF(test_run.rerun) = 'object', - test_run.rerun.text, - test_run.rerun[1].text - ) - ) as sampleTraceback -FROM - commons.workflow_job job - INNER JOIN commons.test_run_s3 test_run ON test_run.job_id = job.id HINT(join_strategy = lookup) - INNER JOIN commons.workflow_run workflow ON job.run_id = workflow.id -where - test_run.rerun is not null - and test_run.failure is null - AND test_run._event_time > (CURRENT_TIMESTAMP() - HOURs(:numHours)) - AND test_run.name LIKE :name - AND test_run.classname LIKE :suite - AND test_run.file LIKE :file - AND job.name NOT LIKE '%rerun_disabled_tests%' -GROUP BY - name, - suite, - file, - invoking_file diff --git a/torchci/rockset/commons/__sql/flaky_tests_across_jobs.sql b/torchci/rockset/commons/__sql/flaky_tests_across_jobs.sql deleted file mode 100644 index 2905c44029..0000000000 --- 
a/torchci/rockset/commons/__sql/flaky_tests_across_jobs.sql +++ /dev/null @@ -1,103 +0,0 @@ -with failed_jobs as ( - SELECT - FIRST_VALUE(job.conclusion) OVER( - PARTITION BY CONCAT(w.name, ' / ', job.name) - ORDER BY - push.head_commit.timestamp ROWS BETWEEN 1 PRECEDING - AND 1 FOLLOWING - ) = 'success' - and NTH_VALUE(job.conclusion, 2) OVER( - PARTITION BY CONCAT(w.name, ' / ', job.name) - ORDER BY - push.head_commit.timestamp ROWS BETWEEN 1 PRECEDING - AND 1 FOLLOWING - ) = 'failure' - and LAST_VALUE(job.conclusion) OVER( - PARTITION BY CONCAT(w.name, ' / ', job.name) - ORDER BY - push.head_commit.timestamp ROWS BETWEEN 1 PRECEDING - AND 1 FOLLOWING - ) = 'success' as flaky, - job.id, - job.head_sha, - job.name as jobname, - w.id as workflow_id, - w.head_branch, - w.name as workflow_name, - w.run_attempt as workflow_run_attempt, - from - commons.workflow_job job - join commons.workflow_run w on w.id = job.run_id - and w.head_repository.full_name = 'pytorch/pytorch' - join push on push.head_commit.id = w.head_commit.id - where - job._event_time >= CURRENT_DATE() - HOURS(:numHours) - and w.head_branch = 'main' - and w.name in ('trunk', 'pull') - and job.name not like '%mem_leak_check%' - and job.name not like '%rerun_disabled_tests%' - order by - job._event_time -), -flaky_jobs as ( - select - distinct * - from - failed_jobs - left join commons.job_annotation annotation on annotation.jobID = failed_jobs.id - where - ( - failed_jobs.flaky - and annotation.annotation is NULL - ) - or annotation.annotation = 'TEST_FLAKE' -), -flaky_tests as ( - select - test_run.name, - test_run.file, - test_run.classname, - test_run.invoking_file, - *, - if ( - test_run.failure is null, - test_run.error.message, - test_run.failure.message - ) as failure_or_err_message, - test_run._event_time as event_time - from - test_run_s3 test_run - join flaky_jobs job on test_run.job_id = job.id - and test_run.workflow_run_attempt = job.workflow_run_attempt - where - ( - test_run.error is not null - or test_run.failure is not null - ) - and test_run.file is not null - order by - test_run.name -) -select - name, - classname as suite, - file, - invoking_file, - ARRAY_AGG(jobname) as jobNames, - ARRAY_AGG(id) as jobIds, - ARRAY_AGG(workflow_id) as workflowIds, - ARRAY_AGG(workflow_name) as workflowNames, - ARRAY_AGG(workflow_run_attempt) as runAttempts, - ARRAY_AGG(event_time) as eventTimes, - ARRAY_AGG(head_branch) as branches -from - flaky_tests -where - not REGEXP_LIKE(failure_or_err_message, :ignoreMessages) -group by - name, - file, - classname, - invoking_file -order by - name diff --git a/torchci/rockset/commons/__sql/flaky_workflows_jobs.sql b/torchci/rockset/commons/__sql/flaky_workflows_jobs.sql deleted file mode 100644 index 9ad9c6ee47..0000000000 --- a/torchci/rockset/commons/__sql/flaky_workflows_jobs.sql +++ /dev/null @@ -1,137 +0,0 @@ --- This query is used to get flaky job on trunk so that they can be retried. A flaky job is the --- one that has the green / red / green pattern. The failure in the middle is considered flaky --- and can be retried -WITH dedups AS ( - -- Note that there can be more than one commit with the same ID with the actual author and pytorchmergebot. 
- -- This messes up the results in some cases, so this removes all redundant information and only keeps what is - -- needed for the later query - SELECT - DISTINCT CONCAT(w.name, ' / ', job.name) AS fullname, - w.name AS workflow_name, - w.id AS workflow_id, - job.name AS job_name, - job.id AS job_id, - job.conclusion AS conclusion, - push.head_commit.id AS head_commit, - push.head_commit.timestamp AS head_commit_timestamp, - job.run_attempt AS run_attempt, - ROW_NUMBER() OVER( - PARTITION BY w.id, - w.name, - job.name - ORDER BY - job.run_attempt DESC - ) AS row_num, - FROM - commons.workflow_run w - JOIN commons.workflow_job job ON w.id = job.run_id HINT(join_strategy = lookup) - JOIN push ON push.head_commit.id = w.head_commit.id - WHERE - ( - job._event_time >= CURRENT_DATE() - HOURS(: numHours) - OR : numHours = 0 - ) - AND w.head_repository.full_name = : repo - AND ARRAY_CONTAINS( - SPLIT(: branches, ','), - w.head_branch - ) - AND ARRAY_CONTAINS( - SPLIT(: workflowNames, ','), - w.name - ) - AND job.name NOT LIKE '%mem_leak_check%' - AND job.name NOT LIKE '%rerun_disabled_tests%' - AND job.name NOT LIKE '%unstable%' -), -latest_attempts AS ( - -- Keep the latest run attempt to know if the job has already been retried - SELECT - * - FROM - dedups - WHERE - row_num = 1 -), -flaky_jobs AS ( - SELECT - workflow_name, - job_name, - -- Next commit - workflow_id AS next_workflow_id, - job_id AS next_job_id, - -- The flaky status of the job - FIRST_VALUE(conclusion) OVER( - PARTITION BY fullname - ORDER BY - head_commit_timestamp DESC ROWS BETWEEN CURRENT ROW - AND 2 FOLLOWING - ) = 'success' - AND NTH_VALUE(conclusion, 2) OVER( - PARTITION BY fullname - ORDER BY - head_commit_timestamp DESC ROWS BETWEEN CURRENT ROW - AND 2 FOLLOWING - ) = 'failure' - AND LAST_VALUE(conclusion) OVER( - PARTITION BY fullname - ORDER BY - head_commit_timestamp DESC ROWS BETWEEN CURRENT ROW - AND 2 FOLLOWING - ) = 'success' AS flaky, - -- The current commit - NTH_VALUE(workflow_id, 2) OVER( - PARTITION BY fullname - ORDER BY - head_commit_timestamp DESC ROWS BETWEEN CURRENT ROW - AND 2 FOLLOWING - ) AS workflow_id, - NTH_VALUE(job_id, 2) OVER( - PARTITION BY fullname - ORDER BY - head_commit_timestamp DESC ROWS BETWEEN CURRENT ROW - AND 2 FOLLOWING - ) AS job_id, - NTH_VALUE(run_attempt, 2) OVER( - PARTITION BY fullname - ORDER BY - head_commit_timestamp DESC ROWS BETWEEN CURRENT ROW - AND 2 FOLLOWING - ) AS run_attempt, - FROM - latest_attempts - WHERE - ( - latest_attempts.run_attempt <= : maxAttempt - OR : maxAttempt = 0 - ) -) -SELECT - DISTINCT flaky_jobs.workflow_name, - flaky_jobs.workflow_id, - flaky_jobs.job_name, - flaky_jobs.job_id, - flaky_jobs.flaky, - flaky_jobs.run_attempt, - flaky_jobs.next_workflow_id, - flaky_jobs.next_job_id, - annotation.annotation, -FROM - flaky_jobs - LEFT JOIN commons.job_annotation annotation on annotation.jobID = flaky_jobs.job_id -WHERE - ( - ( - flaky_jobs.flaky - AND annotation.annotation IS NULL - ) - OR annotation.annotation = 'TEST_FLAKE' - ) - AND ( - flaky_jobs.workflow_id = : workflowId - OR : workflowId = 0 - ) - AND ( - flaky_jobs.next_workflow_id = : nextWorkflowId - OR : nextWorkflowId = 0 - ) \ No newline at end of file diff --git a/torchci/rockset/commons/__sql/get_relevant_alerts.sql b/torchci/rockset/commons/__sql/get_relevant_alerts.sql deleted file mode 100644 index 0d053e8aa4..0000000000 --- a/torchci/rockset/commons/__sql/get_relevant_alerts.sql +++ /dev/null @@ -1,29 +0,0 @@ -WITH - filtered_table as ( - SELECT - * - FROM - commons.alerts - WHERE -
repo = :repo - and organization = :organization - and ( - closed = false - or ( - PARSE_DATETIME_ISO8601(timestamp) > (CURRENT_DATETIME() - INTERVAL 1 DAY) - ) - ) - ) -SELECT - * -FROM - filtered_table alerts - INNER JOIN ( - SELECT - AlertObject, - MAX(filtered_table.timestamp) - FROM - filtered_table - GROUP BY - AlertObject - ) b ON alerts.AlertObject = b.AlertObject diff --git a/torchci/rockset/commons/__sql/get_workflow_jobs.sql b/torchci/rockset/commons/__sql/get_workflow_jobs.sql deleted file mode 100644 index 77473cd976..0000000000 --- a/torchci/rockset/commons/__sql/get_workflow_jobs.sql +++ /dev/null @@ -1,11 +0,0 @@ -SELECT - job.id, - job.name, -FROM - workflow_job job - INNER JOIN workflow_run workflow on workflow.id = job.run_id HINT(join_strategy = lookup) -WHERE - workflow.id = :workflowId - AND job.name LIKE :jobName -ORDER BY - job.name \ No newline at end of file diff --git a/torchci/rockset/commons/__sql/hud_query.sql b/torchci/rockset/commons/__sql/hud_query.sql deleted file mode 100644 index c47764fc7c..0000000000 --- a/torchci/rockset/commons/__sql/hud_query.sql +++ /dev/null @@ -1,99 +0,0 @@ -WITH job AS ( - SELECT - job.head_sha as sha, - job.name as job_name, - workflow.name as workflow_name, - job.id, - job.conclusion, - job.html_url as html_url, - IF( - :repo = 'pytorch/pytorch', - CONCAT( - 'https://ossci-raw-job-status.s3.amazonaws.com/log/', - CAST(job.id as string) - ), - CONCAT( - 'https://ossci-raw-job-status.s3.amazonaws.com/log/', - :repo, - '/', - CAST(job.id as string) - ) - ) as log_url, - DATE_DIFF( - 'SECOND', - PARSE_TIMESTAMP_ISO8601(job.started_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) as duration_s, - workflow.repository.full_name as repo, - job.torchci_classification.line, - job.torchci_classification.captures, - job.torchci_classification.line_num, - annotation.annotation, - FROM - workflow_job job - INNER JOIN workflow_run workflow on workflow.id = job.run_id - LEFT JOIN job_annotation annotation ON job.id = annotation.jobID - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND workflow.event != 'repository_dispatch' -- Filter out repository_dispatch-triggered jobs, which have nothing to do with the SHA - AND ARRAY_CONTAINS(SPLIT(:shas, ','), job.head_sha) - AND ARRAY_CONTAINS(SPLIT(:shas, ','), workflow.head_sha) - AND workflow.repository.full_name = :repo - UNION - -- Handle CircleCI - -- IMPORTANT: this needs to have the same order as the query above - SELECT - job.pipeline.vcs.revision as sha, - -- Swap workflow and job name for consistency with GHA naming style. 
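-- Note: the swap matters because the final SELECT of this query renders
-- CONCAT(workflow_name, ' / ', job_name); flipping the two CircleCI fields here
-- keeps those rendered names reading the same way as the GitHub Actions rows in
-- the first half of the UNION.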
- job.workflow.name as job_name,
- job.job.name as workflow_name,
- job.job.number as id,
- case
- WHEN job.job.status = 'failed' then 'failure'
- WHEN job.job.status = 'canceled' then 'cancelled'
- else job.job.status
- END as conclusion,
- -- CircleCI doesn't provide a URL, so piece one together out of the info we have
- CONCAT(
- job.workflow.url,
- '/jobs/',
- CAST(job.job.number AS string)
- ) as html_url,
- -- logs aren't downloaded currently, just reuse html_url
- html_url as log_url,
- DATE_DIFF(
- 'SECOND',
- PARSE_TIMESTAMP_ISO8601(job.job.started_at),
- PARSE_TIMESTAMP_ISO8601(job.job.stopped_at)
- ) as duration_s,
- CONCAT(job.organization.name, '/', job.project.name) as repo,
- null,
- null,
- null,
- null,
- FROM
- circleci.job job
- WHERE
- ARRAY_CONTAINS(SPLIT(:shas, ','), job.pipeline.vcs.revision)
- AND CONCAT(job.organization.name, '/', job.project.name) = :repo
-)
-SELECT
- sha,
- CONCAT(workflow_name, ' / ', job_name) as name,
- id,
- CASE
- when conclusion is NULL then 'pending'
- else conclusion
- END as conclusion,
- html_url as htmlUrl,
- log_url as logUrl,
- duration_s as durationS,
- repo as repo,
- ARRAY_CREATE(line) as failureLines,
- ARRAY_CREATE(line_num) as failureLineNumbers,
- captures as failureCaptures,
- annotation as failureAnnotation,
-FROM
- job
diff --git a/torchci/rockset/commons/__sql/issue_query.sql b/torchci/rockset/commons/__sql/issue_query.sql
deleted file mode 100644
index 4ca2231043..0000000000
--- a/torchci/rockset/commons/__sql/issue_query.sql
+++ /dev/null
@@ -1,13 +0,0 @@
-SELECT
- issue.number,
- issue.title,
- issue.html_url,
- issue.state,
- issue.body,
- issue.updated_at,
- issue.author_association,
-FROM
- issues AS issue
- CROSS JOIN UNNEST(issue.labels AS label) AS labels
-WHERE
- labels.label.name =: label
diff --git a/torchci/rockset/commons/__sql/master_commit_red_jobs.sql b/torchci/rockset/commons/__sql/master_commit_red_jobs.sql
deleted file mode 100644
index 4c7c53c7cb..0000000000
--- a/torchci/rockset/commons/__sql/master_commit_red_jobs.sql
+++ /dev/null
@@ -1,103 +0,0 @@
-WITH all_jobs AS (
- SELECT
- push._event_time AS time,
- job.conclusion AS conclusion,
- push.head_commit.id AS sha,
- push.head_commit.author.username AS author,
- CONCAT(
- workflow.name,
- ' / ',
- ELEMENT_AT(SPLIT(job.name, ' / '), 1),
- CONCAT(' / ', ELEMENT_AT(SPLIT(ELEMENT_AT(SPLIT(job.name, ' / '), 2), ', '), 1))
- ) AS name,
- (
- CASE
- WHEN push.head_commit.author.username = 'pytorchmergebot' THEN push.head_commit.message
- ELSE NULL
- END
- ) AS body,
- FROM
- commons.workflow_job job
- JOIN commons.workflow_run workflow ON workflow.id = job.run_id
- JOIN push on workflow.head_commit.id = push.head_commit.id
- WHERE
- job.name != 'ciflow_should_run'
- AND job.name != 'generate-test-matrix'
- AND job.name NOT LIKE '%rerun_disabled_tests%'
- AND job.name NOT LIKE '%filter%'
- AND job.name NOT LIKE '%unstable%'
- AND job.name LIKE '%/%'
- AND ARRAY_CONTAINS(SPLIT(:workflowNames, ','), LOWER(workflow.name))
- AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA
- AND push.ref = 'refs/heads/main'
- AND push.repository.owner.name = 'pytorch'
- AND push.repository.name = 'pytorch'
- AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime)
- AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime)
-),
-filtered_jobs AS (
- SELECT
- time,
- sha,
- IF (name LIKE '%(%' AND name NOT LIKE '%)%', CONCAT(name, ')'), name) AS name,
- CAST(
- SUM(
- CASE
- WHEN conclusion = 'failure' THEN 1
- WHEN
conclusion = 'timed_out' THEN 1 - WHEN conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) > 0 AS int - ) AS any_red, - author, - body - FROM - all_jobs - GROUP BY - time, - sha, - name, - author, - body - HAVING - COUNT(*) >= 1 -- Filter out jobs that didn't run anything. - AND SUM(IF(conclusion IS NULL, 1, 0)) = 0 -- Filter out commits that still have pending jobs. -), -reds AS ( - SELECT - time, - sha, - ARRAY_REMOVE( - ARRAY_AGG( - IF (any_red > 0, name) - ), - NULL - ) AS failures, - ARRAY_REMOVE( - ARRAY_AGG( - IF (any_red = 0, name) - ), - NULL - ) AS successes, - author, - body - FROM - filtered_jobs - GROUP BY - time, - sha, - author, - body -) -SELECT - time, - sha, - author, - body, - failures, - successes -FROM - reds -ORDER BY - time DESC diff --git a/torchci/rockset/commons/__sql/num_commits_master.sql b/torchci/rockset/commons/__sql/num_commits_master.sql deleted file mode 100644 index f0ae69df1a..0000000000 --- a/torchci/rockset/commons/__sql/num_commits_master.sql +++ /dev/null @@ -1,9 +0,0 @@ -select - SUM(LENGTH(p.commits)) as num -from - push p -where - p.repository.full_name = 'pytorch/pytorch' - and p.ref = 'refs/heads/main' - AND p._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND p._event_time < PARSE_DATETIME_ISO8601(:stopTime) \ No newline at end of file diff --git a/torchci/rockset/commons/__sql/pr_commits.sql b/torchci/rockset/commons/__sql/pr_commits.sql deleted file mode 100644 index 050981bf1e..0000000000 --- a/torchci/rockset/commons/__sql/pr_commits.sql +++ /dev/null @@ -1,43 +0,0 @@ --- This query is used by the HUD's /pull page to populate the list of historical commits --- made against a given PR. --- This improves upon the default github commits view because it allows HUD to show jobs --- that ran on a PR before it was rebased - -WITH --- Get all PRs that were merged into master, and get all the SHAs for commits from that PR which CI jobs ran against --- We need the shas because some jobs (like trunk) don't have a PR they explicitly ran against, but they _were_ run against --- a commit from a PR -pr_shas AS ( - SELECT DISTINCT - FORMAT_ISO8601( - PARSE_TIMESTAMP_ISO8601(p.head_commit.timestamp), - 'America/Los_Angeles' - ) as timestamp, - r.pull_requests[1].number AS pr_number, - p.head_commit.id AS sha, - p.head_commit.message, - CONCAT( - 'https://github.com/', - :owner, - '/', - :repo, - '/pull/', - r.pull_requests[1].number - ) AS pr_url, - p.head_commit.url AS commit_url, - FROM - commons.push p - JOIN commons.workflow_run r ON p.head_commit.id = r.head_sha HINT(join_strategy=lookup) - WHERE - 1 = 1 - AND LENGTH(r.pull_requests) = 1 - AND r.repository.owner.login = :owner - AND r.pull_requests[1].head.repo.name = :repo - AND r.pull_requests[1].number = :pr_num - -) -SELECT - * -FROM - pr_shas -ORDER BY timestamp diff --git a/torchci/rockset/commons/__sql/recent_pr_workflows_query.sql b/torchci/rockset/commons/__sql/recent_pr_workflows_query.sql deleted file mode 100644 index 5f2719057d..0000000000 --- a/torchci/rockset/commons/__sql/recent_pr_workflows_query.sql +++ /dev/null @@ -1,83 +0,0 @@ --- This workflow is used by Dr.CI to get all the jobs from pull requests. The failures will then be --- classified into new failures and unrelated failures such as broken trunk, flaky, unstable, etc. 
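-- Note on the first branch of the UNION below: the `:prNumber = 0` guard makes
-- the two modes mutually exclusive. With the default :prNumber = 0, the
-- time-window branch (jobs completed within the last :numMinutes) supplies the
-- SHAs; with a real PR number, that branch returns no rows and only that PR's
-- head SHAs feed the rest of the query.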
-WITH relevant_shas as ( - select j.head_sha - from workflow_job j - where - PARSE_TIMESTAMP_ISO8601(j.completed_at) > ( - CURRENT_TIMESTAMP() - MINUTES(: numMinutes) - ) - AND :prNumber = 0 - union - select pr.head.sha as head_sha - from commons.pull_request pr - where pr.number = :prNumber -), -recent_prs AS ( - SELECT - distinct pull_request.head.sha AS sha, - pull_request.number AS number, - push.head_commit.timestamp AS timestamp, - FROM - relevant_shas r - JOIN commons.pull_request pull_request ON r.head_sha = pull_request.head.sha HINT(join_broadcast = true) - -- Do a left join here because the push table won't have any information about - -- commits from forked repo - LEFT JOIN commons.push push ON r.head_sha = push.after HINT(join_strategy = lookup) - WHERE - pull_request.base.repo.full_name =: repo -) -SELECT - w.id AS workflowId, - w.workflow_id as workflowUniqueId, - j.id, - j.runner_name AS runnerName, - w.head_commit.author.email as authorEmail, - CONCAT(w.name, ' / ', j.name) AS name, - j.name AS jobName, - j.conclusion, - j.completed_at, - j.html_url, - j.head_branch, - recent_prs.number AS pr_number, - recent_prs.sha AS head_sha, - recent_prs.timestamp AS head_sha_timestamp, - j.torchci_classification.captures AS failure_captures, - IF( - j.torchci_classification.line IS NULL, - null, - ARRAY_CREATE(j.torchci_classification.line) - ) AS failure_lines, - j.torchci_classification.context AS failure_context, - j._event_time AS time -FROM - commons.workflow_run w - JOIN ( - commons.workflow_job j - JOIN recent_prs ON j.head_sha = recent_prs.sha HINT(join_strategy = lookup) - ) ON w.id = j.run_id HINT(join_broadcast = true) -UNION -SELECT - null AS workflowId, - w.workflow_id as workflowUniqueId, - w.id, - null AS runnerName, - w.head_commit.author.email as authorEmail, - w.name AS name, - w.name AS jobName, - w.conclusion, - w.completed_at, - w.html_url, - w.head_branch, - recent_prs.number AS pr_number, - w.head_sha, - recent_prs.timestamp AS head_sha_timestamp, - null AS failure_captures, - null AS failure_lines, - null AS failure_context, - w._event_time as time -FROM - commons.workflow_run w - JOIN recent_prs ON w.head_sha = recent_prs.sha HINT(join_broadcast = true) -ORDER BY - time DESC diff --git a/torchci/rockset/commons/__sql/reverted_prs_with_reason.sql b/torchci/rockset/commons/__sql/reverted_prs_with_reason.sql deleted file mode 100644 index 6a0187902f..0000000000 --- a/torchci/rockset/commons/__sql/reverted_prs_with_reason.sql +++ /dev/null @@ -1,37 +0,0 @@ -SELECT - ic._event_time revert_time, - ic.user.login as reverter, - REGEXP_EXTRACT( - ic.body, - '(-c|--classification)[\s =]+["'']?(\w+)["'']?', - 2 - ) as code, - REGEXP_EXTRACT( - ic.body, - '(-m|--message)[\s =]+["'']?([^"'']+)["'']?', - 2 - ) as message, - ic.html_url as comment_url -FROM - commons.issue_comment AS ic - INNER JOIN ( - SELECT - issue_comment.issue_url, - MAX(issue_comment._event_time) as event_time -- Use the max for when invalid revert commands are tried first - FROM - commons.issue_comment - WHERE - REGEXP_LIKE( - issue_comment.body, - ' *@pytorch(merge|)bot revert' - ) - GROUP BY - issue_comment.issue_url - ) AS rc ON ic.issue_url = rc.issue_url -WHERE - ic._event_time = rc.event_time - AND ic._event_time >= PARSE_TIMESTAMP_ISO8601(:startTime) - AND ic._event_time < PARSE_TIMESTAMP_ISO8601(:stopTime) - AND ic.user.login != 'pytorch-bot[bot]' -ORDER BY - code DESC diff --git a/torchci/rockset/commons/__sql/test_insights_latest_runs.sql 
b/torchci/rockset/commons/__sql/test_insights_latest_runs.sql deleted file mode 100644 index 5a3f7ca720..0000000000 --- a/torchci/rockset/commons/__sql/test_insights_latest_runs.sql +++ /dev/null @@ -1,26 +0,0 @@ -SELECT DISTINCT - test_run_summary.workflow_id, - test_run_summary.job_id, - test_run_summary._event_time, - test_run_summary.time, - test_run_summary.tests, - test_run_summary.skipped, - test_run_summary.failures, - test_run_summary.errors -FROM - commons.test_run_summary - JOIN commons.workflow_run on test_run_summary.workflow_id = CAST(workflow_run.id as string) - JOIN commons.workflow_job on test_run_summary.job_id = workflow_job.id -WHERE - test_run_summary._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND test_run_summary._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND test_run_summary.workflow_run_attempt = 1 - AND workflow_run.name = :workflowName - AND workflow_job.name = :jobName - AND test_run_summary.invoking_file = :testFile - AND test_run_summary.classname = :testClass - AND workflow_run.head_branch = 'main' -ORDER BY - test_run_summary._event_time DESC -LIMIT - :limit diff --git a/torchci/rockset/commons/__sql/test_time_per_class.sql b/torchci/rockset/commons/__sql/test_time_per_class.sql deleted file mode 100644 index 9e45fd6154..0000000000 --- a/torchci/rockset/commons/__sql/test_time_per_class.sql +++ /dev/null @@ -1,68 +0,0 @@ -WITH most_recent_strict_commits AS ( - SELECT - push.head_commit.id as sha, - FROM - commons.push - WHERE - push.ref = 'refs/heads/viable/strict' - AND push.repository.full_name = 'pytorch/pytorch' - ORDER BY - push._event_time DESC - LIMIT - 3 -), workflow AS ( - SELECT - id - FROM - commons.workflow_run w - INNER JOIN most_recent_strict_commits c on w.head_sha = c.sha -), -job AS ( - SELECT - j.name, - j.id, - j.run_id - FROM - commons.workflow_job j - INNER JOIN workflow w on w.id = j.run_id -), -class_duration_per_job AS ( - SELECT - test_run.invoking_file as file, - test_run.classname as classname, - SUM(time) as time, - REGEXP_EXTRACT(job.name, '^(.*) /', 1) as base_name, - REGEXP_EXTRACT(job.name, '/ test \(([\w-]*),', 1) as test_config, - FROM - commons.test_run_summary test_run - /* `test_run` is ginormous and `job` is small, so lookup join is essential */ - INNER JOIN job ON test_run.job_id = job.id HINT(join_strategy = lookup) - WHERE - /* cpp tests do not populate `file` for some reason. 
*/ - /* Exclude them as we don't include them in our slow test infra */ - test_run.file IS NOT NULL - GROUP BY - test_run.invoking_file, - test_run.classname, - base_name, - test_config, - job.run_id -) -SELECT - REPLACE(file, '.', '/') AS file, - classname, - base_name, - test_config, - AVG(time) as time -FROM - class_duration_per_job -GROUP BY - file, - classname, - base_name, - test_config -ORDER BY - base_name, - test_config, - file, - classname diff --git a/torchci/rockset/commons/__sql/test_time_per_class_periodic_jobs.sql b/torchci/rockset/commons/__sql/test_time_per_class_periodic_jobs.sql deleted file mode 100644 index 5843258c0b..0000000000 --- a/torchci/rockset/commons/__sql/test_time_per_class_periodic_jobs.sql +++ /dev/null @@ -1,80 +0,0 @@ --- same as test_time_per_class query except for the first select -WITH good_periodic_sha AS ( - select - job.head_sha as sha - from - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - where - workflow.name = 'periodic' - AND workflow.head_branch LIKE 'main' - group by - job.head_sha, - push._event_time - having - BOOL_AND( - job.conclusion = 'success' - and job.conclusion is not null - ) - order by - push._event_time desc - limit - 3 -), workflow AS ( - SELECT - id - FROM - commons.workflow_run w - INNER JOIN good_periodic_sha c on w.head_sha = c.sha - and w.name = 'periodic' -), -job AS ( - SELECT - j.name, - j.id, - j.run_id, - FROM - commons.workflow_job j - INNER JOIN workflow w on w.id = j.run_id -), -class_duration_per_job AS ( - SELECT - test_run.invoking_file as file, - test_run.classname as classname, - SUM(time) as time, - REGEXP_EXTRACT(job.name, '^(.*) /', 1) as base_name, - REGEXP_EXTRACT(job.name, '/ test \(([\w-]*),', 1) as test_config, - FROM - commons.test_run_summary test_run - /* `test_run` is ginormous and `job` is small, so lookup join is essential */ - INNER JOIN job ON test_run.job_id = job.id HINT(join_strategy = lookup) - WHERE - /* cpp tests do not populate `file` for some reason. 
*/ - /* Exclude them as we don't include them in our slow test infra */ - test_run.file IS NOT NULL - GROUP BY - test_run.invoking_file, - test_run.classname, - base_name, - test_config, - job.run_id -) -SELECT - REPLACE(file, '.', '/') AS file, - classname, - base_name, - test_config, - AVG(time) as time -FROM - class_duration_per_job -GROUP BY - file, - classname, - base_name, - test_config -ORDER BY - base_name, - test_config, - file, - classname diff --git a/torchci/rockset/commons/__sql/test_time_per_file.sql b/torchci/rockset/commons/__sql/test_time_per_file.sql deleted file mode 100644 index 219094f0a4..0000000000 --- a/torchci/rockset/commons/__sql/test_time_per_file.sql +++ /dev/null @@ -1,63 +0,0 @@ -WITH most_recent_strict_commits AS ( - SELECT - push.head_commit.id as sha, - FROM - commons.push - WHERE - push.ref = 'refs/heads/viable/strict' - AND push.repository.full_name = 'pytorch/pytorch' - ORDER BY - push._event_time DESC - LIMIT - 3 -), workflow AS ( - SELECT - id - FROM - commons.workflow_run w - INNER JOIN most_recent_strict_commits c on w.head_sha = c.sha -), -job AS ( - SELECT - j.name, - j.id, - j.run_id - FROM - commons.workflow_job j - INNER JOIN workflow w on w.id = j.run_id -), -file_duration_per_job AS ( - SELECT - test_run.invoking_file as file, - SUM(time) as time, - REGEXP_EXTRACT(job.name, '^(.*) /', 1) as base_name, - REGEXP_EXTRACT(job.name, '/ test \(([\w-]*),', 1) as test_config, - FROM - commons.test_run_summary test_run - /* `test_run` is ginormous and `job` is small, so lookup join is essential */ - INNER JOIN job ON test_run.job_id = job.id HINT(join_strategy = lookup) - WHERE - /* cpp tests do not populate `file` for some reason. */ - /* Exclude them as we don't include them in our slow test infra */ - test_run.file IS NOT NULL - GROUP BY - test_run.invoking_file, - base_name, - test_config, - job.run_id -) -SELECT - REPLACE(file, '.', '/') AS file, - base_name, - test_config, - AVG(time) as time -FROM - file_duration_per_job -GROUP BY - file, - base_name, - test_config -ORDER BY - base_name, - test_config, - file diff --git a/torchci/rockset/commons/__sql/test_time_per_file_periodic_jobs.sql b/torchci/rockset/commons/__sql/test_time_per_file_periodic_jobs.sql deleted file mode 100644 index 4e52e76121..0000000000 --- a/torchci/rockset/commons/__sql/test_time_per_file_periodic_jobs.sql +++ /dev/null @@ -1,75 +0,0 @@ --- same as test_time_per_file query except for the first select -WITH good_periodic_sha AS ( - select - job.head_sha as sha - from - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - where - workflow.name = 'periodic' - AND workflow.head_branch LIKE 'main' - group by - job.head_sha, - push._event_time - having - BOOL_AND( - job.conclusion = 'success' - and job.conclusion is not null - ) - order by - push._event_time desc - limit - 3 -), workflow AS ( - SELECT - id - FROM - commons.workflow_run w - INNER JOIN good_periodic_sha c on w.head_sha = c.sha - and w.name = 'periodic' -), -job AS ( - SELECT - j.name, - j.id, - j.run_id, - FROM - commons.workflow_job j - INNER JOIN workflow w on w.id = j.run_id -), -file_duration_per_job AS ( - SELECT - test_run.invoking_file as file, - SUM(time) as time, - REGEXP_EXTRACT(job.name, '^(.*) /', 1) as base_name, - REGEXP_EXTRACT(job.name, '/ test \(([\w-]*),', 1) as test_config, - FROM - commons.test_run_summary test_run - /* `test_run` is ginormous and `job` is small, so lookup join is essential */ 
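/* (Roughly, the lookup strategy resolves matches through an index on the join
   key instead of hash-joining across the full test_run_summary collection,
   which is why the comment above calls it essential; the same hint appears in
   all four test-time queries removed in this patch.) */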
- INNER JOIN job ON test_run.job_id = job.id HINT(join_strategy = lookup) - WHERE - /* cpp tests do not populate `file` for some reason. */ - /* Exclude them as we don't include them in our slow test infra */ - test_run.file IS NOT NULL - GROUP BY - test_run.invoking_file, - base_name, - test_config, - job.run_id -) -SELECT - REPLACE(file, '.', '/') AS file, - base_name, - test_config, - AVG(time) as time -FROM - file_duration_per_job -GROUP BY - file, - base_name, - test_config -ORDER BY - base_name, - test_config, - file diff --git a/torchci/rockset/commons/__sql/unclassified.sql b/torchci/rockset/commons/__sql/unclassified.sql deleted file mode 100644 index 6f26db589d..0000000000 --- a/torchci/rockset/commons/__sql/unclassified.sql +++ /dev/null @@ -1,17 +0,0 @@ -SELECT - job.html_url, - CONCAT( - 'https://ossci-raw-job-status.s3.amazonaws.com/log/', - CAST(job.id as string) - ) as log_url, - job.id as id, -FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on job.run_id = workflow.id -WHERE - job.conclusion = 'failure' - AND job._event_time > (CURRENT_TIMESTAMP() - HOURS(24)) - AND job.torchci_classification IS NULL -ORDER BY - job._event_time ASC -LIMIT :n diff --git a/torchci/rockset/commons/__sql/weekly_force_merge_stats.sql b/torchci/rockset/commons/__sql/weekly_force_merge_stats.sql deleted file mode 100644 index e79268d3f8..0000000000 --- a/torchci/rockset/commons/__sql/weekly_force_merge_stats.sql +++ /dev/null @@ -1,218 +0,0 @@ --- Gets percentage of total force merges, force merges with failures, and force merges without failures (impatient) --- Specifically this query tracks the force merges kpi and metric on HUD --- --- Special params: --- one_bucket: If set to false, bucketizes the results over the requested granularity --- otherwise there is not bucketing --- merge_type: If set, will return only data about the requested force merge type. 
--- Can be one of: "All", "Impatience", "Failures", or " " (to get everything) -WITH issue_comments AS ( - SELECT - issue_comment.user.login, - issue_comment.author_association, - issue_comment.body, - issue_comment.issue_url, - issue_comment.html_url, - issue_comment.created, - CAST( - SUBSTR( - issue_comment.issue_url, - LENGTH( - 'https://api.github.com/repos/pytorch/pytorch/issues/' - ) + 1 - ) AS INT - ) AS pr_num - FROM - commons.issue_comment - WHERE - ( - issue_comment.body LIKE '%pytorchbot merge%' - OR issue_comment.body LIKE '%pytorchmergebot merge%' - ) - AND issue_comment.user.login NOT LIKE '%pytorch-bot%' - AND issue_comment.user.login NOT LIKE '%facebook-github-bot%' - AND issue_comment.user.login NOT LIKE '%pytorchmergebot%' - AND issue_comment.issue_url LIKE '%https://api.github.com/repos/pytorch/pytorch/issues/%' -), -all_merges AS ( - SELECT - DISTINCT m.skip_mandatory_checks, - LENGTH(m.failed_checks) AS failed_checks_count, - LENGTH(m.ignore_current_checks) AS ignored_checks_count, - LENGTH(m.pending_checks) AS pending_checks_count, - m.ignore_current, - m.is_failed, - m.pr_num, - m.merge_commit_sha, - max(c.created) AS time, - FROM - commons.merges m - INNER JOIN issue_comments c ON m.pr_num = c.pr_num - WHERE - m.owner = 'pytorch' - AND m.project = 'pytorch' - AND m.merge_commit_sha != '' -- only consider successful merges - AND m._event_time >= PARSE_DATETIME_ISO8601(: startTime) - AND m._event_time < PARSE_DATETIME_ISO8601(: stopTime) - GROUP BY - m.skip_mandatory_checks, - m.failed_checks, - m.ignore_current, - m.is_failed, - m.pr_num, - m.merge_commit_sha, - m.ignore_current_checks, - m.pending_checks -), --- A legit force merge needs to satisfy one of the two conditions below: --- 1. skip_mandatory_checks is true (-f) and failed_checks_count > 0 (with failures) or pending_checks_count > 0 (impatience). --- Under this condition, if a force merge (-f) is done when there is no failure and all jobs have finished, it's arguably --- just a regular merge in disguise. --- 2. ignore_current is true (-i) and is_failed is false (indicating a successful merge) and ignored_checks_count > 0 (has failures). --- As -i still waits for all remaining jobs to finish, this shouldn't be counted toward force merge due to impatience. --- --- If none applies, the merge should be counted as a regular merge regardless of the use of -f or -i. We could track that --- (regular merges masquerading as force merges) to understand how devs use (or abuse) these flags, but that's arguably a --- different case altogether. 
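-- Worked examples of the rules above (illustrative cases, not drawn from data):
--   -f with failed checks                       => force merge, "with failures"
--   -f with only pending checks                 => force merge, "impatience"
--   -f with no failed and no pending checks     => regular merge in disguise
--   -i, successful merge, some checks ignored   => force merge, "with failures"
--   -i with no ignored checks                   => regular merge, not counted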
-merges_identifying_force_merges AS ( - SELECT - IF( - ( - skip_mandatory_checks = true - AND ( - failed_checks_count > 0 - OR pending_checks_count > 0 - ) - ) - OR ( - ignore_current = true - AND is_failed = false - AND ignored_checks_count > 0 -- if no checks were ignored, it's not a force merge - ), - 1, - 0 - ) AS force_merge, - failed_checks_count, - pr_num, - merge_commit_sha, - ignore_current, - ignored_checks_count, - time, - FROM - all_merges -), -results AS ( - SELECT - pr_num, - merge_commit_sha, - force_merge, - IF( - force_merge = 1 - AND ( - failed_checks_count > 0 - OR ignored_checks_count > 0 - ), - 1, - 0 - ) AS force_merge_with_failures, - CAST(time as DATE) AS date - FROM - merges_identifying_force_merges - ORDER BY - date DESC -), -bucketed_counts AS ( - SELECT - IF( - : one_bucket, - 'Overall', - FORMAT_TIMESTAMP( - '%Y-%m-%d', - DATE_TRUNC(: granularity, date) - ) - ) AS granularity_bucket, - SUM(force_merge_with_failures) AS with_failures_cnt, - SUM(force_merge) - SUM(force_merge_with_failures) AS impatience_cnt, - COUNT(*) AS total, - SUM(force_merge) AS total_force_merge_cnt - FROM - results - GROUP BY - granularity_bucket -), -rolling_raw_stats AS ( - -- Average over the past buckets - SELECT - granularity_bucket, - SUM(with_failures_cnt) OVER( - ORDER BY - granularity_bucket ROWS 1 PRECEDING - ) AS with_failures_cnt, - SUM(impatience_cnt) OVER( - ORDER BY - granularity_bucket ROWS 1 PRECEDING - ) AS impatience_cnt, - SUM(total_force_merge_cnt) OVER( - ORDER BY - granularity_bucket ROWS 1 PRECEDING - ) AS total_force_merge_cnt, - SUM(total) OVER( - ORDER BY - granularity_bucket ROWS 1 PRECEDING - ) AS total, - FROM - bucketed_counts -), -stats_per_bucket AS ( - SELECT - granularity_bucket, - with_failures_cnt * 100.0 / total AS with_failures_percent, - impatience_cnt * 100.0 / total AS impatience_percent, - total_force_merge_cnt * 100.0 / total AS force_merge_percent, - FROM - rolling_raw_stats -), -final_table AS ( - ( - SELECT - granularity_bucket, - with_failures_percent AS metric, - 'From Failures' AS name - FROM - stats_per_bucket - ) - UNION ALL - ( - SELECT - granularity_bucket, - impatience_percent AS metric, - 'From Impatience' AS name - FROM - stats_per_bucket - ) - UNION ALL - ( - SELECT - granularity_bucket, - force_merge_percent AS metric, - 'All Force Merges' AS name - FROM - stats_per_bucket - ) -), -filtered_result AS ( - SELECT - * - FROM - final_table - WHERE - TRIM(: merge_type) = '' - OR name LIKE CONCAT('%', : merge_type, '%') -) -SELECT - * -FROM - filtered_result -ORDER BY - granularity_bucket DESC, - name \ No newline at end of file diff --git a/torchci/rockset/commons/annotated_flaky_jobs.lambda.json b/torchci/rockset/commons/annotated_flaky_jobs.lambda.json deleted file mode 100644 index 0fcf53c8a9..0000000000 --- a/torchci/rockset/commons/annotated_flaky_jobs.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/annotated_flaky_jobs.sql", - "default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "main" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-10-15T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-09-19T00:06:32.839Z" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/commit_failed_jobs.lambda.json b/torchci/rockset/commons/commit_failed_jobs.lambda.json deleted file mode 100644 index 12c24f5ac8..0000000000 --- 
a/torchci/rockset/commons/commit_failed_jobs.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/commit_failed_jobs.sql", - "default_parameters": [ - { - "name": "shas", - "type": "string", - "value": "ba1da47e8fa95ca0dd8b2d63430f7eb54fdbbccb" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/commit_jobs_query.lambda.json b/torchci/rockset/commons/commit_jobs_query.lambda.json deleted file mode 100644 index 19754c2fd2..0000000000 --- a/torchci/rockset/commons/commit_jobs_query.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/commit_jobs_query.sql", - "default_parameters": [ - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "sha", - "type": "string", - "value": "155ffe8e1cf26e6a3d7f4f9dafeff1a1f26481aa" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/commit_query.lambda.json b/torchci/rockset/commons/commit_query.lambda.json deleted file mode 100644 index dad5288bf2..0000000000 --- a/torchci/rockset/commons/commit_query.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/commit_query.sql", - "default_parameters": [ - { - "name": "sha", - "type": "string", - "value": "" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/disabled_non_flaky_tests.lambda.json b/torchci/rockset/commons/disabled_non_flaky_tests.lambda.json deleted file mode 100644 index ad66580a64..0000000000 --- a/torchci/rockset/commons/disabled_non_flaky_tests.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/disabled_non_flaky_tests.sql", - "default_parameters": [ - { - "name": "max_num_red", - "type": "int", - "value": "0" - }, - { - "name": "min_num_green", - "type": "int", - "value": "150" - } - ], - "description": "" -} diff --git a/torchci/rockset/commons/disabled_test_labels.lambda.json b/torchci/rockset/commons/disabled_test_labels.lambda.json deleted file mode 100644 index 7406840443..0000000000 --- a/torchci/rockset/commons/disabled_test_labels.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/disabled_test_labels.sql", - "default_parameters": [ - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "states", - "type": "string", - "value": "open" - } - ], - "description": "Query the list of DISABLED tests labels" -} \ No newline at end of file diff --git a/torchci/rockset/commons/disabled_tests.lambda.json b/torchci/rockset/commons/disabled_tests.lambda.json deleted file mode 100644 index d8344042b7..0000000000 --- a/torchci/rockset/commons/disabled_tests.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/disabled_tests.sql", - "default_parameters": [ - { - "name": "label", - "type": "string", - "value": "" - }, - { - "name": "platform", - "type": "string", - "value": "" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "state", - "type": "string", - "value": "open" - }, - { - "name": "triaged", - "type": "string", - "value": "" - } - ], - "description": "Returns the list of DISABLED tests together with their labels" -} \ No newline at end of file diff --git a/torchci/rockset/commons/failed_workflow_jobs.lambda.json b/torchci/rockset/commons/failed_workflow_jobs.lambda.json deleted file mode 100644 index bfa0e1847e..0000000000 --- a/torchci/rockset/commons/failed_workflow_jobs.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": 
"__sql/failed_workflow_jobs.sql", - "default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "master" - }, - { - "name": "count", - "type": "int", - "value": "1" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-03-01T00:00:00.00Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-04-01T00:00:00.00Z" - } - ], - "description": "Return failed GitHub jobs" -} \ No newline at end of file diff --git a/torchci/rockset/commons/failure_samples_query.lambda.json b/torchci/rockset/commons/failure_samples_query.lambda.json deleted file mode 100644 index 09b061f84d..0000000000 --- a/torchci/rockset/commons/failure_samples_query.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/failure_samples_query.sql", - "default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "%" - }, - { - "name": "captures", - "type": "string", - "value": "test_cublas_baddbmm_large_input_1_10000_10000_10000_cuda_float32,TestMatmulCudaCUDA" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/filter_forced_merge_pr.lambda.json b/torchci/rockset/commons/filter_forced_merge_pr.lambda.json deleted file mode 100644 index 7db62d7ec5..0000000000 --- a/torchci/rockset/commons/filter_forced_merge_pr.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/filter_forced_merge_pr.sql", - "default_parameters": [ - { - "name": "owner", - "type": "string", - "value": "pytorch" - }, - { - "name": "project", - "type": "string", - "value": "pytorch" - }, - { - "name": "shas", - "type": "string", - "value": "dafa009c3c0198d501fc7bc6cdcc7df14f800852,a0e6f82087af53299c70c09834db42787e750caf,c73923473d4ed0ab08143cb8fe3e8c3f86f2cf73" - } - ], - "description": "Check if these PRs are forced merge" -} diff --git a/torchci/rockset/commons/flaky_test_query.lambda.json b/torchci/rockset/commons/flaky_test_query.lambda.json deleted file mode 100644 index 30de1a422d..0000000000 --- a/torchci/rockset/commons/flaky_test_query.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/flaky_test_query.sql", - "default_parameters": [ - { - "name": "file", - "type": "string", - "value": "%" - }, - { - "name": "name", - "type": "string", - "value": "%" - }, - { - "name": "num_hours", - "type": "int", - "value": "3" - }, - { - "name": "suite", - "type": "string", - "value": "%" - } - ], - "description": "get flaky tests from the last num_hours hours" -} \ No newline at end of file diff --git a/torchci/rockset/commons/flaky_tests.lambda.json b/torchci/rockset/commons/flaky_tests.lambda.json deleted file mode 100644 index aa78d64350..0000000000 --- a/torchci/rockset/commons/flaky_tests.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/flaky_tests.sql", - "default_parameters": [ - { - "name": "file", - "type": "string", - "value": "%" - }, - { - "name": "name", - "type": "string", - "value": "%" - }, - { - "name": "numHours", - "type": "int", - "value": "3" - }, - { - "name": "suite", - "type": "string", - "value": "%" - } - ], - "description": "Flaky tests from the last numHours hours, using test_run" -} \ No newline at end of file diff --git a/torchci/rockset/commons/flaky_tests_across_jobs.lambda.json b/torchci/rockset/commons/flaky_tests_across_jobs.lambda.json deleted file mode 100644 index 45f63368b0..0000000000 --- 
a/torchci/rockset/commons/flaky_tests_across_jobs.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/flaky_tests_across_jobs.sql", - "default_parameters": [ - { - "name": "ignoreMessages", - "type": "string", - "value": "No CUDA GPUs are available" - }, - { - "name": "numHours", - "type": "int", - "value": "96" - }, - { - "name": "threshold", - "type": "int", - "value": "1" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/flaky_workflows_jobs.lambda.json b/torchci/rockset/commons/flaky_workflows_jobs.lambda.json deleted file mode 100644 index 9a6f6cfa91..0000000000 --- a/torchci/rockset/commons/flaky_workflows_jobs.lambda.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "sql_path": "__sql/flaky_workflows_jobs.sql", - "default_parameters": [ - { - "name": "branches", - "type": "string", - "value": "master,main" - }, - { - "name": "maxAttempt", - "type": "int", - "value": "1" - }, - { - "name": "nextWorkflowId", - "type": "int", - "value": "0" - }, - { - "name": "numHours", - "type": "int", - "value": "24" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "workflowId", - "type": "int", - "value": "0" - }, - { - "name": "workflowNames", - "type": "string", - "value": "pull,trunk" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/get_relevant_alerts.lambda.json b/torchci/rockset/commons/get_relevant_alerts.lambda.json deleted file mode 100644 index 1f1282a627..0000000000 --- a/torchci/rockset/commons/get_relevant_alerts.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/get_relevant_alerts.sql", - "default_parameters": [ - { - "name": "organization", - "type": "string", - "value": "test_org" - }, - { - "name": "repo", - "type": "string", - "value": "test_repo" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/get_workflow_jobs.lambda.json b/torchci/rockset/commons/get_workflow_jobs.lambda.json deleted file mode 100644 index acb19ca4d9..0000000000 --- a/torchci/rockset/commons/get_workflow_jobs.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/get_workflow_jobs.sql", - "default_parameters": [ - { - "name": "jobName", - "type": "string", - "value": "%test (%" - }, - { - "name": "workflowId", - "type": "int", - "value": "0" - } - ], - "description": "Get jobs workflow ID and names" -} \ No newline at end of file diff --git a/torchci/rockset/commons/hud_query.lambda.json b/torchci/rockset/commons/hud_query.lambda.json deleted file mode 100644 index 778a6841d1..0000000000 --- a/torchci/rockset/commons/hud_query.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/hud_query.sql", - "default_parameters": [ - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "shas", - "type": "string", - "value": "90c8623c2ec874b5a1d7092a56a9a83abad78f78,b33831dcd89b99318b703b90c0fffee7bd710b2f" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/issue_query.lambda.json b/torchci/rockset/commons/issue_query.lambda.json deleted file mode 100644 index f3166ce894..0000000000 --- a/torchci/rockset/commons/issue_query.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/issue_query.sql", - "default_parameters": [ - { - "name": "label", - "type": "string", - "value": "skipped" - } - ], - "description": "" -} \ No newline at end of file diff --git 
a/torchci/rockset/commons/master_commit_red_jobs.lambda.json b/torchci/rockset/commons/master_commit_red_jobs.lambda.json deleted file mode 100644 index 0ef24132c2..0000000000 --- a/torchci/rockset/commons/master_commit_red_jobs.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/master_commit_red_jobs.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2023-04-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-05-01T00:00:00.000Z" - }, - { - "name": "workflowNames", - "type": "string", - "value": "lint,pull,trunk" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/num_commits_master.lambda.json b/torchci/rockset/commons/num_commits_master.lambda.json deleted file mode 100644 index f44db766a2..0000000000 --- a/torchci/rockset/commons/num_commits_master.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/num_commits_master.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-06-09T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-08-13T00:06:32.839Z" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/pr_commits.lambda.json b/torchci/rockset/commons/pr_commits.lambda.json deleted file mode 100644 index cea77c97e5..0000000000 --- a/torchci/rockset/commons/pr_commits.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/pr_commits.sql", - "default_parameters": [ - { - "name": "owner", - "type": "string", - "value": "pytorch" - }, - { - "name": "pr_num", - "type": "int", - "value": "110976" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch" - } - ], - "description": "Shows all commits for a PR" -} \ No newline at end of file diff --git a/torchci/rockset/commons/recent_pr_workflows_query.lambda.json b/torchci/rockset/commons/recent_pr_workflows_query.lambda.json deleted file mode 100644 index f628128c5d..0000000000 --- a/torchci/rockset/commons/recent_pr_workflows_query.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/recent_pr_workflows_query.sql", - "default_parameters": [ - { - "name": "numMinutes", - "type": "int", - "value": "15" - }, - { - "name": "prNumber", - "type": "int", - "value": "0" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - } - ], - "description": "retrieve the CI check results in the past N minutes, as well as the other workflows from the same PRs that were previously run/pending" -} \ No newline at end of file diff --git a/torchci/rockset/commons/reverted_prs_with_reason.lambda.json b/torchci/rockset/commons/reverted_prs_with_reason.lambda.json deleted file mode 100644 index b6bf00c294..0000000000 --- a/torchci/rockset/commons/reverted_prs_with_reason.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/reverted_prs_with_reason.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-10-17T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-10-24T00:06:32.839Z" - } - ], - "description": "Displays the PRs that were reverted and their classifications" -} \ No newline at end of file diff --git a/torchci/rockset/commons/test_time_per_class.lambda.json b/torchci/rockset/commons/test_time_per_class.lambda.json deleted file mode 100644 index 24e756d67b..0000000000 --- a/torchci/rockset/commons/test_time_per_class.lambda.json +++ /dev/null @@ -1,5 +0,0 @@ 
-{ - "sql_path": "__sql/test_time_per_class.sql", - "default_parameters": [], - "description": "Test time for each class" -} \ No newline at end of file diff --git a/torchci/rockset/commons/test_time_per_class_periodic_jobs.lambda.json b/torchci/rockset/commons/test_time_per_class_periodic_jobs.lambda.json deleted file mode 100644 index 7e8ee6b5b8..0000000000 --- a/torchci/rockset/commons/test_time_per_class_periodic_jobs.lambda.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "sql_path": "__sql/test_time_per_class_periodic_jobs.sql", - "default_parameters": [], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/test_time_per_file.lambda.json b/torchci/rockset/commons/test_time_per_file.lambda.json deleted file mode 100644 index 13160a991b..0000000000 --- a/torchci/rockset/commons/test_time_per_file.lambda.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "sql_path": "__sql/test_time_per_file.sql", - "default_parameters": [], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/test_time_per_file_periodic_jobs.lambda.json b/torchci/rockset/commons/test_time_per_file_periodic_jobs.lambda.json deleted file mode 100644 index ff58ca165c..0000000000 --- a/torchci/rockset/commons/test_time_per_file_periodic_jobs.lambda.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "sql_path": "__sql/test_time_per_file_periodic_jobs.sql", - "default_parameters": [], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/commons/unclassified.lambda.json b/torchci/rockset/commons/unclassified.lambda.json deleted file mode 100644 index 15e3fa3669..0000000000 --- a/torchci/rockset/commons/unclassified.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/unclassified.sql", - "default_parameters": [ - { - "name": "n", - "type": "int", - "value": "1000" - } - ], - "description": "up to n unclassified failed job ids from the last day" -} \ No newline at end of file diff --git a/torchci/rockset/commons/weekly_force_merge_stats.lambda.json b/torchci/rockset/commons/weekly_force_merge_stats.lambda.json deleted file mode 100644 index 1ddf685d74..0000000000 --- a/torchci/rockset/commons/weekly_force_merge_stats.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/weekly_force_merge_stats.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "week" - }, - { - "name": "merge_type", - "type": "string", - "value": " " - }, - { - "name": "one_bucket", - "type": "bool", - "value": "False" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-04-27T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2024-06-01T00:00:00.000Z" - } - ], - "description": "Force merge KPI stats for HUD" -} \ No newline at end of file diff --git a/torchci/rockset/inductor/__sql/compilers_benchmark_performance.sql b/torchci/rockset/inductor/__sql/compilers_benchmark_performance.sql deleted file mode 100644 index 20523c393d..0000000000 --- a/torchci/rockset/inductor/__sql/compilers_benchmark_performance.sql +++ /dev/null @@ -1,151 +0,0 @@ -WITH performance_results AS ( - SELECT - name, - IF(speedup = 'infra_error', NULL, speedup) AS speedup, -- Handle the recent burst of infra error - REPLACE( - filename, - CONCAT( - '_', : dtypes, '_', : mode, '_', : device, - '_performance' - ) - ) AS filename, - compilation_latency, - compression_ratio, - abs_latency, - mfu, - memory_bandwidth, - dynamo_peak_mem, - eager_peak_mem, - workflow_id, - CAST(job_id AS INT) AS job_id, - FROM - 
inductor.torch_dynamo_perf_stats_v2 - WHERE - filename LIKE CONCAT( - '%_', : dtypes, '_', : mode, '_', : device, - '_performance%' - ) - AND TIMESTAMP_MILLIS(timestamp) >= PARSE_DATETIME_ISO8601(:startTime) - AND TIMESTAMP_MILLIS(timestamp) < PARSE_DATETIME_ISO8601(:stopTime) - AND (workflow_id = :workflowId OR :workflowId = 0) -), -accuracy_results AS ( - SELECT - name, - accuracy, - REPLACE( - filename, - CONCAT( - '_', : dtypes, '_', : mode, '_', : device, - '_accuracy' - ) - ) AS filename, - workflow_id, - CAST(job_id AS INT) AS job_id, - FROM - inductor.torch_dynamo_perf_stats_v2 - WHERE - filename LIKE CONCAT( - '%_', : dtypes, '_', : mode, '_', : device, - '_accuracy%' - ) - AND TIMESTAMP_MILLIS(timestamp) >= PARSE_DATETIME_ISO8601(:startTime) - AND TIMESTAMP_MILLIS(timestamp) < PARSE_DATETIME_ISO8601(:stopTime) - AND (workflow_id = :workflowId OR :workflowId = 0) - AND accuracy != 'model_fail_to_load' - AND accuracy != 'eager_fail_to_run' -), -results AS ( - SELECT - accuracy_results.workflow_id AS workflow_id, - accuracy_results.job_id AS job_id, - CASE - WHEN accuracy_results.filename LIKE '%_torchbench' THEN 'torchbench' - WHEN accuracy_results.filename LIKE '%_timm_models' THEN 'timm_models' - WHEN accuracy_results.filename LIKE '%_huggingface' THEN 'huggingface' - ELSE NULL - END AS suite, - CASE - WHEN accuracy_results.filename LIKE '%_torchbench' THEN REPLACE( - accuracy_results.filename, '_torchbench' - ) - WHEN accuracy_results.filename LIKE '%_timm_models' THEN REPLACE( - accuracy_results.filename, '_timm_models' - ) - WHEN accuracy_results.filename LIKE '%_huggingface' THEN REPLACE( - accuracy_results.filename, '_huggingface' - ) - ELSE NULL - END AS compiler, - accuracy_results.name, - IF(TRY_CAST(speedup AS FLOAT) IS NOT NULL, - CAST(speedup AS FLOAT), - 0.0 - ) AS speedup, - accuracy, - IF(TRY_CAST(compilation_latency AS FLOAT) IS NOT NULL, - CAST(compilation_latency AS FLOAT), - 0.0 - ) AS compilation_latency, - IF(TRY_CAST(compression_ratio AS FLOAT) IS NOT NULL, - CAST(compression_ratio AS FLOAT), - 0.0 - ) AS compression_ratio, - IF(TRY_CAST(abs_latency AS FLOAT) IS NOT NULL, - CAST(abs_latency AS FLOAT), - 0.0 - ) AS abs_latency, - IF(TRY_CAST(mfu AS FLOAT) IS NOT NULL, - CAST(mfu AS FLOAT), - 0.0 - ) AS mfu, - IF(TRY_CAST(memory_bandwidth AS FLOAT) IS NOT NULL, - CAST(memory_bandwidth AS FLOAT), - 0.0 - ) AS memory_bandwidth, - IF(TRY_CAST(dynamo_peak_mem AS FLOAT) IS NOT NULL, - CAST(dynamo_peak_mem AS FLOAT), - 0.0 - ) AS dynamo_peak_mem, - IF(TRY_CAST(eager_peak_mem AS FLOAT) IS NOT NULL, - CAST(eager_peak_mem AS FLOAT), - 0.0 - ) AS eager_peak_mem, - FROM - accuracy_results - LEFT JOIN performance_results ON performance_results.name = accuracy_results.name - AND performance_results.filename = accuracy_results.filename - AND performance_results.workflow_id = accuracy_results.workflow_id -) -SELECT DISTINCT - results.workflow_id, - -- As the JSON response is pretty big, only return the field if it's needed - IF(:getJobId, results.job_id, NULL) AS job_id, - results.suite, - results.compiler, - results.name, - results.speedup, - results.accuracy, - results.compilation_latency, - results.compression_ratio, - results.abs_latency, - results.mfu, - results.memory_bandwidth, - results.dynamo_peak_mem, - results.eager_peak_mem, - FORMAT_ISO8601( - DATE_TRUNC(: granularity, w._event_time) - ) AS granularity_bucket, -FROM - results LEFT JOIN commons.workflow_run w ON results.workflow_id = w.id -WHERE - ARRAY_CONTAINS(SPLIT(:suites, ','), LOWER(results.suite)) - AND 
(ARRAY_CONTAINS(SPLIT(:compilers, ','), LOWER(results.compiler)) OR :compilers = '') - AND (ARRAY_CONTAINS(SPLIT(:branches, ','), head_branch) OR :branches = '') - AND (ARRAY_CONTAINS(SPLIT(:commits, ','), head_sha) OR :commits = '') -ORDER BY - granularity_bucket DESC, - workflow_id DESC, - suite ASC, - compiler ASC, - name ASC \ No newline at end of file diff --git a/torchci/rockset/inductor/__sql/compilers_benchmark_performance_branches.sql b/torchci/rockset/inductor/__sql/compilers_benchmark_performance_branches.sql deleted file mode 100644 index 120fd362d8..0000000000 --- a/torchci/rockset/inductor/__sql/compilers_benchmark_performance_branches.sql +++ /dev/null @@ -1,22 +0,0 @@ -SELECT - DISTINCT w.head_branch, - w.head_sha, - w.id, - FORMAT_ISO8601( - DATE_TRUNC( - 'day', TIMESTAMP_MILLIS(p.timestamp) - ) - ) AS event_time, -FROM - inductor.torch_dynamo_perf_stats_v2 AS p - LEFT JOIN commons.workflow_run w ON p.workflow_id = w.id -WHERE - TIMESTAMP_MILLIS(p.timestamp) >= PARSE_DATETIME_ISO8601(: startTime) - AND TIMESTAMP_MILLIS(p.timestamp) < PARSE_DATETIME_ISO8601(: stopTime) - AND p.filename LIKE CONCAT( - '%_', : dtypes, '_', : mode, '_', : device, - '_performance%' - ) -ORDER BY - w.head_branch, - event_time DESC diff --git a/torchci/rockset/inductor/__sql/torchao_query.sql b/torchci/rockset/inductor/__sql/torchao_query.sql deleted file mode 100644 index 0f7418ca58..0000000000 --- a/torchci/rockset/inductor/__sql/torchao_query.sql +++ /dev/null @@ -1,159 +0,0 @@ -WITH performance_results AS ( - SELECT - name, - IF(speedup = 'infra_error', NULL, speedup) AS speedup, -- Handle the recent burst of infra error - REPLACE( - filename, - CONCAT( - '_', : dtypes, '_', : mode, '_', : device, - '_performance' - ) - ) AS filename, - compilation_latency, - compression_ratio, - abs_latency, - mfu, - memory_bandwidth, - dynamo_peak_mem, - eager_peak_mem, - workflow_id, - CAST(job_id AS INT) AS job_id, - FORMAT_ISO8601( - DATE_TRUNC(: granularity, _event_time) - ) AS granularity_bucket, - head_sha, - head_branch, -FROM - inductor.torchao_perf_stats -WHERE - filename LIKE '%_performance' - AND filename LIKE CONCAT( - '%_', : dtypes, '_', : mode, '_', : device, - '_%' - ) - AND _event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND _event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND (workflow_id = :workflowId OR :workflowId = 0) -), -accuracy_results AS ( - SELECT - name, - accuracy, - REPLACE( - filename, - CONCAT( - '_', : dtypes, '_', : mode, '_', : device, - '_accuracy' - ) - ) AS filename, - workflow_id, - CAST(job_id AS INT) AS job_id, - FROM - inductor.torchao_perf_stats - WHERE - filename LIKE '%_accuracy' - AND filename LIKE CONCAT( - '%_', : dtypes, '_', : mode, '_', : device, - '_%' - ) - AND _event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND _event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND (workflow_id = :workflowId OR :workflowId = 0) - AND accuracy != 'model_fail_to_load' - AND accuracy != 'eager_fail_to_run' -), -results AS ( - SELECT - performance_results.granularity_bucket AS granularity_bucket, - performance_results.workflow_id AS workflow_id, - performance_results.job_id AS job_id, - performance_results.head_branch AS head_branch, - performance_results.head_sha AS head_sha, - CASE - WHEN performance_results.filename LIKE '%_torchbench' THEN 'torchbench' - WHEN performance_results.filename LIKE '%_timm_models' THEN 'timm_models' - WHEN performance_results.filename LIKE '%_huggingface' THEN 'huggingface' - ELSE NULL - END AS suite, - CASE - WHEN 
performance_results.filename LIKE '%_torchbench' THEN REPLACE( - performance_results.filename, '_torchbench' - ) - WHEN performance_results.filename LIKE '%_timm_models' THEN REPLACE( - performance_results.filename, '_timm_models' - ) - WHEN performance_results.filename LIKE '%_huggingface' THEN REPLACE( - performance_results.filename, '_huggingface' - ) - ELSE NULL - END AS compiler, - performance_results.name, - IF(TRY_CAST(speedup AS FLOAT) IS NOT NULL, - CAST(speedup AS FLOAT), - 0.0 - ) AS speedup, - accuracy_results.accuracy AS accuracy, - IF(TRY_CAST(compilation_latency AS FLOAT) IS NOT NULL, - CAST(compilation_latency AS FLOAT), - 0.0 - ) AS compilation_latency, - IF(TRY_CAST(compression_ratio AS FLOAT) IS NOT NULL, - CAST(compression_ratio AS FLOAT), - 0.0 - ) AS compression_ratio, - IF(TRY_CAST(abs_latency AS FLOAT) IS NOT NULL, - CAST(abs_latency AS FLOAT), - 0.0 - ) AS abs_latency, - IF(TRY_CAST(mfu AS FLOAT) IS NOT NULL, - CAST(mfu AS FLOAT), - 0.0 - ) AS mfu, - IF(TRY_CAST(memory_bandwidth AS FLOAT) IS NOT NULL, - CAST(memory_bandwidth AS FLOAT), - 0.0 - ) AS memory_bandwidth, - IF(TRY_CAST(dynamo_peak_mem AS FLOAT) IS NOT NULL, - CAST(dynamo_peak_mem AS FLOAT), - 0.0 - ) AS dynamo_peak_mem, - IF(TRY_CAST(eager_peak_mem AS FLOAT) IS NOT NULL, - CAST(eager_peak_mem AS FLOAT), - 0.0 - ) AS eager_peak_mem, - FROM - performance_results - LEFT JOIN accuracy_results ON performance_results.name = accuracy_results.name - AND performance_results.filename = accuracy_results.filename - AND performance_results.workflow_id = accuracy_results.workflow_id -) -SELECT DISTINCT - results.workflow_id, - -- As the JSON response is pretty big, only return the field if it's needed - IF(:getJobId, results.job_id, NULL) AS job_id, - results.suite, - results.compiler, - results.name, - results.speedup, - results.accuracy, - results.compilation_latency, - results.compression_ratio, - results.abs_latency, - results.mfu, - results.memory_bandwidth, - results.dynamo_peak_mem, - results.eager_peak_mem, - results.granularity_bucket, -FROM - results -WHERE - ARRAY_CONTAINS(SPLIT(:suites, ','), LOWER(results.suite)) - AND (ARRAY_CONTAINS(SPLIT(:compilers, ','), LOWER(results.compiler)) OR :compilers = '') - AND (ARRAY_CONTAINS(SPLIT(:branches, ','), results.head_branch) OR :branches = '') - AND (ARRAY_CONTAINS(SPLIT(:commits, ','), results.head_sha) OR :commits = '') -ORDER BY - granularity_bucket DESC, - workflow_id DESC, - suite ASC, - compiler ASC, - name ASC \ No newline at end of file diff --git a/torchci/rockset/inductor/__sql/torchao_query_branches.sql b/torchci/rockset/inductor/__sql/torchao_query_branches.sql deleted file mode 100644 index 1659e54a98..0000000000 --- a/torchci/rockset/inductor/__sql/torchao_query_branches.sql +++ /dev/null @@ -1,21 +0,0 @@ -SELECT - DISTINCT head_branch, - head_sha, - FORMAT_ISO8601( - DATE_TRUNC( - : granularity, _event_time - ) - ) AS event_time, -FROM - inductor.torchao_perf_stats -WHERE - torchao_perf_stats._event_time >= PARSE_DATETIME_ISO8601(: startTime) - AND torchao_perf_stats._event_time < PARSE_DATETIME_ISO8601(: stopTime) - AND torchao_perf_stats.filename LIKE '%_performance' - AND torchao_perf_stats.filename LIKE CONCAT( - '%_', : dtypes, '_', : mode, '_', : device, - '_%' - ) -ORDER BY - head_branch, - event_time DESC diff --git a/torchci/rockset/inductor/compilers_benchmark_performance.lambda.json b/torchci/rockset/inductor/compilers_benchmark_performance.lambda.json deleted file mode 100644 index 2eca213252..0000000000 --- 
a/torchci/rockset/inductor/compilers_benchmark_performance.lambda.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "sql_path": "__sql/compilers_benchmark_performance.sql", - "default_parameters": [ - { - "name": "branches", - "type": "string", - "value": "main,master" - }, - { - "name": "commits", - "type": "string", - "value": "" - }, - { - "name": "compilers", - "type": "string", - "value": "" - }, - { - "name": "device", - "type": "string", - "value": "cuda" - }, - { - "name": "dtypes", - "type": "string", - "value": "amp" - }, - { - "name": "getJobId", - "type": "bool", - "value": "false" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "mode", - "type": "string", - "value": "training" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-04-01T00:00:00.00Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-05-01T00:00:00.00Z" - }, - { - "name": "suites", - "type": "string", - "value": "torchbench,huggingface,timm_models" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - }, - { - "name": "workflowId", - "type": "int", - "value": "0" - } - ], - "description": "Gather the benchmark performance of different PyTorch compilers" -} \ No newline at end of file diff --git a/torchci/rockset/inductor/compilers_benchmark_performance_branches.lambda.json b/torchci/rockset/inductor/compilers_benchmark_performance_branches.lambda.json deleted file mode 100644 index 607ba1f124..0000000000 --- a/torchci/rockset/inductor/compilers_benchmark_performance_branches.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/compilers_benchmark_performance_branches.sql", - "default_parameters": [ - { - "name": "device", - "type": "string", - "value": "cuda" - }, - { - "name": "dtypes", - "type": "string", - "value": "amp" - }, - { - "name": "mode", - "type": "string", - "value": "training" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-02-01T00:00:00.00Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-04-01T00:00:00.00Z" - } - ], - "description": "Get all the branches that run the benchmark " -} \ No newline at end of file diff --git a/torchci/rockset/inductor/torchao_query.lambda.json b/torchci/rockset/inductor/torchao_query.lambda.json deleted file mode 100644 index e998fd8c34..0000000000 --- a/torchci/rockset/inductor/torchao_query.lambda.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "sql_path": "__sql/torchao_query.sql", - "default_parameters": [ - { - "name": "branches", - "type": "string", - "value": "main" - }, - { - "name": "commits", - "type": "string", - "value": "" - }, - { - "name": "compilers", - "type": "string", - "value": "" - }, - { - "name": "device", - "type": "string", - "value": "cuda" - }, - { - "name": "dtypes", - "type": "string", - "value": "bfloat16" - }, - { - "name": "getJobId", - "type": "bool", - "value": "false" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "mode", - "type": "string", - "value": "inference" - }, - { - "name": "startTime", - "type": "string", - "value": "2024-06-01T00:00:00.00Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2024-06-08T00:00:00.00Z" - }, - { - "name": "suites", - "type": "string", - "value": "torchbench,huggingface,timm_models" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - }, - { - "name": "workflowId", - "type": "int", - "value": "0" - } - ], - "description": "TorchAO Query" -} \ No newline at end of 
file diff --git a/torchci/rockset/inductor/torchao_query_branches.lambda.json b/torchci/rockset/inductor/torchao_query_branches.lambda.json deleted file mode 100644 index 79428ba5e2..0000000000 --- a/torchci/rockset/inductor/torchao_query_branches.lambda.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sql_path": "__sql/torchao_query_branches.sql", - "default_parameters": [ - { - "name": "device", - "type": "string", - "value": "cuda" - }, - { - "name": "dtypes", - "type": "string", - "value": "bfloat16" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "mode", - "type": "string", - "value": "inference" - }, - { - "name": "startTime", - "type": "string", - "value": "2024-06-01T00:00:00.00Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2024-06-06T00:00:00.00Z" - } - ], - "description": "TorchAO Query for commit branches" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/__sql/correlation_matrix.sql b/torchci/rockset/metrics/__sql/correlation_matrix.sql deleted file mode 100644 index 204a2d626d..0000000000 --- a/torchci/rockset/metrics/__sql/correlation_matrix.sql +++ /dev/null @@ -1,55 +0,0 @@ -WITH all_jobs AS ( - SELECT - CONCAT( - workflow.name, - ' / ', - ELEMENT_AT(SPLIT(job.name, ' / '), 1), - IF( - job.name LIKE '%/%', - CONCAT(' / ', ELEMENT_AT(SPLIT(ELEMENT_AT(SPLIT(job.name, ' / '), 2), ', '), 1)), - '' - ) - ) AS name, - workflow.head_sha, - CASE - WHEN job.conclusion = 'failure' THEN 0 - WHEN job.conclusion = 'timed_out' THEN 0 - WHEN job.conclusion = 'cancelled' THEN 0 - WHEN job.conclusion IS NULL THEN NULL - WHEN job.conclusion = 'skipped' THEN NULL - ELSE 1 - END AS is_green, - workflow._event_time AS event_time, - ROW_NUMBER() OVER(PARTITION BY job.name, push.head_commit.id ORDER BY job.run_attempt DESC) AS attempt, -FROM - workflow_run workflow - INNER JOIN commons.workflow_job job ON workflow.id = job.run_id - INNER JOIN push on workflow.head_commit.id = push.head_commit.id -WHERE - job._event_time > CURRENT_TIMESTAMP() - INTERVAL 21 DAY - AND ARRAY_CONTAINS(SPLIT(:workflowNames, ','), LOWER(workflow.name)) - AND workflow.event != 'workflow_run' - AND push.ref = 'refs/heads/master' - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND job.name LIKE '%test%' - AND job.name NOT LIKE '%filter%' - AND job.name NOT LIKE '%rerun_disabled_tests%' -) -SELECT - IF (name LIKE '%(%' AND name NOT LIKE '%)%', CONCAT(name, ')'), name) AS name, - head_sha, - is_green, - event_time, -FROM - all_jobs -WHERE - is_green IS NOT NULL - AND attempt = 1 -GROUP BY - name, - head_sha, - is_green, - event_time -ORDER BY - event_time DESC diff --git a/torchci/rockset/metrics/__sql/disabled_test_historical.sql b/torchci/rockset/metrics/__sql/disabled_test_historical.sql deleted file mode 100644 index 83e5d8469b..0000000000 --- a/torchci/rockset/metrics/__sql/disabled_test_historical.sql +++ /dev/null @@ -1,180 +0,0 @@ ---- This query returns the number of new disabled tests (number_of_new_disabled_tests) ---- and the number of open disabled tests (number_of_open_disabled_tests) daily -WITH issues_with_labels AS ( - SELECT - i.title, - i.body, - ARRAY_AGG(labels.value.name) AS labels, - i.created_at, - i.closed_at - FROM - commons.issues i, - UNNEST (i.labels AS value) AS labels - WHERE - i.repository_url = CONCAT( - 'https://api.github.com/repos/', - : repo - ) - AND i.title LIKE '%DISABLED%' - AND ( - : platform = '' - OR i.body LIKE CONCAT('%', : platform, '%') - OR (NOT i.body LIKE 
'%Platforms: %') - ) - GROUP BY - i.title, - i.body, - i.created_at, - i.closed_at -), --- There could be days when no new issues are created or no issues are closed, and we want --- the count on those days to be 0 -buckets AS ( - SELECT - DATE_TRUNC( - : granularity, - CAST(i.created_at AS TIMESTAMP) AT TIME ZONE : timezone - ) AS granularity_bucket - FROM - commons.issues i - WHERE - i.created_at IS NOT NULL - UNION - SELECT - DATE_TRUNC( - : granularity, - CAST(i.closed_at AS TIMESTAMP) AT TIME ZONE : timezone - ) AS granularity_bucket - FROM - commons.issues i - WHERE - i.closed_at IS NOT NULL -), --- Count the newly created disabled tests -raw_new_disabled_tests AS ( - SELECT - DATE_TRUNC( - : granularity, - CAST(i.created_at AS TIMESTAMP) AT TIME ZONE : timezone - ) AS granularity_bucket, - COUNT(i.title) AS number_of_new_disabled_tests, - FROM - issues_with_labels i - WHERE - ARRAY_CONTAINS(i.labels, 'skipped') - AND ( - : label = '' - OR ARRAY_CONTAINS(i.labels, : label) - ) - AND ( - : triaged = '' - OR ( - : triaged = 'yes' - AND ARRAY_CONTAINS(i.labels, 'triaged') - ) - OR ( - : triaged = 'no' - AND NOT ARRAY_CONTAINS(i.labels, 'triaged') - ) - ) - GROUP BY - granularity_bucket -), -new_disabled_tests AS ( - SELECT - buckets.granularity_bucket, - COALESCE(number_of_new_disabled_tests, 0) AS number_of_new_disabled_tests, - FROM - buckets - LEFT JOIN raw_new_disabled_tests ON buckets.granularity_bucket = raw_new_disabled_tests.granularity_bucket -), -aggregated_new_disabled_tests AS ( - SELECT - granularity_bucket, - number_of_new_disabled_tests, - SUM(number_of_new_disabled_tests) OVER ( - ORDER BY - granularity_bucket - ) AS total_number_of_new_disabled_tests - FROM - new_disabled_tests -), --- Count the closed disabled tests -raw_closed_disabled_tests AS ( - SELECT - DATE_TRUNC( - : granularity, - CAST(i.closed_at AS TIMESTAMP) AT TIME ZONE : timezone - ) AS granularity_bucket, - COUNT(i.title) AS number_of_closed_disabled_tests, - FROM - issues_with_labels i - WHERE - i.closed_at IS NOT NULL - AND ARRAY_CONTAINS(i.labels, 'skipped') - AND ( - : label = '' - OR ARRAY_CONTAINS(i.labels, : label) - ) - AND ( - : triaged = '' - OR ( - : triaged = 'yes' - AND ARRAY_CONTAINS(i.labels, 'triaged') - ) - OR ( - : triaged = 'no' - AND NOT ARRAY_CONTAINS(i.labels, 'triaged') - ) - ) - GROUP BY - granularity_bucket -), -closed_disabled_tests AS ( - SELECT - buckets.granularity_bucket, - COALESCE( - number_of_closed_disabled_tests, - 0 - ) AS number_of_closed_disabled_tests, - FROM - buckets - LEFT JOIN raw_closed_disabled_tests ON buckets.granularity_bucket = raw_closed_disabled_tests.granularity_bucket -), -aggregated_closed_disabled_tests AS ( - SELECT - granularity_bucket, - number_of_closed_disabled_tests, - SUM( - number_of_closed_disabled_tests - ) OVER ( - ORDER BY - granularity_bucket - ) AS total_number_of_closed_disabled_tests - FROM - closed_disabled_tests -), --- The final aggregated count -aggregated_disabled_tests AS ( - SELECT - FORMAT_ISO8601( - aggregated_new_disabled_tests.granularity_bucket - ) AS granularity_bucket, - number_of_new_disabled_tests, - number_of_closed_disabled_tests, - total_number_of_new_disabled_tests, - total_number_of_closed_disabled_tests, - total_number_of_new_disabled_tests - total_number_of_closed_disabled_tests AS number_of_open_disabled_tests - FROM - aggregated_new_disabled_tests - LEFT JOIN aggregated_closed_disabled_tests ON aggregated_new_disabled_tests.granularity_bucket = aggregated_closed_disabled_tests.granularity_bucket -) -SELECT - * 
-FROM - aggregated_disabled_tests -WHERE - PARSE_DATETIME_ISO8601(granularity_bucket) >= PARSE_DATETIME_ISO8601(: startTime) - AND PARSE_DATETIME_ISO8601(granularity_bucket) < PARSE_DATETIME_ISO8601(: stopTime) -ORDER BY - granularity_bucket DESC \ No newline at end of file diff --git a/torchci/rockset/metrics/__sql/disabled_test_total.sql b/torchci/rockset/metrics/__sql/disabled_test_total.sql deleted file mode 100644 index 58b53e2038..0000000000 --- a/torchci/rockset/metrics/__sql/disabled_test_total.sql +++ /dev/null @@ -1,7 +0,0 @@ -SELECT - COUNT(issues.title) as number_of_open_disabled_tests, -FROM - commons.issues -WHERE - issues.title LIKE '%DISABLED%' - AND issues.state = :state diff --git a/torchci/rockset/metrics/__sql/external_contribution_stats.sql b/torchci/rockset/metrics/__sql/external_contribution_stats.sql deleted file mode 100644 index 7c3baf65eb..0000000000 --- a/torchci/rockset/metrics/__sql/external_contribution_stats.sql +++ /dev/null @@ -1,25 +0,0 @@ -WITH rolling_average_table as ( - SELECT - FORMAT_ISO8601( - CAST(date as date) - ) AS granularity_bucket, - -- weekly granularity with a 4 week rolling average - TRUNC(SUM(pr_count) - OVER(ORDER BY date ROWS 27 PRECEDING),1)/4 - AS weekly_pr_count_rolling_average, - TRUNC(LENGTH(ARRAY_DISTINCT(ARRAY_FLATTEN(ARRAY_AGG(users) - OVER(ORDER BY date ROWS 27 PRECEDING)))),1)/4 as weekly_user_count_rolling_average, -FROM - metrics.external_contribution_stats - WHERE CAST(date as date) >= PARSE_DATETIME_ISO8601(:startTime) - DAYS(28) - AND CAST(date as date) < PARSE_DATETIME_ISO8601(:stopTime) -) -SELECT -granularity_bucket, -weekly_pr_count_rolling_average AS pr_count, -weekly_user_count_rolling_average AS user_count, -FROM -rolling_average_table -WHERE CAST(granularity_bucket as date) >= PARSE_DATETIME_ISO8601(:startTime) - AND CAST(granularity_bucket as date) < PARSE_DATETIME_ISO8601(:stopTime) - AND (DATE_DIFF('DAY', CAST(granularity_bucket as date), CAST(PARSE_DATETIME_ISO8601(:startTime) as date)) % 7) = 0 \ No newline at end of file diff --git a/torchci/rockset/metrics/__sql/get_workers_on_period.sql b/torchci/rockset/metrics/__sql/get_workers_on_period.sql deleted file mode 100644 index 76ebb2fbbe..0000000000 --- a/torchci/rockset/metrics/__sql/get_workers_on_period.sql +++ /dev/null @@ -1,16 +0,0 @@ -WITH workers AS ( - SELECT - DISTINCT(qts.machine_type) as machine_type, - FROM - metrics.queue_times_24h_stats qts - WHERE - qts._event_time >= PARSE_DATETIME_ISO8601(:startTime) AT TIME ZONE :timezone - AND qts._event_time < PARSE_DATETIME_ISO8601(:stopTime) AT TIME ZONE :timezone -) -SELECT - w.machine_type -FROM - workers w -ORDER BY - w.machine_type ASC -; diff --git a/torchci/rockset/metrics/__sql/job_duration_avg.sql b/torchci/rockset/metrics/__sql/job_duration_avg.sql deleted file mode 100644 index cf0f169f57..0000000000 --- a/torchci/rockset/metrics/__sql/job_duration_avg.sql +++ /dev/null @@ -1,32 +0,0 @@ -SELECT - AVG( - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(job.started_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) - ) as duration_sec, - COUNT(*) as count, - CONCAT(workflow.name, ' / ', job.name) as name -FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id -WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND workflow.repository.full_name = 'pytorch/pytorch' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND job.conclusion = 'success' - AND 
workflow.head_branch LIKE :branch - AND workflow.run_attempt = 1 -GROUP BY - name -ORDER BY - COUNT(*) * AVG( - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(job.started_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) - ) DESC diff --git a/torchci/rockset/metrics/__sql/job_duration_percentile.sql b/torchci/rockset/metrics/__sql/job_duration_percentile.sql deleted file mode 100644 index 3fcff3a23b..0000000000 --- a/torchci/rockset/metrics/__sql/job_duration_percentile.sql +++ /dev/null @@ -1,38 +0,0 @@ -SELECT - max(duration_sec) AS duration_sec, - COUNT(name) AS count, - name -FROM ( - SELECT - duration_sec, - name, - PERCENT_RANK() OVER (PARTITION BY name ORDER BY duration_sec DESC) AS percentile - FROM ( - SELECT - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(job.started_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) AS duration_sec, - CONCAT(workflow.name, ' / ', job.name) as name - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND job.name != 'get_workflow_conclusion' - AND workflow.repository.full_name = 'pytorch/pytorch' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND job.conclusion = 'success' - AND workflow.head_branch LIKE :branch - AND workflow.run_attempt = 1 - ) AS duration -) AS p -WHERE - (SELECT NOT IS_NAN(p.percentile) AND p.percentile >= (1.0 - :percentile)) -GROUP BY - name -ORDER BY - COUNT(name) * MAX(duration_sec) DESC diff --git a/torchci/rockset/metrics/__sql/last_branch_push.sql b/torchci/rockset/metrics/__sql/last_branch_push.sql deleted file mode 100644 index d07377cc43..0000000000 --- a/torchci/rockset/metrics/__sql/last_branch_push.sql +++ /dev/null @@ -1,13 +0,0 @@ -select - DATE_DIFF('second', push._event_time, CURRENT_TIMESTAMP()) as push_seconds_ago -from - push -where - push.ref = :branch - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push.head_commit is not null -order by - push._event_time desc -limit - 1 diff --git a/torchci/rockset/metrics/__sql/last_successful_jobs.sql b/torchci/rockset/metrics/__sql/last_successful_jobs.sql deleted file mode 100644 index 84891844c2..0000000000 --- a/torchci/rockset/metrics/__sql/last_successful_jobs.sql +++ /dev/null @@ -1,40 +0,0 @@ -with successful_jobs as ( - select - DATE_DIFF( - 'second', - job._event_time, - CURRENT_TIMESTAMP() - ) as last_success_seconds_ago, - job.head_sha, - job.name name - from - workflow_job job - JOIN workflow_run workflow on workflow.id = job.run_id - where - workflow.repository.full_name = 'pytorch/pytorch' - AND workflow.head_branch IN ('master', 'main') - AND job.conclusion = 'success' - AND ARRAY_CONTAINS(SPLIT(:jobNames, ';'), job.name) - order by - job._event_time desc -), -successful_commits as ( - select - min(last_success_seconds_ago) seconds_ago, - count(DISTINCT name) distinct_names, - head_sha - from - successful_jobs - group by - head_sha -) -select - seconds_ago as last_success_seconds_ago -from - successful_commits -where - distinct_names >= LENGTH(SPLIT(:jobNames, ';')) -order by - seconds_ago -limit - 1 diff --git a/torchci/rockset/metrics/__sql/last_successful_workflow.sql b/torchci/rockset/metrics/__sql/last_successful_workflow.sql deleted file mode 100644 index 432c47746d..0000000000 --- a/torchci/rockset/metrics/__sql/last_successful_workflow.sql +++ /dev/null @@ -1,19 +0,0 @@ -select - DATE_DIFF( - 
'second', - workflow._event_time, - CURRENT_TIMESTAMP() - ) as last_success_seconds_ago -from - workflow_run workflow - JOIN push on workflow.head_commit.id = push.head_commit.id -where - push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND workflow.conclusion = 'success' - AND workflow.name = :workflowName -order by - workflow._event_time desc -LIMIT - 1 diff --git a/torchci/rockset/metrics/__sql/lf_rollover_health.sql b/torchci/rockset/metrics/__sql/lf_rollover_health.sql deleted file mode 100644 index 20e7e3c50a..0000000000 --- a/torchci/rockset/metrics/__sql/lf_rollover_health.sql +++ /dev/null @@ -1,143 +0,0 @@ - -WITH normalized_jobs AS ( - SELECT - j.started_at, - ROUND(DATE_DIFF('MINUTE', PARSE_TIMESTAMP_ISO8601(j.started_at), PARSE_TIMESTAMP_ISO8601(j.completed_at)), 1) as duration_min, - if( - strpos(l.label, 'amz2023.') = 0, - l.label, - CONCAT( - substr(l.label, 1, strpos(l.label, 'amz2023.') - 1), - substr(l.label, length('amz2023.') + strpos(l.label, 'amz2023.')) - ) - ) as label, - REGEXP_EXTRACT(j.name, '([^,]*),?', 1) as job_name, -- remove shard number and label from job names - j.workflow_name, - j.conclusion, - DATE_TRUNC(:granularity, PARSE_TIMESTAMP_ISO8601(j.started_at)) - AS bucket, - FROM - commons.workflow_job j - CROSS JOIN UNNEST(j.labels as label) as l - WHERE 1=1 - AND j.labels is not NULL - AND j._event_time > CURRENT_DATETIME() - DAYS(:days_ago) - AND j.status = 'completed' - AND l.label != 'self-hosted' - AND l.label not like 'lf.c.%' - AND l.label not like '%canary%' - -), migrated_jobs AS ( - SELECT DISTINCT - j.job_name - FROM - normalized_jobs j - WHERE 1=1 - AND j.label like 'lf%' -), comparable_jobs AS ( - SELECT - -- count(*) - j.bucket, - j.started_at, - j.duration_min,-- -- j.completed_at, - j.label, - j.job_name, -- remove shard number and label from job names - j.workflow_name, - j.conclusion, - FROM - normalized_jobs j - CROSS JOIN migrated_jobs mj - WHERE 1 = 1 - AND j.job_name = mj.job_name - -- AND STRPOS(j.name, mj.job_clean) > 0 - -), success_stats AS ( - SELECT - bucket, - count(*) as group_size, - job_name, - workflow_name, - label, - IF(SUBSTR(label, 1, 3) = 'lf.', True, False ) as lf_fleet, - SUM( - CASE - WHEN conclusion = 'success' THEN 1 - ELSE 0 - END - ) * 100 / (COUNT_IF(conclusion != 'cancelled') + 1) as success_rate, -- plus one is to handle divide by zero errors - SUM( - CASE - WHEN conclusion = 'failure' THEN 1 - ELSE 0 - END - ) * 100 / (COUNT_IF(conclusion != 'cancelled') + 1) as failure_rate, - SUM( - CASE - WHEN conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) * 100 / COUNT(*) as cancelled_rate, -- cancelled rate is calculated over all jobs - SUM( - CASE - WHEN conclusion = 'success' THEN 1 - ELSE 0 - END - ) as success_count, - SUM( - CASE - WHEN conclusion = 'failure' THEN 1 - ELSE 0 - END - ) as failure_count, - SUM( - CASE - WHEN conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) as cancelled_count, - COUNT(*) as total_count, - SUM( - CASE - WHEN conclusion = 'success' THEN duration_min - ELSE 0 - END - ) / COUNT(*) as success_avg_duration, - SUM( - CASE - WHEN conclusion = 'failure' THEN duration_min - ELSE 0 - END - ) / COUNT(*) as failure_avg_duration, - SUM( - CASE - WHEN conclusion = 'cancelled' THEN duration_min - ELSE 0 - END - ) / COUNT(*) as cancelled_avg_duration, - - FROM comparable_jobs - GROUP BY - bucket, job_name, workflow_name, label -), comparison_stats AS ( - SELECT - lf.bucket, - lf.workflow_name, - 
lf.job_name, - lf.group_size as sample_size_lf, - m.group_size as sample_size_meta, - lf.success_rate - m.success_rate as success_rate_delta, - lf.failure_rate - m.failure_rate as failure_rate_delta, - lf.cancelled_rate - m.cancelled_rate as cancelled_rate_delta, - IF(m.success_avg_duration = 0, 1, ROUND(lf.success_avg_duration * 1.0 / m.success_avg_duration, 2)) as success_duration_increase_ratio, - FROM success_stats lf - INNER JOIN success_stats m on lf.bucket = m.bucket - WHERE 1 = 1 - AND lf.job_name = m.job_name - AND lf.workflow_name = m.workflow_name - AND lf.lf_fleet = True - AND m.lf_fleet = False - AND lf.group_size > 3 - AND m.group_size > 3 -) -SELECT * from comparison_stats -ORDER by bucket desc, job_name desc, success_rate_delta, workflow_name diff --git a/torchci/rockset/metrics/__sql/lf_rollover_percentage.sql b/torchci/rockset/metrics/__sql/lf_rollover_percentage.sql deleted file mode 100644 index 54e4f98a02..0000000000 --- a/torchci/rockset/metrics/__sql/lf_rollover_percentage.sql +++ /dev/null @@ -1,113 +0,0 @@ -WITH - normalized_jobs AS ( - SELECT - if( - strpos(l.label, 'amz2023.') = 0, - l.label, - CONCAT( - substr(l.label, 1, strpos(l.label, 'amz2023.') - 1), - substr( - l.label, - length('amz2023.') + strpos(l.label, 'amz2023.') - ) - ) - ) as label, - REGEXP_EXTRACT(j.name, '([^,]*),?', 1) as job_name, - -- remove shard number and label from job names - j.workflow_name, - DATE_TRUNC( - :granularity, - PARSE_TIMESTAMP_ISO8601(j.started_at) - ) AS bucket, - FROM - commons.workflow_job j - CROSS JOIN UNNEST(j.labels as label) as l - WHERE - 1 = 1 - AND j.labels is not NULL - AND j._event_time > CURRENT_DATETIME() - DAYS(:days_ago) - AND j.status = 'completed' - AND l.label != 'self-hosted' - AND l.label not like 'lf.c.%' - AND l.label not like '%canary%' - ), - migrated_jobs AS ( - SELECT - DISTINCT j.job_name - FROM - normalized_jobs j - WHERE - 1 = 1 - AND j.label like 'lf%' - ), - comparable_jobs AS ( - SELECT - j.bucket, - j.label, - j.job_name, - -- remove shard number and label from job names - j.workflow_name, - FROM - normalized_jobs j - CROSS JOIN migrated_jobs mj - WHERE - 1 = 1 - AND j.job_name = mj.job_name -- AND STRPOS(j.name, mj.job_clean) > 0 - ), - success_stats AS ( - SELECT - bucket, - count(*) as group_size, - job_name, - workflow_name, - label, - IF(SUBSTR(label, 1, 3) = 'lf.', True, False) as lf_fleet, - FROM - comparable_jobs - GROUP BY - bucket, - job_name, - workflow_name, - label - ), - comparison_stats AS ( - SELECT - lf.bucket, - SUM(lf.group_size + m.group_size) as total_jobs, - SUM(m.group_size) as compliment_jobs, - SUM(lf.group_size) as counted_jobs, - m.lf_fleet as c_fleet, - lf.lf_fleet as m_fleet, - CAST(SUM(lf.group_size) as FLOAT) / SUM(lf.group_size + m.group_size) * 100 as percentage, - IF(lf.lf_fleet, 'Linux Foundation', 'Meta') as fleet - FROM - success_stats lf - INNER JOIN success_stats m on lf.bucket = m.bucket - WHERE - 1 = 1 - AND lf.job_name = m.job_name - AND lf.workflow_name = m.workflow_name - AND ( - ( - lf.lf_fleet = True - AND m.lf_fleet = False - ) - OR ( - lf.lf_fleet = False - AND m.lf_fleet = True - ) - ) - AND lf.group_size > 3 - AND m.group_size > 3 - GROUP BY - lf.bucket, - lf.lf_fleet, - m.lf_fleet - ) -SELECT - * -from - comparison_stats -ORDER BY - bucket DESC --- ORDER by bucket desc, job_name desc, success_rate_delta, workflow_name diff --git a/torchci/rockset/metrics/__sql/log_captures_count.sql b/torchci/rockset/metrics/__sql/log_captures_count.sql deleted file mode 100644 index 5fecb087bc..0000000000 
--- a/torchci/rockset/metrics/__sql/log_captures_count.sql +++ /dev/null @@ -1,20 +0,0 @@ -select - COUNT(*) as num, - ARBITRARY(j.torchci_classification.line) as example, - j.torchci_classification.captures as captures, - ARRAY_JOIN(j.torchci_classification.captures, '%') as search_string -from - workflow_job j - join workflow_run w on w.id = j.run_id -where - j._event_time >= PARSE_TIMESTAMP_ISO8601(:startTime) - and j._event_time < PARSE_TIMESTAMP_ISO8601(:stopTime) - and w.head_branch = 'main' - and w.head_repository.full_name = 'pytorch/pytorch' - and j.conclusion in ('cancelled', 'failure', 'time_out') - AND w.event != 'workflow_run' - AND w.event != 'repository_dispatch' -group by - j.torchci_classification.captures -order by - COUNT(*) desc diff --git a/torchci/rockset/metrics/__sql/master_commit_red.sql b/torchci/rockset/metrics/__sql/master_commit_red.sql deleted file mode 100644 index e6148952e0..0000000000 --- a/torchci/rockset/metrics/__sql/master_commit_red.sql +++ /dev/null @@ -1,88 +0,0 @@ -with commit_overall_conclusion as ( - SELECT - time, - sha, - CASE - WHEN COUNT_IF(conclusion = 'red') > 0 THEN 'red' - WHEN COUNT_IF(conclusion = 'pending') > 0 THEN 'pending' - ELSE 'green' - END as overall_conclusion - FROM - ( - SELECT - push._event_time as time, - CASE - WHEN job.conclusion = 'failure' THEN 'red' - WHEN job.conclusion = 'timed_out' THEN 'red' - WHEN job.conclusion = 'cancelled' THEN 'red' - WHEN job.conclusion IS NULL THEN 'pending' - ELSE 'green' - END as conclusion, - push.head_commit.id as sha, - FROM - commons.workflow_job job - JOIN ( - commons.workflow_run workflow - JOIN push on workflow.head_commit.id = push.head_commit.id - ) on workflow.id = job.run_id HINT(join_strategy = lookup) - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND ( - -- Limit it to workflows which block viable/strict upgrades - workflow.name in ('Lint', 'pull', 'trunk') - OR workflow.name like 'linux-binary%' - ) - AND job.name NOT LIKE '%rerun_disabled_tests%' - AND job.name NOT LIKE '%unstable%' - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) - UNION ALL - SELECT - push._event_time as time, - CASE - WHEN job.job.status = 'failed' THEN 'red' - WHEN job.job.status = 'timed_out' THEN 'red' - WHEN job.job.status = 'canceled' THEN 'red' - WHEN job.job.status IS NULL THEN 'pending' - ELSE 'green' - END as conclusion, - push.head_commit.id as sha, - FROM - circleci.job job - JOIN push on job.pipeline.vcs.revision = push.head_commit.id - WHERE - push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) - ) as all_job - GROUP BY - time, - sha - HAVING - COUNT(*) > 10 -- Filter out jobs that didn't run anything. 
- ORDER BY - time DESC -) -SELECT - FORMAT_TIMESTAMP( - '%Y-%m-%d', - DATE_TRUNC('hour', time), - :timezone - ) AS granularity_bucket, - COUNT_IF(overall_conclusion = 'red') AS red, - COUNT_IF(overall_conclusion = 'pending') AS pending, - COUNT_IF(overall_conclusion = 'green') AS green, - COUNT(*) as total, -FROM - commit_overall_conclusion -GROUP BY - granularity_bucket -ORDER BY - granularity_bucket ASC diff --git a/torchci/rockset/metrics/__sql/master_commit_red_avg.sql b/torchci/rockset/metrics/__sql/master_commit_red_avg.sql deleted file mode 100644 index a9561a6f9b..0000000000 --- a/torchci/rockset/metrics/__sql/master_commit_red_avg.sql +++ /dev/null @@ -1,81 +0,0 @@ -WITH all_jobs AS ( - SELECT - job.conclusion AS conclusion, - push.head_commit.id AS sha, - ROW_NUMBER() OVER(PARTITION BY job.name, push.head_commit.id ORDER BY job.run_attempt DESC) AS row_num, - FROM - commons.workflow_job job - JOIN ( - push - JOIN commons.workflow_run workflow ON workflow.head_commit.id = push.head_commit.id - ) ON workflow.id = job.run_id HINT(join_strategy = lookup) - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND ( -- Limit it to workflows which block viable/strict upgrades - ARRAY_CONTAINS(SPLIT(:workflowNames, ','), LOWER(workflow.name)) - OR workflow.name like 'linux-binary%' - ) - AND job.name NOT LIKE '%rerun_disabled_tests%' - AND job.name NOT LIKE '%mem_leak_check%' - AND job.name NOT LIKE '%unstable%' - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) - UNION ALL - SELECT - CASE - WHEN job.job.status = 'failed' then 'failure' - WHEN job.job.status = 'canceled' then 'cancelled' - ELSE job.job.status - END AS conclusion, - push.head_commit.id AS sha, - ROW_NUMBER() OVER(PARTITION BY job.name, push.head_commit.id ORDER BY job.run_attempt DESC) AS row_num, - FROM - circleci.job job - JOIN push ON job.pipeline.vcs.revision = push.head_commit.id - WHERE - push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) -), -all_reds AS ( - SELECT - CAST( - SUM( - CASE - WHEN conclusion = 'failure' THEN 1 - WHEN conclusion = 'timed_out' THEN 1 - WHEN conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) > 0 AS int - ) AS any_red, - CAST( - SUM( - CASE - WHEN conclusion = 'failure' AND row_num = 1 THEN 1 - WHEN conclusion = 'timed_out' AND row_num = 1 THEN 1 - WHEN conclusion = 'cancelled' AND row_num = 1 THEN 1 - ELSE 0 - END - ) > 0 AS int - ) AS broken_trunk_red, - FROM - all_jobs - GROUP BY - sha - HAVING - COUNT(sha) > 10 -- Filter out jobs that didn't run anything. - AND SUM(IF(conclusion IS NULL, 1, 0)) = 0 -- Filter out commits that still have pending jobs. 
-) -SELECT - AVG(broken_trunk_red) AS broken_trunk_red, - AVG(any_red) - AVG(broken_trunk_red) AS flaky_red, -FROM - all_reds diff --git a/torchci/rockset/metrics/__sql/master_commit_red_percent.sql b/torchci/rockset/metrics/__sql/master_commit_red_percent.sql deleted file mode 100644 index cc8fe9c30e..0000000000 --- a/torchci/rockset/metrics/__sql/master_commit_red_percent.sql +++ /dev/null @@ -1,115 +0,0 @@ -WITH all_jobs AS ( - SELECT - push._event_time as time, - job.conclusion AS conclusion, - push.head_commit.id AS sha, - ROW_NUMBER() OVER(PARTITION BY job.name, push.head_commit.id ORDER BY job.run_attempt DESC) AS row_num, - FROM - push - JOIN commons.workflow_run workflow ON workflow.head_commit.id = push.head_commit.id - JOIN commons.workflow_job job ON workflow.id = job.run_id - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND ( -- Limit it to workflows which block viable/strict upgrades - ARRAY_CONTAINS(SPLIT(:workflowNames, ','), LOWER(workflow.name)) - OR workflow.name like 'linux-binary%' - ) - AND job.name NOT LIKE '%rerun_disabled_tests%' - AND job.name NOT LIKE '%unstable%' - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) - UNION ALL - SELECT - push._event_time as time, - CASE - WHEN job.job.status = 'failed' then 'failure' - WHEN job.job.status = 'canceled' then 'cancelled' - ELSE job.job.status - END AS conclusion, - push.head_commit.id AS sha, - ROW_NUMBER() OVER(PARTITION BY job.name, push.head_commit.id ORDER BY job.run_attempt DESC) AS row_num, - FROM - circleci.job job - JOIN push ON job.pipeline.vcs.revision = push.head_commit.id - WHERE - push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) -), -any_red AS ( - SELECT - FORMAT_TIMESTAMP('%Y-%m-%d', DATE_TRUNC(:granularity, time)) AS granularity_bucket, - sha, - CAST( - SUM( - CASE - WHEN conclusion = 'failure' THEN 1 - WHEN conclusion = 'timed_out' THEN 1 - WHEN conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) > 0 AS int - ) AS all_red, - CAST( - SUM( - CASE - WHEN conclusion = 'failure' AND row_num = 1 THEN 1 - WHEN conclusion = 'timed_out' AND row_num = 1 THEN 1 - WHEN conclusion = 'cancelled' AND row_num = 1 THEN 1 - ELSE 0 - END - ) > 0 AS int - ) AS broken_trunk_red, - FROM - all_jobs - GROUP BY - granularity_bucket, - sha - HAVING - count(sha) > 10 -- Filter out jobs that didn't run anything. - AND SUM(IF(conclusion is NULL, 1, 0)) = 0 -- Filter out commits that still have pending jobs. 
-), -classified_red AS ( - SELECT - granularity_bucket, - ARRAY_CREATE( - ARRAY_CREATE('Broken trunk', AVG(broken_trunk_red)), - ARRAY_CREATE('Flaky', AVG(all_red) - AVG(broken_trunk_red)), - ARRAY_CREATE('Total', AVG(all_red)) - ) AS metrics, - FROM - any_red - GROUP BY - granularity_bucket -), -avg_red AS ( - SELECT - classified_red.granularity_bucket, - ELEMENT_AT(metrics.metric, 1) AS name, - ELEMENT_AT(metrics.metric, 2) AS metric, - FROM - classified_red - CROSS JOIN UNNEST(classified_red.metrics AS metric) AS metrics - ORDER BY - granularity_bucket DESC -) -SELECT - granularity_bucket, - name, - -- 2 week rolling average - ( - SUM(metric) OVER( - PARTITION BY name - ORDER BY - granularity_bucket ROWS 1 PRECEDING - ) - ) / 2.0 AS metric, -FROM - avg_red \ No newline at end of file diff --git a/torchci/rockset/metrics/__sql/master_commit_red_percent_groups.sql b/torchci/rockset/metrics/__sql/master_commit_red_percent_groups.sql deleted file mode 100644 index 51ba666eb8..0000000000 --- a/torchci/rockset/metrics/__sql/master_commit_red_percent_groups.sql +++ /dev/null @@ -1,77 +0,0 @@ -WITH all_jobs AS ( - SELECT - push._event_time AS time, - job.conclusion AS conclusion, - push.head_commit.id AS sha, - CONCAT( - workflow.name, - ' / ', - ELEMENT_AT(SPLIT(job.name, ' / '), 1), - CONCAT(' / ', ELEMENT_AT(SPLIT(ELEMENT_AT(SPLIT(job.name, ' / '), 2), ', '), 1)) - ) AS name, - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow ON workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND job.name NOT LIKE '%rerun_disabled_tests%' - AND job.name NOT LIKE '%filter%' - AND job.name NOT LIKE '%unstable%' - AND job.name LIKE '%/%' - AND ARRAY_CONTAINS(SPLIT(:workflowNames, ','), LOWER(workflow.name)) - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND push.ref = 'refs/heads/main' - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) -), -reds AS( - SELECT - time, - sha, - IF (name LIKE '%(%' AND name NOT LIKE '%)%', CONCAT(name, ')'), name) AS name, - CAST( - SUM( - CASE - WHEN conclusion = 'failure' THEN 1 - WHEN conclusion = 'timed_out' THEN 1 - WHEN conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) > 0 AS int - ) AS any_red, - COUNT(*) AS c - FROM - all_jobs - GROUP BY - time, - sha, - name - HAVING - COUNT(*) >= 1 -- Filter out jobs that didn't run anything. - AND SUM(IF(conclusion IS NULL, 1, 0)) = 0 -- Filter out commits that still have pending jobs. 
- ORDER BY - time DESC -), -reds_percentage AS ( - SELECT - FORMAT_TIMESTAMP('%Y-%m-%d', DATE_TRUNC(:granularity, time)) AS granularity_bucket, - name, - ROUND(AVG(any_red) * 100, 2) AS red, - FROM - reds - GROUP BY - granularity_bucket, - name -) -SELECT - * -FROM - reds_percentage -WHERE - red > 0 -ORDER BY - name ASC diff --git a/torchci/rockset/metrics/__sql/master_jobs_red.sql b/torchci/rockset/metrics/__sql/master_jobs_red.sql deleted file mode 100644 index 64981bf6da..0000000000 --- a/torchci/rockset/metrics/__sql/master_jobs_red.sql +++ /dev/null @@ -1,54 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC(:granularity, time) - ) AS granularity_bucket, - AVG( - CASE - when conclusion = 'failure' THEN 1 - when conclusion = 'timed_out' THEN 1 - when conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) as red, -FROM - ( - SELECT - job._event_time AT TIME ZONE :timezone as time, - job.conclusion as conclusion, - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND job.name NOT LIKE '%rerun_disabled_tests%' - AND job.name NOT LIKE '%unstable%' - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - UNION ALL - SELECT - job._event_time AT TIME ZONE :timezone as time, - case - WHEN job.job.status = 'failed' then 'failure' - WHEN job.job.status = 'canceled' then 'cancelled' - else job.job.status - END as conclusion, - FROM - circleci.job job - JOIN push on job.pipeline.vcs.revision = push.head_commit.id - WHERE - push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - ) as all_job -GROUP BY - DATE_TRUNC(:granularity, time) -ORDER BY - DATE_TRUNC(:granularity, time) ASC diff --git a/torchci/rockset/metrics/__sql/master_jobs_red_avg.sql b/torchci/rockset/metrics/__sql/master_jobs_red_avg.sql deleted file mode 100644 index a15202e331..0000000000 --- a/torchci/rockset/metrics/__sql/master_jobs_red_avg.sql +++ /dev/null @@ -1,47 +0,0 @@ -SELECT - AVG( - CASE - when conclusion = 'failure' THEN 1 - when conclusion = 'timed_out' THEN 1 - when conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) as red, -FROM - ( - SELECT - job._event_time as time, - job.conclusion as conclusion, - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND job.name NOT LIKE '%rerun_disabled_tests%' - AND job.name NOT LIKE '%unstable%' - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - UNION ALL - SELECT - 
job._event_time as time, - case - WHEN job.job.status = 'failed' then 'failure' - WHEN job.job.status = 'canceled' then 'cancelled' - else job.job.status - END as conclusion, - FROM - circleci.job job - JOIN push on job.pipeline.vcs.revision = push.head_commit.id - WHERE - push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - ) as all_job diff --git a/torchci/rockset/metrics/__sql/number_of_force_pushes.sql b/torchci/rockset/metrics/__sql/number_of_force_pushes.sql deleted file mode 100644 index 05b11f2f0e..0000000000 --- a/torchci/rockset/metrics/__sql/number_of_force_pushes.sql +++ /dev/null @@ -1,11 +0,0 @@ -SELECT - COUNT(DISTINCT issue_comment.issue_url) AS count -FROM - commons.issue_comment -WHERE - issue_comment.body LIKE '%@pytorchbot merge -f%' - AND _event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND _event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND issue_comment.user.login NOT LIKE '%pytorch-bot%' - AND issue_comment.user.login NOT LIKE '%facebook-github-bot%' - AND issue_comment.user.login NOT LIKE '%pytorchmergebot%' diff --git a/torchci/rockset/metrics/__sql/queue_times_historical.sql b/torchci/rockset/metrics/__sql/queue_times_historical.sql deleted file mode 100644 index ee4483ce8c..0000000000 --- a/torchci/rockset/metrics/__sql/queue_times_historical.sql +++ /dev/null @@ -1,23 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC( - :granularity, - q.time AT TIME ZONE :timezone - ) - ) AS granularity_bucket, - /* misnomer, this is the max queue time, not the avg queue time */ - AVG(q.avg_queue_s) as avg_queue_s, - q.machine_type, -FROM - metrics.queue_times_historical q -WHERE - q.time >= PARSE_DATETIME_ISO8601(:startTime) AT TIME ZONE :timezone - AND q.time < PARSE_DATETIME_ISO8601(:stopTime) AT TIME ZONE :timezone -GROUP BY - granularity_bucket, - q.machine_type -HAVING - /* filter out weird GH API bugs */ - AVG(q.count) > 5 -ORDER BY - granularity_bucket ASC diff --git a/torchci/rockset/metrics/__sql/queue_times_historical_pct.sql b/torchci/rockset/metrics/__sql/queue_times_historical_pct.sql deleted file mode 100644 index 0af7703810..0000000000 --- a/torchci/rockset/metrics/__sql/queue_times_historical_pct.sql +++ /dev/null @@ -1,23 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC( - 'hour', - q._event_time AT TIME ZONE :timezone - ) - ) AS granularity_bucket, - q.queue_s_max, - q.queue_s_p99, - q.queue_s_p95, - q.queue_s_p90, - q.queue_s_p80, - q.queue_s_p50, - q.queue_s_avg, - q.machine_type -FROM - metrics.queue_times_24h_stats q -WHERE - q._event_time >= DATE_TRUNC('hour', PARSE_DATETIME_ISO8601(:startTime) AT TIME ZONE :timezone) - AND q._event_time < DATE_TRUNC('hour', PARSE_DATETIME_ISO8601(:stopTime) AT TIME ZONE :timezone) - AND ARRAY_CONTAINS(SPLIT(:workersTypes, ','), q.machine_type) -ORDER BY - granularity_bucket, machine_type ASC diff --git a/torchci/rockset/metrics/__sql/queued_jobs.sql b/torchci/rockset/metrics/__sql/queued_jobs.sql deleted file mode 100644 index 3f00853296..0000000000 --- a/torchci/rockset/metrics/__sql/queued_jobs.sql +++ /dev/null @@ -1,36 +0,0 @@ ---- This query is used by HUD metrics page to get the list of queued jobs -SELECT - DATE_DIFF( - 'second', - job._event_time, - CURRENT_TIMESTAMP() - ) AS queue_s, - CONCAT(workflow.name, ' / ', job.name) AS name, - job.html_url, - IF( - LENGTH(job.labels) = 0, - 'N/A', - IF( - 
LENGTH(job.labels) > 1, - ELEMENT_AT(job.labels, 2), - ELEMENT_AT(job.labels, 1) - ) - ) AS machine_type, -FROM - commons.workflow_job job - JOIN commons.workflow_run workflow ON workflow.id = job.run_id -WHERE - workflow.repository.full_name = 'pytorch/pytorch' - AND job.status = 'queued' - AND job._event_time < ( - CURRENT_TIMESTAMP() - INTERVAL 5 MINUTE - ) - /* These two conditions are workarounds for GitHub's broken API. Sometimes */ - /* jobs get stuck in a permanently "queued" state but definitely ran. We can */ - /* detect this by looking at whether any steps executed (if there were, */ - /* obviously the job started running), and whether the workflow was marked as */ - /* complete (somehow more reliable than the job-level API) */ - AND LENGTH(job.steps) = 0 - AND workflow.status != 'completed' -ORDER BY - queue_s DESC \ No newline at end of file diff --git a/torchci/rockset/metrics/__sql/queued_jobs_by_label.sql b/torchci/rockset/metrics/__sql/queued_jobs_by_label.sql deleted file mode 100644 index 8f5674ac1e..0000000000 --- a/torchci/rockset/metrics/__sql/queued_jobs_by_label.sql +++ /dev/null @@ -1,57 +0,0 @@ ---- This query is used by HUD metrics page to get the list of queued jobs grouped by their labels -WITH queued_jobs as ( - SELECT - DATE_DIFF( - 'second', - job._event_time, - CURRENT_TIMESTAMP() - ) AS queue_s, - CONCAT(workflow.name, ' / ', job.name) AS name, - job.html_url, - IF( - LENGTH(job.labels) = 0, - IF ( - job.runner_group_name IS NOT null - AND job.runner_group_name != 'Default' - AND job.runner_group_name != 'GitHub Actions' - AND job.runner_group_name != '' - AND job.runner_group_name != 'linux.rocm.gpu.group', - job.runner_group_name, - 'N/A' - ), - IF( - LENGTH(job.labels) > 1, - ELEMENT_AT(job.labels, 2), - ELEMENT_AT(job.labels, 1) - ) - ) AS machine_type, - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow ON workflow.id = job.run_id - WHERE - workflow.repository.full_name = 'pytorch/pytorch' - AND job.status = 'queued' - AND job._event_time < ( - CURRENT_TIMESTAMP() - INTERVAL 5 MINUTE - ) - /* These two conditions are workarounds for GitHub's broken API. Sometimes */ - /* jobs get stuck in a permanently "queued" state but definitely ran. 
We can */ - /* detect this by looking at whether any steps executed (if there were, */ - /* obviously the job started running), and whether the workflow was marked as */ - /* complete (somehow more reliable than the job-level API) */ - AND LENGTH(job.steps) = 0 - AND workflow.status != 'completed' - ORDER BY - queue_s DESC -) -SELECT - COUNT(*) AS count, - MAX(queue_s) AS avg_queue_s, - machine_type, - CURRENT_TIMESTAMP() AS time -FROM - queued_jobs -GROUP BY - machine_type -ORDER BY - count DESC diff --git a/torchci/rockset/metrics/__sql/reverts.sql b/torchci/rockset/metrics/__sql/reverts.sql deleted file mode 100644 index 17ca26d31e..0000000000 --- a/torchci/rockset/metrics/__sql/reverts.sql +++ /dev/null @@ -1,14 +0,0 @@ -SELECT - COUNT(*) as num -FROM - push -WHERE - push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND ( - push.head_commit.message LIKE 'Revert %' - OR push.head_commit.message LIKE 'Back out%' - ) - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) diff --git a/torchci/rockset/metrics/__sql/strict_lag_sec.sql b/torchci/rockset/metrics/__sql/strict_lag_sec.sql deleted file mode 100644 index 84a257c9e3..0000000000 --- a/torchci/rockset/metrics/__sql/strict_lag_sec.sql +++ /dev/null @@ -1,34 +0,0 @@ -WITH master as ( - SELECT - PARSE_TIMESTAMP_ISO8601(push.head_commit.timestamp) as master - FROM - push - WHERE - push.ref = :head - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = :repo - AND push.head_commit is not null - ORDER BY - push._event_time desc - LIMIT - 1 -), strict as ( - SELECT - PARSE_TIMESTAMP_ISO8601(push.head_commit.timestamp) as strict - FROM - push - WHERE - push.ref = 'refs/heads/viable/strict' - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = :repo - AND push.head_commit is not null - ORDER BY - push._event_time desc - LIMIT - 1 -) -SELECT - DATE_DIFF('second', strict, master) as strict_lag_sec -FROM - master, - strict diff --git a/torchci/rockset/metrics/__sql/top_reds.sql b/torchci/rockset/metrics/__sql/top_reds.sql deleted file mode 100644 index c70d30a4c0..0000000000 --- a/torchci/rockset/metrics/__sql/top_reds.sql +++ /dev/null @@ -1,75 +0,0 @@ -WITH all_jobs AS ( - SELECT - push._event_time AS time, - job.conclusion AS conclusion, - push.head_commit.id AS sha, - CONCAT( - workflow.name, - ' / ', - ELEMENT_AT(SPLIT(job.name, ' / '), 1), - CONCAT(' / ', ELEMENT_AT(SPLIT(ELEMENT_AT(SPLIT(job.name, ' / '), 2), ', '), 1)) - ) AS name, - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow ON workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND job.name NOT LIKE '%rerun_disabled_tests%' - AND job.name NOT LIKE '%filter%' - AND job.name NOT LIKE '%unstable%' - AND job.name LIKE '%/%' - AND ARRAY_CONTAINS(SPLIT(:workflowNames, ','), LOWER(workflow.name)) - AND workflow.event != 'workflow_run' -- Filter out workflow_run-triggered jobs, which have nothing to do with the SHA - AND push.ref = 'refs/heads/main' - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) -), -reds AS ( - SELECT - time, - sha, - IF (name LIKE '%(%' AND name NOT LIKE '%)%', CONCAT(name, ')'), name) AS 
name, - CAST( - SUM( - CASE - WHEN conclusion = 'failure' THEN 1 - WHEN conclusion = 'timed_out' THEN 1 - WHEN conclusion = 'cancelled' THEN 1 - ELSE 0 - END - ) > 0 AS int - ) AS any_red, - COUNT(*) AS c - FROM - all_jobs - GROUP BY - time, - sha, - name - HAVING - COUNT(*) >= 1 -- Filter out jobs that didn't run anything. - AND SUM(IF(conclusion IS NULL, 1, 0)) = 0 -- Filter out commits that still have pending jobs. - ORDER BY - time DESC -), -reds_percentage AS ( - SELECT - name, - ROUND(AVG(any_red) * 100, 2) AS red, - FROM - reds - GROUP BY - name -) -SELECT - * -FROM - reds_percentage -WHERE - red > 0 -ORDER BY - red DESC diff --git a/torchci/rockset/metrics/__sql/tts_avg.sql b/torchci/rockset/metrics/__sql/tts_avg.sql deleted file mode 100644 index 3c38b35298..0000000000 --- a/torchci/rockset/metrics/__sql/tts_avg.sql +++ /dev/null @@ -1,32 +0,0 @@ -SELECT - AVG( - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(workflow.created_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) - ) as tts_sec, - COUNT(*) as count, - CONCAT(workflow.name, ' / ', job.name) as name -FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id -WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND workflow.repository.full_name = 'pytorch/pytorch' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND job.conclusion = 'success' - AND workflow.head_branch LIKE :branch - AND workflow.run_attempt = 1 -GROUP BY - name -ORDER BY - COUNT(*) * AVG( - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(workflow.created_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) - ) DESC diff --git a/torchci/rockset/metrics/__sql/tts_duration_historical.sql b/torchci/rockset/metrics/__sql/tts_duration_historical.sql deleted file mode 100644 index 61f73ab8a1..0000000000 --- a/torchci/rockset/metrics/__sql/tts_duration_historical.sql +++ /dev/null @@ -1,32 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC( - :granularity, - job._event_time AT TIME ZONE :timezone - ) - ) AS granularity_bucket, - AVG(DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(workflow.created_at) AT TIME ZONE :timezone, - PARSE_TIMESTAMP_ISO8601(job.completed_at) AT TIME ZONE :timezone - )) as tts_avg_sec, - AVG(DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(job.started_at) AT TIME ZONE :timezone, - PARSE_TIMESTAMP_ISO8601(job.completed_at) AT TIME ZONE :timezone - )) as duration_avg_sec, - CONCAT(workflow.name, ' / ', job.name) as full_name, -FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id -WHERE - job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND ARRAY_CONTAINS(SPLIT(:workflowNames, ','), workflow.name) - AND workflow.head_branch LIKE 'main' - AND workflow.run_attempt = 1 -GROUP BY - granularity_bucket, - full_name -ORDER BY - full_name ASC diff --git a/torchci/rockset/metrics/__sql/tts_duration_historical_percentile.sql b/torchci/rockset/metrics/__sql/tts_duration_historical_percentile.sql deleted file mode 100644 index 639d9efa37..0000000000 --- a/torchci/rockset/metrics/__sql/tts_duration_historical_percentile.sql +++ /dev/null @@ -1,50 +0,0 @@ -SELECT - granularity_bucket, - MAX(tts_sec) AS tts_percentile_sec, - MAX(duration_sec) AS duration_percentile_sec, - full_name -FROM ( - SELECT - granularity_bucket, - tts_sec, - PERCENT_RANK() OVER (PARTITION BY full_name ORDER BY tts_sec DESC) AS 
tts_percentile, - duration_sec, - PERCENT_RANK() OVER (PARTITION BY full_name ORDER BY duration_sec DESC) AS duration_percentile, - full_name, - FROM ( - SELECT - FORMAT_ISO8601( - DATE_TRUNC( - :granularity, - job._event_time AT TIME ZONE :timezone - ) - ) AS granularity_bucket, - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(workflow.created_at) AT TIME ZONE :timezone, - PARSE_TIMESTAMP_ISO8601(job.completed_at) AT TIME ZONE :timezone - ) AS tts_sec, - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(job.started_at) AT TIME ZONE :timezone, - PARSE_TIMESTAMP_ISO8601(job.completed_at) AT TIME ZONE :timezone - ) AS duration_sec, - CONCAT(workflow.name, ' / ', job.name) as full_name - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - WHERE - job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND ARRAY_CONTAINS(SPLIT(:workflowNames, ','), workflow.name) - AND workflow.head_branch LIKE :branch - AND workflow.run_attempt = 1 - ) AS tts_duration -) AS p -WHERE - (SELECT p.tts_percentile >= (1.0 - :percentile) OR p.duration_percentile >= (1.0 - :percentile)) -GROUP BY - granularity_bucket, - full_name -ORDER BY - full_name ASC diff --git a/torchci/rockset/metrics/__sql/tts_percentile.sql b/torchci/rockset/metrics/__sql/tts_percentile.sql deleted file mode 100644 index 6a3946e4e5..0000000000 --- a/torchci/rockset/metrics/__sql/tts_percentile.sql +++ /dev/null @@ -1,48 +0,0 @@ -SELECT - max(tts_sec) AS tts_sec, - COUNT(name) AS count, - name -FROM - ( - SELECT - tts_sec, - name, - PERCENT_RANK() OVER ( - PARTITION BY name - ORDER BY - tts_sec DESC - ) AS percentile - FROM - ( - SELECT - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(workflow.created_at), - PARSE_TIMESTAMP_ISO8601(job.completed_at) - ) AS tts_sec, - CONCAT(workflow.name, ' / ', job.name) as name - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - WHERE - job.name != 'ciflow_should_run' - AND job.name != 'generate-test-matrix' - AND job.name != 'get_workflow_conclusion' - AND workflow.repository.full_name = 'pytorch/pytorch' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND job.conclusion = 'success' - AND workflow.head_branch LIKE :branch - AND workflow.run_attempt = 1 - ) AS tts - ) AS p -WHERE - ( - NOT IS_NAN(p.percentile) - AND p.percentile >= (1.0 - :percentile) - ) -GROUP BY - name -ORDER BY - COUNT(name) * MAX(tts_sec) DESC - diff --git a/torchci/rockset/metrics/__sql/workflow_duration_avg.sql b/torchci/rockset/metrics/__sql/workflow_duration_avg.sql deleted file mode 100644 index b36dab8ba2..0000000000 --- a/torchci/rockset/metrics/__sql/workflow_duration_avg.sql +++ /dev/null @@ -1,21 +0,0 @@ -SELECT - AVG( - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(workflow.created_at), - PARSE_TIMESTAMP_ISO8601(workflow.updated_at) - ) - ) as duration_sec, - name -FROM - commons.workflow_run workflow -WHERE - conclusion = 'success' - AND ARRAY_CONTAINS(SPLIT(:workflowNames, ','), LOWER(workflow.name)) - AND workflow._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND workflow._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND workflow.run_attempt = 1 -GROUP BY - workflow.name -ORDER BY - duration_sec DESC diff --git a/torchci/rockset/metrics/__sql/workflow_duration_percentile.sql b/torchci/rockset/metrics/__sql/workflow_duration_percentile.sql deleted file mode 100644 index 
a8972cba2d..0000000000 --- a/torchci/rockset/metrics/__sql/workflow_duration_percentile.sql +++ /dev/null @@ -1,31 +0,0 @@ -SELECT - duration_sec, - name, -FROM ( - SELECT - tts.*, - PERCENT_RANK() OVER (PARTITION BY name ORDER BY duration_sec DESC) AS percentile - FROM ( - SELECT - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(workflow.created_at), - PARSE_TIMESTAMP_ISO8601(workflow.updated_at) - ) as duration_sec, - name, - FROM - commons.workflow_run workflow - WHERE - conclusion = 'success' - AND ARRAY_CONTAINS(SPLIT(:workflowNames, ','), LOWER(workflow.name)) - AND PARSE_DATETIME_ISO8601(workflow.created_at) >= PARSE_DATETIME_ISO8601(:startTime) - AND PARSE_DATETIME_ISO8601(workflow.created_at) < PARSE_DATETIME_ISO8601(:stopTime) - AND workflow.run_attempt = 1 - ) AS tts -) AS p -WHERE - percentile >= (1.0 - :percentile) -ORDER BY - duration_sec DESC -LIMIT - 1 diff --git a/torchci/rockset/metrics/__sql/workflow_load.sql b/torchci/rockset/metrics/__sql/workflow_load.sql deleted file mode 100644 index f33ed7f00d..0000000000 --- a/torchci/rockset/metrics/__sql/workflow_load.sql +++ /dev/null @@ -1,33 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC( - :granularity, - PARSE_TIMESTAMP_ISO8601(workflow.created_at) AT TIME ZONE :timezone - ) - ) AS granularity_bucket, - workflow.name, - COUNT(*) as count, -FROM - workflow_run workflow -WHERE - PARSE_TIMESTAMP_ISO8601(workflow.created_at) >= PARSE_DATETIME_ISO8601(:startTime) - AND PARSE_TIMESTAMP_ISO8601(workflow.created_at) < PARSE_DATETIME_ISO8601(:stopTime) - AND workflow.name IN ( - 'pull', - 'trunk', - 'nightly', - 'periodic', - 'inductor', - 'inductor-periodic', - 'inductor-A100-perf-compare', - 'inductor-A100-perf-nightly', - 'inductor-cu124', - 'rocm', - 'inductor-rocm' - ) - AND workflow.repository.full_name like :repo -GROUP BY - granularity_bucket, - workflow.name -ORDER BY - count DESC diff --git a/torchci/rockset/metrics/correlation_matrix.lambda.json b/torchci/rockset/metrics/correlation_matrix.lambda.json deleted file mode 100644 index 35bee193b5..0000000000 --- a/torchci/rockset/metrics/correlation_matrix.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/correlation_matrix.sql", - "default_parameters": [ - { - "name": "workflowNames", - "type": "string", - "value": "pull,trunk,periodic" - } - ], - "description": "" -} diff --git a/torchci/rockset/metrics/disabled_test_historical.lambda.json b/torchci/rockset/metrics/disabled_test_historical.lambda.json deleted file mode 100644 index 7dfe0c41d4..0000000000 --- a/torchci/rockset/metrics/disabled_test_historical.lambda.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "sql_path": "__sql/disabled_test_historical.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "label", - "type": "string", - "value": "" - }, - { - "name": "platform", - "type": "string", - "value": "" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-07-01T00:00:00.000Z" - }, - { - "name": "state", - "type": "string", - "value": "open" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-12-01T00:00:00.000Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - }, - { - "name": "triaged", - "type": "string", - "value": "" - } - ], - "description": "Count the number of open disabled tests over time" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/disabled_test_total.lambda.json 
b/torchci/rockset/metrics/disabled_test_total.lambda.json deleted file mode 100644 index 78bbe5b2dd..0000000000 --- a/torchci/rockset/metrics/disabled_test_total.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/disabled_test_total.sql", - "default_parameters": [ - { - "name": "state", - "type": "string", - "value": "open" - } - ], - "description": "Return the total number of disabled tests" -} diff --git a/torchci/rockset/metrics/external_contribution_stats.lambda.json b/torchci/rockset/metrics/external_contribution_stats.lambda.json deleted file mode 100644 index de388a4fcf..0000000000 --- a/torchci/rockset/metrics/external_contribution_stats.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/external_contribution_stats.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-06-01T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-03-07T00:06:32.839Z" - } - ], - "description": "PR count and number of unique external contributors per day" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/get_workers_on_period.lambda.json b/torchci/rockset/metrics/get_workers_on_period.lambda.json deleted file mode 100644 index 5cca9a3c9d..0000000000 --- a/torchci/rockset/metrics/get_workers_on_period.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/get_workers_on_period.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-05-13T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-05-21T00:06:32.839Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "List the workers available in a given period" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/job_duration_avg.lambda.json b/torchci/rockset/metrics/job_duration_avg.lambda.json deleted file mode 100644 index 17879cc086..0000000000 --- a/torchci/rockset/metrics/job_duration_avg.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/job_duration_avg.sql", - "default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "%" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-02-22T00:08:03.395Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-03-01T00:08:03.395Z" - } - ], - "description": "" -} diff --git a/torchci/rockset/metrics/job_duration_percentile.lambda.json b/torchci/rockset/metrics/job_duration_percentile.lambda.json deleted file mode 100644 index be96ce000b..0000000000 --- a/torchci/rockset/metrics/job_duration_percentile.lambda.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "sql_path": "__sql/job_duration_percentile.sql", - "default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "%" - }, - { - "name": "percentile", - "type": "float", - "value": "0.9" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-07-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-08-01T00:00:00.000Z" - } - ], - "description": "Query job duration at different percentiles" -} - diff --git a/torchci/rockset/metrics/last_branch_push.lambda.json b/torchci/rockset/metrics/last_branch_push.lambda.json deleted file mode 100644 index 0a6715b245..0000000000 --- a/torchci/rockset/metrics/last_branch_push.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/last_branch_push.sql", - 
"default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "refs/heads/main" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/last_successful_jobs.lambda.json b/torchci/rockset/metrics/last_successful_jobs.lambda.json deleted file mode 100644 index 978b47dda6..0000000000 --- a/torchci/rockset/metrics/last_successful_jobs.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/last_successful_jobs.sql", - "default_parameters": [ - { - "name": "jobNames", - "type": "string", - "value": "docs push / build-docs (python, 30);docs push / build-docs (cpp, 180)" - } - ], - "description": "given a semicolon-separated list of jobs, return the number of seconds since master built all the jobs successfully" -} diff --git a/torchci/rockset/metrics/last_successful_workflow.lambda.json b/torchci/rockset/metrics/last_successful_workflow.lambda.json deleted file mode 100644 index 82c847ed43..0000000000 --- a/torchci/rockset/metrics/last_successful_workflow.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/last_successful_workflow.sql", - "default_parameters": [ - { - "name": "workflowName", - "type": "string", - "value": "docker-builds" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/lf_rollover_health.lambda.json b/torchci/rockset/metrics/lf_rollover_health.lambda.json deleted file mode 100644 index 039fc23029..0000000000 --- a/torchci/rockset/metrics/lf_rollover_health.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/lf_rollover_health.sql", - "default_parameters": [ - { - "name": "days_ago", - "type": "int", - "value": "14" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/lf_rollover_percentage.lambda.json b/torchci/rockset/metrics/lf_rollover_percentage.lambda.json deleted file mode 100644 index 0c963034a3..0000000000 --- a/torchci/rockset/metrics/lf_rollover_percentage.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/lf_rollover_percentage.sql", - "default_parameters": [ - { - "name": "days_ago", - "type": "int", - "value": "14" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/log_captures_count.lambda.json b/torchci/rockset/metrics/log_captures_count.lambda.json deleted file mode 100644 index f7f63cb5d9..0000000000 --- a/torchci/rockset/metrics/log_captures_count.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/log_captures_count.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-10-01T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-11-01T00:06:32.839Z" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/master_commit_red.lambda.json b/torchci/rockset/metrics/master_commit_red.lambda.json deleted file mode 100644 index aa855004ad..0000000000 --- a/torchci/rockset/metrics/master_commit_red.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/master_commit_red.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-10-12T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-10-21T00:06:32.839Z" - }, 
- { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/master_commit_red_avg.lambda.json b/torchci/rockset/metrics/master_commit_red_avg.lambda.json deleted file mode 100644 index 8303ff0cae..0000000000 --- a/torchci/rockset/metrics/master_commit_red_avg.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/master_commit_red_avg.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2023-03-20T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-03-27T00:00:00.000Z" - }, - { - "name": "workflowNames", - "type": "string", - "value": "lint,pull,trunk" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/master_commit_red_percent.lambda.json b/torchci/rockset/metrics/master_commit_red_percent.lambda.json deleted file mode 100644 index da927725c4..0000000000 --- a/torchci/rockset/metrics/master_commit_red_percent.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/master_commit_red_percent.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "week" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-02-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-03-30T00:00:00.000Z" - }, - { - "name": "workflowNames", - "type": "string", - "value": "lint,pull,trunk" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/master_commit_red_percent_groups.lambda.json b/torchci/rockset/metrics/master_commit_red_percent_groups.lambda.json deleted file mode 100644 index 6d9eb8301a..0000000000 --- a/torchci/rockset/metrics/master_commit_red_percent_groups.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/master_commit_red_percent_groups.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-04-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-05-01T00:00:00.000Z" - }, - { - "name": "workflowNames", - "type": "string", - "value": "lint,pull,trunk" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/master_jobs_red.lambda.json b/torchci/rockset/metrics/master_jobs_red.lambda.json deleted file mode 100644 index ead290c253..0000000000 --- a/torchci/rockset/metrics/master_jobs_red.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/master_jobs_red.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "hour" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-02-09T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-02-16T00:06:32.839Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/master_jobs_red_avg.lambda.json b/torchci/rockset/metrics/master_jobs_red_avg.lambda.json deleted file mode 100644 index 419daa47b4..0000000000 --- a/torchci/rockset/metrics/master_jobs_red_avg.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/master_jobs_red_avg.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-02-09T00:06:32.839Z" - }, - { - "name": 
"stopTime", - "type": "string", - "value": "2022-02-16T00:06:32.839Z" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/number_of_force_pushes.lambda.json b/torchci/rockset/metrics/number_of_force_pushes.lambda.json deleted file mode 100644 index 9852247c2d..0000000000 --- a/torchci/rockset/metrics/number_of_force_pushes.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/number_of_force_pushes.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-07-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-08-01T00:00:00.000Z" - } - ], - "description": "Count the number of force pushes" -} diff --git a/torchci/rockset/metrics/queue_times_historical.lambda.json b/torchci/rockset/metrics/queue_times_historical.lambda.json deleted file mode 100644 index 7e76894ccc..0000000000 --- a/torchci/rockset/metrics/queue_times_historical.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/queue_times_historical.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "hour" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-05-13T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-05-21T00:06:32.839Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/queue_times_historical_pct.lambda.json b/torchci/rockset/metrics/queue_times_historical_pct.lambda.json deleted file mode 100644 index f3ca47029a..0000000000 --- a/torchci/rockset/metrics/queue_times_historical_pct.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/queue_times_historical_pct.sql", - "default_parameters": [ - { - "name": "pctile", - "type": "string", - "value": "queue_s_p50" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-05-13T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-05-21T00:06:32.839Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - }, - { - "name": "workersTypes", - "type": "string", - "value": "all" - } - ], - "description": "get computed statistics for a set of runner types in a time range" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/queued_jobs.lambda.json b/torchci/rockset/metrics/queued_jobs.lambda.json deleted file mode 100644 index 295e0a2539..0000000000 --- a/torchci/rockset/metrics/queued_jobs.lambda.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "sql_path": "__sql/queued_jobs.sql", - "default_parameters": [], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/queued_jobs_by_label.lambda.json b/torchci/rockset/metrics/queued_jobs_by_label.lambda.json deleted file mode 100644 index 1a77c67f68..0000000000 --- a/torchci/rockset/metrics/queued_jobs_by_label.lambda.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "sql_path": "__sql/queued_jobs_by_label.sql", - "default_parameters": [], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/reverts.lambda.json b/torchci/rockset/metrics/reverts.lambda.json deleted file mode 100644 index 11e7adcb4b..0000000000 --- a/torchci/rockset/metrics/reverts.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/reverts.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-02-09T00:06:32.839Z" - 
}, - { - "name": "stopTime", - "type": "string", - "value": "2022-02-16T00:06:32.839Z" - } - ], - "description": "" -} diff --git a/torchci/rockset/metrics/strict_lag_sec.lambda.json b/torchci/rockset/metrics/strict_lag_sec.lambda.json deleted file mode 100644 index 09f88f6f61..0000000000 --- a/torchci/rockset/metrics/strict_lag_sec.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/strict_lag_sec.sql", - "default_parameters": [ - { - "name": "head", - "type": "string", - "value": "refs/heads/main" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch" - } - ], - "description": "How many seconds viable/strict lags behind master" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/top_reds.lambda.json b/torchci/rockset/metrics/top_reds.lambda.json deleted file mode 100644 index f9a475f58d..0000000000 --- a/torchci/rockset/metrics/top_reds.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/top_reds.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2023-04-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-05-01T00:00:00.000Z" - }, - { - "name": "workflowNames", - "type": "string", - "value": "lint,pull,trunk" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/tts_avg.lambda.json b/torchci/rockset/metrics/tts_avg.lambda.json deleted file mode 100644 index db595fc1a5..0000000000 --- a/torchci/rockset/metrics/tts_avg.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/tts_avg.sql", - "default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "%" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-02-22T00:08:03.395Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-03-01T00:08:03.395Z" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/tts_duration_historical.lambda.json b/torchci/rockset/metrics/tts_duration_historical.lambda.json deleted file mode 100644 index 6f03503783..0000000000 --- a/torchci/rockset/metrics/tts_duration_historical.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/tts_duration_historical.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "week" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-01-13T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-06-09T00:06:32.839Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - }, - { - "name": "workflowNames", - "type": "string", - "value": "pull,trunk,nightly,periodic,inductor" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/metrics/tts_duration_historical_percentile.lambda.json b/torchci/rockset/metrics/tts_duration_historical_percentile.lambda.json deleted file mode 100644 index c66ba2f76a..0000000000 --- a/torchci/rockset/metrics/tts_duration_historical_percentile.lambda.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "sql_path": "__sql/tts_duration_historical_percentile.sql", - "default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "%" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "percentile", - "type": "float", - "value": "0.9" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-07-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": 
"2022-08-01T00:00:00.000Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - }, - { - "name": "workflowNames", - "type": "string", - "value": "pull,trunk,nightly,periodic,inductor" - } - ], - "description": "Query both TTS and duration percentiles and group them at different granularity" -} diff --git a/torchci/rockset/metrics/tts_percentile.lambda.json b/torchci/rockset/metrics/tts_percentile.lambda.json deleted file mode 100644 index fd25fae0c9..0000000000 --- a/torchci/rockset/metrics/tts_percentile.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/tts_percentile.sql", - "default_parameters": [ - { - "name": "branch", - "type": "string", - "value": "%" - }, - { - "name": "percentile", - "type": "float", - "value": "0.9" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-07-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-08-01T00:00:00.000Z" - } - ], - "description": "Query job TTS at different percentiles" -} diff --git a/torchci/rockset/metrics/workflow_duration_avg.lambda.json b/torchci/rockset/metrics/workflow_duration_avg.lambda.json deleted file mode 100644 index 99ddf350e9..0000000000 --- a/torchci/rockset/metrics/workflow_duration_avg.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/workflow_duration_avg.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-02-22T00:08:03.395Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-03-01T00:08:03.395Z" - }, - { - "name": "workflowNames", - "type": "string", - "value": "pull,trunk" - } - ], - "description": "" -} diff --git a/torchci/rockset/metrics/workflow_duration_percentile.lambda.json b/torchci/rockset/metrics/workflow_duration_percentile.lambda.json deleted file mode 100644 index 61a87197d7..0000000000 --- a/torchci/rockset/metrics/workflow_duration_percentile.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/workflow_duration_percentile.sql", - "default_parameters": [ - { - "name": "percentile", - "type": "float", - "value": "0.95" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-07-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-08-01T00:00:00.000Z" - }, - { - "name": "workflowNames", - "type": "string", - "value": "pull,trunk" - } - ], - "description": "Query workflow duration at different percentiles" -} diff --git a/torchci/rockset/metrics/workflow_load.lambda.json b/torchci/rockset/metrics/workflow_load.lambda.json deleted file mode 100644 index 33c4180db0..0000000000 --- a/torchci/rockset/metrics/workflow_load.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/workflow_load.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "hour" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch/pytorch" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-05-13T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-05-21T00:06:32.839Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/nightlies/__sql/docker_jobs_red.sql b/torchci/rockset/nightlies/__sql/docker_jobs_red.sql deleted file mode 100644 index fc59bbe24f..0000000000 --- a/torchci/rockset/nightlies/__sql/docker_jobs_red.sql +++ /dev/null @@ -1,53 +0,0 @@ -with commit_overall_conclusion 
as ( - SELECT - time, - CASE - WHEN COUNT_IF(conclusion = 'red') > 0 THEN 'red' - WHEN COUNT_IF(conclusion = 'pending') > 0 THEN 'pending' - ELSE 'green' - END as overall_conclusion - FROM - ( - SELECT - job._event_time as time, - CASE - WHEN job.conclusion = 'failure' THEN 'red' - WHEN job.conclusion = 'timed_out' THEN 'red' - WHEN job.conclusion = 'cancelled' THEN 'red' - WHEN job.conclusion IS NULL THEN 'pending' - ELSE 'green' - END as conclusion - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - WHERE - job.head_branch = 'main' - AND job.name like '%docker%' - AND workflow.repository.full_name = 'pytorch/builder' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - ) as all_job - GROUP BY - time - ORDER BY - time DESC -) -SELECT - FORMAT_TIMESTAMP( - '%Y-%m-%d', - DATE_TRUNC('hour', time), - :timezone - ) AS granularity_bucket, - COUNT_IF(overall_conclusion = 'red') AS red, - COUNT_IF(overall_conclusion = 'pending') AS pending, - COUNT_IF(overall_conclusion = 'green') AS green, - COUNT(*) as total, -FROM - commit_overall_conclusion -GROUP BY - granularity_bucket -ORDER BY - granularity_bucket ASC - - - diff --git a/torchci/rockset/nightlies/__sql/docker_jobs_red_past_day.sql b/torchci/rockset/nightlies/__sql/docker_jobs_red_past_day.sql deleted file mode 100644 index 25fbf3f54d..0000000000 --- a/torchci/rockset/nightlies/__sql/docker_jobs_red_past_day.sql +++ /dev/null @@ -1,14 +0,0 @@ -SELECT - COUNT(*) COUNT, - job.name -FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id -WHERE - job.head_branch = 'main' - AND job.name like '%docker%' - AND job.conclusion in ('failure', 'timed_out', 'cancelled') - AND workflow.repository.full_name = 'pytorch/builder' - AND job._event_time >= CURRENT_DATE() - INTERVAL 1 DAY -GROUP BY job.name -ORDER BY COUNT DESC \ No newline at end of file diff --git a/torchci/rockset/nightlies/__sql/nightly_jobs_red.sql b/torchci/rockset/nightlies/__sql/nightly_jobs_red.sql deleted file mode 100644 index 5db3fd0fef..0000000000 --- a/torchci/rockset/nightlies/__sql/nightly_jobs_red.sql +++ /dev/null @@ -1,36 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC(:granularity, time) - ) AS granularity_bucket, - AVG( - CASE - when conclusion = 'failure' THEN 1 - when conclusion = 'timed_out' THEN 1 - when conclusion = 'cancelled' THEN 1 - when conclusion = 'skipped' THEN 1 - ELSE 0 - END - ) as red, -FROM - ( - SELECT - job._event_time AT TIME ZONE :timezone as time, - job.conclusion as conclusion, - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - WHERE - job.name NOT LIKE '%generate-matrix%' - AND job.name NOT LIKE '%unittests%' - AND workflow.name NOT IN ('cron', 'Bandit', 'tests') - AND push.ref = 'refs/heads/nightly' - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = :repo - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - ) as all_job -GROUP BY - DATE_TRUNC(:granularity, time) -ORDER BY - DATE_TRUNC(:granularity, time) ASC diff --git a/torchci/rockset/nightlies/__sql/nightly_jobs_red_by_name.sql b/torchci/rockset/nightlies/__sql/nightly_jobs_red_by_name.sql deleted file mode 100644 index 62131053e7..0000000000 --- a/torchci/rockset/nightlies/__sql/nightly_jobs_red_by_name.sql +++ /dev/null @@ -1,19 +0,0 @@ 
-SELECT - COUNT(*) COUNT, workflow.name - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - WHERE - job.name NOT LIKE '%generate-matrix%' - AND job.name NOT LIKE '%unittests%' - AND workflow.name NOT IN ('cron', 'Bandit', 'tests', 'Lint') - AND push.ref = 'refs/heads/nightly' - AND push.repository.owner.name = 'pytorch' - AND push.repository.name in ('pytorch', 'vision', 'audio', 'text') - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND job.conclusion in ('failure', 'timed_out', 'cancelled') - GROUP BY - workflow.name - ORDER BY COUNT DESC diff --git a/torchci/rockset/nightlies/__sql/nightly_jobs_red_by_platform.sql b/torchci/rockset/nightlies/__sql/nightly_jobs_red_by_platform.sql deleted file mode 100644 index 6572b48413..0000000000 --- a/torchci/rockset/nightlies/__sql/nightly_jobs_red_by_platform.sql +++ /dev/null @@ -1,37 +0,0 @@ -WITH all_failed_jobs AS ( - SELECT - COUNT(*) COUNT, workflow.path - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id - WHERE - job.name NOT LIKE '%generate-matrix%' - AND job.name NOT LIKE '%unittests%' - AND workflow.name NOT IN ('cron', 'Bandit', 'tests', 'Lint') - AND push.ref = 'refs/heads/nightly' - AND push.repository.owner.name = 'pytorch' - AND push.repository.name in ('pytorch', 'vision', 'audio', 'text') - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - AND job.conclusion in ('failure', 'timed_out', 'cancelled') - GROUP BY - workflow.path ) -SELECT - SUM(COUNT) as Count, 'Conda' as Platform -FROM - all_failed_jobs -where path like '%conda%' -UNION -SELECT - SUM(COUNT) as Count, 'Wheel' as Platform -FROM - all_failed_jobs -where path like '%wheel%' -UNION -SELECT - SUM(COUNT) as Count, 'Libtorch' as Platform -FROM - all_failed_jobs -where path like '%libtorch%' - diff --git a/torchci/rockset/nightlies/__sql/nightly_jobs_red_past_day.sql b/torchci/rockset/nightlies/__sql/nightly_jobs_red_past_day.sql deleted file mode 100644 index eafec91557..0000000000 --- a/torchci/rockset/nightlies/__sql/nightly_jobs_red_past_day.sql +++ /dev/null @@ -1,19 +0,0 @@ -SELECT - COUNT(*) COUNT, - job.name -FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - JOIN push on workflow.head_commit.id = push.head_commit.id -WHERE - job.name NOT LIKE '%generate-matrix%' - AND job.name NOT LIKE '%unittests%' - AND workflow.name NOT IN ('cron', 'Bandit', 'tests') - AND push.ref = 'refs/heads/nightly' - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = :repo - AND job.conclusion in ('failure', 'timed_out', 'cancelled') - AND job._event_time >= CURRENT_DATE() - INTERVAL 1 DAY -GROUP BY job.name -ORDER BY COUNT - diff --git a/torchci/rockset/nightlies/__sql/validation_jobs_red.sql b/torchci/rockset/nightlies/__sql/validation_jobs_red.sql deleted file mode 100644 index 39611fe0ea..0000000000 --- a/torchci/rockset/nightlies/__sql/validation_jobs_red.sql +++ /dev/null @@ -1,55 +0,0 @@ -with commit_overall_conclusion as ( - SELECT - time, - CASE - WHEN COUNT_IF(conclusion = 'red') > 0 THEN 'red' - WHEN COUNT_IF(conclusion = 'pending') > 0 THEN 'pending' - ELSE 'green' - END as overall_conclusion - FROM - ( - SELECT - job._event_time as time, - CASE - WHEN 
job.conclusion = 'failure' THEN 'red' - WHEN job.conclusion = 'timed_out' THEN 'red' - WHEN job.conclusion = 'cancelled' THEN 'red' - WHEN job.conclusion IS NULL THEN 'pending' - ELSE 'green' - END as conclusion - FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id - WHERE - job.head_branch = 'main' - AND workflow.name = 'cron' - AND workflow.event = 'schedule' - AND job.name like CONCAT('%',:channel,'%') - AND workflow.repository.full_name = 'pytorch/builder' - AND job._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND job._event_time < PARSE_DATETIME_ISO8601(:stopTime) - ) as all_job - GROUP BY - time - ORDER BY - time DESC -) -SELECT - FORMAT_TIMESTAMP( - '%Y-%m-%d', - DATE_TRUNC('hour', time), - :timezone - ) AS granularity_bucket, - COUNT_IF(overall_conclusion = 'red') AS red, - COUNT_IF(overall_conclusion = 'pending') AS pending, - COUNT_IF(overall_conclusion = 'green') AS green, - COUNT(*) as total, -FROM - commit_overall_conclusion -GROUP BY - granularity_bucket -ORDER BY - granularity_bucket ASC - - - diff --git a/torchci/rockset/nightlies/__sql/validation_jobs_red_past_day.sql b/torchci/rockset/nightlies/__sql/validation_jobs_red_past_day.sql deleted file mode 100644 index 2321f1aab1..0000000000 --- a/torchci/rockset/nightlies/__sql/validation_jobs_red_past_day.sql +++ /dev/null @@ -1,16 +0,0 @@ -SELECT - COUNT(*) COUNT, - job.name -FROM - commons.workflow_job job - JOIN commons.workflow_run workflow on workflow.id = job.run_id -WHERE - job.head_branch = 'main' - AND workflow.name = 'cron' - AND workflow.event = 'schedule' - AND job.conclusion in ('failure', 'timed_out', 'cancelled') - AND job.name like CONCAT('%',:channel,'%') - AND workflow.repository.full_name = 'pytorch/builder' - AND job._event_time >= CURRENT_DATE() - INTERVAL 1 DAY -GROUP BY job.name -ORDER BY COUNT DESC \ No newline at end of file diff --git a/torchci/rockset/nightlies/nightly_jobs_red.lambda.json b/torchci/rockset/nightlies/nightly_jobs_red.lambda.json deleted file mode 100644 index 250d54ecd1..0000000000 --- a/torchci/rockset/nightlies/nightly_jobs_red.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/nightly_jobs_red.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "repo", - "type": "string", - "value": "pytorch" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-05-01T00:00:38.270Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-06-17T20:20:38.270Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/nightlies/nightly_jobs_red_by_name.lambda.json b/torchci/rockset/nightlies/nightly_jobs_red_by_name.lambda.json deleted file mode 100644 index c157c9ddba..0000000000 --- a/torchci/rockset/nightlies/nightly_jobs_red_by_name.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/nightly_jobs_red_by_name.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2023-06-16T00:00:38.270Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-06-16T20:20:38.270Z" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/nightlies/nightly_jobs_red_by_platform.lambda.json b/torchci/rockset/nightlies/nightly_jobs_red_by_platform.lambda.json deleted file mode 100644 index 893b0c6b2d..0000000000 --- 
a/torchci/rockset/nightlies/nightly_jobs_red_by_platform.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/nightly_jobs_red_by_platform.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2023-06-16T00:00:38.270Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-06-16T20:20:38.270Z" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/nightlies/nightly_jobs_red_past_day.lambda.json b/torchci/rockset/nightlies/nightly_jobs_red_past_day.lambda.json deleted file mode 100644 index 77be211e6e..0000000000 --- a/torchci/rockset/nightlies/nightly_jobs_red_past_day.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/nightly_jobs_red_past_day.sql", - "default_parameters": [ - { - "name": "repo", - "type": "string", - "value": "pytorch" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/nightlies/validation_jobs_red.lambda.json b/torchci/rockset/nightlies/validation_jobs_red.lambda.json deleted file mode 100644 index c0781406fa..0000000000 --- a/torchci/rockset/nightlies/validation_jobs_red.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/validation_jobs_red.sql", - "default_parameters": [ - { - "name": "channel", - "type": "string", - "value": "release" - }, - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-06-19T00:00:38.270Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-06-20T20:20:38.270Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/nightlies/validation_jobs_red_past_day.lambda.json b/torchci/rockset/nightlies/validation_jobs_red_past_day.lambda.json deleted file mode 100644 index 63cd00b10c..0000000000 --- a/torchci/rockset/nightlies/validation_jobs_red_past_day.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/validation_jobs_red_past_day.sql", - "default_parameters": [ - { - "name": "channel", - "type": "string", - "value": "release" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/prodVersions.json b/torchci/rockset/prodVersions.json deleted file mode 100644 index a6e9767a10..0000000000 --- a/torchci/rockset/prodVersions.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "commons": { - "annotated_flaky_jobs": "bd991c8c9782f339", - "hud_query": "69f0bc9a618c82b1", - "commit_jobs_query": "10d4a302d49906bb", - "disabled_non_flaky_tests": "f909abf9eec15b56", - "commit_failed_jobs": "7365113df9e3280d", - "filter_forced_merge_pr": "e0dc3b64279c2e95", - "flaky_tests": "be53d5f27248e365", - "flaky_tests_across_jobs": "474e5454bda0c5bb", - "get_relevant_alerts": "727014a49bef2c20", - "flaky_workflows_jobs": "3ac657ca40327f94", - "failed_workflow_jobs": "a91753fbbf82d470", - "get_workflow_jobs": "6ed2029b19691a4b", - "test_time_per_file": "219d8bcff926d6c8", - "test_time_per_file_periodic_jobs": "fd632fe67c910f3a", - "issue_query": "e4d338de89980044", - "failure_samples_query": "7940a636284d0752", - "num_commits_master": "e4a864147cf3bf44", - "recent_pr_workflows_query": "db6b28f729e52892", - "reverted_prs_with_reason": "751f01cba16364f0", - "unclassified": "1b31a2d8f4ab7230", - "master_commit_red_jobs": "4869b467679a616a", - "weekly_force_merge_stats": "d2264131599bcf6e", - "pr_commits": "bbbbdf0c62db15b1", - "test_time_per_class": 
"c91400a2a182d595", - "test_time_per_class_periodic_jobs": "56fe9537b20cf862", - "disabled_tests": "96202f2d01403a94", - "disabled_test_labels": "91098effa428d64a" - }, - "pytorch_dev_infra_kpis": { - "monthly_contribution_stats": "c1a8751a22f6b6ce", - "num_reverts": "0163bf13629688e6", - "number_of_force_pushes_historical": "08c5ab5902940a88", - "time_to_merge": "be1c2a28cf75fe32", - "time_to_review": "adfbcdfc51ede11a", - "time_to_signal": "8866b339e1a255e6", - "strict_lag_historical": "d2a09d13caf8b76a", - "ttrs_percentiles": "ea95e9b56ab6900f" - }, - "metrics": { - "correlation_matrix": "35c05e04047123f0", - "disabled_test_historical": "5f8764703b63d0c2", - "disabled_test_total": "da5f834a6501fc63", - "external_contribution_stats": "d98863128f502cdb", - "job_duration_avg": "10a88ea2ebb80647", - "job_duration_percentile": "96507ed62db7a3a8", - "last_branch_push": "401211f8a7112d9e", - "last_successful_jobs": "2e04949378c58607", - "last_successful_workflow": "5d22927dd0b0956b", - "log_captures_count": "7365d7871017530a", - "master_commit_red_avg": "5739b387b1262bd8", - "master_commit_red": "029f1bbebb08f80c", - "master_commit_red_percent": "5c1de222caaf3a9e", - "master_commit_red_percent_groups": "601949da23f80a28", - "master_jobs_red_avg": "7df76d4b0d79e067", - "number_of_force_pushes": "7c12c25f00d85d5d", - "queued_jobs_by_label": "faa25dfaf336118a", - "queued_jobs": "2a1fce1642bb412d", - "reverts": "f5bc84a10c4065a3", - "top_reds": "f1a1f5012d419fc2", - "tts_avg": "2dd4a04e091c58aa", - "tts_percentile": "912c8ad61a1772a4", - "tts_duration_historical": "88c02c6e25d59854", - "tts_duration_historical_percentile": "f6824cbe03e1b6d8", - "strict_lag_sec": "e0ab723990d6f2a2", - "queue_times_historical": "2069a7304a266f6c", - "workflow_duration_avg": "7bae00900097a486", - "workflow_duration_percentile": "cc92aa19e3f35c3f", - "workflow_load": "600c129bd22a6569", - "get_workers_on_period": "ae5cf853350477c7", - "queue_times_historical_pct": "f815ad1732928bb6", - "lf_rollover_health": "b3eb4ffc761a224a", - "lf_rollover_percentage": "423f2523aa1e85b2" - }, - "inductor": { - "compilers_benchmark_performance": "442c41fbbc0eb758", - "compilers_benchmark_performance_branches": "2d47c0ef6d04d7f1", - "torchao_query": "89dd8524b4784c7b", - "torchao_query_branches": "dae2141eab66e839" - }, - "torchbench": { - "torchbench_list_userbenchmarks": "fcfa85c5d1056e8f", - "torchbench_userbenchmark_list_commits": "ecbcda0f8e0a3526", - "torchbench_userbenchmark_query_metrics": "39d10fce6485c0a3" - }, - "utilization": { - "runner_utilization": "2dfc4f9c16e51da0", - "runner_utilization_by_repo": "2ca7468c6f158924", - "runner_utilization_by_activity": "343929e0ebeee379" - }, - "nightlies": { - "nightly_jobs_red": "d49bc5633c5aac10", - "nightly_jobs_red_by_name": "bb6eeb316157ed2b", - "nightly_jobs_red_by_platform": "d5ab478e83ff2dae", - "nightly_jobs_red_past_day": "e74af839c37e1517", - "validation_jobs_red": "ac8dee6e6b76916d", - "validation_jobs_red_past_day": "aecb798a574ba2ff" - }, - "benchmarks": { - "oss_ci_benchmark_llms": "ec0dd4a03918ba0f", - "oss_ci_benchmark_branches": "80fdbef4cf5b8360", - "oss_ci_benchmark_names": "80824879afbc1c5b" - } -} diff --git a/torchci/rockset/pytorch_dev_infra_kpis/__sql/monthly_contribution_stats.sql b/torchci/rockset/pytorch_dev_infra_kpis/__sql/monthly_contribution_stats.sql deleted file mode 100644 index 7b60060102..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/__sql/monthly_contribution_stats.sql +++ /dev/null @@ -1,24 +0,0 @@ -WITH average_table as ( - SELECT - 
DATE_TRUNC('MONTH', DATE (CAST(date as date))) AS granularity_bucket, - SUM(pr_count) - AS pr_count_sum, - ARRAY_AGG(users) as users_agg - FROM - metrics.external_contribution_stats - WHERE CAST(date as date) >= PARSE_DATETIME_ISO8601(:startTime) - AND CAST(date as date) < PARSE_DATETIME_ISO8601(:stopTime) - GROUP BY - DATE_TRUNC('MONTH', DATE (CAST(date as date))) -) -SELECT --- the day will always be 01 -FORMAT_ISO8601(CAST(granularity_bucket as date)) as year_and_month, -pr_count_sum as pr_count, -LENGTH(ARRAY_DISTINCT(ARRAY_FLATTEN(users_agg))) as user_count, -FROM -average_table -WHERE CAST(granularity_bucket as date) >= PARSE_DATETIME_ISO8601(:startTime) - AND CAST(granularity_bucket as date) < PARSE_DATETIME_ISO8601(:stopTime) -ORDER BY -granularity_bucket DESC \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/__sql/num_reverts.sql b/torchci/rockset/pytorch_dev_infra_kpis/__sql/num_reverts.sql deleted file mode 100644 index 2b337cea42..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/__sql/num_reverts.sql +++ /dev/null @@ -1,107 +0,0 @@ -WITH - coded_reverts as ( - SELECT - FORMAT_TIMESTAMP( - '%Y-%m-%d', - DATE_TRUNC(:granularity, ic.created) - ) AS bucket, - REGEXP_EXTRACT( - ic.body, - '(-c|--classification)[\s =]+["'']?(\w+)["'']?', - 2 - ) AS code, - COUNT(*) AS num - FROM - commons.issue_comment AS ic - INNER JOIN ( - SELECT - issue_comment.issue_url, - MAX(issue_comment.created) AS created -- Use the max for when invalid revert commands are tried first - FROM - commons.issue_comment - WHERE - REGEXP_LIKE( - issue_comment.body, - ' *@pytorch(merge|)bot revert' - ) - GROUP BY - issue_comment.issue_url - ) AS rc ON ic.issue_url = rc.issue_url - WHERE - ic.created = rc.created - AND ic.created >= PARSE_DATETIME_ISO8601(:startTime) - AND ic.created <= PARSE_DATETIME_ISO8601(:stopTime) - AND ic.user.login != 'pytorch-bot[bot]' - AND REGEXP_EXTRACT( - ic.body, - '(-c|--classification)[\s =]+["'']?(\w+)["'']?', - 2 - ) IS NOT NULL - GROUP BY - code, - bucket - ), - weekly_results as ( - ( - SELECT - FORMAT_TIMESTAMP( - '%Y-%m-%d', - DATE_TRUNC(:granularity, push._event_time) - ) AS bucket, - 'total' AS code, - COUNT(*) AS num - FROM - push - WHERE - push.ref IN ('refs/heads/master', 'refs/heads/main') - AND push.repository.owner.name = 'pytorch' - AND push.repository.name = 'pytorch' - AND ( - push.head_commit.message LIKE 'Revert %' - OR push.head_commit.message LIKE 'Back out%' - ) - AND push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time <= PARSE_DATETIME_ISO8601(:stopTime) - GROUP BY - bucket - ORDER BY - bucket - ) - UNION - ( - SELECT - bucket, - code, - num - FROM - coded_reverts - ) - UNION - ( - SELECT - bucket, - 'non-ghfirst-total' AS code, - SUM(num) - FROM - coded_reverts - WHERE - code != 'ghfirst' - GROUP BY - bucket - ) - ) -SELECT - bucket, - -- 2 week rolling average - ( - SUM(num) OVER( - PARTITION BY code - ORDER BY - bucket ROWS 1 PRECEDING - ) - ) / 2.0 AS num, - code, -FROM - weekly_results -ORDER BY - bucket DESC, code \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/__sql/number_of_force_pushes_historical.sql b/torchci/rockset/pytorch_dev_infra_kpis/__sql/number_of_force_pushes_historical.sql deleted file mode 100644 index 033b1385ce..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/__sql/number_of_force_pushes_historical.sql +++ /dev/null @@ -1,16 +0,0 @@ -SELECT - FORMAT_TIMESTAMP('%Y-%m-%d', DATE_TRUNC(:granularity, issue_comment.created)) AS bucket, - 
COUNT(DISTINCT issue_comment.issue_url) AS count -FROM - commons.issue_comment -WHERE - issue_comment.body LIKE '%@pytorchbot merge -f%' - AND created >= PARSE_DATETIME_ISO8601(:startTime) - AND created < PARSE_DATETIME_ISO8601(:stopTime) - AND issue_comment.user.login NOT LIKE '%pytorch-bot%' - AND issue_comment.user.login NOT LIKE '%facebook-github-bot%' - AND issue_comment.user.login NOT LIKE '%pytorchmergebot%' -group by - bucket -order by - bucket diff --git a/torchci/rockset/pytorch_dev_infra_kpis/__sql/strict_lag_historical.sql b/torchci/rockset/pytorch_dev_infra_kpis/__sql/strict_lag_historical.sql deleted file mode 100644 index c27b517811..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/__sql/strict_lag_historical.sql +++ /dev/null @@ -1,19 +0,0 @@ -select - AVG( - DATE_DIFF( - 'minute', - PARSE_Timestamp_ISO8601(push.head_commit.timestamp), - push._event_time - ) / 60.0 - ) as diff_hr, - DATE_TRUNC(:granularity, push._event_time) AS push_time, -from - push -where - push._event_time >= PARSE_DATETIME_ISO8601(:startTime) - AND push._event_time < PARSE_DATETIME_ISO8601(:stopTime) - and push.ref like 'refs/heads/viable/strict' -group by - push_time -order by - push_time diff --git a/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_merge.sql b/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_merge.sql deleted file mode 100644 index c5419b3cb8..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_merge.sql +++ /dev/null @@ -1,38 +0,0 @@ -with parsed_time as ( - SELECT - PARSE_DATETIME_ISO8601(created_at) as created_time, - PARSE_DATETIME_ISO8601(closed_at) closed_time,case - when author_association = 'FIRST_TIME_CONTRIBUTOR' - OR author_association = 'CONTRIBUTOR' - OR author_association = 'NONE' THEN 'external_user' - ELSE 'metamate' - END as user_type, - number, - FROM - commons.pull_request - where - PARSE_DATETIME_ISO8601(created_at) > PARSE_DATETIME_ISO8601(:startTime) -), -time_diffs as ( - select - created_time, - DATE_DIFF('day', created_time, closed_time) d_diff, - from - parsed_time - where - user_type = :userType -) -select - DATE_TRUNC('WEEK', created_time) AS week_bucket, - sum( - case - when d_diff < :closeSLO then 1 - else 0 - end - ) * 100.0 / count(*) metric -from - time_diffs -group by - week_bucket -ORDER BY - week_bucket diff --git a/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_review.sql b/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_review.sql deleted file mode 100644 index ffd52fd6bc..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_review.sql +++ /dev/null @@ -1,48 +0,0 @@ --- join data from pull_request_review and pull_request. 
--- We are missing data from before March -with pr_data as ( - select - min( - PARSE_TIMESTAMP_ISO8601(pr_review.review.submitted_at) - ) as reviewed_on, - MIN(PARSE_TIMESTAMP_ISO8601(pr.created_at)) as created_at, - pr.number as pr_number, - case - when pr.author_association = 'FIRST_TIME_CONTRIBUTOR' - OR pr.author_association = 'CONTRIBUTOR' - OR pr.author_association = 'NONE' THEN 'external_user' - ELSE 'metamate' - END as user_type, - from - commons.pull_request_review pr_review - inner join commons.pull_request pr on pr_review.pull_request.number = pr.number - where - pr_review.action = 'submitted' - and PARSE_TIMESTAMP_ISO8601(pr_review.review.submitted_at) > PARSE_TIMESTAMP_ISO8601(:startTime) - group by - pr_number, - user_type -), -date_diffs as( - select - created_at, - DATE_DIFF('hour', created_at, reviewed_on) /(24.0) as day_diff, - from - pr_data - where - user_type = :userType -) -select - date_trunc('week', created_at) week_bucket, - sum( - case - when day_diff < 2 then 1 - else 0 - end - ) * 100.0 / count(*) as metric -from - date_diffs -group by - week_bucket -order by - week_bucket diff --git a/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_signal.sql b/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_signal.sql deleted file mode 100644 index bab88a1367..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/__sql/time_to_signal.sql +++ /dev/null @@ -1,39 +0,0 @@ -with - tts as ( - SELECT - MAX( - DATE_DIFF( - 'second', - PARSE_TIMESTAMP_ISO8601(w.created_at), - PARSE_TIMESTAMP_ISO8601(w.updated_at) - ) - ) as duration_sec, - w.head_sha, - ARBITRARY(IF(w.head_branch = 'main', 'main', 'not main')) as branch, - MIN(PARSE_TIMESTAMP_ISO8601(w.created_at)) as created_at - FROM - commons.workflow_run w - WHERE - ARRAY_CONTAINS(['pull', 'trunk'], LOWER(w.name)) - AND PARSE_TIMESTAMP_ISO8601(w.created_at) >= PARSE_DATETIME_ISO8601(:startTime) - AND w.head_repository.full_name = 'pytorch/pytorch' - group by - w.head_sha - having - bool_and( - w.conclusion = 'success' - and w.run_attempt = 1 - ) - ) -select - CAST(DATE_TRUNC('week', t.created_at) as string) AS week_bucket, - avg(t.duration_sec / 3600.0) as avg_tts, - t.branch -from - tts t -group by - week_bucket, - t.branch -order by - week_bucket desc, - t.branch desc diff --git a/torchci/rockset/pytorch_dev_infra_kpis/__sql/ttrs_percentiles.sql b/torchci/rockset/pytorch_dev_infra_kpis/__sql/ttrs_percentiles.sql deleted file mode 100644 index 9ee0b08a2a..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/__sql/ttrs_percentiles.sql +++ /dev/null @@ -1,245 +0,0 @@ --- This query is used to compute the TTRS KPI for the pytorch/pytorch repo. --- --- Results are displayed on HUD in two views: --- The kpi view, where percentile_to_get should be left at zero in order to get the default percentiles --- The metrics view, where the percentile_to_get and one_bucket should be set in order to get just the desired percentile --- --- This query has two special params: --- percentile_to_get: When set, it returns only the specified percentile. Otherwise it returns --- p25, p50, p75 and p90 percentiles. --- one_bucket: When set to false, buckets data into weekly percentiles. 
When true, it treats the - entire time range as one big bucket and returns percentiles accordingly - -WITH --- All the percentiles that we want the query to determine -percentiles_desired AS ( - SELECT - CONCAT('p', n.percentile) as percentile, - n.percentile / 100.0 as percentile_num - FROM UNNEST(ARRAY_CREATE(25, 50, 75, 90) AS percentile) AS n - UNION ALL - -- if percentile_to_get is specified, we return only that percentile - SELECT - CONCAT( - 'p', - CAST( - ROUND(: percentile_to_get * 100) AS STRING - ) - ), - : percentile_to_get - WHERE - : percentile_to_get > 0 -), --- Get all PRs that were merged into master, and get all the SHAs for commits from that PR which CI jobs ran against --- We need the shas because some jobs (like trunk) don't have a PR they explicitly ran against, but they _were_ run against --- a commit from a PR -pr_shas AS ( - SELECT - r.pull_requests[1].number AS pr_number, - CONCAT( - 'https://github.com/pytorch/pytorch/pull/', - r.pull_requests[1].number - ) AS url, - j.head_sha AS sha, - FROM - commons.workflow_job j - INNER JOIN commons.workflow_run r ON j.run_id = r.id - WHERE - 1 = 1 - AND j._event_time > PARSE_DATETIME_ISO8601(: startTime) - AND r._event_time > PARSE_DATETIME_ISO8601(: startTime) - AND j._event_time < PARSE_DATETIME_ISO8601(: stopTime) - AND r._event_time < PARSE_DATETIME_ISO8601(: stopTime) - AND LENGTH(r.pull_requests) = 1 - AND r.pull_requests[1].head.repo.name = 'pytorch' - AND r.name IN ('pull', 'trunk', 'Lint') -- Ensure we don't pull in random PRs we don't care about - AND r.head_branch NOT IN ( - 'master', 'main', 'nightly', 'viable/strict' - ) -- Only measure TTRS against PRs - AND ( - r.pull_requests[1].base.ref = 'master' - OR r.pull_requests[1].base.ref = 'main' - OR r.pull_requests[1].base.ref like 'gh/%/base' - ) - GROUP BY - pr_number, - url, - sha -), --- Now filter the list to just closed PRs. --- Open PRs can be noisy experiments which were never meant to be merged. -merged_pr_shas AS ( - SELECT - DISTINCT s.pr_number, - s.url, - s.sha - FROM - pr_shas s - INNER JOIN commons.pull_request pr ON s.pr_number = pr.number - WHERE - pr.closed_at IS NOT NULL -- Ensure the PR was actually merged - AND 'Merged' IN ( - SELECT - name - FROM - UNNEST(pr.labels) - ) -), --- Get all the workflows run against the PR and find the steps & stats we care about -commit_job_durations AS ( - SELECT - s.pr_number, - j.steps, - js.name AS step_name, - js.conclusion AS step_conclusion, - PARSE_TIMESTAMP_ISO8601(js.completed_at) AS failure_time, - PARSE_TIMESTAMP_ISO8601(js.started_at) AS start_time, - r.name AS workflow_name, - j.name AS job_name, - r.html_url AS workflow_url, - -- for debugging - s.sha, - j.conclusion AS conclusion, - j.conclusion = 'cancelled' AS was_cancelled, - -- For convenience - j.run_attempt, - -- the attempt number this job was run on - r.run_attempt AS total_attempts, - r.id AS workflow_run_id, - s.url -- for debugging - FROM - commons.workflow_job j - INNER JOIN merged_pr_shas s ON j.head_sha = s.sha HINT(join_strategy = lookup) - CROSS JOIN UNNEST (j.steps) js - INNER JOIN commons.workflow_run r ON j.run_id = r.id - WHERE - 1 = 1 - AND r.name = :workflow -- Stick to pull workflows to reduce noise. 
Trendlines are the same within other workflows - AND j.conclusion = 'failure' -- we just care about failed jobs - AND js.conclusion = 'failure' - AND j.run_attempt = 1 -- only look at the first run attempt since reruns will either 1) succeed, so are irrelevant or 2) repro the failure, biasing our data - and j.name NOT LIKE 'lintrunner%' - and j.name NOT LIKE '%unstable%' -- The PR doesn't wait for unstable jobs, so they should be excluded when computing TTRS - and js.name LIKE 'Test%' -- Only consider test steps - ), --- Refine our measurements to only collect the first red signal per workflow --- Gets the earliest TTRS across each workflow within the same commit -workflow_failure AS ( - SELECT DISTINCT - d.pr_number, - d.sha, - d.workflow_run_id, - FIRST_VALUE(d.step_name) OVER( - PARTITION BY d.pr_number, d.sha, d.workflow_run_id - ORDER BY d.failure_time - ) as step_name, - FIRST_VALUE(d.workflow_name) OVER( - PARTITION BY d.pr_number, d.sha, d.workflow_run_id - ORDER BY d.failure_time - ) as workflow_name, - DURATION_SECONDS( - FIRST_VALUE(d.failure_time) OVER( - PARTITION BY d.pr_number, d.sha, d.workflow_run_id - ORDER BY d.failure_time - ) - - FIRST_VALUE(d.start_time) OVER( - PARTITION BY d.pr_number, d.sha, d.workflow_run_id - ORDER BY d.failure_time - ) - ) / 60.0 as ttrs_mins, - FIRST_VALUE(d.workflow_url) OVER( - PARTITION BY d.pr_number, d.sha, d.workflow_run_id - ORDER BY d.failure_time - ) as workflow_url, - FIRST_VALUE(d.start_time) OVER( - PARTITION BY d.pr_number, d.sha, d.workflow_run_id - ORDER BY d.failure_time - ) as start_time, - FIRST_VALUE(d.failure_time) OVER( - PARTITION BY d.pr_number, d.sha, d.workflow_run_id - ORDER BY d.failure_time - ) as failure_time, - FROM - commit_job_durations d -), -workflow_failure_buckets AS ( - SELECT - -- When :one_bucket is set to true, we want the ttrs percentile over all the data - DATE_TRUNC( - 'week', - IF( - : one_bucket, - CURRENT_DATETIME(), - start_time - ) - ) AS bucket, - * - FROM - workflow_failure -), --- Within each bucket, figure out what percentile duration and num_commits each PR falls under -percentiles AS ( - SELECT - bucket, - ttrs_mins, - workflow_url, - PERCENT_RANK() OVER( - PARTITION BY bucket - ORDER by - ttrs_mins - ) AS percentile, - sha, - FROM - workflow_failure_buckets -), --- Take the full list of percentiles and get just the ones we care about -ttrs_percentile AS ( - SELECT - p.bucket, - pd.percentile, - MIN(p.ttrs_mins) AS ttrs_mins - FROM - percentiles p CROSS - JOIN percentiles_desired pd - WHERE - 1 = 1 - AND p.percentile >= pd.percentile_num - AND ( - : percentile_to_get <= 0 - OR pd.percentile_num = : percentile_to_get - ) - GROUP BY - p.bucket, - pd.percentile -), -kpi_results AS ( - SELECT - FORMAT_TIMESTAMP('%Y-%m-%d', d.bucket) AS bucket, - -- rolling average - ( - ROUND(AVG(ttrs_mins) OVER( - PARTITION BY percentile - ORDER BY - -- Average over this many + 1 buckets (here just the current week) - bucket ROWS 0 PRECEDING - )) - ) AS ttrs_mins, - d.percentile - FROM - ttrs_percentile d - WHERE - : one_bucket - OR ( - d.bucket < CURRENT_TIMESTAMP() - INTERVAL 1 WEEK - ) -- discard the latest bucket, which will have noisy, partial data - ORDER BY - bucket ASC, - ttrs_mins -) -SELECT - * -FROM - kpi_results -ORDER BY - bucket DESC, - ttrs_mins DESC diff --git a/torchci/rockset/pytorch_dev_infra_kpis/monthly_contribution_stats.lambda.json b/torchci/rockset/pytorch_dev_infra_kpis/monthly_contribution_stats.lambda.json deleted file mode 100644 index 132f7b48c1..0000000000 --- 
a/torchci/rockset/pytorch_dev_infra_kpis/monthly_contribution_stats.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/monthly_contribution_stats.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "month" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-05-01T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-03-07T00:06:32.839Z" - } - ], - "description": "" - } \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/num_reverts.lambda.json b/torchci/rockset/pytorch_dev_infra_kpis/num_reverts.lambda.json deleted file mode 100644 index 3eb1092a53..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/num_reverts.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/num_reverts.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "week" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-11-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-03-01T00:00:00.000Z" - } - ], - "description": "The count of various revert types over time" -} \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/number_of_force_pushes_historical.lambda.json b/torchci/rockset/pytorch_dev_infra_kpis/number_of_force_pushes_historical.lambda.json deleted file mode 100644 index 54c9973204..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/number_of_force_pushes_historical.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/number_of_force_pushes_historical.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "week" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-07-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-08-01T00:00:00.000Z" - } - ], - "description": "Number of force pushes over time" -} \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/strict_lag_historical.lambda.json b/torchci/rockset/pytorch_dev_infra_kpis/strict_lag_historical.lambda.json deleted file mode 100644 index 1f9d5f30a3..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/strict_lag_historical.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/strict_lag_historical.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "week" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-06-09T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2022-06-19T00:06:32.839Z" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/time_to_merge.lambda.json b/torchci/rockset/pytorch_dev_infra_kpis/time_to_merge.lambda.json deleted file mode 100644 index ea8a5e4daf..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/time_to_merge.lambda.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "sql_path": "__sql/time_to_merge.sql", - "default_parameters": [ - { - "name": "closeSLO", - "type": "int", - "value": "2" - }, - { - "name": "startTime", - "type": "string", - "value": "2022-01-01T00:00:00.000Z" - }, - { - "name": "userType", - "type": "string", - "value": "metamate" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/time_to_review.lambda.json b/torchci/rockset/pytorch_dev_infra_kpis/time_to_review.lambda.json deleted file mode 100644 index
9a071657dc..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/time_to_review.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/time_to_review.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2022-01-01T00:00:00.000Z" - }, - { - "name": "userType", - "type": "string", - "value": "metamate" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/time_to_signal.lambda.json b/torchci/rockset/pytorch_dev_infra_kpis/time_to_signal.lambda.json deleted file mode 100644 index 2b60f50873..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/time_to_signal.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/time_to_signal.sql", - "default_parameters": [ - { - "name": "startTime", - "type": "string", - "value": "2023-01-01T00:00:00.000Z" - } - ], - "description": "TTS for successful workflows" -} \ No newline at end of file diff --git a/torchci/rockset/pytorch_dev_infra_kpis/ttrs_percentiles.lambda.json b/torchci/rockset/pytorch_dev_infra_kpis/ttrs_percentiles.lambda.json deleted file mode 100644 index 479b5413dc..0000000000 --- a/torchci/rockset/pytorch_dev_infra_kpis/ttrs_percentiles.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/ttrs_percentiles.sql", - "default_parameters": [ - { - "name": "one_bucket", - "type": "bool", - "value": "False" - }, - { - "name": "percentile_to_get", - "type": "float", - "value": "0" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-02-16T00:06:32.839Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2024-08-16T00:06:32.839Z" - }, - { - "name": "workflow", - "type": "string", - "value": "pull" - } - ], - "description": "Computes the TTRS KPI" -} \ No newline at end of file diff --git a/torchci/rockset/torchbench/__sql/torchbench_list_userbenchmarks.sql b/torchci/rockset/torchbench/__sql/torchbench_list_userbenchmarks.sql deleted file mode 100644 index 35e14d599d..0000000000 --- a/torchci/rockset/torchbench/__sql/torchbench_list_userbenchmarks.sql +++ /dev/null @@ -1,5 +0,0 @@ -SELECT DISTINCT - name -FROM - torchbench."torchbench-userbenchmark" -WHERE name IS NOT NULL \ No newline at end of file diff --git a/torchci/rockset/torchbench/__sql/torchbench_userbenchmark_list_commits.sql b/torchci/rockset/torchbench/__sql/torchbench_userbenchmark_list_commits.sql deleted file mode 100644 index 0da7562365..0000000000 --- a/torchci/rockset/torchbench/__sql/torchbench_userbenchmark_list_commits.sql +++ /dev/null @@ -1,13 +0,0 @@ -WITH w AS ( - SELECT ARBITRARY(name) AS name, "torchbench-userbenchmark".environ.pytorch_git_version as pytorch_git_version, - ARBITRARY("torchbench-userbenchmark".environ.pytorch_version) as pytorch_version, - FROM torchbench."torchbench-userbenchmark" - WHERE name = :userbenchmark - GROUP BY "torchbench-userbenchmark".environ.pytorch_git_version -), -s AS ( - SELECT push._event_time as pytorch_commit_time, push.head_commit.id as sha from push -) -SELECT name, pytorch_git_version, pytorch_version, s.pytorch_commit_time FROM w -INNER JOIN s ON w.pytorch_git_version = s.sha - ORDER BY s.pytorch_commit_time DESC; \ No newline at end of file diff --git a/torchci/rockset/torchbench/__sql/torchbench_userbenchmark_query_metrics.sql b/torchci/rockset/torchbench/__sql/torchbench_userbenchmark_query_metrics.sql deleted file mode 100644 index 726ad1eceb..0000000000 --- a/torchci/rockset/torchbench/__sql/torchbench_userbenchmark_query_metrics.sql +++ /dev/null @@ -1,3 +0,0
@@ -SELECT * FROM torchbench."torchbench-userbenchmark" - WHERE name = :userbenchmark - AND REGEXP_LIKE("torchbench-userbenchmark".environ.pytorch_git_version, :commit); \ No newline at end of file diff --git a/torchci/rockset/torchbench/torchbench_list_userbenchmarks.lambda.json b/torchci/rockset/torchbench/torchbench_list_userbenchmarks.lambda.json deleted file mode 100644 index 0720524ebc..0000000000 --- a/torchci/rockset/torchbench/torchbench_list_userbenchmarks.lambda.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "sql_path": "__sql/torchbench_list_userbenchmarks.sql", - "default_parameters": [], - "description": "List unique userbenchmark names" -} \ No newline at end of file diff --git a/torchci/rockset/torchbench/torchbench_userbenchmark_list_commits.lambda.json b/torchci/rockset/torchbench/torchbench_userbenchmark_list_commits.lambda.json deleted file mode 100644 index 1593a2f575..0000000000 --- a/torchci/rockset/torchbench/torchbench_userbenchmark_list_commits.lambda.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "sql_path": "__sql/torchbench_userbenchmark_list_commits.sql", - "default_parameters": [ - { - "name": "userbenchmark", - "type": "string", - "value": "torch-nightly" - } - ], - "description": "List commits of a given userbenchmark" -} \ No newline at end of file diff --git a/torchci/rockset/torchbench/torchbench_userbenchmark_query_metrics.lambda.json b/torchci/rockset/torchbench/torchbench_userbenchmark_query_metrics.lambda.json deleted file mode 100644 index 4ca0d0a62b..0000000000 --- a/torchci/rockset/torchbench/torchbench_userbenchmark_query_metrics.lambda.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sql_path": "__sql/torchbench_userbenchmark_query_metrics.sql", - "default_parameters": [ - { - "name": "commit", - "type": "string", - "value": "4a4af8fb1c" - }, - { - "name": "userbenchmark", - "type": "string", - "value": "torch-nightly" - } - ], - "description": "Query userbenchmark metrics by userbenchmark name and pytorch git commit hash" -} \ No newline at end of file diff --git a/torchci/rockset/utilization/__sql/runner_utilization.sql b/torchci/rockset/utilization/__sql/runner_utilization.sql deleted file mode 100644 index 93b1ac0798..0000000000 --- a/torchci/rockset/utilization/__sql/runner_utilization.sql +++ /dev/null @@ -1,23 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC( - :granularity, - started_at AT TIME ZONE :timezone - ) - ) AS started_date, - label, - SUM(DATE_DIFF('SECOND',started_at,completed_at)) AS duration, -FROM (SELECT - PARSE_TIMESTAMP_ISO8601(started_at) as started_at, - PARSE_TIMESTAMP_ISO8601(completed_at) as completed_at, - ELEMENT_AT(labels, 1) AS label - FROM commons.workflow_job - WHERE - status = 'completed' AND - runner_group_name = 'GitHub Actions' AND - PARSE_DATETIME_ISO8601(started_at) >= PARSE_DATETIME_ISO8601(:startTime) AND - PARSE_DATETIME_ISO8601(started_at) < PARSE_DATETIME_ISO8601(:stopTime) - ) AS gha_jobs -GROUP BY started_date, label -ORDER BY started_date DESC, label -LIMIT 500; \ No newline at end of file diff --git a/torchci/rockset/utilization/__sql/runner_utilization_by_activity.sql b/torchci/rockset/utilization/__sql/runner_utilization_by_activity.sql deleted file mode 100644 index 85f847b644..0000000000 --- a/torchci/rockset/utilization/__sql/runner_utilization_by_activity.sql +++ /dev/null @@ -1,31 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC( - :granularity, - started_at AT TIME ZONE :timezone - ) - ) AS started_date, - activity, - SUM(DATE_DIFF('SECOND',started_at, completed_at)) AS duration, -FROM (SELECT - 
PARSE_TIMESTAMP_ISO8601(started_at) as started_at, - PARSE_TIMESTAMP_ISO8601(completed_at) as completed_at, - IF(head_branch like 'ciflow/%', - CONCAT('ciflow/', ELEMENT_AT(SPLIT(head_branch, '/'), 2)), - -- workaround for missing event_type property - -- If head_branch does not start with ciflow/ but the workflow name is 'periodic', - -- then it must be running as a result of a scheduled event - IF(workflow_name = 'periodic', 'periodic', head_branch) - ) as activity - FROM commons.workflow_job - WHERE - status = 'completed' AND - ARRAY_CONTAINS(labels, :label) AND - SUBSTR(run_url, 30, 15) = 'pytorch/pytorch' AND - runner_group_name = 'GitHub Actions' AND - PARSE_DATETIME_ISO8601(started_at) >= PARSE_DATETIME_ISO8601(:startTime) AND - PARSE_DATETIME_ISO8601(started_at) < PARSE_DATETIME_ISO8601(:stopTime) - ) AS gha_jobs -GROUP BY started_date, activity -ORDER BY started_date DESC, activity -LIMIT 500; \ No newline at end of file diff --git a/torchci/rockset/utilization/__sql/runner_utilization_by_repo.sql b/torchci/rockset/utilization/__sql/runner_utilization_by_repo.sql deleted file mode 100644 index 5871f8d994..0000000000 --- a/torchci/rockset/utilization/__sql/runner_utilization_by_repo.sql +++ /dev/null @@ -1,24 +0,0 @@ -SELECT - FORMAT_ISO8601( - DATE_TRUNC( - :granularity, - started_at AT TIME ZONE :timezone - ) - ) AS started_date, - project, - SUM(DATE_DIFF('SECOND', started_at, completed_at)) AS duration, -FROM (SELECT - PARSE_TIMESTAMP_ISO8601(started_at) as started_at, - PARSE_TIMESTAMP_ISO8601(completed_at) as completed_at, - ELEMENT_AT(SPLIT(SUBSTR(run_url, 30), '/'), 2) AS project, - FROM commons.workflow_job - WHERE - status = 'completed' AND - ARRAY_CONTAINS(labels, :label) AND - runner_group_name = 'GitHub Actions' AND - PARSE_DATETIME_ISO8601(started_at) >= PARSE_DATETIME_ISO8601(:startTime) AND - PARSE_DATETIME_ISO8601(started_at) < PARSE_DATETIME_ISO8601(:stopTime) - ) AS gha_jobs -GROUP BY started_date, project -ORDER BY started_date DESC, project -LIMIT 500; \ No newline at end of file diff --git a/torchci/rockset/utilization/runner_utilization.lambda.json b/torchci/rockset/utilization/runner_utilization.lambda.json deleted file mode 100644 index 845869b6bc..0000000000 --- a/torchci/rockset/utilization/runner_utilization.lambda.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "sql_path": "__sql/runner_utilization.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-04-01T00:00:00.000Z" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-04-30T00:00:00.000Z" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "" -} \ No newline at end of file diff --git a/torchci/rockset/utilization/runner_utilization_by_activity.lambda.json b/torchci/rockset/utilization/runner_utilization_by_activity.lambda.json deleted file mode 100644 index d44a6d400b..0000000000 --- a/torchci/rockset/utilization/runner_utilization_by_activity.lambda.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "sql_path": "__sql/runner_utilization_by_activity.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "label", - "type": "string", - "value": "macos-12-xl" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-04-01" - }, - { - "name": "started_pattern", - "type": "string", - "value": "2023-04-%" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-04-30"
- }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "Collects runner utilization by activity" -} \ No newline at end of file diff --git a/torchci/rockset/utilization/runner_utilization_by_repo.lambda.json b/torchci/rockset/utilization/runner_utilization_by_repo.lambda.json deleted file mode 100644 index b3665ac313..0000000000 --- a/torchci/rockset/utilization/runner_utilization_by_repo.lambda.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "sql_path": "__sql/runner_utilization_by_repo.sql", - "default_parameters": [ - { - "name": "granularity", - "type": "string", - "value": "day" - }, - { - "name": "label", - "type": "string", - "value": "macos-12-xl" - }, - { - "name": "startTime", - "type": "string", - "value": "2023-04-01" - }, - { - "name": "stopTime", - "type": "string", - "value": "2023-04-30" - }, - { - "name": "timezone", - "type": "string", - "value": "America/Los_Angeles" - } - ], - "description": "" -} \ No newline at end of file