diff --git a/.gitignore b/.gitignore index d67e69afa..b94fa5892 100644 --- a/.gitignore +++ b/.gitignore @@ -82,6 +82,20 @@ artifacts/ # Chutzpah Test files _Chutzpah* +# Python +__pycache__/ +*.py[cod] +*$py.class + +# Virtual environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + # Visual C++ cache files ipch/ *.aps diff --git a/build/benchmarks-ci-01.yml b/build/benchmarks-ci-01.yml index e20449282..975cd715d 100644 --- a/build/benchmarks-ci-01.yml +++ b/build/benchmarks-ci-01.yml @@ -1,8 +1,9 @@ # Do not change this file, it is generated using these steps: -# - The file benchmarks.matrix.yml defines how each job is run in parallel -# - Convert its content to json using https://jsonformatter.org/yaml-to-json -# - Use the template in benchmarks.template.liquid and the converted json using https://liquidjs.com/playground.html -# - Update this file with the result of the template generation +# - The file benchmarks*.json defines how each pipeline set of jobs is run in parallel +# - Update the associated benchmarks*.json file with machine and scenario updates +# - Install python and install the requirements for the crank-scheduler in benchmarks/scripts/crank-scheduler/requirements.txt +# - Run the scheduler specifying the desired benchmarks*.json file, this template, and benchmarks/output to automatically overwrite the current pipeline. +# - Ex. 
python ./scripts/crank-scheduler/main.py --config ./build/benchmarks_ci.json --template ./build/benchmarks.template.liquid --yaml-output ./build trigger: none pr: none @@ -89,8 +90,8 @@ jobs: # GROUP 2 -- job: PGO_Intel_Lin - displayName: 2- PGO Intel Lin +- job: PGO_Gold_Win + displayName: 2- PGO Gold Win pool: server timeoutInMinutes: 120 dependsOn: [Proxies_Gold_Lin, Proxies_Intel_Lin, Grpc_Gold_Win, Grpc_Intel_Win] @@ -101,7 +102,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine1 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile gold-db-db " - job: PGO_Intel_Win displayName: 2- PGO Intel Win @@ -115,7 +116,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile gold-db-db " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile intel-db-db " - job: Grpc_Gold_Lin displayName: 2- Grpc Gold Lin @@ -131,19 +132,19 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load " -- job: Baselines_Gold_Win - displayName: 2- Baselines Gold Win +- job: Grpc_Intel_Lin + displayName: 2- Grpc Intel Lin pool: server timeoutInMinutes: 120 dependsOn: [Proxies_Gold_Lin, Proxies_Intel_Lin, Grpc_Gold_Win, Grpc_Intel_Win] condition: succeededOrFailed() steps: - - template: baselines-scenarios.yml + - template: grpc-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " # GROUP 3 @@ -151,7 +152,7 @@ jobs: 
displayName: 3- Baselines Database Amd Lin2 pool: server timeoutInMinutes: 120 - dependsOn: [PGO_Intel_Lin, PGO_Intel_Win, Grpc_Gold_Lin, Baselines_Gold_Win] + dependsOn: [PGO_Gold_Win, PGO_Intel_Win, Grpc_Gold_Lin, Grpc_Intel_Lin] condition: succeededOrFailed() steps: - template: baselines-database-scenarios.yml @@ -159,13 +160,13 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine1 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile amd-lin2-app --profile intel-load-load --profile intel-db-db " + arguments: "$(ciProfile) --profile amd-lin2-app --profile gold-load-load --profile gold-db-db " - job: HttpClient_Gold_Lin displayName: 3- HttpClient Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [PGO_Intel_Lin, PGO_Intel_Win, Grpc_Gold_Lin, Baselines_Gold_Win] + dependsOn: [PGO_Gold_Win, PGO_Intel_Win, Grpc_Gold_Lin, Grpc_Intel_Lin] condition: succeededOrFailed() steps: - template: httpclient-scenarios.yml @@ -173,13 +174,13 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load " + arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load2-load " - job: HttpClient_Intel_Lin displayName: 3- HttpClient Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [PGO_Intel_Lin, PGO_Intel_Win, Grpc_Gold_Lin, Baselines_Gold_Win] + dependsOn: [PGO_Gold_Win, PGO_Intel_Win, Grpc_Gold_Lin, Grpc_Intel_Lin] condition: succeededOrFailed() steps: - template: httpclient-scenarios.yml @@ -187,13 +188,13 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load2-load " + arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " - job: SignalR_Intel_Win displayName: 3- SignalR Intel Win pool: server timeoutInMinutes: 120 - 
dependsOn: [PGO_Intel_Lin, PGO_Intel_Win, Grpc_Gold_Lin, Baselines_Gold_Win] + dependsOn: [PGO_Gold_Win, PGO_Intel_Win, Grpc_Gold_Lin, Grpc_Intel_Lin] condition: succeededOrFailed() steps: - template: signalr-scenarios.yml @@ -201,7 +202,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " # GROUP 4 @@ -233,22 +234,22 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " -- job: MVC_Intel_Win - displayName: 4- MVC Intel Win +- job: Frameworks_Database_Amd_Lin2 + displayName: 4- Frameworks Database Amd Lin2 pool: server timeoutInMinutes: 120 dependsOn: [Baselines_Database_Amd_Lin2, HttpClient_Gold_Lin, HttpClient_Intel_Lin, SignalR_Intel_Win] condition: succeededOrFailed() steps: - - template: mvc-scenarios.yml + - template: frameworks-database-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile intel-db-db " + arguments: "$(ciProfile) --profile amd-lin2-app --profile gold-load2-load --profile gold-db-db " -- job: NativeAOT_Gold_Win - displayName: 4- NativeAOT Gold Win +- job: NativeAOT_Intel_Win + displayName: 4- NativeAOT Intel Win pool: server timeoutInMinutes: 120 dependsOn: [Baselines_Database_Amd_Lin2, HttpClient_Gold_Lin, HttpClient_Intel_Lin, SignalR_Intel_Win] @@ -259,7 +260,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile gold-db-db " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile intel-db-db " # GROUP 5 @@ 
-267,7 +268,7 @@ jobs: displayName: 5- SignalR Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Gold_Lin, Baselines_Intel_Lin, MVC_Intel_Win, NativeAOT_Gold_Win] + dependsOn: [Baselines_Gold_Lin, Baselines_Intel_Lin, Frameworks_Database_Amd_Lin2, NativeAOT_Intel_Win] condition: succeededOrFailed() steps: - template: signalr-scenarios.yml @@ -281,7 +282,7 @@ jobs: displayName: 5- SignalR Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Gold_Lin, Baselines_Intel_Lin, MVC_Intel_Win, NativeAOT_Gold_Win] + dependsOn: [Baselines_Gold_Lin, Baselines_Intel_Lin, Frameworks_Database_Amd_Lin2, NativeAOT_Intel_Win] condition: succeededOrFailed() steps: - template: signalr-scenarios.yml @@ -291,25 +292,25 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " -- job: NativeAOT_Intel_Win - displayName: 5- NativeAOT Intel Win +- job: MVC_Gold_Win + displayName: 5- MVC Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Gold_Lin, Baselines_Intel_Lin, MVC_Intel_Win, NativeAOT_Gold_Win] + dependsOn: [Baselines_Gold_Lin, Baselines_Intel_Lin, Frameworks_Database_Amd_Lin2, NativeAOT_Intel_Win] condition: succeededOrFailed() steps: - - template: nativeaot-scenarios.yml + - template: mvc-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile intel-db-db " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile gold-db-db " -- job: Trends_Gold_Win - displayName: 5- Trends Gold Win +- job: Trends_Intel_Win + displayName: 5- Trends Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Gold_Lin, Baselines_Intel_Lin, MVC_Intel_Win, NativeAOT_Gold_Win] + dependsOn: [Baselines_Gold_Lin, Baselines_Intel_Lin, Frameworks_Database_Amd_Lin2, NativeAOT_Intel_Win] 
condition: succeededOrFailed() steps: - template: trend-scenarios.yml @@ -317,7 +318,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " # GROUP 6 @@ -325,7 +326,7 @@ jobs: displayName: 6- NativeAOT Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [SignalR_Gold_Lin, SignalR_Intel_Lin, NativeAOT_Intel_Win, Trends_Gold_Win] + dependsOn: [SignalR_Gold_Lin, SignalR_Intel_Lin, MVC_Gold_Win, Trends_Intel_Win] condition: succeededOrFailed() steps: - template: nativeaot-scenarios.yml @@ -339,7 +340,7 @@ jobs: displayName: 6- NativeAOT Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [SignalR_Gold_Lin, SignalR_Intel_Lin, NativeAOT_Intel_Win, Trends_Gold_Win] + dependsOn: [SignalR_Gold_Lin, SignalR_Intel_Lin, MVC_Gold_Win, Trends_Intel_Win] condition: succeededOrFailed() steps: - template: nativeaot-scenarios.yml @@ -349,25 +350,25 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " -- job: GC_Gold_Win - displayName: 6- GC Gold Win +- job: EF_Core_Gold_Win + displayName: 6- EF Core Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [SignalR_Gold_Lin, SignalR_Intel_Lin, NativeAOT_Intel_Win, Trends_Gold_Win] + dependsOn: [SignalR_Gold_Lin, SignalR_Intel_Lin, MVC_Gold_Win, Trends_Intel_Win] condition: succeededOrFailed() steps: - - template: gc-scenarios.yml + - template: efcore-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " - job: GC_Intel_Win displayName: 6- GC Intel Win pool: server timeoutInMinutes: 120 - dependsOn: 
[SignalR_Gold_Lin, SignalR_Intel_Lin, NativeAOT_Intel_Win, Trends_Gold_Win] + dependsOn: [SignalR_Gold_Lin, SignalR_Intel_Lin, MVC_Gold_Win, Trends_Intel_Win] condition: succeededOrFailed() steps: - template: gc-scenarios.yml @@ -383,7 +384,7 @@ jobs: displayName: 7- Frameworks Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [NativeAOT_Gold_Lin, NativeAOT_Intel_Lin, GC_Gold_Win, GC_Intel_Win] + dependsOn: [NativeAOT_Gold_Lin, NativeAOT_Intel_Lin, EF_Core_Gold_Win, GC_Intel_Win] condition: succeededOrFailed() steps: - template: frameworks-scenarios.yml @@ -397,7 +398,7 @@ jobs: displayName: 7- Frameworks Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [NativeAOT_Gold_Lin, NativeAOT_Intel_Lin, GC_Gold_Win, GC_Intel_Win] + dependsOn: [NativeAOT_Gold_Lin, NativeAOT_Intel_Lin, EF_Core_Gold_Win, GC_Intel_Win] condition: succeededOrFailed() steps: - template: frameworks-scenarios.yml @@ -411,7 +412,7 @@ jobs: displayName: 7- Single File Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [NativeAOT_Gold_Lin, NativeAOT_Intel_Lin, GC_Gold_Win, GC_Intel_Win] + dependsOn: [NativeAOT_Gold_Lin, NativeAOT_Intel_Lin, EF_Core_Gold_Win, GC_Intel_Win] condition: succeededOrFailed() steps: - template: singlefile-scenarios.yml @@ -425,7 +426,7 @@ jobs: displayName: 7- Single File Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [NativeAOT_Gold_Lin, NativeAOT_Intel_Lin, GC_Gold_Win, GC_Intel_Win] + dependsOn: [NativeAOT_Gold_Lin, NativeAOT_Intel_Lin, EF_Core_Gold_Win, GC_Intel_Win] condition: succeededOrFailed() steps: - template: singlefile-scenarios.yml @@ -465,8 +466,8 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " -- job: WebSockets_Gold_Win - displayName: 8- WebSockets Gold Win +- job: WebSockets_Intel_Win + displayName: 8- WebSockets Intel Win pool: server timeoutInMinutes: 120 dependsOn: [Frameworks_Gold_Lin, Frameworks_Intel_Lin, 
Single_File_Gold_Win, Single_File_Intel_Win] @@ -477,43 +478,43 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " -- job: WebSockets_Intel_Win - displayName: 8- WebSockets Intel Win +- job: Crossgen_Gold_Win + displayName: 8- Crossgen Gold Win pool: server timeoutInMinutes: 120 dependsOn: [Frameworks_Gold_Lin, Frameworks_Intel_Lin, Single_File_Gold_Win, Single_File_Intel_Win] condition: succeededOrFailed() steps: - - template: websocket-scenarios.yml + - template: crossgen2-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " # GROUP 9 -- job: WebSockets_Gold_Lin - displayName: 9- WebSockets Gold Lin +- job: Trends_Database_Amd_Lin2 + displayName: 9- Trends Database Amd Lin2 pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Database_Gold_Lin, Trends_Database_Intel_Lin, WebSockets_Gold_Win, WebSockets_Intel_Win] + dependsOn: [Trends_Database_Gold_Lin, Trends_Database_Intel_Lin, WebSockets_Intel_Win, Crossgen_Gold_Win] condition: succeededOrFailed() steps: - - template: websocket-scenarios.yml + - template: trend-database-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine1 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load " + arguments: "$(ciProfile) --profile amd-lin2-app --profile gold-load-load --profile gold-db-db " -- job: WebSockets_Intel_Lin - displayName: 9- WebSockets Intel Lin +- job: WebSockets_Gold_Lin + displayName: 9- WebSockets Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: 
[Trends_Database_Gold_Lin, Trends_Database_Intel_Lin, WebSockets_Gold_Win, WebSockets_Intel_Win] + dependsOn: [Trends_Database_Gold_Lin, Trends_Database_Intel_Lin, WebSockets_Intel_Win, Crossgen_Gold_Win] condition: succeededOrFailed() steps: - template: websocket-scenarios.yml @@ -521,27 +522,27 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " + arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load2-load " -- job: Crossgen_Amd_Lin2 - displayName: 9- Crossgen Amd Lin2 +- job: WebSockets_Intel_Lin + displayName: 9- WebSockets Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Database_Gold_Lin, Trends_Database_Intel_Lin, WebSockets_Gold_Win, WebSockets_Intel_Win] + dependsOn: [Trends_Database_Gold_Lin, Trends_Database_Intel_Lin, WebSockets_Intel_Win, Crossgen_Gold_Win] condition: succeededOrFailed() steps: - - template: crossgen2-scenarios.yml + - template: websocket-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile amd-lin2-app --profile intel-load2-load " + arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " - job: Build_Gold_Win displayName: 9- Build Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Database_Gold_Lin, Trends_Database_Intel_Lin, WebSockets_Gold_Win, WebSockets_Intel_Win] + dependsOn: [Trends_Database_Gold_Lin, Trends_Database_Intel_Lin, WebSockets_Intel_Win, Crossgen_Gold_Win] condition: succeededOrFailed() steps: - template: build-perf-scenarios.yml @@ -557,7 +558,7 @@ jobs: displayName: 10- Custom Proxies Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [WebSockets_Gold_Lin, WebSockets_Intel_Lin, Crossgen_Amd_Lin2, Build_Gold_Win] + dependsOn: [Trends_Database_Amd_Lin2, WebSockets_Gold_Lin, 
WebSockets_Intel_Lin, Build_Gold_Win] condition: succeededOrFailed() steps: - template: proxies-custom-scenarios.yml @@ -571,7 +572,7 @@ jobs: displayName: 10- Custom Proxies Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [WebSockets_Gold_Lin, WebSockets_Intel_Lin, Crossgen_Amd_Lin2, Build_Gold_Win] + dependsOn: [Trends_Database_Amd_Lin2, WebSockets_Gold_Lin, WebSockets_Intel_Lin, Build_Gold_Win] condition: succeededOrFailed() steps: - template: proxies-custom-scenarios.yml diff --git a/build/benchmarks-ci-02.yml b/build/benchmarks-ci-02.yml index 11c9edef6..a833e5cbe 100644 --- a/build/benchmarks-ci-02.yml +++ b/build/benchmarks-ci-02.yml @@ -1,8 +1,9 @@ # Do not change this file, it is generated using these steps: -# - The file benchmarks.matrix.yml defines how each job is run in parallel -# - Convert its content to json using https://jsonformatter.org/yaml-to-json -# - Use the template in benchmarks.template.liquid and the converted json using https://liquidjs.com/playground.html -# - Update this file with the result of the template generation +# - The file benchmarks*.json defines how each pipeline set of jobs is run in parallel +# - Update the associated benchmarks*.json file with machine and scenario updates +# - Install python and install the requirements for the crank-scheduler in benchmarks/scripts/crank-scheduler/requirements.txt +# - Run the scheduler specifying the desired benchmarks*.json file, this template, and benchmarks/output to automatically overwrite the current pipeline. +# - Ex. 
python ./scripts/crank-scheduler/main.py --config ./build/benchmarks_ci.json --template ./build/benchmarks.template.liquid --yaml-output ./build trigger: none pr: none @@ -59,22 +60,22 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " -- job: HttpClient_Gold_Win - displayName: 1- HttpClient Gold Win +- job: Blazor_Intel_Perflin + displayName: 1- Blazor Intel Perflin pool: server timeoutInMinutes: 120 dependsOn: [] condition: succeededOrFailed() steps: - - template: httpclient-scenarios.yml + - template: blazor-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile intel-perflin-app --profile intel-perfload-load " -- job: HttpClient_Intel_Win - displayName: 1- HttpClient Intel Win +- job: HttpClient_Gold_Win + displayName: 1- HttpClient Gold Win pool: server timeoutInMinutes: 120 dependsOn: [] @@ -85,7 +86,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " # GROUP 2 @@ -93,7 +94,7 @@ jobs: displayName: 2- PGO Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [Containers_Gold_Lin, Containers_Intel_Lin, HttpClient_Gold_Win, HttpClient_Intel_Win] + dependsOn: [Containers_Gold_Lin, Containers_Intel_Lin, Blazor_Intel_Perflin, HttpClient_Gold_Win] condition: succeededOrFailed() steps: - template: pgo-scenarios.yml @@ -103,11 +104,11 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load --profile gold-db-db " -- job: PGO_Gold_Win - displayName: 2- PGO Gold Win +- job: PGO_Intel_Lin + 
displayName: 2- PGO Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Containers_Gold_Lin, Containers_Intel_Lin, HttpClient_Gold_Win, HttpClient_Intel_Win] + dependsOn: [Containers_Gold_Lin, Containers_Intel_Lin, Blazor_Intel_Perflin, HttpClient_Gold_Win] condition: succeededOrFailed() steps: - template: pgo-scenarios.yml @@ -115,27 +116,27 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile intel-db-db " + arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " -- job: Grpc_Intel_Lin - displayName: 2- Grpc Intel Lin +- job: HttpClient_Intel_Win + displayName: 2- HttpClient Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [Containers_Gold_Lin, Containers_Intel_Lin, HttpClient_Gold_Win, HttpClient_Intel_Win] + dependsOn: [Containers_Gold_Lin, Containers_Intel_Lin, Blazor_Intel_Perflin, HttpClient_Gold_Win] condition: succeededOrFailed() steps: - - template: grpc-scenarios.yml + - template: httpclient-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " -- job: Baselines_Intel_Win - displayName: 2- Baselines Intel Win +- job: Baselines_Gold_Win + displayName: 2- Baselines Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [Containers_Gold_Lin, Containers_Intel_Lin, HttpClient_Gold_Win, HttpClient_Intel_Win] + dependsOn: [Containers_Gold_Lin, Containers_Intel_Lin, Blazor_Intel_Perflin, HttpClient_Gold_Win] condition: succeededOrFailed() steps: - template: baselines-scenarios.yml @@ -143,7 +144,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - 
arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " # GROUP 3 @@ -151,7 +152,7 @@ jobs: displayName: 3- Baselines Database Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [PGO_Gold_Lin, PGO_Gold_Win, Grpc_Intel_Lin, Baselines_Intel_Win] + dependsOn: [PGO_Gold_Lin, PGO_Intel_Lin, HttpClient_Intel_Win, Baselines_Gold_Win] condition: succeededOrFailed() steps: - template: baselines-database-scenarios.yml @@ -161,11 +162,11 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load --profile gold-db-db " -- job: Baselines_Database_Gold_Win - displayName: 3- Baselines Database Gold Win +- job: Baselines_Database_Intel_Lin + displayName: 3- Baselines Database Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [PGO_Gold_Lin, PGO_Gold_Win, Grpc_Intel_Lin, Baselines_Intel_Win] + dependsOn: [PGO_Gold_Lin, PGO_Intel_Lin, HttpClient_Intel_Win, Baselines_Gold_Win] condition: succeededOrFailed() steps: - template: baselines-database-scenarios.yml @@ -173,27 +174,27 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile intel-db-db " + arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " -- job: Blazor_Intel_Lin - displayName: 3- Blazor Intel Lin +- job: Baselines_Intel_Win + displayName: 3- Baselines Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [PGO_Gold_Lin, PGO_Gold_Win, Grpc_Intel_Lin, Baselines_Intel_Win] + dependsOn: [PGO_Gold_Lin, PGO_Intel_Lin, HttpClient_Intel_Win, Baselines_Gold_Win] condition: succeededOrFailed() steps: - - template: blazor-scenarios.yml + - template: baselines-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 
serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " - job: Baselines_Amd_Lin2 displayName: 3- Baselines Amd Lin2 pool: server timeoutInMinutes: 120 - dependsOn: [PGO_Gold_Lin, PGO_Gold_Win, Grpc_Intel_Lin, Baselines_Intel_Win] + dependsOn: [PGO_Gold_Lin, PGO_Intel_Lin, HttpClient_Intel_Win, Baselines_Gold_Win] condition: succeededOrFailed() steps: - template: baselines-scenarios.yml @@ -201,15 +202,15 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile amd-lin2-app --profile intel-load2-load " + arguments: "$(ciProfile) --profile amd-lin2-app --profile gold-load2-load " # GROUP 4 -- job: Baselines_Database_Intel_Lin - displayName: 4- Baselines Database Intel Lin +- job: Baselines_Database_Gold_Win + displayName: 4- Baselines Database Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Database_Gold_Lin, Baselines_Database_Gold_Win, Blazor_Intel_Lin, Baselines_Amd_Lin2] + dependsOn: [Baselines_Database_Gold_Lin, Baselines_Database_Intel_Lin, Baselines_Intel_Win, Baselines_Amd_Lin2] condition: succeededOrFailed() steps: - template: baselines-database-scenarios.yml @@ -217,13 +218,13 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine1 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile gold-db-db " - job: Baselines_Database_Intel_Win displayName: 4- Baselines Database Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Database_Gold_Lin, Baselines_Database_Gold_Win, Blazor_Intel_Lin, Baselines_Amd_Lin2] + dependsOn: [Baselines_Database_Gold_Lin, Baselines_Database_Intel_Lin, Baselines_Intel_Win, 
Baselines_Amd_Lin2] condition: succeededOrFailed() steps: - template: baselines-database-scenarios.yml @@ -231,13 +232,13 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile gold-db-db " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile intel-db-db " - job: Blazor_Gold_Lin displayName: 4- Blazor Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Database_Gold_Lin, Baselines_Database_Gold_Win, Blazor_Intel_Lin, Baselines_Amd_Lin2] + dependsOn: [Baselines_Database_Gold_Lin, Baselines_Database_Intel_Lin, Baselines_Intel_Win, Baselines_Amd_Lin2] condition: succeededOrFailed() steps: - template: blazor-scenarios.yml @@ -247,19 +248,19 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load " -- job: SignalR_Gold_Win - displayName: 4- SignalR Gold Win +- job: Blazor_Intel_Lin + displayName: 4- Blazor Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Database_Gold_Lin, Baselines_Database_Gold_Win, Blazor_Intel_Lin, Baselines_Amd_Lin2] + dependsOn: [Baselines_Database_Gold_Lin, Baselines_Database_Intel_Lin, Baselines_Intel_Win, Baselines_Amd_Lin2] condition: succeededOrFailed() steps: - - template: signalr-scenarios.yml + - template: blazor-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " # GROUP 5 @@ -267,7 +268,7 @@ jobs: displayName: 5- SslStream Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Database_Intel_Lin, Baselines_Database_Intel_Win, Blazor_Gold_Lin, SignalR_Gold_Win] + dependsOn: [Baselines_Database_Gold_Win, 
Baselines_Database_Intel_Win, Blazor_Gold_Lin, Blazor_Intel_Lin] condition: succeededOrFailed() steps: - template: sslstream-scenarios.yml @@ -281,7 +282,7 @@ jobs: displayName: 5- SslStream Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Database_Intel_Lin, Baselines_Database_Intel_Win, Blazor_Gold_Lin, SignalR_Gold_Win] + dependsOn: [Baselines_Database_Gold_Win, Baselines_Database_Intel_Win, Blazor_Gold_Lin, Blazor_Intel_Lin] condition: succeededOrFailed() steps: - template: sslstream-scenarios.yml @@ -291,25 +292,25 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " -- job: Frameworks_Database_Amd_Lin2 - displayName: 5- Frameworks Database Amd Lin2 +- job: SignalR_Gold_Win + displayName: 5- SignalR Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Database_Intel_Lin, Baselines_Database_Intel_Win, Blazor_Gold_Lin, SignalR_Gold_Win] + dependsOn: [Baselines_Database_Gold_Win, Baselines_Database_Intel_Win, Blazor_Gold_Lin, Blazor_Intel_Lin] condition: succeededOrFailed() steps: - - template: frameworks-database-scenarios.yml + - template: signalr-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile amd-lin2-app --profile intel-load2-load --profile intel-db-db " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " -- job: MVC_Gold_Win - displayName: 5- MVC Gold Win +- job: MVC_Intel_Win + displayName: 5- MVC Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [Baselines_Database_Intel_Lin, Baselines_Database_Intel_Win, Blazor_Gold_Lin, SignalR_Gold_Win] + dependsOn: [Baselines_Database_Gold_Win, Baselines_Database_Intel_Win, Blazor_Gold_Lin, Blazor_Intel_Lin] condition: succeededOrFailed() steps: - template: mvc-scenarios.yml @@ -317,7 +318,7 @@ jobs: connection: ASPNET Benchmarks Service Bus 
serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile gold-db-db " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile intel-db-db " # GROUP 6 @@ -325,7 +326,7 @@ jobs: displayName: 6- Frameworks Database Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [SslStream_Gold_Lin, SslStream_Intel_Lin, Frameworks_Database_Amd_Lin2, MVC_Gold_Win] + dependsOn: [SslStream_Gold_Lin, SslStream_Intel_Lin, SignalR_Gold_Win, MVC_Intel_Win] condition: succeededOrFailed() steps: - template: frameworks-database-scenarios.yml @@ -339,7 +340,7 @@ jobs: displayName: 6- Frameworks Database Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [SslStream_Gold_Lin, SslStream_Intel_Lin, Frameworks_Database_Amd_Lin2, MVC_Gold_Win] + dependsOn: [SslStream_Gold_Lin, SslStream_Intel_Lin, SignalR_Gold_Win, MVC_Intel_Win] condition: succeededOrFailed() steps: - template: frameworks-database-scenarios.yml @@ -349,11 +350,11 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " -- job: Trends_Intel_Win - displayName: 6- Trends Intel Win +- job: Trends_Gold_Win + displayName: 6- Trends Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [SslStream_Gold_Lin, SslStream_Intel_Lin, Frameworks_Database_Amd_Lin2, MVC_Gold_Win] + dependsOn: [SslStream_Gold_Lin, SslStream_Intel_Lin, SignalR_Gold_Win, MVC_Intel_Win] condition: succeededOrFailed() steps: - template: trend-scenarios.yml @@ -361,21 +362,21 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " -- job: Trends_Amd_Lin2 - displayName: 6- Trends Amd Lin2 +- job: EF_Core_Intel_Win + displayName: 
6- EF Core Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [SslStream_Gold_Lin, SslStream_Intel_Lin, Frameworks_Database_Amd_Lin2, MVC_Gold_Win] + dependsOn: [SslStream_Gold_Lin, SslStream_Intel_Lin, SignalR_Gold_Win, MVC_Intel_Win] condition: succeededOrFailed() steps: - - template: trend-scenarios.yml + - template: efcore-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile amd-lin2-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " # GROUP 7 @@ -383,7 +384,7 @@ jobs: displayName: 7- MVC Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [Frameworks_Database_Gold_Lin, Frameworks_Database_Intel_Lin, Trends_Intel_Win, Trends_Amd_Lin2] + dependsOn: [Frameworks_Database_Gold_Lin, Frameworks_Database_Intel_Lin, Trends_Gold_Win, EF_Core_Intel_Win] condition: succeededOrFailed() steps: - template: mvc-scenarios.yml @@ -397,7 +398,7 @@ jobs: displayName: 7- MVC Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Frameworks_Database_Gold_Lin, Frameworks_Database_Intel_Lin, Trends_Intel_Win, Trends_Amd_Lin2] + dependsOn: [Frameworks_Database_Gold_Lin, Frameworks_Database_Intel_Lin, Trends_Gold_Win, EF_Core_Intel_Win] condition: succeededOrFailed() steps: - template: mvc-scenarios.yml @@ -407,55 +408,55 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load --profile intel-db-db " -- job: EF_Core_Gold_Win - displayName: 7- EF Core Gold Win +- job: Trends_Amd_Lin2 + displayName: 7- Trends Amd Lin2 pool: server timeoutInMinutes: 120 - dependsOn: [Frameworks_Database_Gold_Lin, Frameworks_Database_Intel_Lin, Trends_Intel_Win, Trends_Amd_Lin2] + dependsOn: [Frameworks_Database_Gold_Lin, Frameworks_Database_Intel_Lin, Trends_Gold_Win, EF_Core_Intel_Win] condition: succeededOrFailed() steps: - - template: 
efcore-scenarios.yml + - template: trend-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile amd-lin2-app --profile gold-load2-load " -- job: EF_Core_Intel_Win - displayName: 7- EF Core Intel Win +- job: GC_Gold_Win + displayName: 7- GC Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [Frameworks_Database_Gold_Lin, Frameworks_Database_Intel_Lin, Trends_Intel_Win, Trends_Amd_Lin2] + dependsOn: [Frameworks_Database_Gold_Lin, Frameworks_Database_Intel_Lin, Trends_Gold_Win, EF_Core_Intel_Win] condition: succeededOrFailed() steps: - - template: efcore-scenarios.yml + - template: gc-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load " + arguments: "$(ciProfile) --profile gold-win-app " # GROUP 8 -- job: Trends_Gold_Lin - displayName: 8- Trends Gold Lin +- job: NativeAOT_Gold_Win + displayName: 8- NativeAOT Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [MVC_Gold_Lin, MVC_Intel_Lin, EF_Core_Gold_Win, EF_Core_Intel_Win] + dependsOn: [MVC_Gold_Lin, MVC_Intel_Lin, Trends_Amd_Lin2, GC_Gold_Win] condition: succeededOrFailed() steps: - - template: trend-scenarios.yml + - template: nativeaot-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine1 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile gold-db-db " -- job: Trends_Intel_Lin - displayName: 8- Trends Intel Lin +- job: Trends_Gold_Lin + displayName: 8- Trends Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [MVC_Gold_Lin, MVC_Intel_Lin, EF_Core_Gold_Win, 
EF_Core_Intel_Win] + dependsOn: [MVC_Gold_Lin, MVC_Intel_Lin, Trends_Amd_Lin2, GC_Gold_Win] condition: succeededOrFailed() steps: - template: trend-scenarios.yml @@ -463,27 +464,27 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " + arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load " -- job: Frameworks_Amd_Lin2 - displayName: 8- Frameworks Amd Lin2 +- job: Trends_Intel_Lin + displayName: 8- Trends Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [MVC_Gold_Lin, MVC_Intel_Lin, EF_Core_Gold_Win, EF_Core_Intel_Win] + dependsOn: [MVC_Gold_Lin, MVC_Intel_Lin, Trends_Amd_Lin2, GC_Gold_Win] condition: succeededOrFailed() steps: - - template: frameworks-scenarios.yml + - template: trend-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile amd-lin2-app --profile intel-load2-load --profile intel-db-db " + arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " -- job: Trends_Database_Gold_Win - displayName: 8- Trends Database Gold Win +- job: Trends_Database_Intel_Win + displayName: 8- Trends Database Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [MVC_Gold_Lin, MVC_Intel_Lin, EF_Core_Gold_Win, EF_Core_Intel_Win] + dependsOn: [MVC_Gold_Lin, MVC_Intel_Lin, Trends_Amd_Lin2, GC_Gold_Win] condition: succeededOrFailed() steps: - template: trend-database-scenarios.yml @@ -491,29 +492,29 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile gold-db-db " + arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile intel-db-db " # GROUP 9 -- job: GC_Gold_Lin - displayName: 9- GC Gold Lin 
+- job: Frameworks_Amd_Lin2 + displayName: 9- Frameworks Amd Lin2 pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Gold_Lin, Trends_Intel_Lin, Frameworks_Amd_Lin2, Trends_Database_Gold_Win] + dependsOn: [NativeAOT_Gold_Win, Trends_Gold_Lin, Trends_Intel_Lin, Trends_Database_Intel_Win] condition: succeededOrFailed() steps: - - template: gc-scenarios.yml + - template: frameworks-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine1 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-lin-app " + arguments: "$(ciProfile) --profile amd-lin2-app --profile gold-load-load --profile gold-db-db " -- job: GC_Intel_Lin - displayName: 9- GC Intel Lin +- job: GC_Gold_Lin + displayName: 9- GC Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Gold_Lin, Trends_Intel_Lin, Frameworks_Amd_Lin2, Trends_Database_Gold_Win] + dependsOn: [NativeAOT_Gold_Win, Trends_Gold_Lin, Trends_Intel_Lin, Trends_Database_Intel_Win] condition: succeededOrFailed() steps: - template: gc-scenarios.yml @@ -521,57 +522,57 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app " + arguments: "$(ciProfile) --profile gold-lin-app " -- job: Trends_Database_Intel_Win - displayName: 9- Trends Database Intel Win +- job: GC_Intel_Lin + displayName: 9- GC Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Gold_Lin, Trends_Intel_Lin, Frameworks_Amd_Lin2, Trends_Database_Gold_Win] + dependsOn: [NativeAOT_Gold_Win, Trends_Gold_Lin, Trends_Intel_Lin, Trends_Database_Intel_Win] condition: succeededOrFailed() steps: - - template: trend-database-scenarios.yml + - template: gc-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-win-app --profile intel-load2-load --profile 
intel-db-db " + arguments: "$(ciProfile) --profile intel-lin-app " -- job: Trends_Database_Amd_Lin2 - displayName: 9- Trends Database Amd Lin2 +- job: WebSockets_Gold_Win + displayName: 9- WebSockets Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Gold_Lin, Trends_Intel_Lin, Frameworks_Amd_Lin2, Trends_Database_Gold_Win] + dependsOn: [NativeAOT_Gold_Win, Trends_Gold_Lin, Trends_Intel_Lin, Trends_Database_Intel_Win] condition: succeededOrFailed() steps: - - template: trend-database-scenarios.yml + - template: websocket-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile amd-lin2-app --profile intel-load-load --profile gold-db-db " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " # GROUP 10 -- job: Single_File_Gold_Lin - displayName: 10- Single File Gold Lin +- job: Trends_Database_Gold_Win + displayName: 10- Trends Database Gold Win pool: server timeoutInMinutes: 120 - dependsOn: [GC_Gold_Lin, GC_Intel_Lin, Trends_Database_Intel_Win, Trends_Database_Amd_Lin2] + dependsOn: [Frameworks_Amd_Lin2, GC_Gold_Lin, GC_Intel_Lin, WebSockets_Gold_Win] condition: succeededOrFailed() steps: - - template: singlefile-scenarios.yml + - template: trend-database-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine1 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load " + arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load --profile gold-db-db " -- job: Single_File_Intel_Lin - displayName: 10- Single File Intel Lin +- job: Single_File_Gold_Lin + displayName: 10- Single File Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [GC_Gold_Lin, GC_Intel_Lin, Trends_Database_Intel_Win, Trends_Database_Amd_Lin2] + dependsOn: [Frameworks_Amd_Lin2, GC_Gold_Lin, GC_Intel_Lin, WebSockets_Gold_Win] condition: 
succeededOrFailed() steps: - template: singlefile-scenarios.yml @@ -579,27 +580,27 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine2 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " + arguments: "$(ciProfile) --profile gold-lin-app --profile gold-load-load " -- job: Crossgen_Gold_Win - displayName: 10- Crossgen Gold Win +- job: Single_File_Intel_Lin + displayName: 10- Single File Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [GC_Gold_Lin, GC_Intel_Lin, Trends_Database_Intel_Win, Trends_Database_Amd_Lin2] + dependsOn: [Frameworks_Amd_Lin2, GC_Gold_Lin, GC_Intel_Lin, WebSockets_Gold_Win] condition: succeededOrFailed() steps: - - template: crossgen2-scenarios.yml + - template: singlefile-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: citrine3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile gold-win-app --profile gold-load2-load " + arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " - job: Crossgen_Intel_Win displayName: 10- Crossgen Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [GC_Gold_Lin, GC_Intel_Lin, Trends_Database_Intel_Win, Trends_Database_Amd_Lin2] + dependsOn: [Frameworks_Amd_Lin2, GC_Gold_Lin, GC_Intel_Lin, WebSockets_Gold_Win] condition: succeededOrFailed() steps: - template: crossgen2-scenarios.yml @@ -615,7 +616,7 @@ jobs: displayName: 11- Crossgen Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [Single_File_Gold_Lin, Single_File_Intel_Lin, Crossgen_Gold_Win, Crossgen_Intel_Win] + dependsOn: [Trends_Database_Gold_Win, Single_File_Gold_Lin, Single_File_Intel_Lin, Crossgen_Intel_Win] condition: succeededOrFailed() steps: - template: crossgen2-scenarios.yml @@ -629,7 +630,7 @@ jobs: displayName: 11- Crossgen Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Single_File_Gold_Lin, Single_File_Intel_Lin, Crossgen_Gold_Win, 
Crossgen_Intel_Win] + dependsOn: [Trends_Database_Gold_Win, Single_File_Gold_Lin, Single_File_Intel_Lin, Crossgen_Intel_Win] condition: succeededOrFailed() steps: - template: crossgen2-scenarios.yml @@ -639,17 +640,31 @@ jobs: serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-lin-app --profile intel-load-load " +- job: Crossgen_Amd_Lin2 + displayName: 11- Crossgen Amd Lin2 + pool: server + timeoutInMinutes: 120 + dependsOn: [Trends_Database_Gold_Win, Single_File_Gold_Lin, Single_File_Intel_Lin, Crossgen_Intel_Win] + condition: succeededOrFailed() + steps: + - template: crossgen2-scenarios.yml + parameters: + connection: ASPNET Benchmarks Service Bus + serviceBusQueueName: citrine3 + serviceBusNamespace: aspnetbenchmarks + arguments: "$(ciProfile) --profile amd-lin2-app --profile gold-load2-load " + - job: Build_Intel_Win displayName: 11- Build Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [Single_File_Gold_Lin, Single_File_Intel_Lin, Crossgen_Gold_Win, Crossgen_Intel_Win] + dependsOn: [Trends_Database_Gold_Win, Single_File_Gold_Lin, Single_File_Intel_Lin, Crossgen_Intel_Win] condition: succeededOrFailed() steps: - template: build-perf-scenarios.yml parameters: connection: ASPNET Benchmarks Service Bus - serviceBusQueueName: citrine3 + serviceBusQueueName: mono serviceBusNamespace: aspnetbenchmarks arguments: "$(ciProfile) --profile intel-win-app " @@ -659,7 +674,7 @@ jobs: displayName: 12- Build Gold Lin pool: server timeoutInMinutes: 120 - dependsOn: [Crossgen_Gold_Lin, Crossgen_Intel_Lin, Build_Intel_Win] + dependsOn: [Crossgen_Gold_Lin, Crossgen_Intel_Lin, Crossgen_Amd_Lin2, Build_Intel_Win] condition: succeededOrFailed() steps: - template: build-perf-scenarios.yml @@ -673,7 +688,7 @@ jobs: displayName: 12- Build Intel Lin pool: server timeoutInMinutes: 120 - dependsOn: [Crossgen_Gold_Lin, Crossgen_Intel_Lin, Build_Intel_Win] + dependsOn: [Crossgen_Gold_Lin, Crossgen_Intel_Lin, Crossgen_Amd_Lin2, Build_Intel_Win] 
condition: succeededOrFailed() steps: - template: build-perf-scenarios.yml diff --git a/build/benchmarks-ci-azure-eastus2.yml b/build/benchmarks-ci-azure-eastus2.yml index d9e03791f..ebd00c23a 100644 --- a/build/benchmarks-ci-azure-eastus2.yml +++ b/build/benchmarks-ci-azure-eastus2.yml @@ -1,8 +1,9 @@ # Do not change this file, it is generated using these steps: -# - The file benchmarks.matrix.yml defines how each job is run in parallel -# - Convert its content to json using https://jsonformatter.org/yaml-to-json -# - Use the template in benchmarks.template.liquid and the converted json using https://liquidjs.com/playground.html -# - Update this file with the result of the template generation +# - The file benchmarks*.json defines how each pipeline set of jobs is run in parallel +# - Update the associated benchmarks*.json file with machine and scenario updates +# - Install python and install the requirements for the crank-scheduler in benchmarks/scripts/crank-scheduler/requirements.txt +# - Run the scheduler specifying the desired benchmarks*.json file, this template, and benchmarks/output to automatically overwrite the current pipeline. +# - Ex. 
python ./scripts/crank-scheduler/main.py --config ./build/benchmarks_ci.json --template ./build/benchmarks.template.liquid --yaml-output ./build trigger: none pr: none diff --git a/build/benchmarks-ci-azure.yml b/build/benchmarks-ci-azure.yml index f88513055..ab31e1bde 100644 --- a/build/benchmarks-ci-azure.yml +++ b/build/benchmarks-ci-azure.yml @@ -1,8 +1,9 @@ # Do not change this file, it is generated using these steps: -# - The file benchmarks.matrix.yml defines how each job is run in parallel -# - Convert its content to json using https://jsonformatter.org/yaml-to-json -# - Use the template in benchmarks.template.liquid and the converted json using https://liquidjs.com/playground.html -# - Update this file with the result of the template generation +# - The file benchmarks*.json defines how each pipeline set of jobs is run in parallel +# - Update the associated benchmarks*.json file with machine and scenario updates +# - Install python and install the requirements for the crank-scheduler in benchmarks/scripts/crank-scheduler/requirements.txt +# - Run the scheduler specifying the desired benchmarks*.json file, this template, and benchmarks/output to automatically overwrite the current pipeline. +# - Ex. 
python ./scripts/crank-scheduler/main.py --config ./build/benchmarks_ci.json --template ./build/benchmarks.template.liquid --yaml-output ./build trigger: none pr: none @@ -163,10 +164,10 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: azure serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile idna-amd-lin-app --profile idna-intel-lin-load " + arguments: "$(ciProfile) --profile idna-amd-lin-app --profile idna-amd-win-load " -- job: Trends_Idna_Amd_Win - displayName: 5- Trends Idna Amd Win +- job: Trends_Idna_Intel_Lin + displayName: 5- Trends Idna Intel Lin pool: server timeoutInMinutes: 120 dependsOn: [Trends_Azure_Server_Arm64, Trends_Azure2_Server_Amd64] @@ -177,15 +178,15 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: azurearm64 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile idna-amd-win-app --profile azure2-db-load " + arguments: "$(ciProfile) --profile idna-intel-lin-app --profile idna-intel-win-load " # GROUP 6 -- job: Trends_Idna_Intel_Lin - displayName: 6- Trends Idna Intel Lin +- job: Trends_Idna_Amd_Win + displayName: 6- Trends Idna Amd Win pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Idna_Amd_Lin, Trends_Idna_Amd_Win] + dependsOn: [Trends_Idna_Amd_Lin, Trends_Idna_Intel_Lin] condition: succeededOrFailed() steps: - template: trend-scenarios.yml @@ -193,13 +194,13 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: azure serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile idna-intel-lin-app --profile idna-amd-lin-load " + arguments: "$(ciProfile) --profile idna-amd-win-app --profile idna-intel-lin-load " - job: Trends_Idna_Intel_Win displayName: 6- Trends Idna Intel Win pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Idna_Amd_Lin, Trends_Idna_Amd_Win] + dependsOn: [Trends_Idna_Amd_Lin, Trends_Idna_Intel_Lin] condition: succeededOrFailed() steps: - template: trend-scenarios.yml @@ -207,7 +208,7 @@ 
jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: azurearm64 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile idna-intel-win-app --profile azure2-db-load " + arguments: "$(ciProfile) --profile idna-intel-win-app --profile idna-amd-lin-load " # GROUP 7 @@ -215,7 +216,7 @@ jobs: displayName: 7- Trends Database Azure Server Arm64 pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Idna_Intel_Lin, Trends_Idna_Intel_Win] + dependsOn: [Trends_Idna_Amd_Win, Trends_Idna_Intel_Win] condition: succeededOrFailed() steps: - template: trend-database-scenarios.yml @@ -229,7 +230,7 @@ jobs: displayName: 7- Trends Database Azure2 Server Amd64 pool: server timeoutInMinutes: 120 - dependsOn: [Trends_Idna_Intel_Lin, Trends_Idna_Intel_Win] + dependsOn: [Trends_Idna_Amd_Win, Trends_Idna_Intel_Win] condition: succeededOrFailed() steps: - template: trend-database-scenarios.yml diff --git a/build/benchmarks-ci-cobalt.yml b/build/benchmarks-ci-cobalt.yml index 571c66798..f6597fff2 100644 --- a/build/benchmarks-ci-cobalt.yml +++ b/build/benchmarks-ci-cobalt.yml @@ -1,8 +1,9 @@ # Do not change this file, it is generated using these steps: -# - The file benchmarks.matrix.yml defines how each job is run in parallel -# - Convert its content to json using https://jsonformatter.org/yaml-to-json -# - Use the template in benchmarks.template.liquid and the converted json using https://liquidjs.com/playground.html -# - Update this file with the result of the template generation +# - The file benchmarks*.json defines how each pipeline set of jobs is run in parallel +# - Update the associated benchmarks*.json file with machine and scenario updates +# - Install python and install the requirements for the crank-scheduler in benchmarks/scripts/crank-scheduler/requirements.txt +# - Run the scheduler specifying the desired benchmarks*.json file, this template, and benchmarks/output to automatically overwrite the current pipeline. +# - Ex. 
python ./scripts/crank-scheduler/main.py --config ./build/benchmarks_ci.json --template ./build/benchmarks.template.liquid --yaml-output ./build trigger: none pr: none @@ -43,7 +44,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Containers_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 1- Containers Cobalt Hosted Lin Server Azure Linux3 @@ -57,7 +58,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 2 @@ -73,7 +74,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: PGO_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 2- PGO Cobalt Hosted Lin Server Azure Linux3 @@ -87,7 +88,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile 
cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 3 @@ -103,7 +104,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: PGO_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 3- PGO 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -117,7 +118,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 4 @@ -133,7 +134,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Baselines_Database_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 4- Baselines Database Cobalt Hosted Lin Server Azure Linux3 @@ -147,7 +148,7 @@ 
jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 5 @@ -163,7 +164,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Baselines_Database_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 5- Baselines Database 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -177,7 +178,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 6 @@ -193,7 +194,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load " - job: 
Baselines_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 6- Baselines Cobalt Hosted Lin Server Azure Linux3 @@ -207,7 +208,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load " # GROUP 7 @@ -223,7 +224,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load " - job: Baselines_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 7- Baselines 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -237,7 +238,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load " # GROUP 8 @@ -253,7 +254,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Frameworks_Database_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 8- 
Frameworks Database Cobalt Hosted Lin Server Azure Linux3 @@ -267,7 +268,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 9 @@ -283,7 +284,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Frameworks_Database_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 9- Frameworks Database 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -297,7 +298,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 10 @@ -313,7 +314,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile 
cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: MVC_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 10- MVC Cobalt Hosted Lin Server Azure Linux3 @@ -327,7 +328,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 11 @@ -343,7 +344,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: NativeAOT_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 11- NativeAOT Cobalt Hosted Lin Server Azure Linux3 @@ -357,7 +358,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 12 @@ -373,7 +374,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile 
cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: NativeAOT_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 12- NativeAOT 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -387,7 +388,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 13 @@ -403,7 +404,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load " - job: Trends_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 13- Trends Cobalt Hosted Lin Server Azure Linux3 @@ -417,7 +418,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load " # GROUP 14 @@ -433,7 +434,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile 
cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load " - job: Trends_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 14- Trends 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -447,7 +448,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load " # GROUP 15 @@ -463,7 +464,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Frameworks_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 15- Frameworks Cobalt Hosted Lin Server Azure Linux3 @@ -477,7 +478,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 16 @@ -493,7 +494,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile 
cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Frameworks_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 16- Frameworks 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -507,7 +508,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 17 @@ -583,7 +584,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Trends_Database_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 19- Trends Database Cobalt Hosted Lin Server Azure Linux3 @@ -597,7 +598,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 
20 @@ -613,7 +614,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " - job: Trends_Database_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 20- Trends Database 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -627,7 +628,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load --profile cobalt-hosted-lin-db-azure-linux3-db " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load --profile cobalt-hosted-lin-db-db " # GROUP 21 @@ -643,7 +644,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-app --profile cobalt-hosted-lin-client-azure-linux3-load " - job: Crossgen_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 21- Crossgen Cobalt Hosted Lin Server Azure Linux3 @@ -657,7 +658,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile cobalt-hosted-lin-client-azure-linux3-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-app --profile 
cobalt-hosted-lin-client-load " # GROUP 22 @@ -673,7 +674,7 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-28-app --profile cobalt-hosted-lin-client-azure-linux3-load " - job: Crossgen_28_Core_Cobalt_Hosted_Lin_Server_Azure_Linux3 displayName: 22- Crossgen 28 Core Cobalt Hosted Lin Server Azure Linux3 @@ -687,5 +688,5 @@ jobs: connection: ASPNET Benchmarks Service Bus serviceBusQueueName: cobalthosted_azurelinux3 serviceBusNamespace: aspnetbenchmarks - arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-azure-linux3-load " + arguments: "$(ciProfile) --profile cobalt-hosted-lin-server-azure-linux3-28-app --profile cobalt-hosted-lin-client-load " diff --git a/build/benchmarks.matrix.01.yml b/build/benchmarks.matrix.01.yml deleted file mode 100644 index 1540e7b61..000000000 --- a/build/benchmarks.matrix.01.yml +++ /dev/null @@ -1,299 +0,0 @@ -# This file describes all the scenarios which are run continuously on AzDo. -# It generates the file benchmarks.yml. See this file for instructions. 
- -queues: - - citrine1 - - citrine2 - - citrine3 - - mono - -schedule: "0 9/12 * * *" - -groups: - - - jobs: - - - name: Trends Database Intel Linux - template: trend-database-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - intel-db-db - - - name: Trends Intel Windows - template: trend-scenarios.yml - profiles: - - intel-win-app - - intel-load2-load - - - name: Trends Database Arm 28 Linux - template: trend-database-scenarios.yml - profiles: - - arm-lin-28-app - - amd-lin-load - - amd-lin2-db - - - name: Trends Database Gold Linux - template: trend-database-scenarios.yml - profiles: - - aspnet-gold-lin - - - jobs: - - - name: Trends Database Intel Windows - template: trend-database-scenarios.yml - profiles: - - intel-win-app - - intel-load-load - - intel-db-db - - - name: Trends Intel Linux - template: trend-scenarios.yml - profiles: - - intel-lin-app - - intel-load2-load - - - name: Native Aot Arm 28 Linux - template: nativeaot-scenarios.yml - profiles: - - arm-lin-28-app - - amd-lin-load - - amd-lin2-db - - - name: Trends Gold Windows - template: trend-scenarios.yml - profiles: - - aspnet-gold-win - - - jobs: - - - name: Trends Database Amd Linux - template: trend-database-scenarios.yml - profiles: - - amd-lin-app - - intel-load-load - - intel-db-db - - - name: Trends Arm 28 Linux - template: trend-scenarios.yml - profiles: - - arm-lin-28-app - - intel-load2-load - - - name: GC Intel Windows - template: gc-scenarios.yml - profiles: - - intel-win-app - - - name: Trends Gold Linux - template: trend-scenarios.yml - profiles: - - aspnet-gold-lin - - - jobs: - - - name: Trends Amd Linux - template: trend-scenarios.yml - profiles: - - amd-lin-app - - intel-load2-load - - - name: Containers Intel Linux - template: containers-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - intel-db-db - - - name: EF Core Intel Windows - template: efcore-scenarios.yml - profiles: - - intel-win-app - - - name: Trends Database Gold Windows - 
template: trend-database-scenarios.yml - profiles: - - aspnet-gold-win - - - jobs: - - - name: Baselines Database Intel Linux - template: baselines-database-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - intel-db-db - - - name: Baselines Intel Windows - template: baselines-scenarios.yml - profiles: - - intel-win-app - - intel-load2-load - - - name: Baselines Database Arm 28 Linux - template: baselines-database-scenarios.yml - profiles: - - arm-lin-28-app - - amd-lin-load - - amd-lin2-db - - - name: Baselines Database Gold Linux - template: baselines-database-scenarios.yml - profiles: - - aspnet-gold-lin - - - jobs: - - - name: Baselines Database Intel Windows - template: baselines-database-scenarios.yml - profiles: - - intel-win-app - - intel-load-load - - intel-db-db - - - name: Baselines Intel Linux - template: baselines-scenarios.yml - profiles: - - intel-lin-app - - intel-load2-load - - - name: PGO Arm 28 Linux - template: pgo-scenarios.yml - profiles: - - arm-lin-28-app - - amd-lin-load - - amd-lin2-db - - - name: Baselines Gold Windows - template: baselines-scenarios.yml - profiles: - - aspnet-gold-win - - - jobs: - - - name: Baselines Database Amd Linux - template: baselines-database-scenarios.yml - profiles: - - amd-lin-app - - intel-load-load - - intel-db-db - - - name: Baselines Arm 28 Linux - template: baselines-scenarios.yml - profiles: - - arm-lin-28-app - - intel-load2-load - - - name: Native Aot Intel Windows - template: nativeaot-scenarios.yml - profiles: - - intel-win-app - - intel-lin-load - - amd-lin2-db - - - name: Baselines Gold Linux - template: baselines-scenarios.yml - profiles: - - aspnet-gold-lin - - - jobs: - - - name: Baselines Amd Linux - template: baselines-scenarios.yml - profiles: - - amd-lin-app - - intel-lin-load - - - name: Frameworks Intel Linux - template: frameworks-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - amd-lin2-db # required by nodejs and tfb-database argument - - - name: Grpc 
Intel Windows - template: grpc-scenarios.yml - profiles: - - intel-win-app - - intel-db-load - - - name: Baselines Database Gold Windows - template: baselines-database-scenarios.yml - profiles: - - aspnet-gold-win - - - jobs: - - - name: Frameworks Database Intel Linux - template: frameworks-database-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - intel-db-db - - - name: WebSockets Intel Windows - template: websocket-scenarios.yml - profiles: - - intel-win-app - - intel-load2-load - - - name: Frameworks Database Arm 28 Linux - template: frameworks-database-scenarios.yml - profiles: - - arm-lin-28-app - - amd-lin-load - - amd-lin2-db - - - name: Frameworks Database Intel Gold Linux - template: frameworks-database-scenarios.yml - profiles: - - aspnet-gold-lin - - - jobs: - - - name: Frameworks Database Amd Linux - template: frameworks-database-scenarios.yml - profiles: - - amd-lin-app - - intel-load-load - - intel-db-db - - - name: Frameworks Arm 80 Linux - template: frameworks-scenarios.yml - profiles: - - arm-lin-app - - amd-lin-load - - amd-lin2-db # required by nodejs and tfb-database argument - - - name: Trends Siryn Linux - template: trend-scenarios.yml - profiles: - - siryn-lin-app - - intel-load2-load - - - name: Frameworks Intel Gold Linux - template: frameworks-scenarios.yml - profiles: - - aspnet-gold-lin - - - jobs: - - - name: Frameworks Amd Linux - template: frameworks-scenarios.yml - profiles: - - amd-lin-app - - amd-lin2-load - - intel-load2-db # required by nodejs and tfb-database argument - - - name: Native Aot Intel Linux - template: nativeaot-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - intel-db-db - - - name: Baselines Siryn Linux - template: baselines-scenarios.yml - profiles: - - siryn-lin-app - - arm-lin-load diff --git a/build/benchmarks.matrix.02.yml b/build/benchmarks.matrix.02.yml deleted file mode 100644 index 725ba9640..000000000 --- a/build/benchmarks.matrix.02.yml +++ /dev/null @@ -1,231 +0,0 
@@ -# This file describes all the scenarios which are run continuously on AzDo. -# It generates the file benchmarks.yml. See this file for instructions. - -queues: - - citrine1 - - citrine2 - - citrine3 - - mono - -schedule: "0 3/12 * * *" - -groups: - - - jobs: - - - name: Frameworks Arm 28 Linux - template: frameworks-scenarios.yml - profiles: - - arm-lin-28-app - - intel-load2-load - - amd-lin2-db # required by nodejs and tfb-database argument - - - name: GC Intel Linux - template: gc-scenarios.yml - profiles: - - intel-lin-app - - - name: SslStream Intel Linux - template: sslstream-scenarios.yml - profiles: - - intel-load-app - - amd-lin-load - - - jobs: - - - name: PGO Intel Windows - template: pgo-scenarios.yml - profiles: - - intel-win-app - - amd-lin2-load - - intel-db-db - - - name: PGO Intel Linux - template: pgo-scenarios.yml - profiles: - - intel-lin-app - - amd-lin-load - - intel-load-db - - - name: Mvc Siryn Linux - template: mvc-scenarios.yml - profiles: - - siryn-lin-app - - arm-lin-load - - intel-load2-db - - - jobs: - - - name: SignalR Intel Linux - template: signalr-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - - name: SignalR Intel Windows - template: signalr-scenarios.yml - profiles: - - intel-win-app - - intel-load2-load - - - name: Blazor Intel Linux - template: blazor-scenarios.yml - profiles: - - intel-perflin-app - - intel-perfload-load - - - jobs: - - - name: WebSockets Intel Linux - template: websocket-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - - name: Build Intel Linux - template: build-perf-scenarios.yml - profiles: - - intel-load2-app - - intel-db-load - - - name: Trends Database Arm 80 Linux - template: trend-database-scenarios.yml - profiles: - - arm-lin-app - - amd-lin-load - - amd-lin2-db - - - jobs: - - - name: Single File Intel Linux - template: singlefile-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - - name: Mvc Intel Windows - template: mvc-scenarios.yml - 
profiles: - - intel-win-app - - intel-load2-load - - intel-db-db - - - name: Frameworks Database Arm 80 Linux - template: frameworks-database-scenarios.yml - profiles: - - arm-lin-app - - amd-lin-load - - amd-lin2-db - - - jobs: - - - name: Mvc Intel Linux - template: mvc-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - intel-db-db - - - name: Single File Intel Windows - template: singlefile-scenarios.yml - profiles: - - intel-win-app - - intel-load2-load - - - name: GC Arm 28 Linux - template: gc-scenarios.yml - profiles: - - arm-lin-28-app - - - jobs: - - - name: Custom Proxies Intel Linux - template: proxies-custom-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - intel-db-db - - - name: Build Intel Windows - template: build-perf-scenarios.yml - profiles: - - intel-win-app - - intel-load2-load - - - name: Trends Arm 80 Linux - template: trend-scenarios.yml - profiles: - - arm-lin-app - - amd-lin-load - - - jobs: - - - name: Proxies Intel Linux - template: proxies-scenarios.yml - timeout: 180 - profiles: - - intel-lin-app - - intel-load-load - - intel-db-db - - - name: Grpc Intel Linux - template: grpc-scenarios.yml - profiles: - - intel-load2-app - - amd-lin-load - - - name: Baselines Database Siryn Linux - template: baselines-database-scenarios.yml - profiles: - - siryn-lin-app - - arm-lin-load - - amd-lin2-db - - - jobs: - - - name: Crossgen Arm 28 Linux - template: crossgen2-scenarios.yml - profiles: - - arm-lin-28-app - - amd-lin-load - - - name: Crossgen Intel Linux - template: crossgen2-scenarios.yml - profiles: - - intel-lin-app - - intel-load-load - - - name: Trends Database Siryn Linux - template: trend-database-scenarios.yml - profiles: - - siryn-lin-app - - intel-load2-load - - intel-db-db - - - jobs: - - - name: Crossgen Intel Windows - template: crossgen2-scenarios.yml - profiles: - - intel-win-app - - intel-load-load - - - name: Crossgen Amd Linux - template: crossgen2-scenarios.yml - profiles: - - amd-lin-app - 
- intel-load2-load - - - name: HttpClient Intel Linux - template: httpclient-scenarios.yml - profiles: - - intel-lin-app - - amd-lin2-load - - - jobs: - - - name: HttpClient Intel Windows - template: httpclient-scenarios.yml - profiles: - - intel-win-app - - amd-lin2-load diff --git a/build/benchmarks.matrix.azure.eastus2.yml b/build/benchmarks.matrix.azure.eastus2.yml deleted file mode 100644 index 7ac6145fb..000000000 --- a/build/benchmarks.matrix.azure.eastus2.yml +++ /dev/null @@ -1,80 +0,0 @@ -# This file describes all the scenarios which are run continuously on AzDo. -# It generates the file benchmarks.yml. See this file for instructions. - -queues: - - cobaltcloud - -schedule: "0 9/12 * * *" - -groups: - - jobs: - - name: Trends Database Cobalt Cloud Linux - template: trend-database-scenarios.yml - profiles: - - cobalt-cloud-lin - - - jobs: - - name: Trends Cobalt Cloud Linux - template: trend-scenarios.yml - profiles: - - cobalt-cloud-lin - - - jobs: - - name: Baselines Database Cobalt Cloud Linux - template: baselines-database-scenarios.yml - profiles: - - cobalt-cloud-lin - - - jobs: - - name: Baselines Cobalt Cloud Linux - template: baselines-scenarios.yml - profiles: - - cobalt-cloud-lin - - - jobs: - - name: Containers Cobalt Cloud Linux - template: containers-scenarios.yml - profiles: - - cobalt-cloud-lin - - - jobs: - - name: Mvc Cobalt Cloud Linux - template: mvc-scenarios.yml - profiles: - - cobalt-cloud-lin - - - jobs: - - name: Trends Database Cobalt Cloud Linux AL3 - template: trend-database-scenarios.yml - profiles: - - cobalt-cloud-lin-al3 - - - jobs: - - name: Trends Cobalt Cloud Linux AL3 - template: trend-scenarios.yml - profiles: - - cobalt-cloud-lin-al3 - - - jobs: - - name: Baselines Database Cobalt Cloud Linux AL3 - template: baselines-database-scenarios.yml - profiles: - - cobalt-cloud-lin-al3 - - - jobs: - - name: Baselines Cobalt Cloud Linux AL3 - template: baselines-scenarios.yml - profiles: - - cobalt-cloud-lin-al3 - - - jobs: - - 
name: Containers Cobalt Cloud Linux AL3 - template: containers-scenarios.yml - profiles: - - cobalt-cloud-lin-al3 - - - jobs: - - name: Mvc Cobalt Cloud Linux AL3 - template: mvc-scenarios.yml - profiles: - - cobalt-cloud-lin-al3 \ No newline at end of file diff --git a/build/benchmarks.matrix.azure.yml b/build/benchmarks.matrix.azure.yml deleted file mode 100644 index 087f5de3a..000000000 --- a/build/benchmarks.matrix.azure.yml +++ /dev/null @@ -1,87 +0,0 @@ -# This file describes all the scenarios which are run continuously on AzDo. -# It generates the file benchmarks.yml. See this file for instructions. - -queues: - - azure - - azurearm64 - -schedule: "0 9/12 * * *" - -groups: - - jobs: - - name: Trends Database Azure Linux - template: trend-database-scenarios.yml - profiles: - - aspnet-azure-lin - - - name: Trends Database Azure Arm64 Linux - template: trend-database-scenarios.yml - profiles: - - aspnet-azurearm64-lin - - - jobs: - - name: Trends Azure Linux - template: trend-scenarios.yml - profiles: - - aspnet-azure-lin - - - name: Trends Azure Arm64 Linux - template: trend-scenarios.yml - profiles: - - aspnet-azurearm64-lin - - - jobs: - - name: Baselines Database Azure Linux - template: baselines-database-scenarios.yml - profiles: - - aspnet-azure-lin - - - name: Baselines Database Azure Arm64 Linux - template: baselines-database-scenarios.yml - profiles: - - aspnet-azurearm64-lin - - - jobs: - - name: Baselines Azure Linux - template: baselines-scenarios.yml - profiles: - - aspnet-azure-lin - - - name: Baselines Azure Arm64 Linux - template: baselines-scenarios.yml - profiles: - - aspnet-azurearm64-lin - - - jobs: - - name: Containers Azure Intel Linux - template: containers-scenarios.yml - profiles: - - aspnet-azure-lin - - - name: Containers Azure Arm64 Linux - template: containers-scenarios.yml - profiles: - - aspnet-azurearm64-lin - - - jobs: - - name: IDNA Azure Amd Linux - template: trend-scenarios.yml - profiles: - - idna-amd-lin - - - jobs: - - 
name: IDNA Azure Intel Linux - template: trend-scenarios.yml - profiles: - - idna-intel-lin - - - jobs: - - name: IDNA Azure Amd Windows - template: trend-scenarios.yml - profiles: - - idna-amd-win - - - name: IDNA Azure Intel Windows - template: trend-scenarios.yml - profiles: - - idna-intel-win \ No newline at end of file diff --git a/build/benchmarks.template.liquid b/build/benchmarks.template.liquid index c4c80642a..2aa230237 100644 --- a/build/benchmarks.template.liquid +++ b/build/benchmarks.template.liquid @@ -1,8 +1,9 @@ # Do not change this file, it is generated using these steps: -# - The file benchmarks.matrix.yml defines how each job is run in parallel -# - Convert its content to json using https://jsonformatter.org/yaml-to-json -# - Use the template in benchmarks.template.liquid and the converted json using https://liquidjs.com/playground.html -# - Update this file with the result of the template generation +# - The file benchmarks*.json defines how each pipeline set of jobs is run in parallel +# - Update the associated benchmarks*.json file with machine and scenario updates +# - Install python and install the requirements for the crank-scheduler in benchmarks/scripts/crank-scheduler/requirements.txt +# - Run the scheduler specifying the desired benchmarks*.json file, this template, and benchmarks/output to automatically overwrite the current pipeline. +# - Ex. 
python ./scripts/crank-scheduler/main.py --config ./build/benchmarks_ci.json --template ./build/benchmarks.template.liquid --yaml-output ./build {%- assign defaultTimeout = 120 %} diff --git a/build/benchmarks_ci.json b/build/benchmarks_ci.json index bf238c0c1..2c1fdec3b 100644 --- a/build/benchmarks_ci.json +++ b/build/benchmarks_ci.json @@ -18,85 +18,235 @@ "machines": [ { "name": "intel-lin", - "machine_type": "sut", + "machine_group": "intel", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "intel-lin-app" + ] + }, + "load": { + "priority": 2, + "profiles": [ + "intel-lin-load" + ] + } + }, "preferred_partners": [ "intel-load", "intel-load2", "intel-db" - ], - "profile_name": "intel-lin-app" + ] }, { "name": "intel-win", - "machine_type": "sut", + "machine_group": "intel", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "intel-win-app" + ] + }, + "load": { + "priority": 2, + "profiles": [ + "intel-win-load" + ] + } + }, "preferred_partners": [ "intel-load2", "intel-load", "intel-db" - ], - "profile_name": "intel-win-app" + ] }, { "name": "intel-load", - "machine_type": "load", - "preferred_partners": [], - "profile_name": "intel-load-load" + "machine_group": "intel", + "capabilities": { + "load": { + "priority": 1, + "profiles": [ + "intel-load-load" + ] + }, + "sut": { + "priority": 2, + "profiles": [ + "intel-load-app" + ] + }, + "db": { + "priority": 3, + "profiles": [ + "intel-load-db" + ] + } + } }, { "name": "intel-load2", - "machine_type": "load", - "preferred_partners": [], - "profile_name": "intel-load2-load" + "machine_group": "intel", + "capabilities": { + "load": { + "priority": 1, + "profiles": [ + "intel-load2-load" + ] + }, + "sut": { + "priority": 2, + "profiles": [ + "intel-load2-app" + ] + }, + "db": { + "priority": 3, + "profiles": [ + "intel-load2-db" + ] + } + } }, { "name": "intel-db", - "machine_type": "db", - "preferred_partners": [], - "profile_name": "intel-db-db" + "machine_group": "intel", + 
"capabilities": { + "db": { + "priority": 1, + "profiles": [ + "intel-db-db" + ] + }, + "sut": { + "priority": 2, + "profiles": [ + "intel-db-app" + ] + }, + "load": { + "priority": 3, + "profiles": [ + "intel-db-load" + ] + } + } + }, + { + "name": "intel-perflin", + "machine_group": "intel-perf", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "intel-perflin-app" + ] + } + } + }, + { + "name": "intel-perfload", + "machine_group": "intel-perf", + "capabilities": { + "load": { + "priority": 1, + "profiles": [ + "intel-perfload-load" + ] + } + } }, { "name": "amd-lin2", - "machine_type": "sut", - "preferred_partners": [], - "profile_name": "amd-lin2-app" + "machine_group": "gold", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "amd-lin2-app" + ] + } + } }, { "name": "gold-lin", - "machine_type": "sut", + "machine_group": "gold", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "gold-lin-app", + "gold-lin-28-app" + ] + } + }, "preferred_partners": [ "gold-load", "gold-load2", "gold-db" - ], - "profile_name": "gold-lin-app" + ] }, { "name": "gold-win", - "machine_type": "sut", + "machine_group": "gold", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "gold-win-app", + "gold-win-28-app" + ] + }, + "load": { + "priority": 2, + "profiles": [ + "gold-win-load" + ] + } + }, "preferred_partners": [ "gold-load2", "gold-load", "gold-db" - ], - "profile_name": "gold-win-app" + ] }, { "name": "gold-load", - "machine_type": "load", - "preferred_partners": [], - "profile_name": "gold-load-load" + "machine_group": "gold", + "capabilities": { + "load": { + "priority": 1, + "profiles": [ + "gold-load-load" + ] + } + } }, { "name": "gold-load2", - "machine_type": "load", - "preferred_partners": [], - "profile_name": "gold-load2-load" + "machine_group": "gold", + "capabilities": { + "load": { + "priority": 1, + "profiles": [ + "gold-load2-load" + ] + } + } }, { "name": "gold-db", - "machine_type": "db", - 
"preferred_partners": [], - "profile_name": "gold-db-db" + "machine_group": "gold", + "capabilities": { + "db": { + "priority": 1, + "profiles": [ + "gold-db-db" + ] + } + } } ], "scenarios": [ @@ -132,7 +282,8 @@ "type": 2, "target_machines": [ "gold-lin", - "intel-lin" + "intel-lin", + "intel-perflin" ], "estimated_runtime": null }, diff --git a/build/benchmarks_ci_azure.json b/build/benchmarks_ci_azure.json index 878d01676..475192e3f 100644 --- a/build/benchmarks_ci_azure.json +++ b/build/benchmarks_ci_azure.json @@ -16,6 +16,7 @@ "machines": [ { "name": "azure-db", + "machine_group": "azure", "capabilities": { "db": { "priority": 1, @@ -39,6 +40,7 @@ }, { "name": "azure-client", + "machine_group": "azure", "capabilities": { "load": { "priority": 1, @@ -62,6 +64,7 @@ }, { "name": "azure-server-arm64", + "machine_group": "azure", "capabilities": { "sut": { "priority": 1, @@ -89,6 +92,7 @@ }, { "name": "azure2-client", + "machine_group": "azure2", "capabilities": { "load": { "priority": 1, @@ -112,6 +116,7 @@ }, { "name": "azure2-db", + "machine_group": "azure2", "capabilities": { "db": { "priority": 1, @@ -135,6 +140,7 @@ }, { "name": "azure2-server-amd64", + "machine_group": "azure2", "capabilities": { "sut": { "priority": 1, @@ -162,6 +168,7 @@ }, { "name": "idna-amd-lin", + "machine_group": "idna", "capabilities": { "sut": { "priority": 1, @@ -189,6 +196,7 @@ }, { "name": "idna-amd-win", + "machine_group": "idna", "capabilities": { "sut": { "priority": 1, @@ -216,6 +224,7 @@ }, { "name": "idna-intel-lin", + "machine_group": "idna", "capabilities": { "sut": { "priority": 1, @@ -243,6 +252,7 @@ }, { "name": "idna-intel-win", + "machine_group": "idna", "capabilities": { "sut": { "priority": 1, diff --git a/scripts/crank-scheduler/CONFIGURATION_GUIDE.md b/scripts/crank-scheduler/CONFIGURATION_GUIDE.md new file mode 100644 index 000000000..c243313d7 --- /dev/null +++ b/scripts/crank-scheduler/CONFIGURATION_GUIDE.md @@ -0,0 +1,250 @@ +# Complete Configuration 
Options Guide + +This guide demonstrates all possible configuration options using `example_complete_features.json`. + +## Machine Configuration Options + +### 1. Single-Type Machine (Traditional) + +```json +{ + "name": "single-type-machine", + "capabilities": { + "sut": { + "priority": 1, + "profiles": ["single-type-basic"], + "default_profile": "single-type-basic" + } + }, + "preferred_partners": ["dedicated-load", "dedicated-db"] +} +``` + +**Features:** + +- ✅ One machine type only (SUT) +- ✅ Single profile available +- ✅ Preferred partners for load/db roles + +### 2. Multi-Type Machine (Advanced) + +```json +{ + "name": "multi-type-machine", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "multi-sut-normal", + "multi-sut-high-cpu", + "multi-sut-low-memory" + ], + "default_profile": "multi-sut-normal" + }, + "load": { + "priority": 2, + "profiles": [ + "multi-load-normal", + "multi-load-high-throughput", + "multi-load-burst-mode" + ], + "default_profile": "multi-load-normal" + }, + "db": { + "priority": 3, + "profiles": [ + "multi-db-normal", + "multi-db-memory-optimized" + ], + "default_profile": "multi-db-normal" + } + } +} +``` + +**Features:** + +- ✅ Multiple machine types (SUT, LOAD, DB) +- ✅ Priority ordering (1=preferred, 2=secondary, 3=fallback) +- ✅ Multiple sub-profiles per type +- ✅ Default profile for each type + +### 3. Specialized Machine + +```json +{ + "name": "dedicated-load", + "capabilities": { + "load": { + "priority": 1, + "profiles": [ + "dedicated-load-standard", + "dedicated-load-high-connections", + "dedicated-load-low-latency" + ], + "default_profile": "dedicated-load-standard" + } + } +} +``` + +**Features:** + +- ✅ Dedicated to one role (LOAD only) +- ✅ Multiple specialized profiles +- ✅ No preferred partners needed + +### 4. 
Specialized Machines (Dedicated Role) + +```json +{ + "name": "dedicated-db", + "capabilities": { + "db": { + "priority": 1, + "profiles": [ + "dedicated-db-standard", + "dedicated-db-high-iops", + "dedicated-db-large-dataset" + ], + "default_profile": "dedicated-db-standard" + } + }, + "preferred_partners": [] +} +``` + +**Features:** + +- ✅ Dedicated to one role (DB only) +- ✅ Multiple specialized profiles for different workloads +- ✅ No preferred partners needed (self-contained) + +## Scenario Configuration Options + +### 1. Basic Scenario (Default Profiles) + +```json +{ + "name": "Simple Single Machine Test", + "template": "simple-single.yml", + "scenario_type": 1, + "target_machines": ["single-type-machine", "multi-type-machine"], + "estimated_runtime": 10.0, + "description": "Basic single machine scenario with default profiles" +} +``` + +**Result:** Uses default profiles for all machines + +### 2. Custom Profile Selection + +```json +{ + "name": "Triple Machine Test with Custom Profiles", + "template": "triple-custom.yml", + "scenario_type": 3, + "target_machines": ["multi-type-machine"], + "estimated_runtime": 45.0, + "profile_overrides": { + "multi-type-machine": { + "sut": "multi-sut-high-cpu", + "load": "multi-load-high-throughput", + "db": "multi-db-memory-optimized" + } + } +} +``` + +**Result:** Uses specific custom profiles for each machine type + +### 3. 
Mixed Profile Usage + +```json +{ + "name": "Mixed Profile Scenario", + "template": "mixed-profiles.yml", + "scenario_type": 2, + "target_machines": ["single-type-machine", "multi-type-machine"], + "profile_overrides": { + "multi-type-machine": { + "sut": "multi-sut-low-memory" + } + } +} +``` + +**Result:** + +- `single-type-machine`: Uses default profile +- `multi-type-machine` SUT: Uses custom profile +- `multi-type-machine` LOAD: Uses default profile + +## Configuration Properties Explained + +### Machine Properties + +| Property | Required | Description | +| -------------------- | -------- | ---------------------------------------------- | +| `name` | ✅ | Unique machine identifier | +| `capabilities` | ✅ | Dict of machine types this machine can fulfill | +| `preferred_partners` | ❌ | List of preferred machines for other roles | + +### Capability Properties + +| Property | Required | Description | +| ----------------- | -------- | ------------------------------------------------------------------- | +| `machine_type` | ✅ | Key: "sut", "load", or "db" | +| `priority` | ✅ | 1=preferred, 2=secondary, 3=fallback | +| `profiles` | ✅ | List of available profile names | +| `default_profile` | ❌ | Which profile to use by default (defaults to first profile in list) | + +### Scenario Properties + +| Property | Required | Description | +| ------------------- | -------- | ---------------------------------- | +| `name` | ✅ | Scenario identifier | +| `template` | ✅ | YAML template file | +| `scenario_type` | ✅ | 1=single, 2=dual, 3=triple machine | +| `target_machines` | ✅ | List of machines to run on | +| `estimated_runtime` | ❌ | Runtime in minutes | +| `description` | ❌ | Human-readable description | +| `profile_overrides` | ❌ | Custom profile overrides | + +### Profile Overrides Structure + +```json +"profile_overrides": { + "machine-name": { + "machine-type": "profile-name" + } +} +``` + +## Testing the Configuration + +Run these commands to test the example: + +```bash 
+# Test configuration loading and basic validation +python main.py --config example_complete_features.json --list-jobs + +# Test scheduling without YAML generation +python main.py --config example_complete_features.json + +# Test with template and YAML generation +python main.py --config example_complete_features.json --template benchmarks.template.liquid --yaml-output ./output + +# List jobs grouped by target machine +python main.py --config example_complete_features.json --list-jobs-by-machine +``` + +## Key Features Demonstrated + +1. ✅ **Multi-type machines** - One machine can be SUT, LOAD, and DB +2. ✅ **Priority-based selection** - Preferred roles vs fallback roles +3. ✅ **Sub-profile support** - Multiple profiles per machine type +4. ✅ **Default behavior** - No configuration change needed for existing setups +5. ✅ **Custom overrides** - Specific profiles for specific scenarios +6. ✅ **Backward compatibility** - Old single-type machines still work +7. ✅ **Flexible scheduling** - Scheduler automatically picks best assignments diff --git a/scripts/crank-scheduler/README.md b/scripts/crank-scheduler/README.md new file mode 100644 index 000000000..45266159d --- /dev/null +++ b/scripts/crank-scheduler/README.md @@ -0,0 +1,246 @@ +# Crank Scheduler + +A sophisticated scheduling system for managing performance test scenario execution across multiple machines with various constraints and preferences. This component is designed to be integrated into larger performance testing infrastructure projects. 
+ +## Overview + +The Crank Scheduler solves the complex problem of optimally scheduling performance test scenarios across a fleet of machines while respecting: + +- Machine type constraints and multi-capability support (SUT, Load, DB) +- Machine preferences and partnerships +- Runtime optimization to minimize idle time +- Stage-based execution where all scenarios in a stage must complete before the next stage begins +- Queue-based load balancing across multiple execution pipelines +- YAML generation for CI/CD pipeline integration + +## Features + +- **Multi-Capability Machine Support**: Machines can have multiple capabilities (SUT, Load, DB) with priority-based assignments +- **Smart Machine Allocation**: Respects machine preferences while falling back gracefully to available alternatives +- **Runtime Estimation**: Automatically estimates runtimes for unknown scenarios based on similar ones +- **Stage Optimization**: Groups scenarios to minimize total execution time +- **Queue-Based Load Balancing**: Distributes work across multiple execution queues +- **Liquid Template Integration**: Generates YAML configurations for CI/CD pipelines using templates +- **Multiple Input/Output Formats**: JSON, CSV, YAML support +- **Schedule Splitting**: Divides large schedules into manageable chunks for parallel execution +- **Extensible Design**: Easy to add new optimization algorithms and constraints + +## Installation + +As a nested component, ensure Python 3 (latest recommended) and the requirements are installed: + +```bash +pip install -r requirements.txt +``` + +## Quick Start + +The scheduler uses a combined configuration format that includes machines, scenarios, and metadata in a single JSON file. 
+ +### Basic Usage + +```bash +# Generate schedule from combined configuration +python main.py --config config.json + +# Generate YAML files for CI/CD pipelines +python main.py --config config.json --template benchmarks.template.liquid --yaml-output ./output + +# List all jobs without scheduling +python main.py --config config.json --list-jobs + +# List jobs grouped by machine +python main.py --config config.json --list-jobs-by-machine +``` + +## Configuration + +The scheduler uses a combined configuration format that includes machines, scenarios, and metadata. See `example_complete_features.json` for a comprehensive example. + +### Combined Configuration Structure + +```json +{ + "metadata": { + "name": "Configuration Name", + "description": "Description of the configuration", + "version": "2.0", + "schedule": "0 6/12 * * *", + "queues": ["queue1", "queue2"], + "yaml_generation": { + "target_yaml_count": 2, + "schedule_offset_hours": 6 + } + }, + "machines": [...], + "scenarios": [...] +} +``` + +### Machines Configuration + +Machines support multiple capabilities with priority-based assignment: + +```json +{ + "name": "multi-capability-machine", + "capabilities": { + "sut": { + "priority": 1, + "profiles": ["sut-profile-1", "sut-profile-2"], + "default_profile": "sut-profile-1" + }, + "load": { + "priority": 2, + "profiles": ["load-profile"] + } + }, + "preferred_partners": ["partner-machine-1", "partner-machine-2"] +} +``` + +#### Machine Properties + +- **name**: Unique machine identifier +- **capabilities**: Dictionary of machine types with their configurations + - **priority**: Assignment priority (1 = highest priority) + - **profiles**: Available profiles for this capability + - **default_profile**: Default profile to use (optional, defaults to first profile) +- **preferred_partners**: Ordered list of preferred partner machines + +### Scenarios Configuration + +```json +{ + "name": "performance-test-scenario", + "scenario_type": 2, + "estimated_runtime": 
45.0, + "target_machines": ["machine-1", "machine-2"] +} +``` + +#### Scenario Properties + +- **name**: Scenario identifier +- **scenario_type**: Number of machines required (1=SUT only, 2=SUT+Load, 3=SUT+Load+DB) +- **estimated_runtime**: Runtime in minutes (optional) +- **target_machines**: List of specific machines to run this scenario on + +## Usage Examples + +### Basic Scheduling + +```bash +# Generate schedule and display summary +python main.py --config config.json + +# Generate YAML files for CI/CD integration +python main.py --config config.json --template benchmarks.template.liquid --yaml-output ./output +``` + +### Analysis and Debugging + +```bash +# List all jobs without executing scheduler +python main.py --config config.json --list-jobs + +# List jobs grouped by target machine +python main.py --config config.json --list-jobs-by-machine +``` + +## Architecture + +### Core Components + +1. **Models** (`models.py`): Data structures for machines, scenarios, schedules, and configurations +2. **Scheduler** (`scheduler.py`): Core scheduling algorithms and machine allocation logic +3. **Utils** (`utils.py`): Input/output utilities and data format conversion +4. **Template Generator** (`template_generator.py`): Liquid template processing for YAML generation +5. **Schedule Operations** (`schedule_operations.py`): Schedule processing and YAML generation workflows +6. **Schedule Splitter** (`schedule_splitter.py`): Logic for dividing schedules into manageable chunks +7. 
**Main** (`main.py`): Command-line interface and orchestration + +### Key Classes + +- **Machine**: Represents a physical machine with multiple capabilities and preferences +- **Scenario**: Represents a performance test scenario with requirements and target machines +- **MachineAllocator**: Handles machine assignment logic with preference matching and capability-based selection +- **CrankScheduler**: Main scheduler that creates optimized schedules with stage-based execution +- **ScheduleSplitter**: Divides large schedules into smaller, parallelizable units + +### Scheduling Algorithm + +1. **Expand scenarios** into individual runs for each target machine +2. **Estimate runtimes** for scenarios without known values using similarity-based estimation +3. **Sort scenarios** by runtime (descending) to minimize idle time using longest-job-first heuristic +4. **Create stages** by packing scenario runs optimally: + - Try to assign each scenario to available machines respecting capabilities and preferences + - Ensure no machine conflicts within a stage (each machine used only once per stage) + - Balance load across multiple queues within each stage +5. **Split schedule** into manageable chunks for parallel execution +6. **Generate YAML** configurations for CI/CD pipeline integration + +## Machine Preference System + +The scheduler uses a sophisticated multi-tiered machine matching system: + +1. **Capability-Based Matching**: Match machines based on their defined capabilities and priorities +2. **Explicit Preferences**: Use machines specified in `preferred_partners` list +3. **Priority-Based Fallback**: Select machines based on capability priority levels +4. **Type-Based Matching**: Match by machine type when specific preferences aren't available +5. **Graceful Degradation**: Use any available machine of the correct type as last resort + +### Example Preference Logic + +```text +Scenario needs a load machine for target "gold-lin": +1. 
Check gold-lin's preferred_partners for load-capable machines +2. Try load machines in preference order +3. Fall back to any available load-capable machine by priority +4. Fail if no suitable machines are available +``` + +## Output Formats + +### Table Format (Default) + +Human-readable summary with stage breakdown, machine utilization, and execution plans. + +### JSON Format + +Structured data suitable for programmatic consumption: + +```json +{ + "total_estimated_time": 120.0, + "stages": [ + { + "stage_id": 0, + "estimated_duration": 60.0, + "assignments": [...] + } + ] +} +``` + +### CSV Format + +Flat format suitable for spreadsheet analysis: + +```csv +stage_id,queue_id,scenario,sut_machine,load_machine,db_machine,estimated_runtime,stage_duration +0,0,plaintext-gold-lin,gold-lin,gold-load,,35.0,60.0 +``` + +### YAML Format + +Generated CI/CD pipeline configurations using Liquid templates for integration with build systems. + +## Integration as a Nested Component + +This scheduler is designed to be integrated into larger performance testing infrastructure projects. Key integration points: + +- **Configuration Management**: Use combined JSON configuration format for easy integration +- **Template System**: Leverage Liquid templates for generating custom CI/CD configurations +- **Modular Design**: Import and use individual components (`CrankScheduler`, `ScheduleSplitter`, etc.) 
+- **Extensible Architecture**: Add custom machine types and scenario types diff --git a/scripts/crank-scheduler/example_complete_features.json b/scripts/crank-scheduler/example_complete_features.json new file mode 100644 index 000000000..6b5073375 --- /dev/null +++ b/scripts/crank-scheduler/example_complete_features.json @@ -0,0 +1,188 @@ +{ + "metadata": { + "name": "Complete Feature Demo Configuration", + "description": "Demonstrates all possible machine and scenario configuration options", + "version": "2.0", + "schedule": "0 6/12 * * *", + "queues": [ + "queue1", + "queue2" + ], + "yaml_generation": { + "target_yaml_count": 2, + "schedule_offset_hours": 6 + } + }, + "machines": [ + { + "name": "single-type-machine", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "single-type-basic" + ], + "default_profile": "single-type-basic" + } + }, + "preferred_partners": [ + "dedicated-load", + "dedicated-db" + ] + }, + { + "name": "multi-type-machine", + "capabilities": { + "sut": { + "priority": 1, + "profiles": [ + "multi-sut-normal", + "multi-sut-high-cpu", + "multi-sut-low-memory" + ], + "default_profile": "multi-sut-normal" + }, + "load": { + "priority": 2, + "profiles": [ + "multi-load-normal", + "multi-load-high-throughput", + "multi-load-burst-mode" + ], + "default_profile": "multi-load-normal" + }, + "db": { + "priority": 3, + "profiles": [ + "multi-db-normal", + "multi-db-memory-optimized" + ], + "default_profile": "multi-db-normal" + } + }, + "preferred_partners": [ + "dedicated-load", + "dedicated-db" + ] + }, + { + "name": "dedicated-load", + "capabilities": { + "load": { + "priority": 1, + "profiles": [ + "dedicated-load-standard", + "dedicated-load-high-connections", + "dedicated-load-low-latency" + ], + "default_profile": "dedicated-load-standard" + } + }, + "preferred_partners": [] + }, + { + "name": "dedicated-db", + "capabilities": { + "db": { + "priority": 1, + "profiles": [ + "dedicated-db-standard", + "dedicated-db-high-iops", + 
"dedicated-db-large-dataset" + ], + "default_profile": "dedicated-db-standard" + } + }, + "preferred_partners": [] + } + ], + "scenarios": [ + { + "name": "Simple Single Machine Test", + "template": "simple-single.yml", + "type": 1, + "target_machines": [ + "single-type-machine", + "multi-type-machine" + ], + "estimated_runtime": 10.0, + "description": "Basic single machine scenario with default profiles" + }, + { + "name": "Dual Machine Test with Defaults", + "template": "dual-default.yml", + "type": 2, + "target_machines": [ + "single-type-machine", + "multi-type-machine" + ], + "estimated_runtime": 25.0, + "description": "Dual machine scenario using all default profiles" + }, + { + "name": "Triple Machine Test with Custom Profiles", + "template": "triple-custom.yml", + "type": 3, + "target_machines": [ + "multi-type-machine" + ], + "estimated_runtime": 45.0, + "description": "Triple machine scenario with custom profile selection", + "profile_overrides": { + "multi-type-machine": { + "sut": "multi-sut-high-cpu", + "load": "multi-load-high-throughput", + "db": "multi-db-memory-optimized" + } + } + }, + { + "name": "Mixed Profile Scenario", + "template": "mixed-profiles.yml", + "type": 2, + "target_machines": [ + "single-type-machine", + "multi-type-machine" + ], + "estimated_runtime": 30.0, + "description": "Scenario where some machines use custom profiles, others use defaults", + "profile_overrides": { + "multi-type-machine": { + "sut": "multi-sut-low-memory" + } + } + }, + { + "name": "High Performance Load Test", + "template": "high-performance.yml", + "type": 2, + "target_machines": [ + "multi-type-machine" + ], + "estimated_runtime": 60.0, + "description": "Performance test using high-performance profiles", + "profile_overrides": { + "multi-type-machine": { + "sut": "multi-sut-high-cpu" + } + } + }, + { + "name": "Resource Constrained Test", + "template": "resource-constrained.yml", + "type": 3, + "target_machines": [ + "multi-type-machine" + ], + 
"estimated_runtime": 40.0, + "description": "Test with limited resources across all machine types", + "profile_overrides": { + "multi-type-machine": { + "sut": "multi-sut-low-memory", + "load": "multi-load-normal", + "db": "multi-db-normal" + } + } + } + ] +} diff --git a/scripts/crank-scheduler/main.py b/scripts/crank-scheduler/main.py new file mode 100644 index 000000000..4e99489c8 --- /dev/null +++ b/scripts/crank-scheduler/main.py @@ -0,0 +1,386 @@ +#!/usr/bin/env python3 +""" +Crank Scheduler - A scheduling system for machine/scenario assignments + +This tool helps schedule scenarios across multiple machines with various constraints: +- Machine preferences and types +- Runtime optimization +- Queue management +- Stage-based execution +""" + +import argparse +import sys +from pathlib import Path +from typing import List + +from models import (CombinedConfiguration, Machine, MachineType, Scenario, + ScenarioType) +from schedule_operations import ScheduleOperations +from schedule_splitter import MultiYamlSummary, ScheduleSplitter +from scheduler import CrankScheduler +from template_generator import TemplateCLI +from utils import DataLoader + + +def main(): + parser = argparse.ArgumentParser( + description="Crank Scheduler - Optimize scenario scheduling across machines", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Generate schedule from JSON files + python main.py --config config.json --format table + """ + ) + + # Input options + input_group = parser.add_argument_group('Input Options') + input_group.add_argument('-c', '--config', type=str, + help='Path to combined configuration file (machines + scenarios + metadata) (JSON)') + + # Output options + output_group = parser.add_argument_group('Output Options') + output_group.add_argument('--summary-output', type=str, + help='Summary Output file path (default: print to console)') + output_group.add_argument( + '--yaml-output', type=str, help='Output directory for YAML files 
(default: current directory)') + + # Scheduling options + schedule_group = parser.add_argument_group('Scheduling Options') + schedule_group.add_argument('--list-jobs', action='store_true', + help='List all jobs that will be scheduled without running the scheduler') + schedule_group.add_argument('--list-jobs-by-machine', action='store_true', + help='List all jobs grouped by target SUT machine') + + # Add template arguments + TemplateCLI.add_template_arguments(parser) + + args = parser.parse_args() + + # Load data + machines, scenarios, config = load_data(args) + + if not machines: + print("Error: No machines loaded", file=sys.stderr) + return 1 + + if not scenarios: + print("Error: No scenarios loaded", file=sys.stderr) + return 1 + + # Handle list-jobs option + if args.list_jobs: + list_scheduled_jobs(scenarios, machines) + return 0 + + # Handle list-jobs-by-machine option + if args.list_jobs_by_machine: + list_jobs_by_machine(scenarios, machines) + return 0 + + # Create scheduler and generate schedule + print( + f"Creating schedule for {len(scenarios)} scenarios on {len(machines)} machines...") + scheduler = CrankScheduler(machines, scenarios, config) + schedule = scheduler.create_schedule() + + splitter = ScheduleSplitter(config) + partial_schedules = splitter.split_schedule(schedule) + print(f"Schedule split into {len(partial_schedules)} part(s)") + + # Process template/YAML generation if template is specified + if args.template: + yamls_generated = ScheduleOperations.process_yaml_generation( + args, partial_schedules, config) + + # Print summary + summary = MultiYamlSummary(yamls_generated, len( + config.scenarios), partial_schedules) + summary.print_summary() + summary.print_execution_plans() + + if not yamls_generated: + raise RuntimeError( + "No YAML files were generated. 
Check your template and configuration.") + else: + # No template specified, just show the partial schedules + ScheduleOperations.show_partial_schedules(partial_schedules, config) + return 0 + + +def load_data(args) -> tuple[List[Machine], List[Scenario], CombinedConfiguration]: + """Load machine and scenario data based on arguments""" + + # Try combined configuration first + config_path = Path(args.config) + if not config_path.exists(): + raise FileNotFoundError( + f"Combined config file not found: {config_path}") + + config = DataLoader.load_combined_configuration(str(config_path)) + return config.machines, config.scenarios, config + + +def _extract_runtime_minutes(runtime_str: str) -> float: + """Extract numeric runtime from string (e.g., "5.0min" -> 5.0)""" + if runtime_str == "Unknown": + return 0.0 + try: + return float(runtime_str.replace('min', '')) + except (ValueError, AttributeError): + return 0.0 + + +def list_scheduled_jobs(scenarios: List[Scenario], machines: List[Machine]): + """List all jobs that will be scheduled without running the scheduler""" + print("=" * 60) + print("SCHEDULED JOBS LIST") + print("=" * 60) + + # Create a map of machine names to machine objects for quick lookup + machine_map = {m.name: m for m in machines} + + # Expand scenarios into individual jobs + total_jobs = 0 + jobs_by_scenario = {} + + for scenario in scenarios: + jobs_by_scenario[scenario.name] = [] + for target_machine in scenario.target_machines: + # Check if target machine exists + if target_machine not in machine_map: + print( + f"WARNING: Target machine '{target_machine}' not found in machine list") + continue + + job_name = f"{scenario.name}-{target_machine}" + estimated_runtime_str = f"{scenario.estimated_runtime:.1f}min" if scenario.estimated_runtime else "Unknown" + machine_types = [ + mt.value for mt in scenario.get_required_machine_types()] + + jobs_by_scenario[scenario.name].append({ + 'name': job_name, + 'target_machine': target_machine, + 'scenario_type': 
scenario.scenario_type.value, + 'estimated_runtime': estimated_runtime_str, + 'machine_types': machine_types + }) + total_jobs += 1 + + # Display summary + print(f"Total scenarios: {len(scenarios)}") + print(f"Total jobs: {total_jobs}") + print(f"Available machines: {len(machines)}") + print() + + # Display jobs grouped by scenario + for scenario_name, jobs in jobs_by_scenario.items(): + if not jobs: + continue + + print(f"Scenario: {scenario_name}") + print(f" Jobs: {len(jobs)}") + print( + f" Target machines: {', '.join([job['target_machine'] for job in jobs])}") + print(f" Machine types needed: {', '.join(jobs[0]['machine_types'])}") + print(f" Runtime: {jobs[0]['estimated_runtime']}") + print() + + for job in jobs: + print( + f" → {job['name']} ({job['scenario_type']} machines, {job['estimated_runtime']})") + print() + + # Display machine summary + print("=" * 40) + print("MACHINE SUMMARY") + print("=" * 40) + + machines_by_type = {} + for machine in machines: + # Get primary machine type (lowest priority capability) + if machine.capabilities: + primary_type = min(machine.capabilities.keys(), + key=lambda mt, m=machine: m.capabilities[mt].priority) + machine_type = primary_type.value + else: + continue # Skip machines with no capabilities + + if machine_type not in machines_by_type: + machines_by_type[machine_type] = [] + machines_by_type[machine_type].append(machine.name) + + for machine_type, machine_names in machines_by_type.items(): + print( + f"{machine_type.upper()} machines ({len(machine_names)}): {', '.join(machine_names)}") + + print() + print("=" * 40) + print("JOB REQUIREMENTS ANALYSIS") + print("=" * 40) + + # Analyze job requirements + single_machine_jobs = sum( + 1 for scenario in scenarios for _ in scenario.target_machines if scenario.scenario_type == ScenarioType.SINGLE) + dual_machine_jobs = sum( + 1 for scenario in scenarios for _ in scenario.target_machines if scenario.scenario_type == ScenarioType.DUAL) + triple_machine_jobs = sum( + 1 for 
scenario in scenarios for _ in scenario.target_machines if scenario.scenario_type == ScenarioType.TRIPLE) + + print(f"Single machine jobs (SUT only): {single_machine_jobs}") + print(f"Dual machine jobs (SUT + Load): {dual_machine_jobs}") + print(f"Triple machine jobs (SUT + Load + DB): {triple_machine_jobs}") + print() + + # Check for potential scheduling issues + sut_machines = len( + [m for m in machines if MachineType.SUT in m.capabilities]) + load_machines = len( + [m for m in machines if MachineType.LOAD in m.capabilities]) + db_machines = len( + [m for m in machines if MachineType.DB in m.capabilities]) + + max_concurrent_dual = min(sut_machines, load_machines) + max_concurrent_triple = min(sut_machines, load_machines, db_machines) + + print(f"Maximum concurrent dual-machine jobs: {max_concurrent_dual}") + print(f"Maximum concurrent triple-machine jobs: {max_concurrent_triple}") + if dual_machine_jobs > 0 and load_machines == 0: + print( + "⚠️ WARNING: Dual-machine jobs require load machines, but none are available!") + if triple_machine_jobs > 0 and db_machines == 0: + print("⚠️ WARNING: Triple-machine jobs require database machines, but none are available!") + if triple_machine_jobs > 0 and load_machines == 0: + print("⚠️ WARNING: Triple-machine jobs require load machines, but none are available!") + + +def list_jobs_by_machine(scenarios: List[Scenario], machines: List[Machine]): + """List all jobs grouped by target machine""" + print("=" * 60) + print("JOBS BY MACHINE") + print("=" * 60) + + # Create a map of machine names to machine objects for quick lookup + machine_map = {m.name: m for m in machines} + + # Collect all jobs and group by machine + jobs_by_machine = {} + total_jobs = 0 + + # Initialize all machines in the map + for machine in machines: + jobs_by_machine[machine.name] = [] + + # Expand scenarios into individual jobs and group by machine + for scenario in scenarios: + for target_machine in scenario.target_machines: + if target_machine not in 
machine_map: + print( + f"WARNING: Target machine '{target_machine}' not found in machine list") + continue + + job_name = f"{scenario.name}-{target_machine}" + estimated_runtime_str = f"{scenario.estimated_runtime:.1f}min" if scenario.estimated_runtime else "Unknown" + + job_info = { + 'name': job_name, + 'scenario_name': scenario.name, + 'scenario_type': scenario.scenario_type.value, + 'estimated_runtime': estimated_runtime_str, + 'machine_types': [mt.value for mt in scenario.get_required_machine_types()] + } + + jobs_by_machine[target_machine].append(job_info) + total_jobs += 1 + + # Display summary + machines_with_jobs = sum(1 for jobs in jobs_by_machine.values() if jobs) + sut_machines = len( + [m for m in machines if MachineType.SUT in m.capabilities]) + print(f"Total machines: {len(machines)}") + print(f"Primary (SUT) Machines: {sut_machines}") + print(f"Primary Machines with Jobs: {machines_with_jobs}") + print(f"Total jobs: {total_jobs}") + print() + + # Sort machines by machine type and then by name for consistent output + # Don't include non-SUT machines in the summary + def get_primary_machine_type(machine): + if machine.capabilities: + primary_type = min(machine.capabilities.keys(), + key=lambda mt, m=machine: m.capabilities[mt].priority) + return primary_type.value + return 'unknown' + + sorted_machines = sorted(machines, key=lambda m: ( + get_primary_machine_type(m), m.name)) + filtered_machines = [ + m for m in sorted_machines if MachineType.SUT in m.capabilities] + # Display jobs for each machine + for machine in filtered_machines: + machine_jobs = jobs_by_machine[machine.name] + total_runtime = sum(_extract_runtime_minutes(job['estimated_runtime']) + for job in machine_jobs) + + # Get primary machine type for display + primary_type = get_primary_machine_type(machine) + print(f"Machine: {machine.name} ({primary_type.upper()})") + print(f" Jobs: {len(machine_jobs)}") + print(f" Total runtime: {total_runtime:.1f} minutes") + + if 
machine.preferred_partners: + partners_str = ", ".join(machine.preferred_partners) + print(f" Preferred partners: {partners_str}") + + if not machine_jobs: + print(" → No jobs assigned") + else: + # Sort jobs by scenario name for consistent output + sorted_jobs = sorted( + machine_jobs, key=lambda j: j['scenario_name']) + for job in sorted_jobs: + scenario_types_str = ", ".join(job['machine_types']) + print( + f" → {job['name']} ({scenario_types_str}, {job['estimated_runtime']})") + print() + + # Summary statistics + print("=" * 40) + print("MACHINE UTILIZATION SUMMARY") + print("=" * 40) + + machines_by_type = {} + runtime_by_type = {} + jobs_by_type = {} + + for machine in sorted_machines: + machine_type = get_primary_machine_type(machine) + machine_jobs = jobs_by_machine[machine.name] + total_runtime = sum(_extract_runtime_minutes(job['estimated_runtime']) + for job in machine_jobs) + + if machine_type not in machines_by_type: + machines_by_type[machine_type] = 0 + runtime_by_type[machine_type] = 0 + jobs_by_type[machine_type] = 0 + + machines_by_type[machine_type] += 1 + runtime_by_type[machine_type] += total_runtime + jobs_by_type[machine_type] += len(machine_jobs) + + # For now, only show statistics for SUT machines + machine_type = 'sut' + if machine_type in machines_by_type: + avg_runtime = runtime_by_type[machine_type] / \ + machines_by_type[machine_type] if machines_by_type[machine_type] > 0 else 0 + print(f"{machine_type.upper()} machines:") + print(f" Count: {machines_by_type[machine_type]}") + print(f" Total jobs: {jobs_by_type[machine_type]}") + print(f" Total runtime: {runtime_by_type[machine_type]:.1f} minutes") + print(f" Average runtime per machine: {avg_runtime:.1f} minutes") + print() + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/crank-scheduler/models.py b/scripts/crank-scheduler/models.py new file mode 100644 index 000000000..9e3ea230b --- /dev/null +++ b/scripts/crank-scheduler/models.py @@ -0,0 +1,257 @@ +from 
dataclasses import dataclass, field +from enum import Enum +from typing import Dict, List, Optional, Set + + +class MachineType(Enum): + """Types of machines available""" + SUT = "sut" # System Under Test + LOAD = "load" # Load generation machine + DB = "db" # Database machine + + +class ScenarioType(Enum): + """Number of machines required for scenario""" + SINGLE = 1 # Just SUT + DUAL = 2 # SUT + Load + TRIPLE = 3 # SUT + Load + DB + + +@dataclass +class MachineCapability: + """Represents a machine's ability to fulfill a specific role""" + machine_type: MachineType + priority: int # 1 = preferred, 2 = secondary, etc. + profiles: List[str] # All available profiles for this type + # Which profile to use by default (defaults to first profile) + default_profile: Optional[str] = None + + def __post_init__(self): + if not self.profiles: + raise ValueError( + f"At least one profile must be specified for machine type {self.machine_type}") + + # If no default_profile specified, use the first profile in the list + if self.default_profile is None: + self.default_profile = self.profiles[0] + + if self.default_profile not in self.profiles: + raise ValueError( + f"Default profile '{self.default_profile}' not in available profiles: {self.profiles}") + + +@dataclass +class Machine: + """Represents a machine that can run scenarios with multiple capabilities""" + name: str + # Direct mapping for efficient access + capabilities: Dict[MachineType, MachineCapability] + # Ordered list of preferred partner machines + preferred_partners: List[str] = field(default_factory=list) + # Machine group for compatibility filtering + machine_group: Optional[str] = None + + def get_capability(self, machine_type: MachineType) -> Optional[MachineCapability]: + """Get the capability for a specific machine type""" + return self.capabilities.get(machine_type) + + def can_fulfill_type(self, machine_type: MachineType) -> bool: + """Check if machine can fulfill a specific type""" + return machine_type in 
@dataclass
class Scenario:
    """A scenario to be scheduled."""
    name: str  # Just the scenario name (e.g., "plaintext")
    scenario_type: ScenarioType
    # Target machine names this scenario runs on (e.g., ["gold-lin", "gold-win"])
    target_machines: List[str]
    template: str  # Template file for YAML generation
    estimated_runtime: Optional[float] = None  # Runtime in minutes
    description: Optional[str] = None  # Human-readable description
    # Per-machine profile overrides, keyed by machine name then role.
    profile_overrides: Optional[Dict[str, Dict[MachineType, str]]] = None

    def __post_init__(self):
        # A scenario without targets can never be scheduled; fail fast.
        if not self.target_machines:
            raise ValueError(
                f"Scenario {self.name} must have at least one target machine")

    def get_preferred_profile(self, machine_name: str, machine_type: MachineType) -> Optional[str]:
        """Get the preferred profile for a specific machine and role, if any."""
        overrides = self.profile_overrides or {}
        return overrides.get(machine_name, {}).get(machine_type)

    def __hash__(self):
        # Include the (order-independent) targets so the same scenario
        # aimed at different machines hashes differently.
        return hash((self.name, tuple(sorted(self.target_machines))))

    def __eq__(self, other):
        return (isinstance(other, Scenario)
                and self.name == other.name
                and set(self.target_machines) == set(other.target_machines))

    def get_required_machine_types(self) -> List[MachineType]:
        """Get the machine roles required for this scenario."""
        roles_by_type = {
            ScenarioType.SINGLE: [MachineType.SUT],
            ScenarioType.DUAL: [MachineType.SUT, MachineType.LOAD],
            ScenarioType.TRIPLE: [MachineType.SUT, MachineType.LOAD, MachineType.DB],
        }
        if self.scenario_type in roles_by_type:
            return roles_by_type[self.scenario_type]
        raise ValueError(f"Unknown scenario type: {self.scenario_type}")

    def get_display_name(self) -> str:
        """Get a display name that includes the target machine(s)."""
        targets = self.target_machines
        if len(targets) == 1:
            return f"{self.name}-{targets[0]}"
        return f"{self.name}-[{','.join(targets)}]"
@dataclass
class Schedule:
    """Complete schedule with all stages."""
    stages: List[Stage]
    # Filled in by calculate_total_time(); None until then.
    total_estimated_time: Optional[float] = None

    def calculate_total_time(self) -> float:
        """Calculate (and cache) the total estimated time for the schedule."""
        total = sum(stage.calculate_duration() for stage in self.stages)
        self.total_estimated_time = total
        return total

    def get_machine_utilization(self) -> Dict[str, float]:
        """Calculate utilization percentage for each machine.

        Returns:
            Mapping of machine name -> percent of total schedule time the
            machine is busy. Empty when there are no stages or no runtime.
        """
        if not self.stages:
            return {}

        # FIX: total_estimated_time may still be None when
        # calculate_total_time() was never called; the old code only
        # guarded against 0 and would raise TypeError on the division
        # below. Compute it lazily instead.
        if self.total_estimated_time is None:
            self.calculate_total_time()
        if not self.total_estimated_time:
            return {}

        machine_usage: Dict[str, float] = {}
        for stage in self.stages:
            stage_duration = stage.calculate_duration()
            for assignment in stage.assignments:
                for machine in assignment.machines.values():
                    machine_usage[machine.name] = (
                        machine_usage.get(machine.name, 0.0) + stage_duration)

        # Convert accumulated busy time to percentages of the total.
        return {name: (busy / self.total_estimated_time) * 100
                for name, busy in machine_usage.items()}
total_estimated_time: Optional[float] = None + + def __post_init__(self): + if self.total_estimated_time is None: + self.total_estimated_time = sum( + stage.estimated_duration or 0 for stage in self.stages) + + def add_stage(self, stage: Stage): + """Add a stage to this partial schedule""" + self.stages.append(stage) + self.total_estimated_time = ( + self.total_estimated_time or 0) + (stage.estimated_duration or 0) diff --git a/scripts/crank-scheduler/requirements.txt b/scripts/crank-scheduler/requirements.txt new file mode 100644 index 000000000..46566ee22 --- /dev/null +++ b/scripts/crank-scheduler/requirements.txt @@ -0,0 +1,2 @@ +pyyaml>=6.0 +python-liquid>=2.0.2 \ No newline at end of file diff --git a/scripts/crank-scheduler/schedule_operations.py b/scripts/crank-scheduler/schedule_operations.py new file mode 100644 index 000000000..ba24a4732 --- /dev/null +++ b/scripts/crank-scheduler/schedule_operations.py @@ -0,0 +1,165 @@ +""" +Schedule operations for splitting and manipulating schedules +""" + +from pathlib import Path +from typing import List + +from models import (CombinedConfiguration, PartialSchedule, Schedule, + YamlGenerationConfig) +from schedule_splitter import MultiYamlSummary, ScheduleSplitter +from template_generator import TemplateDataGenerator + + +class ScheduleOperations: + """Operations for splitting and manipulating schedules""" + + @staticmethod + def create_partial_schedules(schedule: Schedule, config: CombinedConfiguration) -> List[PartialSchedule]: + """Create partial schedules - either split into multiple or single schedule + + Returns: + List[PartialSchedule]: Always returns a list, even for single schedules + """ + if not config.metadata.yaml_generation: + # No split configuration, return single schedule wrapped as partial + return [PartialSchedule( + name="full", + stages=schedule.stages.copy(), + total_estimated_time=schedule.total_estimated_time + )] + + target_count = config.metadata.yaml_generation.target_yaml_count + if 
target_count <= 1: + # Single schedule requested + return [PartialSchedule( + name="full", + stages=schedule.stages.copy(), + total_estimated_time=schedule.total_estimated_time + )] + + # Multiple schedules - use splitter + splitter = ScheduleSplitter(config) + return splitter.split_schedule(schedule) + + @staticmethod + def generate_schedule_times(config: CombinedConfiguration, count: int) -> List[str]: + """Generate schedule times based on configuration + + Args: + config: Combined configuration with schedule settings + count: Number of schedules to generate + + Returns: + List[str]: List of cron schedule strings + """ + base_schedule = config.metadata.schedule + + if count <= 1: + return [base_schedule] + + if not config.metadata.yaml_generation: + # No offset configuration, use base schedule for all + return [base_schedule] * count + + # Generate with offsets + splitter = ScheduleSplitter(config) + return splitter.generate_schedules(base_schedule) + + @staticmethod + def process_yaml_generation(args, partial_schedules: List[PartialSchedule], config: CombinedConfiguration) -> list: + """ + Unified flow for YAML generation (single or multi) + + Returns: + bool: True if YAML files were generated, False otherwise + """ + # Apply CLI overrides to config if provided + if args.target_yamls: + if config.metadata.yaml_generation is None: + config.metadata.yaml_generation = YamlGenerationConfig() + config.metadata.yaml_generation.target_yaml_count = args.target_yamls + + if args.schedule_offset: + if config.metadata.yaml_generation is None: + config.metadata.yaml_generation = YamlGenerationConfig() + config.metadata.yaml_generation.schedule_offset_hours = args.schedule_offset + + schedule_times = ScheduleOperations.generate_schedule_times( + config, len(partial_schedules)) + + # Generate YAML prefix + if args.yaml_prefix: + yaml_prefix = args.yaml_prefix + elif args.config: + config_path = Path(args.config) + yaml_prefix = config_path.stem.replace('.', '-').replace('_', 
'-') + else: + yaml_prefix = 'schedule' + + # Generate YAML files + template_generator = TemplateDataGenerator(config) + yaml_files = [] + + for i, (partial_schedule, schedule_time) in enumerate(zip(partial_schedules, schedule_times)): + # Generate template data for this partial schedule + template_data = template_generator.generate_template_data( + partial_schedule, schedule_time) + + # Generate output filename + if len(partial_schedules) == 1: + filename = f"{yaml_prefix}.yml" + data_filename = f"{yaml_prefix}_data.json" if args.template_data else None + else: + filename = f"{yaml_prefix}-{i+1:02d}.yml" + data_filename = f"{yaml_prefix}-{i+1:02d}_data.json" if args.template_data else None + + # Handle output directory if specified + if args.yaml_output: + output_dir = Path(args.yaml_output) + output_path = str(output_dir.joinpath(filename)) + data_path = str(output_dir.joinpath( + data_filename)) if data_filename else None + else: + output_path = filename + data_path = data_filename + # Save template data if requested + if args.template_data and data_path: + template_generator.save_template_data(template_data, data_path) + + # Generate YAML file + template_generator.generate_yaml_from_template( + template_data, args.template, output_path) + + yaml_files.append({ + 'file': output_path, + 'schedule': schedule_time, + 'estimated_runtime': partial_schedule.total_estimated_time or 0, + 'stage_count': len(partial_schedule.stages) + }) + + return yaml_files + + @staticmethod + def show_partial_schedules(partial_schedules: List[PartialSchedule], config: CombinedConfiguration): + """Display execution plans for partial schedules + + Args: + partial_schedules: List of partial schedules to display + config: Combined configuration (used for scenario count) + """ + + # Create a summary object to use its display methods + # Mock yaml_files since we're just showing execution plans + mock_yaml_files = [] + for i, partial_schedule in enumerate(partial_schedules): + 
mock_yaml_files.append({ + 'file': f'schedule_{i+1:02d}.yml', + 'schedule': 'N/A', + 'estimated_runtime': partial_schedule.total_estimated_time or 0, + 'stage_count': len(partial_schedule.stages) + }) + + summary = MultiYamlSummary(mock_yaml_files, len( + config.scenarios), partial_schedules) + summary.print_execution_plans() diff --git a/scripts/crank-scheduler/schedule_splitter.py b/scripts/crank-scheduler/schedule_splitter.py new file mode 100644 index 000000000..099b56975 --- /dev/null +++ b/scripts/crank-scheduler/schedule_splitter.py @@ -0,0 +1,187 @@ +""" +Schedule splitting functionality for multi-YAML generation +""" + +from typing import List, Optional + +from models import CombinedConfiguration, PartialSchedule, Schedule +from utils import ScheduleExporter + + +class ScheduleSplitter: + """Splits a schedule across multiple YAML files with balanced runtimes""" + + def __init__(self, config: CombinedConfiguration): + self.config = config + self.yaml_config = config.metadata.yaml_generation + + def split_schedule(self, schedule: Schedule) -> List[PartialSchedule]: + """Split schedule using balanced runtime strategy""" + if not self.yaml_config: + # No split configuration, return single schedule + return [PartialSchedule( + name="full", + stages=schedule.stages.copy(), + total_estimated_time=schedule.total_estimated_time + )] + + return self._split_by_balanced_runtime(schedule) + + def _split_by_balanced_runtime(self, schedule: Schedule) -> List[PartialSchedule]: + """Use bin-packing to balance runtime across multiple schedules""" + if not self.yaml_config: + raise ValueError("No YAML generation configuration provided") + + target_count = self.yaml_config.target_yaml_count + + if target_count <= 1: + return [PartialSchedule( + name="full", + stages=schedule.stages.copy(), + total_estimated_time=schedule.total_estimated_time + )] + + # Sort stages by runtime (largest first for better bin-packing) + stages_with_runtime = [(stage, stage.estimated_duration or 0) 
+ for stage in schedule.stages] + stages_with_runtime.sort(key=lambda x: x[1], reverse=True) + + # Initialize bins (partial schedules) + bins = [PartialSchedule(f"part_{i+1:02d}", []) + for i in range(target_count)] + bin_runtimes = [0.0] * target_count + + # Assign each stage to the bin with least current runtime + for stage, runtime in stages_with_runtime: + min_bin_idx = bin_runtimes.index(min(bin_runtimes)) + bins[min_bin_idx].add_stage(stage) + bin_runtimes[min_bin_idx] += runtime + + return bins + + def generate_schedules(self, base_schedule: str) -> List[str]: + """Generate multiple cron schedules with time offsets""" + if not self.yaml_config: + return [base_schedule] + + target_count = self.yaml_config.target_yaml_count + offset_hours = self.yaml_config.schedule_offset_hours + + schedules = [] + + for i in range(target_count): + if i == 0: + # First schedule uses base schedule + schedules.append(base_schedule) + else: + # Generate offset schedule + offset_schedule = self._offset_cron_schedule( + base_schedule, i * offset_hours) + schedules.append(offset_schedule) + + return schedules + + def _offset_cron_schedule(self, cron_schedule: str, offset_hours: int) -> str: + """Apply hour offset to a cron schedule""" + parts = cron_schedule.split() + if len(parts) != 5: + # Invalid cron format, return as-is + return cron_schedule + + minute, hour, day, month, weekday = parts + + # Handle hour patterns like "9/12" (every 12 hours starting at 9) + if '/' in hour: + start_hour, interval = hour.split('/') + start_hour = int(start_hour) + interval = int(interval) + + new_start_hour = (start_hour + offset_hours) % 24 + new_hour = f"{new_start_hour}/{interval}" + else: + # Handle simple hour like "9" or hour list like "9,21" + if ',' in hour: + # Hour list + hours = [int(h.strip()) for h in hour.split(',')] + new_hours = [(h + offset_hours) % 24 for h in hours] + new_hour = ','.join(str(h) for h in sorted(new_hours)) + else: + # Single hour + try: + single_hour = 
int(hour) + new_hour = str((single_hour + offset_hours) % 24) + except ValueError: + # Complex hour pattern, return as-is + new_hour = hour + + return f"{minute} {new_hour} {day} {month} {weekday}" + + +class MultiYamlSummary: + """Summary information for multi-YAML generation""" + + def __init__(self, yaml_files: List[dict], total_scenarios: int, + partial_schedules: Optional[List[PartialSchedule]] = None): + self.yaml_files = yaml_files + self.total_scenarios = total_scenarios + self.total_runtime = sum(f['estimated_runtime'] for f in yaml_files) + self.partial_schedules = partial_schedules or [] + + def print_summary(self): + """Print a summary of the generated YAML files""" + print("=" * 60) + print("MULTI-YAML GENERATION SUMMARY") + print("=" * 60) + print(f"Total scenarios: {self.total_scenarios}") + print(f"Total runtime: {self.total_runtime:.1f} minutes") + print(f"Generated files: {len(self.yaml_files)}") + print() + + print("Generated YAML files:") + print("-" * 50) + for yaml_info in self.yaml_files: + print(f"File: {yaml_info['file']}") + print(f" Schedule: {yaml_info['schedule']}") + print(f" Runtime: {yaml_info['estimated_runtime']:.1f} minutes") + print(f" Stages: {yaml_info['stage_count']}") + print() + + # Show balance statistics + runtimes = [f['estimated_runtime'] for f in self.yaml_files] + avg_runtime = sum(runtimes) / len(runtimes) + max_runtime = max(runtimes) + min_runtime = min(runtimes) + balance_ratio = (max_runtime - min_runtime) / \ + avg_runtime * 100 if avg_runtime > 0 else 0 + + print("Runtime balance:") + print(f" Average: {avg_runtime:.1f} minutes") + print(f" Range: {min_runtime:.1f} - {max_runtime:.1f} minutes") + print(f" Balance ratio: {balance_ratio:.1f}% (lower is better)") + + def print_execution_plans(self): + """Print execution plans for each split schedule""" + if not self.partial_schedules: + print("No partial schedules available for execution plan display.") + return + + for (yaml_info, partial_schedule) in 
zip(self.yaml_files, self.partial_schedules): + print("\n" + "=" * 80) + print(f"EXECUTION PLAN FOR {yaml_info['file'].upper()}") + print("=" * 80) + print(f"Schedule: {yaml_info['schedule']}") + print( + f"Estimated Runtime: {yaml_info['estimated_runtime']:.1f} minutes") + print(f"Stages: {yaml_info['stage_count']}") + print() + + # Convert PartialSchedule to Schedule for display + display_schedule = Schedule( + stages=partial_schedule.stages, + total_estimated_time=partial_schedule.total_estimated_time + ) + + # Use the existing ScheduleExporter to format the execution plan + execution_plan = ScheduleExporter.to_summary_table( + display_schedule) + print(execution_plan) diff --git a/scripts/crank-scheduler/scheduler.py b/scripts/crank-scheduler/scheduler.py new file mode 100644 index 000000000..9793044a6 --- /dev/null +++ b/scripts/crank-scheduler/scheduler.py @@ -0,0 +1,266 @@ +from typing import Dict, List, Optional, Set, Tuple + +from models import (CombinedConfiguration, Machine, MachineAssignment, + MachineCapability, MachineType, Scenario, ScenarioType, + Schedule, Stage) + + +class MachineAllocator: + """Handles machine allocation logic with multi-type capabilities""" + + def __init__(self, machines: List[Machine], enforce_machine_groups: bool = True): + self.machines = {m.name: m for m in machines} + self.enforce_machine_groups = enforce_machine_groups + + def find_machine_assignment(self, scenario: Scenario, target_machine: str, used_machines: Set[str]) -> Optional[ + Tuple[Dict[MachineType, Machine], Dict[MachineType, str]]]: + """Find the best machine assignment for a scenario on a specific target machine""" + required_types = scenario.get_required_machine_types() + assignment = {} + profiles = {} + used_machines_temp = used_machines.copy() + sut_machine = None + + # Process each required machine type + for machine_type in required_types: + if machine_type == MachineType.SUT: + # For SUT, REQUIRE the exact target machine - no substitution allowed + 
machine_profile = self._select_exact_sut_machine( + target_machine, used_machines_temp, scenario) + if not machine_profile: + return None + + machine, profile = machine_profile + assignment[machine_type] = machine + profiles[machine_type] = profile + used_machines_temp.add(machine.name) + sut_machine = machine # Store SUT machine for preferred partner selection + + else: + # For LOAD and DB, use SUT machine's preferred partners + preferred_partners = sut_machine.preferred_partners if sut_machine else [] + machine_profile = self._select_best_machine_for_type( + machine_type, used_machines_temp, None, scenario, preferred_partners, sut_machine + ) + if not machine_profile: + return None + + machine, profile = machine_profile + assignment[machine_type] = machine + profiles[machine_type] = profile + used_machines_temp.add(machine.name) + + # Return combined assignment information + return (assignment, profiles) + + def _select_best_machine_for_type(self, machine_type: MachineType, + used_machines: Set[str], + preferred_machine: Optional[str] = None, + scenario: Optional[Scenario] = None, + preferred_partners: Optional[List[str]] = None, + sut_machine: Optional[Machine] = None) -> Optional[Tuple[Machine, str]]: + """Select the best machine for a specific type with role-priority-first scoring. + + Scoring rules (lower score is better): + - Start with the machine's capability.priority for the requested role (1 = best) + - If a specific preferred_machine is requested, give it a strong boost (-0.5) + - If the machine is a preferred partner of the SUT, add a tiny tie-breaker (+0.01 per rank) + so partner order never overrides role priority, only breaks ties among equals. 
+ """ + candidates: List[Tuple[float, Machine, MachineCapability]] = [] + + for machine in self.machines.values(): + if machine.name in used_machines: + continue + + capability = machine.get_capability(machine_type) + if not capability: + continue + + # Apply machine group filtering if enabled + if self.enforce_machine_groups and sut_machine: + if not self._machines_in_same_group(sut_machine, machine): + continue + + # Base score is the role priority (1 = preferred) + score: float = float(capability.priority) + + # Strong boost if a specific machine is explicitly preferred (rare for non-SUT) + if preferred_machine and machine.name == preferred_machine: + score -= 0.5 + + # Tiny bias for preferred partners (LOAD/DB case) – tie-breaker only + if preferred_partners and machine.name in preferred_partners: + try: + partner_index = preferred_partners.index(machine.name) + score += 0.01 * (partner_index + 1) # 0.01, 0.02, 0.03, ... + except ValueError: + pass + + candidates.append((score, machine, capability)) + + if not candidates: + return None + + # Sort by score (lower = better), apply stable tiebreaker by name + candidates.sort(key=lambda x: (x[0], x[1].name)) + _, best_machine, best_capability = candidates[0] + + # Select profile based on scenario preferences or default + profile = self._select_profile_for_capability( + best_capability, best_machine, machine_type, scenario) + return (best_machine, profile) + + def _machines_in_same_group(self, machine1: Machine, machine2: Machine) -> bool: + """Check if two machines are in the same group or if either has no group (compatible)""" + # If either machine has no group, they are compatible with any machine + if machine1.machine_group is None or machine2.machine_group is None: + return True + + # Both machines have groups - they must match + return machine1.machine_group == machine2.machine_group + + def _select_profile_for_capability(self, capability: MachineCapability, machine: Machine, + machine_type: MachineType, 
scenario: Optional[Scenario] = None) -> str: + """Select the best profile for a capability, considering scenario preferences""" + + # Check if scenario has a specific profile preference for this machine/type + if scenario: + preferred_profile = scenario.get_preferred_profile( + machine.name, machine_type) + if preferred_profile and preferred_profile in capability.profiles: + return preferred_profile + + # Use default profile (guaranteed to be set in MachineCapability.__post_init__) + return capability.default_profile or capability.profiles[0] + + def _select_exact_sut_machine(self, target_machine: str, used_machines: Set[str], + scenario: Optional[Scenario] = None) -> Optional[Tuple[Machine, str]]: + """Select the exact target machine for SUT role - no substitution allowed""" + + # Check if target machine is available + if target_machine in used_machines: + return None + + # Check if target machine exists and has SUT capability + if target_machine not in self.machines: + return None + + machine = self.machines[target_machine] + capability = machine.get_capability(MachineType.SUT) + if not capability: + return None + + # Select profile based on scenario preferences or default + profile = self._select_profile_for_capability( + capability, machine, MachineType.SUT, scenario) + return (machine, profile) + + +class CrankScheduler: + """Main scheduler class that creates optimized schedules""" + + def __init__(self, machines: List[Machine], scenarios: List[Scenario], config: CombinedConfiguration): + self.machines = machines + self.scenarios = scenarios + self.max_queues = len(config.metadata.queues) + self.allocator = MachineAllocator( + machines, config.metadata.enforce_machine_groups) + + # Estimate runtimes for all scenarios + for scenario in self.scenarios: + if scenario.estimated_runtime is None: + scenario.estimated_runtime = estimate_runtime(scenario) + + def create_schedule(self) -> Schedule: + """Create an optimized schedule""" + # Expand scenarios into 
individual runs for each target machine + scenario_runs = [] + for scenario in self.scenarios: + for target_machine in scenario.target_machines: + scenario_runs.append((scenario, target_machine)) + + # Sort by estimated runtime (descending) to minimize idle time + sorted_runs = sorted(scenario_runs, + key=lambda run: run[0].estimated_runtime or 0, + reverse=True) + + stages = [] + remaining_runs = sorted_runs.copy() + stage_id = 0 + + while remaining_runs: + stage = self._create_stage(stage_id, remaining_runs) + if not stage.assignments: + # If we can't create any assignments, we might have a problem + print( + f"Warning: Could not schedule {len(remaining_runs)} scenario runs") + break + + stages.append(stage) + + # Remove scheduled runs + scheduled_runs = set() + for assignment in stage.assignments: + scheduled_runs.add( + (assignment.scenario, assignment.target_machine)) + remaining_runs = [ + run for run in remaining_runs if run not in scheduled_runs] + stage_id += 1 + + schedule = Schedule(stages=stages) + schedule.calculate_total_time() + return schedule + + def _create_stage(self, stage_id: int, available_runs: List[Tuple]) -> Stage: + """Create a single stage by packing scenario runs optimally""" + assignments = [] + used_machines = set() + # Track assignments per queue + queue_assignments = [[] for _ in range(self.max_queues)] + + # Try to assign scenario runs to this stage + for scenario, target_machine in available_runs.copy(): + assignment_result = self.allocator.find_machine_assignment( + scenario, target_machine, used_machines) + + if assignment_result: + machine_assignment, profiles = assignment_result + + # Find the queue with the fewest assignments to balance load + queue_id = min(range(self.max_queues), + key=lambda q: len(queue_assignments[q])) + + assignment = MachineAssignment( + scenario=scenario, + machines=machine_assignment, + profiles=profiles, + queue_id=queue_id, + target_machine=target_machine + ) + assignments.append(assignment) + 
def estimate_runtime(scenario: Scenario) -> float:
    """Return the scenario's runtime estimate in minutes.

    Uses the scenario's own estimate when present, otherwise falls back
    to a default based on how many machines the scenario needs.
    """
    if scenario.estimated_runtime is not None:
        return scenario.estimated_runtime

    defaults = {
        ScenarioType.SINGLE: 30.0,  # 30 minutes default
        ScenarioType.DUAL: 45.0,    # 45 minutes default
    }
    # Anything else (TRIPLE) gets the largest default.
    return defaults.get(scenario.scenario_type, 60.0)
group_jobs = [] + + for assignment in stage.assignments: + job_data = self._create_job_from_assignment(assignment) + if job_data: + group_jobs.append(job_data) + + if group_jobs: + groups.append({ + "jobs": group_jobs + }) + + template_data = { + "schedule": schedule_time, + "queues": queues, + "groups": groups + } + + return template_data + + def _create_job_from_assignment(self, assignment: MachineAssignment) -> Optional[Dict[str, Any]]: + """Create a job entry from a machine assignment""" + scenario = assignment.scenario + + # Get the scenario template + template = scenario.template + if not template: + print(f"Warning: No template defined for scenario {scenario.name}") + return None + + # Generate job name based on target machine, not assigned machine + target_machine_name = assignment.target_machine.replace( + '-', ' ').title() + job_name = f"{scenario.name} {target_machine_name}" if target_machine_name else scenario.name + # Generate profiles from machines + profiles = [] + for machine_type, _ in assignment.machines.items(): + profile_name = assignment.get_profile_for_machine_type( + machine_type) + if profile_name: + profiles.append(profile_name) + + job_data = { + "name": job_name, + "template": template, + "profiles": profiles + } + + return job_data + + def save_template_data(self, template_data: Dict[str, Any], output_path: str): + """Save template data to JSON file for use with Liquid template""" + with open(output_path, 'w', encoding='utf-8') as f: + json.dump(template_data, f, indent=2) + + print(f"Template data saved to: {output_path}") + + def generate_yaml_from_template(self, template_data: Dict[str, Any], + template_path: str, output_path: str) -> None: + """Generate YAML by processing template data through Liquid template""" + # Read template file + template_content = Path(template_path).read_text(encoding='utf-8') + + # Process template + result = render(template_content, **template_data) + + # Save result + Path(output_path).write_text(result, 
encoding='utf-8') + print(f"Generated YAML saved to: {output_path}") + + +class TemplateCLI: + """CLI integration for template generation""" + + @staticmethod + def add_template_arguments(parser): + """Add template-related arguments to argument parser""" + template_group = parser.add_argument_group('Template Options') + template_group.add_argument('--template', type=str, + help='Path to Liquid template file') + template_group.add_argument('--template-data', action='store_true', + help='Save template data to JSON files (for debugging)') + template_group.add_argument('--yaml-prefix', type=str, + help='Prefix for generated YAML files (default: )') + template_group.add_argument('--target-yamls', type=int, + help='Number of YAML files to generate (overrides config)') + template_group.add_argument('--schedule-offset', type=int, + help='Hours between each YAML schedule (overrides config)') diff --git a/scripts/crank-scheduler/utils.py b/scripts/crank-scheduler/utils.py new file mode 100644 index 000000000..90cf5a56c --- /dev/null +++ b/scripts/crank-scheduler/utils.py @@ -0,0 +1,224 @@ +import csv +import json + +import yaml +from models import (CombinedConfiguration, ConfigurationMetadata, Machine, + MachineCapability, MachineType, Scenario, ScenarioType, + Schedule, YamlGenerationConfig) + + +class DataLoader: + """Load machine and scenario data from various formats""" + + @staticmethod + def load_combined_configuration(file_path: str) -> CombinedConfiguration: + """Load combined machines and scenarios configuration from JSON file""" + with open(file_path, 'r') as f: + data = json.load(f) + + # Load metadata + metadata_data = data.get('metadata', {}) + + # Load YAML generation config if present + yaml_gen_data = metadata_data.get('yaml_generation') + yaml_generation = None + if yaml_gen_data: + yaml_generation = YamlGenerationConfig( + target_yaml_count=yaml_gen_data.get('target_yaml_count', 2), + schedule_offset_hours=yaml_gen_data.get( + 'schedule_offset_hours', 6), + 
class DataLoader:
    """Load machine and scenario data from various formats"""

    @staticmethod
    def load_combined_configuration(file_path: str) -> CombinedConfiguration:
        """Load combined machines and scenarios configuration from JSON file"""
        with open(file_path, 'r') as f:
            raw = json.load(f)

        meta_raw = raw.get('metadata', {})

        # Optional YAML-generation settings nested under metadata.
        yaml_generation = None
        yaml_raw = meta_raw.get('yaml_generation')
        if yaml_raw:
            yaml_generation = YamlGenerationConfig(
                target_yaml_count=yaml_raw.get('target_yaml_count', 2),
                schedule_offset_hours=yaml_raw.get('schedule_offset_hours', 6),
            )

        metadata = ConfigurationMetadata(
            name=meta_raw.get('name', 'Configuration'),
            description=meta_raw.get('description', ''),
            version=meta_raw.get('version', '1.0'),
            schedule=meta_raw.get('schedule'),
            queues=meta_raw.get('queues', []),
            yaml_generation=yaml_generation,
            enforce_machine_groups=meta_raw.get('enforce_machine_groups', True),
        )

        machines = [DataLoader._parse_machine(m)
                    for m in raw.get('machines', [])]
        scenarios = [DataLoader._parse_scenario(s)
                     for s in raw.get('scenarios', [])]

        return CombinedConfiguration(
            metadata=metadata,
            machines=machines,
            scenarios=scenarios,
        )

    @staticmethod
    def _parse_machine(machine_raw) -> Machine:
        """Build a Machine (with its per-type capabilities) from raw JSON."""
        capabilities = {}
        for type_str, cap_raw in machine_raw.get('capabilities', {}).items():
            machine_type = MachineType(type_str)

            profiles = cap_raw.get('profiles', [])
            if not profiles:
                # Fallback: synthesize a default profile name if none specified.
                profiles = [f"{machine_raw['name']}-{type_str}"]

            capabilities[machine_type] = MachineCapability(
                machine_type=machine_type,
                priority=cap_raw.get('priority', 1),
                profiles=profiles,
                # Optional; may be None and is then resolved to the first
                # profile downstream (per the original inline note).
                default_profile=cap_raw.get('default_profile'),
            )

        return Machine(
            name=machine_raw['name'],
            capabilities=capabilities,
            preferred_partners=machine_raw.get('preferred_partners', []),
            machine_group=machine_raw.get('machine_group'),
        )

    @staticmethod
    def _parse_scenario(scenario_raw) -> Scenario:
        """Build a Scenario, including optional per-machine profile overrides."""
        overrides = None
        if 'profile_overrides' in scenario_raw:
            # {machine name: {MachineType: profile name}}
            overrides = {
                machine_name: {
                    MachineType(type_str): profile
                    for type_str, profile in prefs.items()
                }
                for machine_name, prefs in
                scenario_raw['profile_overrides'].items()
            }

        return Scenario(
            name=scenario_raw['name'],
            scenario_type=ScenarioType(scenario_raw.get('type')),
            target_machines=scenario_raw['target_machines'],
            estimated_runtime=scenario_raw.get('estimated_runtime'),
            template=scenario_raw.get('template'),
            description=scenario_raw.get('description'),
            profile_overrides=overrides,
        )
class ScheduleExporter:
    """Export schedules to various formats"""

    @staticmethod
    def to_json(schedule: Schedule) -> str:
        """Export schedule to JSON format"""
        def _assignment(assignment):
            # Flatten machine assignments to {type value: machine name}.
            return {
                'scenario': assignment.scenario.name,
                'queue_id': assignment.queue_id,
                'machines': {mt.value: m.name
                             for mt, m in assignment.machines.items()},
                'estimated_runtime': assignment.scenario.estimated_runtime,
            }

        payload = {
            'total_estimated_time': schedule.total_estimated_time,
            'stages': [
                {
                    'stage_id': stage.stage_id,
                    'estimated_duration': stage.estimated_duration,
                    'assignments': [_assignment(a) for a in stage.assignments],
                }
                for stage in schedule.stages
            ],
        }
        return json.dumps(payload, indent=2)

    @staticmethod
    def to_yaml(schedule: Schedule) -> str:
        """Export schedule to YAML format"""
        # Reuse the JSON export so both formats always agree.
        data = json.loads(ScheduleExporter.to_json(schedule))
        return yaml.dump(data, default_flow_style=False, sort_keys=False)

    @staticmethod
    def to_csv(schedule: Schedule) -> str:
        """Export schedule to CSV format"""
        import io
        buffer = io.StringIO()

        writer = csv.DictWriter(buffer, fieldnames=[
            'stage_id', 'queue_id', 'scenario', 'sut_machine', 'load_machine',
            'db_machine', 'estimated_runtime', 'stage_duration'])
        writer.writeheader()

        for stage in schedule.stages:
            for assignment in stage.assignments:
                row = {
                    'stage_id': stage.stage_id,
                    'queue_id': assignment.queue_id,
                    'scenario': assignment.scenario.name,
                    'estimated_runtime': assignment.scenario.estimated_runtime,
                    'stage_duration': stage.estimated_duration,
                }
                # One column per machine role, e.g. sut_machine / load_machine.
                row.update({f"{mt.value}_machine": m.name
                            for mt, m in assignment.machines.items()})
                writer.writerow(row)

        return buffer.getvalue()

    @staticmethod
    def to_summary_table(schedule: Schedule) -> str:
        """Create a human-readable summary table"""
        lines = []
        lines.extend([
            "=" * 80,
            "CRANK SCHEDULER - EXECUTION PLAN",
            "=" * 80,
            f"Total Estimated Time: {schedule.total_estimated_time:.1f} minutes",
            f"Number of Stages: {len(schedule.stages)}",
            "",
        ])

        # Per-machine utilization section (omitted when no data is available).
        utilization = schedule.get_machine_utilization()
        if utilization:
            lines.append("MACHINE UTILIZATION:")
            lines.append("-" * 30)
            for machine, util in sorted(utilization.items()):
                lines.append(f"{machine:20s} {util:6.1f}%")
            lines.append("")

        lines.append("STAGE BREAKDOWN:")
        lines.append("-" * 50)

        for stage in schedule.stages:
            lines.append(
                f"\nStage {stage.stage_id} (Duration: {stage.estimated_duration:.1f} min)")
            lines.append(
                " Queue | Scenario | Runtime (min) | Machines")
            lines.append(
                " ------|-----------------------------|--------------|---------")

            for assignment in stage.assignments:
                machine_list = ", ".join(assignment.get_machine_names())
                sut_machine = assignment.machines.get(MachineType.SUT)
                runtime = assignment.scenario.estimated_runtime or 0.0

                # Every assignment must have a system-under-test machine.
                if sut_machine is None:
                    raise ValueError(
                        f"No SUT machine assigned for scenario: {assignment.scenario.name}")

                # Append SUT machine to scenario name
                scenario_with_machine = f"{assignment.scenario.name}-{sut_machine.name}"

                lines.append(
                    f" {assignment.queue_id:4d} | {scenario_with_machine:27s} | {runtime:12.1f} | {machine_list}")

        return "\n".join(lines)