@@ -1,6 +1,7 @@
 import logging
 import os
 import pickle
+import random
 from unittest import mock

 import pytest
@@ -218,3 +219,38 @@ def test_tiny_large_loop():
     got_result = scheduler.get_result(True, 1)

     assert got_result == result_pkl
+
+
+@pytest.mark.local
+def test_larger_jobs_prioritized():
+    """Larger jobs should be scheduled first"""
+
+    task_q, result_q = SpawnContext.Queue(), SpawnContext.Queue()
+    scheduler = MPITaskScheduler(task_q, result_q)
+
+    max_nodes = len(scheduler.available_nodes)
+
+    # The first task will get scheduled with all the nodes,
+    # and the remainder hits the backlog queue.
+    node_request_list = [max_nodes] + [random.randint(1, 4) for _i in range(8)]
+
+    for task_id, num_nodes in enumerate(node_request_list):
+        mock_task_buffer = pack_res_spec_apply_message("func", "args", "kwargs",
+                                                       resource_specification={
+                                                           "num_nodes": num_nodes,
+                                                           "ranks_per_node": 2
+                                                       })
+        task_package = {"task_id": task_id, "buffer": mock_task_buffer}
+        scheduler.put_task(task_package)
+
+    # Confirm that the tasks are sorted in decreasing order
+    output_priority = []
+    for i in range(len(node_request_list) - 1):
+        p_task = scheduler._backlog_queue.get()
+        output_priority.append(p_task.nodes_needed)
+
+    # Remove the first large job that blocks the nodes and forces following
+    # tasks into backlog
+    expected_priority = node_request_list[1:]
+    expected_priority.sort(reverse=True)
+    assert expected_priority == output_priority, "Expected nodes in decreasing sorted order"
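
For context on what the new test asserts: the scheduler drains its backlog in decreasing order of requested nodes. Below is a minimal sketch of how that ordering can be obtained with Python's min-heap `queue.PriorityQueue` by negating the node count in the sort key. The `PrioritizedTask` class and its fields here are illustrative assumptions only; the test relies solely on backlog items exposing a `nodes_needed` attribute, and this is not necessarily `MPITaskScheduler`'s actual internal representation.

```python
# Illustrative sketch, not the scheduler's real internals: queue.PriorityQueue
# is a min-heap, so "largest job first" falls out of using the negated node
# count as the comparison key.
import queue
import random
from dataclasses import dataclass, field


@dataclass(order=True)
class PrioritizedTask:
    priority: int                              # -nodes_needed, so larger jobs sort first
    nodes_needed: int = field(compare=False)   # excluded from ordering
    task_package: dict = field(compare=False)  # excluded from ordering


backlog: queue.PriorityQueue = queue.PriorityQueue()
for task_id in range(8):
    nodes = random.randint(1, 4)
    backlog.put(PrioritizedTask(priority=-nodes,
                                nodes_needed=nodes,
                                task_package={"task_id": task_id}))

# Draining the queue yields node counts in non-increasing order,
# which mirrors what test_larger_jobs_prioritized checks.
drained = [backlog.get().nodes_needed for _ in range(8)]
assert drained == sorted(drained, reverse=True)
```

Negating the key rather than writing a custom max-heap keeps the standard-library `PriorityQueue` usable unchanged; ties between equal node counts may pop in either order, which is harmless here because only the values are compared.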