|
| 1 | +from multiprocessing.dummy import Pool |
| 2 | +import requests |
| 3 | +from time import time, sleep |
| 4 | +import logging |
| 5 | + |
| 6 | + |
class ClientHandler:
    """Performs concurrent HTTP requests against a set of clients and waits
    for their completion according to an operation mode.

    :param clients: list of single-key dicts, each mapping an arbitrary
        client name to a ``{'host': ..., 'port': ...}`` dict.
    :param OPERATION_MODE: one of ``'n_firsts'`` (return after the first N
        responses), ``'timeout'`` (return once enough clients responded AND
        a timeout elapsed) or ``'wait_all'`` (wait for every client).
    :param kwargs: mode-specific options: ``n_firsts`` for ``'n_firsts'``;
        ``wait_from_n_firsts`` and ``timeout`` (seconds) for ``'timeout'``.
    """

    def __init__(self, clients, OPERATION_MODE='wait_all', **kwargs):
        self.clients = self.parse_clients(clients)
        self.n_clients = len(clients)
        self.OPERATION_MODE = OPERATION_MODE
        # The pool is created lazily in perform_parallel_requests().
        self.pool = None
        logging.info(
            '[Client Handler] Operation mode: {}'.format(self.OPERATION_MODE))
        # By default tolerate up to two stragglers (but always wait for >= 1).
        default_n_firsts = max(1, self.n_clients - 2)
        if self.OPERATION_MODE == 'n_firsts':
            self.N_FIRSTS = kwargs.get('n_firsts', default_n_firsts)
            assert self.N_FIRSTS <= self.n_clients, \
                'n_firsts must be <= than num clients'
            logging.info(
                '[Client Handler] n_firsts: {}'.format(self.N_FIRSTS))
        elif self.OPERATION_MODE == 'timeout':
            self.WAIT_FROM_N_FIRSTS = kwargs.get('wait_from_n_firsts',
                                                 default_n_firsts)
            # Prefer the correctly-spelled key; fall back to the historical
            # misspelling ('timoeut') so existing callers keep working.
            self.TIMEOUT = kwargs.get(
                'timeout', kwargs.get('timoeut', 60))  # Seconds
        elif self.OPERATION_MODE == 'wait_all':
            self.N_FIRSTS = self.n_clients
            logging.info('[Client Handler] Will wait '
                         'until {} clients'.format(self.N_FIRSTS))
        else:
            # ValueError is a subclass of Exception, so callers catching the
            # previous generic Exception still work.
            raise ValueError('Operation mode not accepted')
        self.operations_history = {}
        self.init_operations_history()

    def perform_requests_and_wait(self, endpoint):
        """Fire *endpoint* at every client, then block per OPERATION_MODE.

        :param endpoint: URL path segment requested on each client.
        :returns: list of (host, port) keys of clients that responded.
        """
        self.perform_parallel_requests(endpoint)
        if self.OPERATION_MODE == 'n_firsts':
            if endpoint == 'send_model':
                # TODO: Do this part with redundancy
                return self.wait_until_n_responses(wait_all=True)
            return self.wait_until_n_responses()
        elif self.OPERATION_MODE == 'timeout':
            self.started = time()
            return self.wait_until_timeout()
        elif self.OPERATION_MODE == 'wait_all':
            return self.wait_until_n_responses(wait_all=True)

    def init_operations_history(self):
        """Create an empty per-client history entry for every known client."""
        for host, port in self.clients:
            key = self.get_client_key(host, port)
            self.operations_history[key] = []

    @staticmethod
    def parse_clients(clients):
        """Flatten the client config dicts into (host, port) tuples."""
        p_clients = []
        for cl in clients:
            # Each entry is a single-key dict; grab its sole config value.
            cfg = next(iter(cl.values()))
            p_clients.append((cfg['host'], cfg['port']))
        return p_clients

    def perform_parallel_requests(self, endpoint):
        """Launch one asynchronous request per client on a fresh pool."""
        self.pool = Pool(self.n_clients)
        for host, port in self.clients:
            # Results are observed through operations_history, so the
            # AsyncResult handles do not need to be kept.
            self.pool.apply_async(self.perform_request,
                                  [host, port, endpoint])
        self.pool.close()

    def _collect_ended(self, ended_clients):
        """Add to *ended_clients* every client whose last operation ended.

        Shared polling step for the wait_* loops. Clients already recorded
        are skipped so each completion is asserted and logged exactly once.
        """
        for key in self.clients:
            if key in ended_clients:
                continue
            try:
                last_operation = self.operations_history[key][-1]
            except IndexError:
                # Last operation still not computed
                continue
            if last_operation['ended']:
                # TODO: Handle exception when status code != 200
                # BUGFIX: the response is stored under 'response' (see
                # perform_request), not 'res' — the old key raised KeyError.
                assert last_operation['response'].status_code == 200
                logging.info(
                    '[Client Handler] client {} '
                    'finished performing operation {}'.format(
                        key, last_operation['op']
                    )
                )
                ended_clients.add(key)

    def wait_until_timeout(self):
        """Poll until >= WAIT_FROM_N_FIRSTS clients ended AND TIMEOUT elapsed.

        :returns: list of (host, port) keys of clients that responded.
        """
        ended_clients = set()
        while True:
            self._collect_ended(ended_clients)
            elapsed = time() - self.started
            if (len(ended_clients) >= self.WAIT_FROM_N_FIRSTS and
                    elapsed > self.TIMEOUT):
                self.pool.terminate()
                return list(ended_clients)
            sleep(0.1)

    def wait_until_n_responses(self, wait_all=False):
        """Poll until N_FIRSTS clients responded (all of them if *wait_all*).

        :param wait_all: require exactly N_FIRSTS completions instead of
            at-least-N_FIRSTS (the two coincide in practice because the
            ended set grows one client at a time).
        :returns: list of (host, port) keys of clients that responded.
        """
        # TODO: What to do in send model?
        ended_clients = set()
        while True:
            # Periodically check if the requests are ending
            self._collect_ended(ended_clients)
            if ((not wait_all and (len(ended_clients) >= self.N_FIRSTS))
                    or (wait_all and len(ended_clients) == self.N_FIRSTS)):
                self.pool.terminate()
                return list(ended_clients)
            sleep(0.1)

    @staticmethod
    def get_client_key(host, port):
        """Key used to index operations_history for a client."""
        return (host, port)

    def perform_request(self, host, port, endpoint):
        """Perform one blocking GET against a client and record the outcome.

        Runs inside a pool worker thread. NOTE(review): exceptions raised by
        requests.get are swallowed by apply_async (the AsyncResult is never
        fetched), leaving the history entry unfinished — confirm intended.
        """
        key = self.get_client_key(host, port)
        last_operation = {
            'started': time(),
            'op': endpoint,
            'status': 'started',
            'ended': None
        }
        url = 'http://{}:{}/{}'.format(host, port, endpoint)
        res = requests.get(url)
        last_operation.update({'status': 'ended',
                               'ended': time(),
                               'response': res})
        self.operations_history.setdefault(key, []).append(last_operation)
| 154 | + |
0 commit comments