8 changes: 4 additions & 4 deletions Makefile
@@ -67,13 +67,13 @@ oc_delete_model_test:
 	| oc delete -f -

 run_app_pipenv:
-	pipenv run python app.py
+	pipenv run python3 app.py

 run_test_pipenv:
-	pipenv run python test_model.py
+	pipenv run python3 test_model.py

 run_app:
-	python app.py
+	python3 app.py

 run_test:
-	python test_model.py
+	python3 test_model.py
20 changes: 16 additions & 4 deletions app.py
@@ -14,6 +14,8 @@
 from configuration import Configuration
 import model
 import schedule
+import threading
+

 # Set up logging
 _LOGGER = logging.getLogger(__name__)
@@ -29,6 +31,8 @@
     headers=Configuration.prom_connect_headers,
     disable_ssl=True,
 )
+# print("hiiii")
+# print(Configuration.prom_connect_headers)

 for metric in METRICS_LIST:
     # Initialize a predictor for all metrics first
@@ -152,6 +156,7 @@ def train_individual_model(predictor_model, initial_run):
         metric_to_predict.metric_name,
         metric_to_predict.label_config,
     )
+
     return predictor_model

 def train_model(initial_run=False, data_queue=None):
@@ -165,6 +170,8 @@
     PREDICTOR_MODEL_LIST = result
     data_queue.put(PREDICTOR_MODEL_LIST)

+def start_ioloop():
+    tornado.ioloop.IOLoop.current().start()

 if __name__ == "__main__":
     # Queue to share data between the tornado server and the model training
@@ -176,9 +183,14 @@ def train_model(initial_run=False, data_queue=None):
     # Set up the tornado web app
     app = make_app(predicted_model_queue)
     app.listen(8080)
-    server_process = Process(target=tornado.ioloop.IOLoop.instance().start)
-    # Start up the server to expose the metrics.
-    server_process.start()
+    # server_process = Process(target=tornado.ioloop.IOLoop.instance().start)
+    # # Start up the server to expose the metrics.
+    # server_process.start()
+
+    server_thread = threading.Thread(target=start_ioloop)
+
+    # Start the server thread
+    server_thread.start()

     # Schedule the model training
     schedule.every(Configuration.retraining_interval_minutes).minutes.do(
@@ -193,4 +205,4 @@ def train_model(initial_run=False, data_queue=None):
         time.sleep(1)

     # join the server process in case the main process ends
-    server_process.join()
+    server_thread.join()
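Note: below is a minimal standalone sketch of the pattern this diff moves to, serving Tornado from a background thread while the main thread drives periodic retraining. It assumes Tornado 5+/6 and the schedule package; the handler, port, and retraining job are illustrative placeholders, not the project's actual wiring. In Tornado 5+ a non-main thread needs its own asyncio event loop, so the sketch creates the application inside the thread rather than in the main thread as the diff above does.

import asyncio
import threading
import time

import schedule
import tornado.ioloop
import tornado.web


class PingHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("ok")


def run_server():
    # Tornado 5+ wraps asyncio, so a non-main thread must create and
    # register its own event loop before starting the IOLoop.
    asyncio.set_event_loop(asyncio.new_event_loop())
    app = tornado.web.Application([(r"/ping", PingHandler)])
    app.listen(8080)
    tornado.ioloop.IOLoop.current().start()


def retrain():
    print("retraining models ...")


if __name__ == "__main__":
    server_thread = threading.Thread(target=run_server, daemon=True)
    server_thread.start()

    # Main thread runs the scheduling loop, as in the diff above.
    schedule.every(15).minutes.do(retrain)
    while True:
        schedule.run_pending()
        time.sleep(1)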
9 changes: 5 additions & 4 deletions configuration.py
@@ -24,10 +24,11 @@ class Configuration:
     # any headers that need to be passed while connecting to the prometheus host
     prom_connect_headers = None
     # example oath token passed as a header
-    if os.getenv("FLT_PROM_ACCESS_TOKEN"):
-        prom_connect_headers = {
-            "Authorization": "bearer " + os.getenv("FLT_PROM_ACCESS_TOKEN")
-        }
+    prom_connect_headers = {
+        "x-scope-org-id": "something-else-for-testing",
+        "X-Scope-OrgID": "something-else-for-testing"
+    }
+

     # list of metrics that need to be scraped and predicted
     # multiple metrics can be separated with a ";"
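For reference, a minimal sketch of how these headers reach Prometheus, assuming the prometheus-api-client package. It keeps the environment-driven bearer-token branch that this diff replaces with hard-coded X-Scope-OrgID test headers; the FLT_PROM_URL variable name and the connectivity check are illustrative assumptions, not taken from this diff.

import os

from prometheus_api_client import PrometheusConnect

# Environment-driven headers, as in the code this diff removes.
prom_connect_headers = None
if os.getenv("FLT_PROM_ACCESS_TOKEN"):
    prom_connect_headers = {
        "Authorization": "bearer " + os.getenv("FLT_PROM_ACCESS_TOKEN")
    }

# The headers dict is passed straight to PrometheusConnect, as in app.py above.
pc = PrometheusConnect(
    url=os.getenv("FLT_PROM_URL", "http://localhost:9090"),  # assumed variable name
    headers=prom_connect_headers,
    disable_ssl=True,
)

# Quick connectivity check: list a few metric names.
print(pc.all_metrics()[:5])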