Skip to content

Commit 7c19dec

Browse files
kcons authored and andrewshie-sentry committed
feat(aci): Batch of minor logging fixes (#93243)
* Add back log_if_slow call that was lost in a weird merge.
* Shorten explicitly reported batch duration list from 200 -> 20; they're generally logged unordered, so having 200 to look at for a slow one is actually not useful. 20 is more than enough to get a sense of the distribution without being big in logs.
* Fix loop log event to not be misleading.
1 parent fdb13e9 commit 7c19dec

File tree

3 files changed

+13
-8
lines changed

3 files changed

+13
-8
lines changed

src/sentry/workflow_engine/processors/delayed_workflow.py

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@
5050
get_slow_conditions_for_groups,
5151
)
5252
from sentry.workflow_engine.processors.detector import get_detector_by_event
53-
from sentry.workflow_engine.processors.log_util import track_batch_performance
53+
from sentry.workflow_engine.processors.log_util import log_if_slow, track_batch_performance
5454
from sentry.workflow_engine.processors.workflow import (
5555
WORKFLOW_ENGINE_BUFFER_LIST_KEY,
5656
evaluate_workflows_action_filters,
@@ -422,7 +422,7 @@ def fire_actions_for_groups(
422422
)
423423

424424
with track_batch_performance(
425-
"workflow_engine.delayed_workflow.group_to_groupevent.loop",
425+
"workflow_engine.delayed_workflow.fire_actions_for_groups.loop",
426426
logger,
427427
threshold=timedelta(seconds=40),
428428
) as tracker:
@@ -453,9 +453,14 @@ def fire_actions_for_groups(
453453
Workflow.objects.filter(when_condition_group_id__in=workflow_triggers)
454454
)
455455

456-
filtered_actions = filtered_actions.union(
457-
evaluate_workflows_action_filters(workflows, event_data)
458-
)
456+
with log_if_slow(
457+
logger,
458+
"workflow_engine.delayed_workflow.slow_evaluate_workflows_action_filters",
459+
extra={"group_id": group.id, "event_data": event_data},
460+
threshold_seconds=1,
461+
):
462+
workflows_actions = evaluate_workflows_action_filters(workflows, event_data)
463+
filtered_actions = filtered_actions.union(workflows_actions)
459464

460465
metrics.incr(
461466
"workflow_engine.delayed_workflow.triggered_actions",

src/sentry/workflow_engine/processors/log_util.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ def top_n_slowest(durations: dict[str, float], n: int) -> dict[str, float]:
1414

1515

1616
# To keep logs reasonable, we only report up to this many individual iteration times.
17-
_MAX_ITERATIONS_LOGGED = 200
17+
_MAX_ITERATIONS_LOGGED = 20
1818

1919

2020
# NOTE: You should be using Sentry's built-in performance tracking instead of this.

tests/sentry/workflow_engine/processors/test_log_util.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -112,8 +112,8 @@ def test_durations_truncated(self):
112112
self.tracker.finalize()
113113
self.logger.info.assert_called_once()
114114
call_args = self.logger.info.call_args[1]
115-
assert call_args["extra"]["durations_truncated"] == 100 # 300 - 200
116-
assert len(call_args["extra"]["durations"]) == 200 # Should only keep top 200
115+
assert call_args["extra"]["durations_truncated"] == 100
116+
assert len(call_args["extra"]["durations"]) == _MAX_ITERATIONS_LOGGED
117117

118118

119119
def test_top_n_slowest():

0 commit comments

Comments (0)