9 changes: 9 additions & 0 deletions README.md
@@ -38,5 +38,14 @@ scenarios:

Then run `bzt test.yml`. After the tool finishes, observe the resulting summary stats in the console log (for more reporting options, see [Generating Test Reports](https://gettaurus.org/docs/Reporting.md)). All artifact files from the run will be placed in the directory mentioned in the console log. Read more on command-line tool usage in [Command-Line Tool](https://gettaurus.org/docs/CommandLine.md).


## Note on Percentile Calculation and Ignored Labels

**Available in the unstable snapshot (August 2025):**

Taurus now excludes any labels specified in the `ignored_labels` option from the overall percentile calculation in the aggregator. If you use the `ignored_labels` feature, those labels will not affect the percentiles reported in the summary/overall statistics.

This change ensures that ignored labels are consistently excluded from both the overall statistics and the percentile calculations.
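
For example, a config along these lines would keep every sample whose label starts with `ignore` out of the overall percentiles, while `home` still counts. This is a minimal sketch: the `ignored-labels` key under the `consolidator` module and all label names and URLs here are illustrative, so check the docs for your Taurus version:

```yaml
modules:
  consolidator:
    ignored-labels:       # assumed key for the ignored_labels option above
    - ignore              # drops every label starting with "ignore"

scenarios:
  quick-test:
    requests:
    - url: http://example.com/ping
      label: ignore-healthcheck   # excluded from overall percentiles
    - url: http://example.com/
      label: home                 # included as usual
```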

![Analytics](https://ga-beacon.appspot.com/UA-63369152-1/taurus/readme)

13 changes: 9 additions & 4 deletions bzt/modules/aggregator.py
@@ -737,14 +737,19 @@ def __aggregate_current(self, datapoint, samples):
                base_label = '[empty]'

            if self.generalize_labels:
-               base_label = self._generalize_label(base_label)
+               base_label = self._generalize_label(str(base_label))

            # Skip ignored labels
            if any([base_label.startswith(ignore) for ignore in self.ignored_labels]):
                continue

            self.__add_sample(current, base_label, sample[1:])

+       # Exclude ignored labels from percentile calculation for the overall KPISet
+       filtered_labels = [label for label in current if label != '' and
+                          not any([label.startswith(ignore) for ignore in self.ignored_labels])]
        overall = KPISet(self.track_percentiles, self.__get_rtimes_max(''), ext_aggregation=self._redundant_aggregation)

-       for label in current.values():
-           overall.merge_kpis(label, datapoint[DataPoint.SOURCE_ID])
+       for label in filtered_labels:
+           overall.merge_kpis(current[label], datapoint[DataPoint.SOURCE_ID])
        current[''] = overall

        return current
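
To make the change easier to review, here is a standalone sketch of the filtering step above, runnable on its own. The labels and placeholder values are hypothetical, not taken from Taurus internals:

```python
# Standalone illustration of the new filtering step (not the Taurus code path).
ignored_labels = ["ignore"]

# Per-label entries keyed by label; '' is the overall entry that gets rebuilt.
current = {"": None, "home": None, "ignore-healthcheck": None, "ignore": None}

# Same predicate as the diff: drop '' and anything starting with an ignored prefix.
filtered_labels = [label for label in current if label != '' and
                   not any(label.startswith(ignore) for ignore in ignored_labels)]

print(filtered_labels)  # ['home'] -- only these labels feed the overall KPISet
```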
1 change: 1 addition & 0 deletions fix-aggregator-ignore-labels-percentiles.change
@@ -0,0 +1 @@
fix-aggregator-ignore-labels-percentiles.change: Exclude ignored labels from overall percentile calculation in ResultsReader.
58 changes: 58 additions & 0 deletions tests/unit/modules/test_aggregator_ignore_labels.py
@@ -0,0 +1,58 @@
import unittest
from bzt.modules.aggregator import ResultsReader, DataPoint, KPISet

class TestResultsReaderIgnoreLabelsPercentiles(unittest.TestCase):
    def setUp(self):
        self.reader = ResultsReader()
        self.reader.track_percentiles = [50, 90, 100]
        self.reader.buffer_scale_idx = '100.0'
        self.reader.buffer_len = 1
        self.reader.ignored_labels = ["ignore"]

    def test_percentiles_exclude_ignored_labels(self):
        # t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname, byte_count
        raw_samples = [
            (1, "ignore", 1, 10, 1, 1, 200, None, '', 0),
            (1, "ignore", 1, 20, 1, 1, 200, None, '', 0),
            (1, "not-ignore", 1, 100, 1, 1, 200, None, '', 0),
            (1, "not-ignore", 1, 200, 1, 1, 200, None, '', 0),
            (1, "not-ignore", 1, 300, 1, 1, 200, None, '', 0),
            (1, "not-ignore", 1, 400, 1, 1, 200, None, '', 0),
            (1, "ignore", 1, 30, 1, 1, 200, None, '', 0),
            (1, "not-ignore", 1, 500, 1, 1, 200, None, '', 0),
        ]
        datapoint = DataPoint(1, self.reader.track_percentiles)
        self.reader._ResultsReader__aggregate_current(datapoint, [s[1:] for s in raw_samples])
        overall = datapoint[DataPoint.CURRENT]['']
        percentiles = overall[KPISet.PERCENTILES]
        # Only the 'not-ignore' samples should be used for percentiles
        used_samples = [s[3] for s in raw_samples if s[1] != "ignore"]
        ignored_samples = [s[3] for s in raw_samples if s[1] == "ignore"]
        self.assertEqual(len(used_samples), 5)
        self.assertEqual(len(ignored_samples), 3)
        # Check that ignored samples are not in the percentiles calculation
        self.assertTrue(all(v not in percentiles.values() for v in ignored_samples))
        # Check percentiles keys
        for p in ["50.0", "90.0", "100.0"]:
            self.assertIn(p, percentiles)
        # Calculate expected percentiles manually (linear interpolation)
        sorted_samples = sorted(used_samples)
        def percentile(sorted_list, perc):
            k = (len(sorted_list) - 1) * (perc / 100.0)
            f = int(k)
            c = min(f + 1, len(sorted_list) - 1)
            if f == c:
                return sorted_list[int(k)]
            d0 = sorted_list[f] * (c - k)
            d1 = sorted_list[c] * (k - f)
            return d0 + d1
        self.assertAlmostEqual(percentiles["50.0"], percentile(sorted_samples, 50), delta=100)
        self.assertAlmostEqual(percentiles["90.0"], percentile(sorted_samples, 90), delta=100)
        self.assertAlmostEqual(percentiles["100.0"], percentile(sorted_samples, 100), delta=100)
        # All percentiles should be >= min of used_samples
        self.assertTrue(all(v >= min(used_samples) for v in percentiles.values()))
        # All percentiles should be <= max of used_samples
        self.assertTrue(all(v <= max(used_samples) for v in percentiles.values()))

if __name__ == "__main__":
    unittest.main()
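
A note on running it locally: from the repository root, `python -m unittest tests.unit.modules.test_aggregator_ignore_labels` should execute just this module, though the exact invocation depends on how the project's test suite is wired up.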