[PATCH v1 2/3] perf test: Add allow list for metrics known to fail

From: Weilin Wang
Date: Tue Jun 06 2023 - 16:25:06 EST


Add an allow list for metrics that are known to fail. Some metrics are very
likely to fail due to multiplexing or other errors, so put all of these flaky
metrics on the allow list and drop the rules that reference them from
validation.

Signed-off-by: Weilin Wang <weilin.wang@xxxxxxxxx>
---
.../tests/shell/lib/perf_metric_validation.py | 31 ++++++++++++++++---
.../lib/perf_metric_validation_rules.json | 11 +++++++
2 files changed, 38 insertions(+), 4 deletions(-)

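For reviewers: a minimal standalone sketch of the rule filtering this patch
introduces, mirroring Validator.remove_unsupported_rules() in the diff below.
The metric and rule names here are made up for illustration; the real allow
list lives in perf_metric_validation_rules.json.

# Sketch of the allow-list filtering (illustrative data only): a rule is
# kept only if none of its metrics appear on the allow list.

def remove_unsupported_rules(rules, metrics, allowlist):
    # Allow-listed metrics are removed from the supported set first,
    # as the patch does via self.metrics.discard().
    supported = set(metrics) - set(allowlist)
    new_rules = []
    for rule in rules:
        # Keep the rule only if every metric it references is still supported.
        if all(m["Name"] in supported for m in rule["Metrics"]):
            new_rules.append(rule)
    return new_rules

rules = [
    {"RuleIndex": 1, "Metrics": [{"Name": "metric_a"}, {"Name": "metric_b"}]},
    {"RuleIndex": 2, "Metrics": [{"Name": "metric_a"}, {"Name": "flaky_metric"}]},
]
metrics = ["metric_a", "metric_b", "flaky_metric"]

# Rule 2 references the flaky metric, so only rule 1 survives:
print(remove_unsupported_rules(rules, metrics, ["flaky_metric"]))
# [{'RuleIndex': 1, 'Metrics': [{'Name': 'metric_a'}, {'Name': 'metric_b'}]}]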
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 7bc5b9a5f62f..7d789d7e2807 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -12,7 +12,7 @@ class Validator:
         self.reportfname = reportfname
         self.rules = None
         self.collectlist=metrics
-        self.metrics = set()
+        self.metrics = set(metrics)
         self.tolerance = t
 
         self.workloads = [x for x in workload.split(",") if x]
@@ -148,6 +148,7 @@ class Validator:
                 self.errlist.append("Metric '%s' is not collected"%(name))
             elif val < 0:
                 negmetric.add("{0}(={1:.4f})".format(name, val))
+                self.collectlist[0].append(name)
             else:
                 pcnt += 1
             tcnt += 1
@@ -266,6 +267,7 @@ class Validator:
                 passcnt += 1
             else:
                 faillist.append({'MetricName':m['Name'], 'CollectedValue':result})
+                self.collectlist[0].append(m['Name'])
 
         self.totalcnt += totalcnt
         self.passedcnt += passcnt
@@ -348,7 +350,7 @@ class Validator:
if rule["TestType"] == "RelationshipTest":
metrics = [m["Name"] for m in rule["Metrics"]]
if not any(m not in collectlist[0] for m in metrics):
- collectlist[rule["RuleIndex"]] = set(metrics)
+ collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]

for idx, metrics in collectlist.items():
if idx == 0: wl = "sleep 0.5".split()
@@ -356,9 +358,12 @@ class Validator:
             for metric in metrics:
                 command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
                 command.extend(wl)
+                print(" ".join(command))
                 cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
                 data = [x+'}' for x in cmd.stderr.split('}\n') if x]
                 self.convert(data, idx)
+        self.collectlist = dict()
+        self.collectlist[0] = list()
     # End of Collector and Converter
 
     # Start of Rule Generator
@@ -386,6 +391,20 @@ class Validator:

         return
 
+    def remove_unsupported_rules(self, rules, allowlist: set = None):
+        for m in allowlist:
+            self.metrics.discard(m)
+        new_rules = []
+        for rule in rules:
+            add_rule = True
+            for m in rule["Metrics"]:
+                if m["Name"] not in self.metrics:
+                    add_rule = False
+                    break
+            if add_rule:
+                new_rules.append(rule)
+        return new_rules
+
     def create_rules(self):
         """
         Create full rules which includes:
@@ -394,7 +413,10 @@ class Validator:

         Reindex all the rules to avoid repeated RuleIndex
         """
-        self.rules = self.read_json(self.rulefname)['RelationshipRules']
+        data = self.read_json(self.rulefname)
+        rules = data['RelationshipRules']
+        allowlist = set(data['AllowList'])
+        self.rules = self.remove_unsupported_rules(rules, allowlist)
         pctgrule = {'RuleIndex':0,
                     'TestType':'SingleMetricTest',
                     'RangeLower':'0',
@@ -453,7 +475,8 @@ class Validator:

         The final report is written into a JSON file.
         '''
-        self.parse_perf_metrics()
+        if not self.collectlist:
+            self.parse_perf_metrics()
         self.create_rules()
         for i in range(0, len(self.workloads)):
             self._init_data()
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
index debaa910da9f..50307a9caf06 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
+++ b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
@@ -1,4 +1,15 @@
 {
+    "AllowList": [
+        "tsx_aborted_cycles",
+        "tsx_transactional_cycles",
+        "C2_Pkg_Residency",
+        "C6_Pkg_Residency",
+        "C1_Core_Residency",
+        "C6_Core_Residency",
+        "tma_false_sharing",
+        "tma_remote_cache",
+        "tma_contested_accesses"
+    ],
     "RelationshipRules": [
         {
             "RuleIndex": 1,
--
2.39.1