author     Weilin Wang <[email protected]>            2024-05-22 20:42:54 +0000
committer  Arnaldo Carvalho de Melo <[email protected]>  2024-07-31 19:58:18 +0000
commit     4ed0f392e7dbd2e90a903bdd77d1f6e61b7d3073 (patch)
tree       2a5319ee6e2c8ca2c6f0dc9b9b03979c95286030 /tools/perf/tests/shell/lib/perf_metric_validation.py
parent     perf ftrace profile: Add -s/--sort option (diff)
perf test: make metric validation test return early when there is no metric supported on the test system
Add a check to make the metric validation test return early when
`perf list metric` does not output any metric. This can happen when
NO_JEVENTS=1 is set or on a system where no metrics are supported.

Signed-off-by: Weilin Wang <[email protected]>
Tested-by: Ian Rogers <[email protected]>
Cc: Adrian Hunter <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Caleb Biggers <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Kan Liang <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Perry Taylor <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Samantha Alt <[email protected]>
Link: https://lore.kernel.org/lkml/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
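For context, the first hunk below trades plain indexing for dict.get(). A minimal, self-contained sketch of why that matters (results_by_run is a hypothetical stand-in for Validator.results, not code from the patch):

    # Hypothetical stand-in for Validator.results, which the patch treats
    # as a dict keyed by run index.
    results_by_run = {}

    # Before the patch: indexing a missing key raises KeyError.
    try:
        results_by_run[0]
    except KeyError:
        print("plain indexing raises when no data was collected")

    # After the patch: .get() returns None for a missing key, so callers
    # can test the return value and bail out early instead of crashing.
    results = results_by_run.get(0)
    if not results:
        print("no results for this run index; returning early")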
Diffstat (limited to 'tools/perf/tests/shell/lib/perf_metric_validation.py')
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index a2d235252183..0b94216c9c46 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -95,7 +95,7 @@ class Validator:
                       indent=4)
 
     def get_results(self, idx: int = 0):
-        return self.results[idx]
+        return self.results.get(idx)
 
     def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
         """
@@ -173,7 +173,10 @@ class Validator:
         pcnt = 0
         tcnt = 0
         rerun = list()
-        for name, val in self.get_results().items():
+        results = self.get_results()
+        if not results:
+            return
+        for name, val in results.items():
             if val < 0:
                 negmetric[name] = val
                 rerun.append(name)
@@ -532,6 +535,9 @@ class Validator:
         '''
         if not self.collectlist:
             self.parse_perf_metrics()
+        if not self.metrics:
+            print("No metric found for testing")
+            return 0
         self.create_rules()
         for i in range(0, len(self.workloads)):
             self.wlidx = i
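To see the condition the last hunk guards against, one could check whether `perf list metric` prints anything at all. The sketch below is an assumption about how such a check might look; list_metrics() is a hypothetical helper, not the test library's actual parse_perf_metrics, and it assumes a `perf` binary is on PATH:

    #!/usr/bin/env python3
    import subprocess

    def list_metrics() -> list:
        """Return the lines printed by `perf list metric`, possibly empty."""
        out = subprocess.run(["perf", "list", "metric"],
                             capture_output=True, text=True).stdout
        return [line.strip() for line in out.splitlines() if line.strip()]

    if __name__ == "__main__":
        metrics = list_metrics()
        if not metrics:
            # Mirrors the patch: report and exit successfully rather than
            # fail on a system with no supported metrics (e.g. NO_JEVENTS=1).
            print("No metric found for testing")
            raise SystemExit(0)
        print(f"{len(metrics)} metric lines found")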