author     Weilin Wang <weilin.wang@intel.com>         2024-05-22 13:42:54 -0700
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2024-07-31 16:58:18 -0300
commit     4ed0f392e7dbd2e90a903bdd77d1f6e61b7d3073 (patch)
tree       2a5319ee6e2c8ca2c6f0dc9b9b03979c95286030 /tools
parent     74ae366c37b71b46be7f2fa45fa4b2c9c6708fbe (diff)
perf test: make metric validation test return early when there is no metric supported on the test system
Add a check so the metric validation test returns early when 'perf list metric' does not output any metric. This can happen when NO_JEVENTS=1 is set, or on a system where no metric is supported.

Signed-off-by: Weilin Wang <weilin.wang@intel.com>
Tested-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Samantha Alt <samantha.alt@intel.com>
Link: https://lore.kernel.org/lkml/20240522204254.1841420-1-weilin.wang@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
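The situation being handled is easy to picture: with NO_JEVENTS=1, or on hardware with no metric support, 'perf list metric' prints nothing, so the validator has nothing to check. As a rough standalone sketch (not the test's actual code; the list_metrics() helper and the bare 'perf' binary name are assumptions for illustration), the early exit looks like this:

import subprocess

def list_metrics(perf: str = "perf") -> list[str]:
    # Illustrative helper: run 'perf list metric' and return the
    # non-empty lines of its output.
    out = subprocess.run([perf, "list", "metric"],
                         capture_output=True, text=True).stdout
    return [line.strip() for line in out.splitlines() if line.strip()]

def main() -> int:
    if not list_metrics():
        # Nothing to validate (e.g. a NO_JEVENTS=1 build); exit with
        # success so the harness does not report a spurious failure.
        print("No metric found for testing")
        return 0
    # ... real validation would continue here ...
    return 0

if __name__ == "__main__":
    raise SystemExit(main())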
Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index a2d235252183..0b94216c9c46 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -95,7 +95,7 @@ class Validator:
                       indent=4)
 
     def get_results(self, idx: int = 0):
-        return self.results[idx]
+        return self.results.get(idx)
 
     def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
         """
@@ -173,7 +173,10 @@ class Validator:
         pcnt = 0
         tcnt = 0
         rerun = list()
-        for name, val in self.get_results().items():
+        results = self.get_results()
+        if not results:
+            return
+        for name, val in results.items():
             if val < 0:
                 negmetric[name] = val
                 rerun.append(name)
@@ -532,6 +535,9 @@ class Validator:
         '''
         if not self.collectlist:
             self.parse_perf_metrics()
+        if not self.metrics:
+            print("No metric found for testing")
+            return 0
         self.create_rules()
         for i in range(0, len(self.workloads)):
             self.wlidx = i
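Taken together, the three hunks form one pattern: read results defensively with dict.get(), skip the accounting loop when nothing was collected, and return success from the test entry point when metric parsing finds nothing at all. A condensed, self-contained sketch of that shape (simplified stand-in names, not the real Validator) could look like:

class MiniValidator:
    # Simplified stand-in for the real Validator, for illustration only.
    def __init__(self, metrics=None):
        self.metrics = metrics or []   # would be filled by metric parsing
        self.results = {}              # per-run results, keyed by run index

    def get_results(self, idx: int = 0):
        # dict.get() returns None instead of raising KeyError when no
        # results exist, so callers can cheaply test the return value.
        return self.results.get(idx)

    def check_results(self):
        results = self.get_results()
        if not results:
            return                     # nothing collected, nothing to count
        for name, val in results.items():
            print(name, val)

    def test(self) -> int:
        if not self.metrics:
            print("No metric found for testing")
            return 0                   # having nothing to test is not a failure
        self.check_results()
        return 0

print(MiniValidator().test())          # prints the message, then 0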