Diffstat (limited to 'tools/perf/tests/shell')
28 files changed, 398 insertions, 143 deletions
diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh
index 1590a37363de..16a1ccd06089 100755
--- a/tools/perf/tests/shell/annotate.sh
+++ b/tools/perf/tests/shell/annotate.sh
@@ -35,54 +35,78 @@ trap_cleanup() {
 trap trap_cleanup EXIT TERM INT
 
 test_basic() {
-  echo "Basic perf annotate test"
-  if ! perf record -o "${perfdata}" ${testprog} 2> /dev/null
+  mode=$1
+  echo "${mode} perf annotate test"
+  if [ "x${mode}" == "xBasic" ]
   then
-    echo "Basic annotate [Failed: perf record]"
+    perf record -o "${perfdata}" ${testprog} 2> /dev/null
+  else
+    perf record -o - ${testprog} 2> /dev/null > "${perfdata}"
+  fi
+  if [ "x$?" != "x0" ]
+  then
+    echo "${mode} annotate [Failed: perf record]"
     err=1
     return
   fi
 
   # Generate the annotated output file
-  perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null | head -250 > "${perfout}"
+  if [ "x${mode}" == "xBasic" ]
+  then
+    perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null > "${perfout}"
+  else
+    perf annotate --no-demangle -i - --stdio 2> /dev/null < "${perfdata}" > "${perfout}"
+  fi
 
   # check if it has the target symbol
-  if ! grep "${testsym}" "${perfout}"
+  if ! head -250 "${perfout}" | grep -q "${testsym}"
   then
-    echo "Basic annotate [Failed: missing target symbol]"
+    echo "${mode} annotate [Failed: missing target symbol]"
     err=1
     return
   fi
 
   # check if it has the disassembly lines
-  if ! grep "${disasm_regex}" "${perfout}"
+  if ! head -250 "${perfout}" | grep -q "${disasm_regex}"
   then
-    echo "Basic annotate [Failed: missing disasm output from default disassembler]"
+    echo "${mode} annotate [Failed: missing disasm output from default disassembler]"
     err=1
     return
   fi
 
   # check again with a target symbol name
-  if ! perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null | \
-       head -250 | grep -m 3 "${disasm_regex}"
+  if [ "x${mode}" == "xBasic" ]
   then
-    echo "Basic annotate [Failed: missing disasm output when specifying the target symbol]"
+    perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null > "${perfout}"
+  else
+    perf annotate --no-demangle -i - "${testsym}" 2> /dev/null < "${perfdata}" > "${perfout}"
+  fi
+
+  if ! head -250 "${perfout}"| grep -q -m 3 "${disasm_regex}"
+  then
+    echo "${mode} annotate [Failed: missing disasm output when specifying the target symbol]"
     err=1
     return
   fi
 
   # check one more with external objdump tool (forced by --objdump option)
-  if ! perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null | \
-       head -250 | grep -m 3 "${disasm_regex}"
+  if [ "x${mode}" == "xBasic" ]
+  then
+    perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null > "${perfout}"
+  else
+    perf annotate --no-demangle -i - "${testsym}" 2> /dev/null < "${perfdata}" > "${perfout}"
+  fi
+  if ! head -250 "${perfout}" | grep -q -m 3 "${disasm_regex}"
   then
-    echo "Basic annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
+    echo "${mode} annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
     err=1
     return
   fi
-  echo "Basic annotate test [Success]"
+  echo "${mode} annotate test [Success]"
 }
 
-test_basic
+test_basic Basic
+test_basic Pipe
 
 cleanup
 exit $err
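The new Pipe mode runs the same assertions through perf's pipe-mode I/O (perf record -o - / perf annotate -i -) instead of a seekable perf.data file. Outside the harness, the path it exercises looks roughly like this sketch (perf test -w noploop stands in for whatever ${testprog} is set to in the script):

  # Pipe-mode sketch: stream the profile through stdout/stdin.
  perf record -o - perf test -w noploop 2> /dev/null > pipe.data
  perf annotate --no-demangle -i - --stdio < pipe.data | head -250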
diff --git a/tools/perf/tests/shell/base_report/setup.sh b/tools/perf/tests/shell/base_report/setup.sh
index b03501b2e8fc..8634e7e0dda6 100755
--- a/tools/perf/tests/shell/base_report/setup.sh
+++ b/tools/perf/tests/shell/base_report/setup.sh
@@ -15,6 +15,8 @@
 # include working environment
 . ../common/init.sh
 
+TEST_RESULT=0
+
 test -d "$HEADER_TAR_DIR" || mkdir -p "$HEADER_TAR_DIR"
 
 SW_EVENT="cpu-clock"
@@ -26,7 +28,21 @@ PERF_EXIT_CODE=$?
 CHECK_EXIT_CODE=$?
 
 print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data file"
-TEST_RESULT=$?
+(( TEST_RESULT += $? ))
+
+# Some minimal parallel workload.
+$CMD_PERF record --latency -o $CURRENT_TEST_DIR/perf.data.1 bash -c "for i in {1..100} ; do cat /proc/cpuinfo 1> /dev/null & done; sleep 1" 2> $LOGS_DIR/setup-latency.log
+PERF_EXIT_CODE=$?
+
+echo ==================
+cat $LOGS_DIR/setup-latency.log
+echo ==================
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup-latency.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data.1 file"
+(( TEST_RESULT += $? ))
 
 print_overall_results $TEST_RESULT
 exit $?
diff --git a/tools/perf/tests/shell/base_report/test_basic.sh b/tools/perf/tests/shell/base_report/test_basic.sh
index 2398eba4d3fd..adfd8713b8f8 100755
--- a/tools/perf/tests/shell/base_report/test_basic.sh
+++ b/tools/perf/tests/shell/base_report/test_basic.sh
@@ -183,6 +183,58 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
 (( TEST_RESULT += $? ))
 
 
+### latency and parallelism
+
+# Record with --latency should record with context switches.
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data.1 --stdio --header-only > $LOGS_DIR/latency_header.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl ", context_switch = 1, " < $LOGS_DIR/latency_header.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency header"
+(( TEST_RESULT += $? ))
+
+
+# The default report for latency profile should show Overhead and Latency fields (in that order).
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_default.log 2> $LOGS_DIR/latency_default.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Overhead Latency Command" < $LOGS_DIR/latency_default.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_default.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "default report for latency profile"
+(( TEST_RESULT += $? ))
+
+
+# The latency report for latency profile should show Latency and Overhead fields (in that order).
+$CMD_PERF report --latency --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_latency.log 2> $LOGS_DIR/latency_latency.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Latency Overhead Command" < $LOGS_DIR/latency_latency.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_latency.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency report for latency profile"
+(( TEST_RESULT += $? ))
+
+
+# Ensure parallelism histogram with parallelism filter does not fail/crash.
+$CMD_PERF report --hierarchy --sort latency,parallelism,comm,symbol --parallelism=1,2 --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/parallelism_hierarchy.log 2> $LOGS_DIR/parallelism_hierarchy.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Latency Parallelism / Command / Symbol" < $LOGS_DIR/parallelism_hierarchy.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/parallelism_hierarchy.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "parallelism histogram"
+(( TEST_RESULT += $? ))
+
+
 # TODO: $CMD_PERF report -n --showcpuutilization -TUxDg 2> 01.log
 
 # print overall results
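For reference, the new checks reduce to recording with --latency (which also records context switches, as the header check confirms) and then reporting in the latency and parallelism views. A minimal sketch, with the parallel workload simplified:

  perf record --latency -o perf.data.1 -- bash -c 'for i in {1..100}; do cat /proc/cpuinfo > /dev/null & done; sleep 1'
  perf report --stdio -i perf.data.1            # columns: Overhead, then Latency
  perf report --latency --stdio -i perf.data.1  # columns: Latency, then Overhead
  perf report --hierarchy --sort latency,parallelism,comm,symbol --parallelism=1,2 --stdio -i perf.data.1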
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
index 75cf084a927d..577760046772 100644
--- a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
@@ -26,3 +26,5 @@ skip:
 	mov	x0, #0
 	mov	x8, #93 // __NR_exit syscall
 	svc	#0
+
+.section .note.GNU-stack, "", @progbits
diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh
index 14b87af88703..e05a5dc49479 100755
--- a/tools/perf/tests/shell/diff.sh
+++ b/tools/perf/tests/shell/diff.sh
@@ -39,13 +39,13 @@ make_data() {
   file="$1"
   if ! perf record -o "${file}" ${testprog} 2> /dev/null
   then
-    echo "Workload record [Failed record]"
+    echo "Workload record [Failed record]" >&2
     echo 1
     return
   fi
   if ! perf report -i "${file}" -q | grep -q "${testsym}"
   then
-    echo "Workload record [Failed missing output]"
+    echo "Workload record [Failed missing output]" >&2
     echo 1
     return
   fi
@@ -55,12 +55,12 @@ make_data() {
 test_two_files() {
   echo "Basic two file diff test"
   err=$(make_data "${perfdata1}")
-  if [ $err != 0 ]
+  if [ "$err" != 0 ]
   then
     return
   fi
   err=$(make_data "${perfdata2}")
-  if [ $err != 0 ]
+  if [ "$err" != 0 ]
   then
     return
   fi
@@ -77,12 +77,12 @@ test_two_files() {
 test_three_files() {
   echo "Basic three file diff test"
   err=$(make_data "${perfdata1}")
-  if [ $err != 0 ]
+  if [ "$err" != 0 ]
   then
     return
   fi
   err=$(make_data "${perfdata2}")
-  if [ $err != 0 ]
+  if [ "$err" != 0 ]
   then
     return
   fi
diff --git a/tools/perf/tests/shell/lib/attr.py b/tools/perf/tests/shell/lib/attr.py
index 3db9a7d78715..bfccc727d9b2 100644
--- a/tools/perf/tests/shell/lib/attr.py
+++ b/tools/perf/tests/shell/lib/attr.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-from __future__ import print_function
-
+import configparser
 import os
 import sys
 import glob
@@ -13,11 +12,6 @@ import re
 import shutil
 import subprocess
 
-try:
-    import configparser
-except ImportError:
-    import ConfigParser as configparser
-
 def data_equal(a, b):
     # Allow multiple values in assignment separated by '|'
     a_list = a.split('|')
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index b066d721f897..9e772a89ce38 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -19,6 +19,7 @@ ap.add_argument('--per-cluster', action='store_true')
 ap.add_argument('--per-die', action='store_true')
 ap.add_argument('--per-node', action='store_true')
 ap.add_argument('--per-socket', action='store_true')
+ap.add_argument('--metric-only', action='store_true')
 ap.add_argument('--file', type=argparse.FileType('r'), default=sys.stdin)
 args = ap.parse_args()
 
@@ -64,6 +65,8 @@ def check_json_output(expected_items):
       'socket': lambda x: True,
       'thread': lambda x: True,
       'unit': lambda x: True,
+      'insn per cycle': lambda x: isfloat(x),
+      'GHz': lambda x: True, # FIXME: it seems unintended for --metric-only
   }
   input = '[\n' + ','.join(Lines) + '\n]'
   for item in json.loads(input):
@@ -78,6 +81,8 @@ def check_json_output(expected_items):
         pass
       elif count - 1 in expected_items and 'metric-threshold' in item:
         pass
+      elif count in expected_items and 'insn per cycle' in item:
+        pass
       elif count not in expected_items:
         raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
                            f' in \'{item}\'')
@@ -95,6 +100,8 @@ try:
     expected_items = [6, 8]
   elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
     expected_items = [7, 9]
+  elif args.metric_only:
+    expected_items = [1, 2]
   else:
     # If no option is specified, don't check the number of items.
     expected_items = -1
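The [1, 2] expected item counts reflect that a --metric-only JSON record carries only metric fields (plus, per the FIXME above, an apparently unintended GHz field). The wiring that the stat+json_output.sh change below adds boils down to something like this (paths assumed relative to the source tree):

  # Generate metric-only JSON output, then lint its field counts.
  perf stat -j --metric-only -e instructions,cycles -o stat.json true
  python3 tools/perf/tests/shell/lib/perf_json_output_lint.py --metric-only --file stat.json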
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 9a176ceae4a3..4d4aac547f01 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -148,6 +148,14 @@ check_per_socket()
   echo "[Success]"
 }
 
+check_metric_only()
+{
+  echo -n "Checking $1 output: metric only "
+  perf stat --metric-only $2 -e instructions,cycles true
+  commachecker --metric-only
+  echo "[Success]"
+}
+
 # The perf stat options for per-socket, per-core, per-die
 # and -A ( no_aggr mode ) uses the info fetched from this
 # directory: "/sys/devices/system/cpu/cpu*/topology". For
diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh
index 7b1bfd0f888f..3863df16c19b 100755
--- a/tools/perf/tests/shell/perftool-testsuite_probe.sh
+++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh
@@ -2,6 +2,7 @@
 # perftool-testsuite_probe (exclusive)
 # SPDX-License-Identifier: GPL-2.0
 
+[ "$(id -u)" = 0 ] || exit 2
 test -d "$(dirname "$0")/base_probe" || exit 2
 cd "$(dirname "$0")/base_probe" || exit 2
 status=0
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index 0c5aacc446b3..c51a32931af6 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -8,6 +8,7 @@
 . "$(dirname $0)"/lib/probe.sh
 
 skip_if_no_perf_probe || exit 2
+[ "$(id -u)" = 0 ] || exit 2
 
 # shellcheck source=lib/probe_vfs_getname.sh
 . "$(dirname $0)"/lib/probe_vfs_getname.sh
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index d5e5193cceb6..c4bab5b5cc59 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -105,6 +105,7 @@ delete_libc_inet_pton_event() {
 
 # Check for IPv6 interface existence
 ip a sh lo | grep -F -q inet6 || exit 2
+[ "$(id -u)" = 0 ] || exit 2
 
 skip_if_no_perf_probe && \
 add_libc_inet_pton_event && \
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index 5940fdc1df37..fd5b10d46915 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -13,6 +13,7 @@
 . "$(dirname "$0")/lib/probe.sh"
 
 skip_if_no_perf_probe || exit 2
+[ "$(id -u)" = 0 ] || exit 2
 
 # shellcheck source=lib/probe_vfs_getname.sh
 . "$(dirname "$0")/lib/probe_vfs_getname.sh"
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 0fc7a909ae9b..ba8d873d3ca7 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -231,7 +231,7 @@ test_cgroup() {
 
 test_leader_sampling() {
   echo "Basic leader sampling test"
-  if ! perf record -o "${perfdata}" -e "{instructions,instructions}:Su" -- \
+  if ! perf record -o "${perfdata}" -e "{cycles,cycles}:Su" -- \
     perf test -w brstack 2> /dev/null
   then
     echo "Leader sampling [Failed record]"
@@ -243,15 +243,15 @@ test_leader_sampling() {
   while IFS= read -r line
   do
     # Check if the two instruction counts are equal in each record
-    instructions=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="instructions:") print $(i-1)}')
-    if [ $(($index%2)) -ne 0 ] && [ ${instructions}x != ${prev_instructions}x ]
+    cycles=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="cycles:") print $(i-1)}')
+    if [ $(($index%2)) -ne 0 ] && [ ${cycles}x != ${prev_cycles}x ]
     then
-      echo "Leader sampling [Failed inconsistent instructions count]"
+      echo "Leader sampling [Failed inconsistent cycles count]"
       err=1
       return
     fi
     index=$(($index+1))
-    prev_instructions=$instructions
+    prev_cycles=$cycles
   done < $script_output
   echo "Basic leader sampling test [Success]"
 }
@@ -273,27 +273,42 @@ test_topdown_leader_sampling() {
 }
 
 test_precise_max() {
+  local -i skipped=0
+
   echo "precise_max attribute test"
-  if ! perf stat -e "cycles,instructions" true 2> /dev/null
+  # Just to make sure event cycles is supported for sampling
+  if perf record -o "${perfdata}" -e "cycles" true 2> /dev/null
   then
-    echo "precise_max attribute [Skipped no hardware events]"
-    return
+    if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+    then
+      echo "precise_max attribute [Failed cycles:P event]"
+      err=1
+      return
+    fi
+  else
+    echo "precise_max attribute [Skipped no cycles:P event]"
+    ((skipped+=1))
   fi
-  # Just to make sure it doesn't fail
-  if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+  # On s390 event instructions is not supported for perf record
+  if perf record -o "${perfdata}" -e "instructions" true 2> /dev/null
   then
-    echo "precise_max attribute [Failed cycles:P event]"
-    err=1
-    return
+    # On AMD, cycles and instructions events are treated differently
+    if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+    then
+      echo "precise_max attribute [Failed instructions:P event]"
+      err=1
+      return
+    fi
+  else
+    echo "precise_max attribute [Skipped no instructions:P event]"
+    ((skipped+=1))
   fi
-  # On AMD, cycles and instructions events are treated differently
-  if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+  if [ $skipped -eq 2 ]
   then
-    echo "precise_max attribute [Failed instructions:P event]"
-    err=1
-    return
+    echo "precise_max attribute [Skipped no hardware events]"
+  else
+    echo "precise_max attribute test [Success]"
   fi
-  echo "precise_max attribute test [Success]"
 }
 
 # raise the limit of file descriptors to minimum
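The reworked test_precise_max first checks that the plain event can be sampled at all before demanding its precise_max (:P) variant, and only skips outright when neither hardware event exists. Standalone, the probes it performs are simply:

  perf record -o /dev/null -e cycles true          # sanity: can cycles be sampled at all?
  perf record -o /dev/null -e cycles:P true        # then precise_max must also work
  perf record -o /dev/null -e instructions true    # may be unsupported, e.g. on s390
  perf record -o /dev/null -e instructions:P true  # AMD treats this differently from cycles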
perf record -o "${perfdata}" -e "{cycles,cycles}:Su" -- \ perf test -w brstack 2> /dev/null then echo "Leader sampling [Failed record]" @@ -243,15 +243,15 @@ test_leader_sampling() { while IFS= read -r line do # Check if the two instruction counts are equal in each record - instructions=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="instructions:") print $(i-1)}') - if [ $(($index%2)) -ne 0 ] && [ ${instructions}x != ${prev_instructions}x ] + cycles=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="cycles:") print $(i-1)}') + if [ $(($index%2)) -ne 0 ] && [ ${cycles}x != ${prev_cycles}x ] then - echo "Leader sampling [Failed inconsistent instructions count]" + echo "Leader sampling [Failed inconsistent cycles count]" err=1 return fi index=$(($index+1)) - prev_instructions=$instructions + prev_cycles=$cycles done < $script_output echo "Basic leader sampling test [Success]" } @@ -273,27 +273,42 @@ test_topdown_leader_sampling() { } test_precise_max() { + local -i skipped=0 + echo "precise_max attribute test" - if ! perf stat -e "cycles,instructions" true 2> /dev/null + # Just to make sure event cycles is supported for sampling + if perf record -o "${perfdata}" -e "cycles" true 2> /dev/null then - echo "precise_max attribute [Skipped no hardware events]" - return + if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null + then + echo "precise_max attribute [Failed cycles:P event]" + err=1 + return + fi + else + echo "precise_max attribute [Skipped no cycles:P event]" + ((skipped+=1)) fi - # Just to make sure it doesn't fail - if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null + # On s390 event instructions is not supported for perf record + if perf record -o "${perfdata}" -e "instructions" true 2> /dev/null then - echo "precise_max attribute [Failed cycles:P event]" - err=1 - return + # On AMD, cycles and instructions events are treated differently + if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null + then + echo "precise_max attribute [Failed instructions:P event]" + err=1 + return + fi + else + echo "precise_max attribute [Skipped no instructions:P event]" + ((skipped+=1)) fi - # On AMD, cycles and instructions events are treated differently - if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null + if [ $skipped -eq 2 ] then - echo "precise_max attribute [Failed instructions:P event]" - err=1 - return + echo "precise_max attribute [Skipped no hardware events]" + else + echo "precise_max attribute test [Success]" fi - echo "precise_max attribute test [Success]" } # raise the limit of file descriptors to minimum diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh index 1b58ccc1fd88..4d6c3c1b7fb9 100755 --- a/tools/perf/tests/shell/record_bpf_filter.sh +++ b/tools/perf/tests/shell/record_bpf_filter.sh @@ -89,7 +89,7 @@ test_bpf_filter_fail() { test_bpf_filter_group() { echo "Group bpf-filter test" - if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \ + if ! perf record -e task-clock --filter 'period > 1000, ip > 0' \ -o /dev/null true 2>/dev/null then echo "Group bpf-filter test [Failed should succeed]" @@ -97,7 +97,7 @@ test_bpf_filter_group() { return fi - if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \ + if ! 
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index fc2d8cc6e5e0..7a6f6e177402 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -44,6 +44,7 @@ function commachecker()
   ;; "--per-die")     exp=8
   ;; "--per-cluster") exp=8
   ;; "--per-cache")   exp=8
+  ;; "--metric-only") exp=2
   esac
 
   while read line
@@ -75,6 +76,7 @@ check_interval "CSV" "$perf_cmd"
 check_event "CSV" "$perf_cmd"
 check_per_thread "CSV" "$perf_cmd"
 check_per_node "CSV" "$perf_cmd"
+check_metric_only "CSV" "$perf_cmd"
 if [ $skip_test -ne 1 ]
 then
   check_system_wide_no_aggr "CSV" "$perf_cmd"
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 6b630d33c328..a4f257ea839e 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -173,6 +173,14 @@ check_per_socket()
   echo "[Success]"
 }
 
+check_metric_only()
+{
+  echo -n "Checking json output: metric only "
+  perf stat -j --metric-only -e instructions,cycles -o "${stat_output}" true
+  $PYTHON $pythonchecker --metric-only --file "${stat_output}"
+  echo "[Success]"
+}
+
 # The perf stat options for per-socket, per-core, per-die
 # and -A ( no_aggr mode ) uses the info fetched from this
 # directory: "/sys/devices/system/cpu/cpu*/topology". For
@@ -207,6 +215,7 @@ check_interval
 check_event
 check_per_thread
 check_per_node
+check_metric_only
 if [ $skip_test -ne 1 ]
 then
   check_system_wide_no_aggr
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index 0f7967be60af..6fee67693ba7 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -30,6 +30,7 @@ trap trap_cleanup EXIT TERM INT
 function commachecker()
 {
   local prefix=1
+  local -i metric_only=0
 
   case "$1"
   in "--interval")    prefix=2
@@ -41,6 +42,7 @@ function commachecker()
   ;; "--per-die")     prefix=3
   ;; "--per-cache")   prefix=3
   ;; "--per-cluster") prefix=3
+  ;; "--metric-only") metric_only=1
   esac
 
   while read line
@@ -60,6 +62,9 @@ function commachecker()
     x=${main_body%#*}
     [ "$x" = "" ] && continue
 
+    # Check metric only - if it has a non-empty result
+    [ $metric_only -eq 1 ] && return 0
+
     # Skip metrics without event name
     y=${main_body#*#}
     for i in "${!skip_metric[@]}"; do
@@ -84,6 +89,8 @@ function commachecker()
       exit 1;
     }
   done < "${stat_output}"
+
+  [ $metric_only -eq 1 ] && exit 1
   return 0
 }
 
@@ -95,6 +102,7 @@ check_system_wide "STD" "$perf_cmd"
 check_interval "STD" "$perf_cmd"
 check_per_thread "STD" "$perf_cmd"
 check_per_node "STD" "$perf_cmd"
+check_metric_only "STD" "$perf_cmd"
 if [ $skip_test -ne 1 ]
 then
   check_system_wide_no_aggr "STD" "$perf_cmd"
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 68323d636fb7..8a100a7f2dc1 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # perf stat tests
 # SPDX-License-Identifier: GPL-2.0
 
@@ -67,43 +67,54 @@ test_topdown_groups() {
     echo "Topdown event group test [Skipped event parsing failed]"
     return
   fi
-  if perf stat -e '{slots,topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
-  then
-    echo "Topdown event group test [Failed events not supported]"
-    err=1
-    return
-  fi
-  if perf stat -e 'instructions,topdown-retiring,slots' true 2>&1 | grep -E -q "<not supported>"
-  then
-    echo "Topdown event group test [Failed slots not reordered first in no-group case]"
-    err=1
-    return
-  fi
-  if perf stat -e '{instructions,topdown-retiring,slots}' true 2>&1 | grep -E -q "<not supported>"
-  then
-    echo "Topdown event group test [Failed slots not reordered first in single group case]"
-    err=1
-    return
-  fi
-  if perf stat -e '{instructions,slots},topdown-retiring' true 2>&1 | grep -E -q "<not supported>"
-  then
-    echo "Topdown event group test [Failed topdown metrics event not move into slots group]"
-    err=1
-    return
-  fi
-  if perf stat -e '{instructions,slots},{topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
-  then
-    echo "Topdown event group test [Failed topdown metrics group not merge into slots group]"
-    err=1
-    return
-  fi
-  if perf stat -e '{instructions,r400,r8000}' true 2>&1 | grep -E -q "<not supported>"
+  td_err=0
+  do_topdown_group_test() {
+    events=$1
+    failure=$2
+    if perf stat -e "$events" true 2>&1 | grep -E -q "<not supported>"
+    then
+      echo "Topdown event group test [Failed $failure for '$events']"
+      td_err=1
+      return
+    fi
+  }
+  do_topdown_group_test "{slots,topdown-retiring}" "events not supported"
+  do_topdown_group_test "{instructions,r400,r8000}" "raw format slots not reordered first"
+  filler_events=("instructions" "cycles"
+                 "context-switches" "faults")
+  for ((i = 0; i < ${#filler_events[@]}; i+=2))
+  do
+    filler1=${filler_events[i]}
+    filler2=${filler_events[i+1]}
+    do_topdown_group_test "$filler1,topdown-retiring,slots" \
+      "slots not reordered first in no-group case"
+    do_topdown_group_test "slots,$filler1,topdown-retiring" \
+      "topdown metrics event not reordered in no-group case"
+    do_topdown_group_test "{$filler1,topdown-retiring,slots}" \
+      "slots not reordered first in single group case"
+    do_topdown_group_test "{$filler1,slots},topdown-retiring" \
+      "topdown metrics event not move into slots group"
+    do_topdown_group_test "topdown-retiring,{$filler1,slots}" \
+      "topdown metrics event not move into slots group last"
+    do_topdown_group_test "{$filler1,slots},{topdown-retiring}" \
+      "topdown metrics group not merge into slots group"
+    do_topdown_group_test "{topdown-retiring},{$filler1,slots}" \
+      "topdown metrics group not merge into slots group last"
+    do_topdown_group_test "{$filler1,slots},$filler2,topdown-retiring" \
+      "non-adjacent topdown metrics group not move into slots group"
+    do_topdown_group_test "$filler2,topdown-retiring,{$filler1,slots}" \
+      "non-adjacent topdown metrics group not move into slots group last"
+    do_topdown_group_test "{$filler1,slots},{$filler2,topdown-retiring}" \
+      "metrics group not merge into slots group"
+    do_topdown_group_test "{$filler1,topdown-retiring},{$filler2,slots}" \
+      "metrics group not merge into slots group last"
+  done
+  if test "$td_err" -eq 0
   then
-    echo "Topdown event group test [Failed raw format slots not reordered first]"
-    err=1
-    return
+    echo "Topdown event group test [Success]"
+  else
+    err="$td_err"
   fi
-  echo "Topdown event group test [Success]"
 }
 
 test_topdown_weak_groups() {
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index 73e9347e88a9..ee817c66da06 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -20,7 +20,13 @@ for m in $(perf list --raw-dump metrics); do
   result_err=$?
   if [[ $result_err -gt 0 ]]
   then
-    if [[ "$result" =~ \
+    if [[ "$result" =~ "Cannot resolve IDs for" ]]
+    then
+      echo "Metric contains missing events"
+      echo $result
+      err=1 # Fail
+      continue
+    elif [[ "$result" =~ \
          "Access to performance monitoring and observability operations is limited" ]]
     then
       echo "Permission failure"
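Every event string the loop generates probes the same invariant: on Intel topdown hardware, perf stat must rewrite the event list so that slots leads its group and the topdown metric events end up inside that group, regardless of how the user arranged them; "<not supported>" in the output means the reordering failed. One representative case, runnable by hand on a topdown-capable CPU:

  perf stat -e '{instructions,slots},topdown-retiring' true 2>&1 | grep '<not supported>' \
      && echo 'reorder bug: topdown-retiring was not pulled into the slots group'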
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
index 8b148b300be1..9c466c0efa85 100755
--- a/tools/perf/tests/shell/stat_all_pmu.sh
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -2,7 +2,6 @@
 # perf all PMU test (exclusive)
 # SPDX-License-Identifier: GPL-2.0
 
-set -e
 err=0
 result=""
 
@@ -16,34 +15,55 @@ trap trap_cleanup EXIT TERM INT
 # Test all PMU events; however exclude parameterized ones (name contains '?')
 for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g')
 do
-  echo "Testing $p"
-  result=$(perf stat -e "$p" true 2>&1)
-  if echo "$result" | grep -q "$p"
+  echo -n "Testing $p -- "
+  output=$(perf stat -e "$p" true 2>&1)
+  stat_result=$?
+  if echo "$output" | grep -q "$p"
   then
     # Event seen in output.
-    continue
-  fi
-  if echo "$result" | grep -q "<not supported>"
-  then
-    # Event not supported, so ignore.
-    continue
+    if [ $stat_result -eq 0 ] && ! echo "$output" | grep -q "<not supported>"
+    then
+      # Event supported.
+      echo "supported"
+      continue
+    elif echo "$output" | grep -q "<not supported>"
+    then
+      # Event not supported, so ignore.
+      echo "not supported"
+      continue
+    elif echo "$output" | grep -q "No permission to enable"
+    then
+      # No permissions, so ignore.
+      echo "no permission to enable"
+      continue
+    elif echo "$output" | grep -q "Bad event name"
+    then
+      # Non-existent event.
+      echo "Error: Bad event name"
+      echo "$output"
+      err=1
+      continue
+    fi
   fi
-  if echo "$result" | grep -q "Access to performance monitoring and observability operations is limited."
+
+  if echo "$output" | grep -q "Access to performance monitoring and observability operations is limited."
   then
     # Access is limited, so ignore.
+    echo "access limited"
     continue
   fi
 
   # We failed to see the event and it is supported. Possibly the workload was
   # too small so retry with something longer.
-  result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
-  if echo "$result" | grep -q "$p"
+  output=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
+  if echo "$output" | grep -q "$p"
   then
     # Event seen in output.
+    echo "supported"
     continue
   fi
   echo "Error: event '$p' not printed in:"
-  echo "$result"
+  echo "$output"
   err=1
 done
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
index c86da0235059..bbe8277496ae 100755
--- a/tools/perf/tests/shell/test_data_symbol.sh
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -5,8 +5,6 @@
 # Leo Yan <leo.yan@linaro.org>, 2022
 
 shelldir=$(dirname "$0")
-# shellcheck source=lib/waiting.sh
-. "${shelldir}"/lib/waiting.sh
 
 # shellcheck source=lib/perf_has_symbol.sh
 . "${shelldir}"/lib/perf_has_symbol.sh
@@ -18,7 +16,7 @@ skip_if_no_mem_event() {
 
 skip_if_no_mem_event || exit 2
 
-skip_test_missing_symbol buf1
+skip_test_missing_symbol workload_datasym_buf1
 
 TEST_PROGRAM="perf test -w datasym"
 PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
@@ -26,18 +24,19 @@ ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX)
 
 check_result() {
   # The memory report format is as below:
-  #    99.92%  ...  [.] buf1+0x38
+  #    99.92%  ...  [.] workload_datasym_buf1+0x38
   result=$(perf mem report -i ${PERF_DATA} -s symbol_daddr -q 2>&1 |
-           awk '/buf1/ { print $4 }')
+           awk '/workload_datasym_buf1/ { print $4 }')
 
-  # Testing is failed if has no any sample for "buf1"
+  # Testing is failed if has no any sample for "workload_datasym_buf1"
   [ -z "$result" ] && return 1
 
   while IFS= read -r line; do
-    # The "data1" and "data2" fields in structure "buf1" have
-    # offset "0x0" and "0x38", returns failure if detect any
-    # other offset value.
-    if [ "$line" != "buf1+0x0" ] && [ "$line" != "buf1+0x38" ]; then
+    # The "data1" and "data2" fields in structure
+    # "workload_datasym_buf1" have offset "0x0" and "0x38", returns
+    # failure if detect any other offset value.
+    if [ "$line" != "workload_datasym_buf1+0x0" ] && \
+       [ "$line" != "workload_datasym_buf1+0x38" ]; then
       return 1
     fi
   done <<< "$result"
@@ -60,19 +59,10 @@ echo "Recording workload..."
 # specific CPU and test in per-CPU mode.
 is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
 if (($is_amd >= 1)); then
-  perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" &
+  perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}"
 else
-  perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" &
+  perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}"
 fi
 
-PERFPID=$!
-
-wait_for_perf_to_start ${PERFPID} "${ERR_FILE}"
-
-sleep 1
-
-kill $PERFPID
-wait $PERFPID
-
 check_result
 exit $?
diff --git a/tools/perf/tests/shell/test_stat_intel_tpebs.sh b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
index f95fc64bf0a7..a330ecdb7ba5 100755
--- a/tools/perf/tests/shell/test_stat_intel_tpebs.sh
+++ b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
@@ -3,20 +3,83 @@
 # SPDX-License-Identifier: GPL-2.0
 
 set -e
-grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
 
-# Use this event for testing because it should exist in all platforms
-event=cache-misses:R
+ParanoidAndNotRoot() {
+  [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
 
-# Hybrid platforms output like "cpu_atom/cache-misses/R", rather than as above
-alt_name=/cache-misses/R
+if ! grep -q GenuineIntel /proc/cpuinfo
+then
+  echo "Skipping non-Intel"
+  exit 2
+fi
 
-# Without this cmd option, default value or zero is returned
-#echo "Testing without --record-tpebs"
-#result=$(perf stat -e "$event" true 2>&1)
-#[[ "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+if ParanoidAndNotRoot 0
+then
+  echo "Skipping paranoid >0 and not root"
+  exit 2
+fi
 
-# In platforms that do not support TPEBS, it should execute without error.
-echo "Testing with --record-tpebs"
-result=$(perf stat -e "$event" --record-tpebs -a sleep 0.01 2>&1)
-[[ "$result" =~ "perf record" && "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+stat_output=$(mktemp /tmp/__perf_stat_tpebs_output.XXXXX)
+
+cleanup() {
+  rm -rf "${stat_output}"
+  trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+  echo "Unexpected signal in ${FUNCNAME[1]}"
+  cat "${stat_output}"
+  cleanup
+  exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Event to be used in tests
+event=cache-misses
+
+if ! perf record -e "${event}:p" -a -o /dev/null sleep 0.01 > "${stat_output}" 2>&1
+then
+  echo "Missing ${event} support"
+  cleanup
+  exit 2
+fi
+
+test_with_record_tpebs() {
+  echo "Testing with --record-tpebs"
+  if ! perf stat -e "${event}:R" --record-tpebs -a sleep 0.01 > "${stat_output}" 2>&1
+  then
+    echo "Testing with --record-tpebs [Failed perf stat]"
+    cat "${stat_output}"
+    exit 1
+  fi
+
+  # Expected output:
+  # $ perf stat --record-tpebs -e cache-misses:R -a sleep 0.01
+  # Events enabled
+  # [ perf record: Woken up 2 times to write data ]
+  # [ perf record: Captured and wrote 0.056 MB - ]
+  #
+  # Performance counter stats for 'system wide':
+  #
+  #           0      cache-misses:R
+  #
+  #      0.013963299 seconds time elapsed
+  if ! grep "perf record" "${stat_output}"
+  then
+    echo "Testing with --record-tpebs [Failed missing perf record]"
+    cat "${stat_output}"
+    exit 1
+  fi
+  if ! grep "${event}:R" "${stat_output}" && ! grep "/${event}/R" "${stat_output}"
+  then
+    echo "Testing with --record-tpebs [Failed missing event name]"
+    cat "${stat_output}"
+    exit 1
+  fi
+  echo "Testing with --record-tpebs [Success]"
+}
+
+test_with_record_tpebs
+cleanup
+exit 0
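The :R modifier requests retirement latency, which perf stat satisfies by forking an internal perf record TPEBS session, which is why the test greps its own stat output for the "perf record" banner (and for either cache-misses:R or the hybrid-platform /cache-misses/R spelling). Its core check, reduced to one line:

  perf stat -e cache-misses:R --record-tpebs -a sleep 0.01 2>&1 | grep 'perf record'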
perf stat -e "${event}:R" --record-tpebs -a sleep 0.01 > "${stat_output}" 2>&1 + then + echo "Testing with --record-tpebs [Failed perf stat]" + cat "${stat_output}" + exit 1 + fi + + # Expected output: + # $ perf stat --record-tpebs -e cache-misses:R -a sleep 0.01 + # Events enabled + # [ perf record: Woken up 2 times to write data ] + # [ perf record: Captured and wrote 0.056 MB - ] + # + # Performance counter stats for 'system wide': + # + # 0 cache-misses:R + # + # 0.013963299 seconds time elapsed + if ! grep "perf record" "${stat_output}" + then + echo "Testing with --record-tpebs [Failed missing perf record]" + cat "${stat_output}" + exit 1 + fi + if ! grep "${event}:R" "${stat_output}" && ! grep "/${event}/R" "${stat_output}" + then + echo "Testing with --record-tpebs [Failed missing event name]" + cat "${stat_output}" + exit 1 + fi + echo "Testing with --record-tpebs [Success]" +} + +test_with_record_tpebs +cleanup +exit 0 diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh index 33387c329f92..7adf9755d6de 100755 --- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh +++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh @@ -4,12 +4,11 @@ set -e -# Skip if there's no probe command. -if ! perf | grep probe -then - echo "Skip: probe command isn't present" - exit 2 -fi +# shellcheck source=lib/probe.sh +. "$(dirname $0)"/lib/probe.sh + +skip_if_no_perf_probe || exit 2 +[ "$(id -u)" == 0 ] || exit 2 # skip if there's no gcc if ! [ -x "$(command -v gcc)" ]; then diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh index 708a13f00635..60fccb62c540 100755 --- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh +++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh @@ -15,6 +15,7 @@ skip_if_no_perf_probe || exit 2 skip_if_no_perf_trace || exit 2 +[ "$(id -u)" = 0 ] || exit 2 . "$(dirname $0)"/lib/probe_vfs_getname.sh diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh index 8d1e6bbeac90..f0b49f7fb57d 100755 --- a/tools/perf/tests/shell/trace_btf_enum.sh +++ b/tools/perf/tests/shell/trace_btf_enum.sh @@ -6,13 +6,14 @@ err=0 set -e syscall="landlock_add_rule" -non_syscall="timer:hrtimer_init,timer:hrtimer_start" +non_syscall="timer:hrtimer_setup,timer:hrtimer_start" TESTPROG="perf test -w landlock" # shellcheck source=lib/probe.sh . "$(dirname $0)"/lib/probe.sh skip_if_no_perf_trace || exit 2 +[ "$(id -u)" = 0 ] || exit 2 check_vmlinux() { echo "Checking if vmlinux exists" diff --git a/tools/perf/tests/shell/trace_btf_general.sh b/tools/perf/tests/shell/trace_btf_general.sh index e9ee727f3433..a25d8744695e 100755 --- a/tools/perf/tests/shell/trace_btf_general.sh +++ b/tools/perf/tests/shell/trace_btf_general.sh @@ -76,6 +76,7 @@ trace_config() { skip_if_no_perf_trace || exit 2 check_vmlinux || exit 2 +[ "$(id -u)" = 0 ] || exit 2 trace_config diff --git a/tools/perf/tests/shell/trace_exit_race.sh b/tools/perf/tests/shell/trace_exit_race.sh index fbb0adc33a88..1e247693e756 100755 --- a/tools/perf/tests/shell/trace_exit_race.sh +++ b/tools/perf/tests/shell/trace_exit_race.sh @@ -10,6 +10,7 @@ . 
"$(dirname $0)"/lib/probe.sh skip_if_no_perf_trace || exit 2 +[ "$(id -u)" = 0 ] || exit 2 if [ "$1" = "-v" ]; then verbose="1" diff --git a/tools/perf/tests/shell/trace_record_replay.sh b/tools/perf/tests/shell/trace_record_replay.sh new file mode 100755 index 000000000000..6b4ed863c1ef --- /dev/null +++ b/tools/perf/tests/shell/trace_record_replay.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# perf trace record and replay +# SPDX-License-Identifier: GPL-2.0 + +# Check that perf trace works with record and replay + +# shellcheck source=lib/probe.sh +. "$(dirname $0)"/lib/probe.sh + +skip_if_no_perf_trace || exit 2 +[ "$(id -u)" = 0 ] || exit 2 + +file=$(mktemp /tmp/temporary_file.XXXXX) + +perf trace record -o ${file} sleep 1 || exit 1 +if ! perf trace -i ${file} 2>&1 | grep nanosleep; then + echo "Failed: cannot find *nanosleep syscall" + exit 1 +fi + +rm -f ${file} |