author     Linus Torvalds <torvalds@linux-foundation.org>  2021-07-11 10:54:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-07-11 10:54:24 -0700
commit     b1412bd75abe8b1c57ecca4a85f92c8ddb4ccd39 (patch)
tree       ebfa509236729b322b56b56e84003527581ef5f0 /tools
parent     de5540965853e514a85d3b775e9049deb85a2ff3 (diff)
parent     eb7261f14e1a86f0fd299a2ec408990d349ce3d1 (diff)
Merge tag 'perf-tools-for-v5.14-2021-07-10' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull more perf tool updates from Arnaldo Carvalho de Melo:
"New features:
- Enable use of BPF counters with 'perf stat --for-each-cgroup',
using per-CPU 'cgroup-switch' events with an attached BPF program
that does aggregation per-cgroup in the kernel instead of using
per-cgroup perf events.
- Add Topdown metrics L2 events as default events in 'perf stat' for
systems having those events.
Hardware tracing:
- Add a config for max loops without consuming a packet in the Intel
PT packet decoder, set via 'perf config intel-pt.max-loops=N'
Hardware enablement:
- Disable misleading NMI watchdog message in 'perf stat' on hybrid
systems such as Intel Alder Lake.
- Add a dummy event on hybrid systems to collect metadata records.
- Add 24x7 nest metric events for the Power10 platform.
Fixes:
- Fix event parsing for PMUs starting with the same prefix.
- Fix the 'perf trace' 'trace' alias installation dir.
- Fix buffer size to report iregs in perf script python scripts,
supporting the extended registers in PowerPC.
- Fix overflow in elf_sec__is_text().
- Fix 's' on source line when disasm is empty in the annotation TUI,
accessible via 'perf annotate', 'perf report' and 'perf top'.
- Plug leaks of the dirent entries returned by scandir() in 'perf test'
when sorting the shell tests (the cleanup pattern is sketched after the
commit list below).
- Fix --task and --stat with pipe input in 'perf report'.
- Fix 'perf probe' use of debuginfo files by build id.
- If a DSO has both dynsym and symtab ELF sections, read from both
when loading the symbol table, fixing a problem processing Fedora
32 glibc DSOs.
Libraries:
- Add grouping of events to libperf, adopted from code in tools/perf,
so that libperf users can use that mode (a usage sketch follows this
message).
Misc:
- Filter plt stubs from the 'perf probe --functions' output.
- Update UAPI header copies for asound, DRM, mman-common.h and the
ones affected by the quotactl_fd syscall (a madvise() sketch for the
new MADV_POPULATE_* advice values follows the diffstat below)"
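
The libperf grouping support gets a public entry point, perf_evlist__set_leader(). Below is a minimal usage sketch following the pattern added in tools/lib/perf/tests/test-evlist.c in this pull; error handling is elided for brevity and the two software events are just illustrative:

```c
// Sketch of the new libperf event-grouping API, modeled on the pattern
// in tools/lib/perf/tests/test-evlist.c. Error checks are abbreviated;
// real callers should test every return value.
#include <linux/perf_event.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/threadmap.h>

int open_grouped_pair(void)
{
	struct perf_thread_map *threads;
	struct perf_evlist *evlist;
	struct perf_event_attr attr1 = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_event_attr attr2 = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_TASK_CLOCK,
	};

	threads = perf_thread_map__new_dummy();
	perf_thread_map__set_pid(threads, 0, 0);	/* current process */

	evlist = perf_evlist__new();
	perf_evlist__add(evlist, perf_evsel__new(&attr1));
	perf_evlist__add(evlist, perf_evsel__new(&attr2));

	/* Mark the first evsel as the leader of a single group. */
	perf_evlist__set_leader(evlist);

	perf_evlist__set_maps(evlist, NULL, threads);

	/* perf_evsel__open() now passes the leader's fd as group_fd. */
	return perf_evlist__open(evlist);
}
```

Internally, perf_evsel__open() now derives group_fd from the leader's already-open fd (see get_group_fd() in the evsel.c hunk below), which is why the leader must be opened before the other group members.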
* tag 'perf-tools-for-v5.14-2021-07-10' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux: (29 commits)
perf test: Add free() calls for scandir() returned dirent entries
libperf: Add tests for perf_evlist__set_leader()
libperf: Remove BUG_ON() from library code in get_group_fd()
libperf: Add group support to perf_evsel__open()
perf tools: Fix pattern matching for same substring in different PMU type
perf record: Add a dummy event on hybrid systems to collect metadata records
perf stat: Add Topdown metrics L2 events as default events
libperf: Adopt evlist__set_leader() from tools/perf as perf_evlist__set_leader()
libperf: Move 'nr_groups' from tools/perf to evlist::nr_groups
libperf: Move 'leader' from tools/perf to perf_evsel::leader
libperf: Move 'idx' from tools/perf to perf_evsel::idx
libperf: Change tests to single static and shared binaries
perf intel-pt: Add a config for max loops without consuming a packet
perf stat: Disable the NMI watchdog message on hybrid
perf vendor events power10: Adds 24x7 nest metric events for power10 platform
perf script python: Fix buffer size to report iregs in perf script
perf trace: Fix the perf trace link location
perf top: Fix overflow in elf_sec__is_text()
perf annotate: Fix 's' on source line when disasm is empty
perf probe: Do not show @plt function by default
...
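
For the scandir() leak fix mentioned above, the cleanup pattern is worth spelling out. A minimal sketch using the standard POSIX API (not the exact tools/perf code):

```c
// scandir() allocates each dirent entry *and* the array holding them,
// so both must be freed -- the 'perf test' fix adds exactly these free()
// calls. Standard POSIX API, not the tools/perf code itself.
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

int list_dir(const char *path)
{
	struct dirent **entries;
	int i, n;

	n = scandir(path, &entries, NULL, alphasort);
	if (n < 0)
		return -1;

	for (i = 0; i < n; i++) {
		printf("%s\n", entries[i]->d_name);
		free(entries[i]);	/* each entry is malloc'ed by scandir() */
	}
	free(entries);			/* ...and so is the array itself */

	return 0;
}
```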
Diffstat (limited to 'tools')
77 files changed, 1872 insertions, 331 deletions
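
The first hunk below syncs the new MADV_POPULATE_READ/MADV_POPULATE_WRITE advice values into the tools copy of mman-common.h. A minimal sketch of what these constants enable, prefaulting a fresh mapping; it assumes a kernel with this merge window's mm changes (v5.14+), since older kernels reject unknown advice with EINVAL:

```c
// Sketch: prefault an anonymous mapping with MADV_POPULATE_WRITE
// (value 23, per the header hunk below). Assumes v5.14+ kernel support;
// the fallback define covers libc headers that predate the constant.
#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_POPULATE_WRITE
#define MADV_POPULATE_WRITE 23	/* populate (prefault) page tables writable */
#endif

int main(void)
{
	size_t len = 16 * 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Populate page tables up front instead of faulting lazily. */
	if (madvise(p, len, MADV_POPULATE_WRITE))
		perror("madvise");

	munmap(p, len);
	return 0;
}
```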
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index f94f65d429be..1567a3294c3d 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h @@ -72,6 +72,9 @@ #define MADV_COLD 20 /* deactivate these pages */ #define MADV_PAGEOUT 21 /* reclaim these pages */ +#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ +#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index d2a942086fcb..f211961ce1da 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -863,7 +863,8 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise) __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2) #define __NR_mount_setattr 442 __SYSCALL(__NR_mount_setattr, sys_mount_setattr) -/* 443 is reserved for quotactl_path */ +#define __NR_quotactl_fd 443 +__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd) #define __NR_landlock_create_ruleset 444 __SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset) diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index 67b94bc3c885..d043752a74cf 100644 --- a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h @@ -777,9 +777,12 @@ struct drm_get_cap { /** * DRM_CLIENT_CAP_STEREO_3D * - * if set to 1, the DRM core will expose the stereo 3D capabilities of the + * If set to 1, the DRM core will expose the stereo 3D capabilities of the * monitor by advertising the supported 3D layouts in the flags of struct - * drm_mode_modeinfo. + * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``. + * + * This capability is always supported for all drivers starting from kernel + * version 3.13. */ #define DRM_CLIENT_CAP_STEREO_3D 1 @@ -788,6 +791,9 @@ struct drm_get_cap { * * If set to 1, the DRM core will expose all planes (overlay, primary, and * cursor) to userspace. + * + * This capability has been introduced in kernel version 3.15. Starting from + * kernel version 3.17, this capability is always supported for all drivers. */ #define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 @@ -797,6 +803,13 @@ struct drm_get_cap { * If set to 1, the DRM core will expose atomic properties to userspace. This * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and * &DRM_CLIENT_CAP_ASPECT_RATIO. + * + * If the driver doesn't support atomic mode-setting, enabling this capability + * will fail with -EOPNOTSUPP. + * + * This capability has been introduced in kernel version 4.0. Starting from + * kernel version 4.2, this capability is always supported for atomic-capable + * drivers. */ #define DRM_CLIENT_CAP_ATOMIC 3 @@ -804,6 +817,10 @@ struct drm_get_cap { * DRM_CLIENT_CAP_ASPECT_RATIO * * If set to 1, the DRM core will provide aspect ratio information in modes. + * See ``DRM_MODE_FLAG_PIC_AR_*``. + * + * This capability is always supported for all drivers starting from kernel + * version 4.18. */ #define DRM_CLIENT_CAP_ASPECT_RATIO 4 @@ -811,8 +828,11 @@ struct drm_get_cap { * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS * * If set to 1, the DRM core will expose special connectors to be used for - * writing back to memory the scene setup in the commit. Depends on client - * also supporting DRM_CLIENT_CAP_ATOMIC + * writing back to memory the scene setup in the commit. The client must enable + * &DRM_CLIENT_CAP_ATOMIC first. 
+ * + * This capability is always supported for atomic-capable drivers starting from + * kernel version 4.19. */ #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index ddc47bbf48b6..c2c7759b7d2e 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -62,8 +62,8 @@ extern "C" { #define I915_ERROR_UEVENT "ERROR" #define I915_RESET_UEVENT "RESET" -/* - * i915_user_extension: Base class for defining a chain of extensions +/** + * struct i915_user_extension - Base class for defining a chain of extensions * * Many interfaces need to grow over time. In most cases we can simply * extend the struct and have userspace pass in more data. Another option, @@ -76,12 +76,58 @@ extern "C" { * increasing complexity, and for large parts of that interface to be * entirely optional. The downside is more pointer chasing; chasing across * the __user boundary with pointers encapsulated inside u64. + * + * Example chaining: + * + * .. code-block:: C + * + * struct i915_user_extension ext3 { + * .next_extension = 0, // end + * .name = ..., + * }; + * struct i915_user_extension ext2 { + * .next_extension = (uintptr_t)&ext3, + * .name = ..., + * }; + * struct i915_user_extension ext1 { + * .next_extension = (uintptr_t)&ext2, + * .name = ..., + * }; + * + * Typically the struct i915_user_extension would be embedded in some uAPI + * struct, and in this case we would feed it the head of the chain(i.e ext1), + * which would then apply all of the above extensions. + * */ struct i915_user_extension { + /** + * @next_extension: + * + * Pointer to the next struct i915_user_extension, or zero if the end. + */ __u64 next_extension; + /** + * @name: Name of the extension. + * + * Note that the name here is just some integer. + * + * Also note that the name space for this is not global for the whole + * driver, but rather its scope/meaning is limited to the specific piece + * of uAPI which has embedded the struct i915_user_extension. + */ __u32 name; - __u32 flags; /* All undefined bits must be zero. */ - __u32 rsvd[4]; /* Reserved for future use; must be zero. */ + /** + * @flags: MBZ + * + * All undefined bits must be zero. + */ + __u32 flags; + /** + * @rsvd: MBZ + * + * Reserved for future use; must be zero. 
+ */ + __u32 rsvd[4]; }; /* @@ -360,6 +406,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_QUERY 0x39 #define DRM_I915_GEM_VM_CREATE 0x3a #define DRM_I915_GEM_VM_DESTROY 0x3b +#define DRM_I915_GEM_CREATE_EXT 0x3c /* Must be kept compact -- no holes */ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) @@ -392,6 +439,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) +#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext) #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) @@ -1054,12 +1102,12 @@ struct drm_i915_gem_exec_fence { __u32 flags; }; -/** +/* * See drm_i915_gem_execbuffer_ext_timeline_fences. */ #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 -/** +/* * This structure describes an array of drm_syncobj and associated points for * timeline variants of drm_syncobj. It is invalid to append this structure to * the execbuf if I915_EXEC_FENCE_ARRAY is set. @@ -1700,7 +1748,7 @@ struct drm_i915_gem_context_param { __u64 value; }; -/** +/* * Context SSEU programming * * It may be necessary for either functional or performance reason to configure @@ -2067,7 +2115,7 @@ struct drm_i915_perf_open_param { __u64 properties_ptr; }; -/** +/* * Enable data capture for a stream that was either opened in a disabled state * via I915_PERF_FLAG_DISABLED or was later disabled via * I915_PERF_IOCTL_DISABLE. @@ -2081,7 +2129,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) -/** +/* * Disable data capture for a stream. * * It is an error to try and read a stream that is disabled. @@ -2090,7 +2138,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) -/** +/* * Change metrics_set captured by a stream. * * If the stream is bound to a specific context, the configuration change @@ -2103,7 +2151,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_CONFIG _IO('i', 0x2) -/** +/* * Common to all i915 perf records */ struct drm_i915_perf_record_header { @@ -2151,7 +2199,7 @@ enum drm_i915_perf_record_type { DRM_I915_PERF_RECORD_MAX /* non-ABI */ }; -/** +/* * Structure to upload perf dynamic configuration into the kernel. */ struct drm_i915_perf_oa_config { @@ -2172,53 +2220,95 @@ struct drm_i915_perf_oa_config { __u64 flex_regs_ptr; }; +/** + * struct drm_i915_query_item - An individual query for the kernel to process. + * + * The behaviour is determined by the @query_id. Note that exactly what + * @data_ptr is also depends on the specific @query_id. 
+ */ struct drm_i915_query_item { + /** @query_id: The id for this query */ __u64 query_id; #define DRM_I915_QUERY_TOPOLOGY_INFO 1 #define DRM_I915_QUERY_ENGINE_INFO 2 #define DRM_I915_QUERY_PERF_CONFIG 3 +#define DRM_I915_QUERY_MEMORY_REGIONS 4 /* Must be kept compact -- no holes and well documented */ - /* + /** + * @length: + * * When set to zero by userspace, this is filled with the size of the - * data to be written at the data_ptr pointer. The kernel sets this + * data to be written at the @data_ptr pointer. The kernel sets this * value to a negative value to signal an error on a particular query * item. */ __s32 length; - /* + /** + * @flags: + * * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. * * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the - * following : - * - DRM_I915_QUERY_PERF_CONFIG_LIST - * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID - * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID + * following: + * + * - DRM_I915_QUERY_PERF_CONFIG_LIST + * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID + * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID */ __u32 flags; #define DRM_I915_QUERY_PERF_CONFIG_LIST 1 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3 - /* - * Data will be written at the location pointed by data_ptr when the - * value of length matches the length of the data to be written by the + /** + * @data_ptr: + * + * Data will be written at the location pointed by @data_ptr when the + * value of @length matches the length of the data to be written by the * kernel. */ __u64 data_ptr; }; +/** + * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the + * kernel to fill out. + * + * Note that this is generally a two step process for each struct + * drm_i915_query_item in the array: + * + * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct + * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The + * kernel will then fill in the size, in bytes, which tells userspace how + * memory it needs to allocate for the blob(say for an array of properties). + * + * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the + * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that + * the &drm_i915_query_item.length should still be the same as what the + * kernel previously set. At this point the kernel can fill in the blob. + * + * Note that for some query items it can make sense for userspace to just pass + * in a buffer/blob equal to or larger than the required size. In this case only + * a single ioctl call is needed. For some smaller query items this can work + * quite well. + * + */ struct drm_i915_query { + /** @num_items: The number of elements in the @items_ptr array */ __u32 num_items; - /* - * Unused for now. Must be cleared to zero. + /** + * @flags: Unused for now. Must be cleared to zero. */ __u32 flags; - /* - * This points to an array of num_items drm_i915_query_item structures. + /** + * @items_ptr: + * + * Pointer to an array of struct drm_i915_query_item. The number of + * array elements is @num_items. */ __u64 items_ptr; }; @@ -2292,21 +2382,21 @@ struct drm_i915_query_topology_info { * Describes one engine and it's capabilities as known to the driver. */ struct drm_i915_engine_info { - /** Engine class and instance. */ + /** @engine: Engine class and instance. */ struct i915_engine_class_instance engine; - /** Reserved field. */ + /** @rsvd0: Reserved field. */ __u32 rsvd0; - /** Engine flags. */ + /** @flags: Engine flags. 
*/ __u64 flags; - /** Capabilities of this engine. */ + /** @capabilities: Capabilities of this engine. */ __u64 capabilities; #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) - /** Reserved fields. */ + /** @rsvd1: Reserved fields. */ __u64 rsvd1[4]; }; @@ -2317,13 +2407,13 @@ struct drm_i915_engine_info { * an array of struct drm_i915_engine_info structures. */ struct drm_i915_query_engine_info { - /** Number of struct drm_i915_engine_info structs following. */ + /** @num_engines: Number of struct drm_i915_engine_info structs following. */ __u32 num_engines; - /** MBZ */ + /** @rsvd: MBZ */ __u32 rsvd[3]; - /** Marker for drm_i915_engine_info structures. */ + /** @engines: Marker for drm_i915_engine_info structures. */ struct drm_i915_engine_info engines[]; }; @@ -2377,6 +2467,241 @@ struct drm_i915_query_perf_config { __u8 data[]; }; +/** + * enum drm_i915_gem_memory_class - Supported memory classes + */ +enum drm_i915_gem_memory_class { + /** @I915_MEMORY_CLASS_SYSTEM: System memory */ + I915_MEMORY_CLASS_SYSTEM = 0, + /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */ + I915_MEMORY_CLASS_DEVICE, +}; + +/** + * struct drm_i915_gem_memory_class_instance - Identify particular memory region + */ +struct drm_i915_gem_memory_class_instance { + /** @memory_class: See enum drm_i915_gem_memory_class */ + __u16 memory_class; + + /** @memory_instance: Which instance */ + __u16 memory_instance; +}; + +/** + * struct drm_i915_memory_region_info - Describes one region as known to the + * driver. + * + * Note that we reserve some stuff here for potential future work. As an example + * we might want expose the capabilities for a given region, which could include + * things like if the region is CPU mappable/accessible, what are the supported + * mapping types etc. + * + * Note that to extend struct drm_i915_memory_region_info and struct + * drm_i915_query_memory_regions in the future the plan is to do the following: + * + * .. code-block:: C + * + * struct drm_i915_memory_region_info { + * struct drm_i915_gem_memory_class_instance region; + * union { + * __u32 rsvd0; + * __u32 new_thing1; + * }; + * ... + * union { + * __u64 rsvd1[8]; + * struct { + * __u64 new_thing2; + * __u64 new_thing3; + * ... + * }; + * }; + * }; + * + * With this things should remain source compatible between versions for + * userspace, even as we add new fields. + * + * Note this is using both struct drm_i915_query_item and struct drm_i915_query. + * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS + * at &drm_i915_query_item.query_id. + */ +struct drm_i915_memory_region_info { + /** @region: The class:instance pair encoding */ + struct drm_i915_gem_memory_class_instance region; + + /** @rsvd0: MBZ */ + __u32 rsvd0; + + /** @probed_size: Memory probed by the driver (-1 = unknown) */ + __u64 probed_size; + + /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */ + __u64 unallocated_size; + + /** @rsvd1: MBZ */ + __u64 rsvd1[8]; +}; + +/** + * struct drm_i915_query_memory_regions + * + * The region info query enumerates all regions known to the driver by filling + * in an array of struct drm_i915_memory_region_info structures. + * + * Example for getting the list of supported regions: + * + * .. 
code-block:: C + * + * struct drm_i915_query_memory_regions *info; + * struct drm_i915_query_item item = { + * .query_id = DRM_I915_QUERY_MEMORY_REGIONS; + * }; + * struct drm_i915_query query = { + * .num_items = 1, + * .items_ptr = (uintptr_t)&item, + * }; + * int err, i; + * + * // First query the size of the blob we need, this needs to be large + * // enough to hold our array of regions. The kernel will fill out the + * // item.length for us, which is the number of bytes we need. + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); + * if (err) ... + * + * info = calloc(1, item.length); + * // Now that we allocated the required number of bytes, we call the ioctl + * // again, this time with the data_ptr pointing to our newly allocated + * // blob, which the kernel can then populate with the all the region info. + * item.data_ptr = (uintptr_t)&info, + * + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); + * if (err) ... + * + * // We can now access each region in the array + * for (i = 0; i < info->num_regions; i++) { + * struct drm_i915_memory_region_info mr = info->regions[i]; + * u16 class = mr.region.class; + * u16 instance = mr.region.instance; + * + * .... + * } + * + * free(info); + */ +struct drm_i915_query_memory_regions { + /** @num_regions: Number of supported regions */ + __u32 num_regions; + + /** @rsvd: MBZ */ + __u32 rsvd[3]; + + /** @regions: Info about each supported region */ + struct drm_i915_memory_region_info regions[]; +}; + +/** + * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added + * extension support using struct i915_user_extension. + * + * Note that in the future we want to have our buffer flags here, at least for + * the stuff that is immutable. Previously we would have two ioctls, one to + * create the object with gem_create, and another to apply various parameters, + * however this creates some ambiguity for the params which are considered + * immutable. Also in general we're phasing out the various SET/GET ioctls. + */ +struct drm_i915_gem_create_ext { + /** + * @size: Requested size for the object. + * + * The (page-aligned) allocated size for the object will be returned. + * + * Note that for some devices we have might have further minimum + * page-size restrictions(larger than 4K), like for device local-memory. + * However in general the final size here should always reflect any + * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS + * extension to place the object in device local-memory. + */ + __u64 size; + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero. + */ + __u32 handle; + /** @flags: MBZ */ + __u32 flags; + /** + * @extensions: The chain of extensions to apply to this object. + * + * This will be useful in the future when we need to support several + * different extensions, and we need to apply more than one when + * creating the object. See struct i915_user_extension. + * + * If we don't supply any extensions then we get the same old gem_create + * behaviour. + * + * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see + * struct drm_i915_gem_create_ext_memory_regions. + */ +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 + __u64 extensions; +}; + +/** + * struct drm_i915_gem_create_ext_memory_regions - The + * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension. + * + * Set the object with the desired set of placements/regions in priority + * order. Each entry must be unique and supported by the device. 
+ * + * This is provided as an array of struct drm_i915_gem_memory_class_instance, or + * an equivalent layout of class:instance pair encodings. See struct + * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to + * query the supported regions for a device. + * + * As an example, on discrete devices, if we wish to set the placement as + * device local-memory we can do something like: + * + * .. code-block:: C + * + * struct drm_i915_gem_memory_class_instance region_lmem = { + * .memory_class = I915_MEMORY_CLASS_DEVICE, + * .memory_instance = 0, + * }; + * struct drm_i915_gem_create_ext_memory_regions regions = { + * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS }, + * .regions = (uintptr_t)®ion_lmem, + * .num_regions = 1, + * }; + * struct drm_i915_gem_create_ext create_ext = { + * .size = 16 * PAGE_SIZE, + * .extensions = (uintptr_t)®ions, + * }; + * + * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); + * if (err) ... + * + * At which point we get the object handle in &drm_i915_gem_create_ext.handle, + * along with the final object size in &drm_i915_gem_create_ext.size, which + * should account for any rounding up, if required. + */ +struct drm_i915_gem_create_ext_memory_regions { + /** @base: Extension link. See struct i915_user_extension. */ + struct i915_user_extension base; + + /** @pad: MBZ */ + __u32 pad; + /** @num_regions: Number of elements in the @regions array. */ + __u32 num_regions; + /** + * @regions: The regions/placements array. + * + * An array of struct drm_i915_gem_memory_class_instance. + */ + __u64 regions; +}; + #if defined(__cplusplus) } #endif diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h index 535a7229e1d9..d17c061950df 100644 --- a/tools/include/uapi/sound/asound.h +++ b/tools/include/uapi/sound/asound.h @@ -710,7 +710,7 @@ enum { * Raw MIDI section - /dev/snd/midi?? */ -#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1) +#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2) enum { SNDRV_RAWMIDI_STREAM_OUTPUT = 0, @@ -736,12 +736,38 @@ struct snd_rawmidi_info { unsigned char reserved[64]; /* reserved for future use */ }; +#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0) +#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0 +#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0) +#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0) +#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3 +#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3) + +#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16 + +struct snd_rawmidi_framing_tstamp { + /* For now, frame_type is always 0. Midi 2.0 is expected to add new + * types here. Applications are expected to skip unknown frame types. 
+ */ + __u8 frame_type; + __u8 length; /* number of valid bytes in data field */ + __u8 reserved[2]; + __u32 tv_nsec; /* nanoseconds */ + __u64 tv_sec; /* seconds */ + __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH]; +} __packed; + struct snd_rawmidi_params { int stream; size_t buffer_size; /* queue size in bytes */ size_t avail_min; /* minimum avail bytes for wakeup */ unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */ - unsigned char reserved[16]; /* reserved for future use */ + unsigned int mode; /* For input data only, frame incoming data */ + unsigned char reserved[12]; /* reserved for future use */ }; #ifndef __KERNEL__ diff --git a/tools/lib/perf/Build b/tools/lib/perf/Build index 2ef9a4ec6d99..e8f5b7fb9973 100644 --- a/tools/lib/perf/Build +++ b/tools/lib/perf/Build @@ -11,3 +11,5 @@ libperf-y += lib.o $(OUTPUT)zalloc.o: ../../lib/zalloc.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) + +tests-y += tests/ diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile index 3718d65cffac..08fe6e3c4089 100644 --- a/tools/lib/perf/Makefile +++ b/tools/lib/perf/Makefile @@ -52,6 +52,8 @@ else Q = @ endif +TEST_ARGS := $(if $(V),-v) + # Set compile option CFLAGS ifdef EXTRA_CFLAGS CFLAGS := $(EXTRA_CFLAGS) @@ -136,12 +138,30 @@ all: fixdep clean: $(LIBAPI)-clean $(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \ - *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd LIBPERF-CFLAGS $(LIBPERF_PC) - $(Q)$(MAKE) -C tests clean + *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \ + $(TESTS_STATIC) $(TESTS_SHARED) + +TESTS_IN = tests-in.o + +TESTS_STATIC = $(OUTPUT)tests-static +TESTS_SHARED = $(OUTPUT)tests-shared + +$(TESTS_IN): FORCE + $(Q)$(MAKE) $(build)=tests + +$(TESTS_STATIC): $(TESTS_IN) $(LIBPERF_A) $(LIBAPI) + $(QUIET_LINK)$(CC) -o $@ $^ + +$(TESTS_SHARED): $(TESTS_IN) $(LIBAPI) + $(QUIET_LINK)$(CC) -o $@ -L$(if $(OUTPUT),$(OUTPUT),.) $^ -lperf + +make-tests: libs $(TESTS_SHARED) $(TESTS_STATIC) -tests: libs - $(Q)$(MAKE) -C tests - $(Q)$(MAKE) -C tests run +tests: make-tests + @echo "running static:" + @./$(TESTS_STATIC) $(TEST_ARGS) + @echo "running dynamic:" + @LD_LIBRARY_PATH=. ./$(TESTS_SHARED) $(TEST_ARGS) $(LIBPERF_PC): $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \ diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c index a0aaf385cbb5..e37dfad31383 100644 --- a/tools/lib/perf/evlist.c +++ b/tools/lib/perf/evlist.c @@ -66,6 +66,7 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist) void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *evsel) { + evsel->idx = evlist->nr_entries; list_add_tail(&evsel->node, &evlist->entries); evlist->nr_entries += 1; __perf_evlist__propagate_maps(evlist, evsel); @@ -641,3 +642,24 @@ perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map, return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first; } + +void __perf_evlist__set_leader(struct list_head *list) +{ + struct perf_evsel *evsel, *leader; + + leader = list_entry(list->next, struct perf_evsel, node); + evsel = list_entry(list->prev, struct perf_evsel, node); + + leader->nr_members = evsel->idx - leader->idx + 1; + + __perf_evlist__for_each_entry(list, evsel) + evsel->leader = leader; +} + +void perf_evlist__set_leader(struct perf_evlist *evlist) +{ + if (evlist->nr_entries) { + evlist->nr_groups = evlist->nr_entries > 1 ? 
1 : 0; + __perf_evlist__set_leader(&evlist->entries); + } +} diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index bd8c2f19ef74..d8886720e83d 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -17,11 +17,15 @@ #include <linux/string.h> #include <sys/ioctl.h> #include <sys/mman.h> +#include <asm/bug.h> -void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr) +void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, + int idx) { INIT_LIST_HEAD(&evsel->node); evsel->attr = *attr; + evsel->idx = idx; + evsel->leader = evsel; } struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) @@ -29,7 +33,7 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) struct perf_evsel *evsel = zalloc(sizeof(*evsel)); if (evsel != NULL) - perf_evsel__init(evsel, attr); + perf_evsel__init(evsel, attr, 0); return evsel; } @@ -73,6 +77,32 @@ sys_perf_event_open(struct perf_event_attr *attr, return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); } +static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd) +{ + struct perf_evsel *leader = evsel->leader; + int fd; + + if (evsel == leader) { + *group_fd = -1; + return 0; + } + + /* + * Leader must be already processed/open, + * if not it's a bug. + */ + if (!leader->fd) + return -ENOTCONN; + + fd = FD(leader, cpu, thread); + if (fd == -1) + return -EBADF; + + *group_fd = fd; + + return 0; +} + int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads) { @@ -108,11 +138,15 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, for (cpu = 0; cpu < cpus->nr; cpu++) { for (thread = 0; thread < threads->nr; thread++) { - int fd; + int fd, group_fd; + + err = get_group_fd(evsel, cpu, thread, &group_fd); + if (err < 0) + return err; fd = sys_perf_event_open(&evsel->attr, threads->map[thread].pid, - cpus->map[cpu], -1, 0); + cpus->map[cpu], group_fd, 0); if (fd < 0) return -errno; diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h index 212c29063ad4..f366dbad6a88 100644 --- a/tools/lib/perf/include/internal/evlist.h +++ b/tools/lib/perf/include/internal/evlist.h @@ -16,6 +16,7 @@ struct perf_mmap_param; struct perf_evlist { struct list_head entries; int nr_entries; + int nr_groups; bool has_user_cpus; struct perf_cpu_map *cpus; struct perf_cpu_map *all_cpus; @@ -126,4 +127,5 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist, void perf_evlist__reset_id_hash(struct perf_evlist *evlist); +void __perf_evlist__set_leader(struct list_head *list); #endif /* __LIBPERF_INTERNAL_EVLIST_H */ diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h index 1c067d088bc6..1f3eacbad2e8 100644 --- a/tools/lib/perf/include/internal/evsel.h +++ b/tools/lib/perf/include/internal/evsel.h @@ -45,13 +45,16 @@ struct perf_evsel { struct xyarray *sample_id; u64 *id; u32 ids; + struct perf_evsel *leader; /* parse modifier helper */ int nr_members; bool system_wide; + int idx; }; -void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr); +void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, + int idx); int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); void perf_evsel__close_fd(struct perf_evsel *evsel); void perf_evsel__free_fd(struct perf_evsel *evsel); diff --git a/tools/lib/perf/include/internal/tests.h 
b/tools/lib/perf/include/internal/tests.h index 29425c2dabe1..61052099225b 100644 --- a/tools/lib/perf/include/internal/tests.h +++ b/tools/lib/perf/include/internal/tests.h @@ -5,8 +5,8 @@ #include <stdio.h> #include <unistd.h> -int tests_failed; -int tests_verbose; +extern int tests_failed; +extern int tests_verbose; static inline int get_verbose(char **argv, int argc) { diff --git a/tools/lib/perf/include/perf/evlist.h b/tools/lib/perf/include/perf/evlist.h index 0a7479dc13bf..9ca399d49bb4 100644 --- a/tools/lib/perf/include/perf/evlist.h +++ b/tools/lib/perf/include/perf/evlist.h @@ -46,4 +46,5 @@ LIBPERF_API struct perf_mmap *perf_evlist__next_mmap(struct perf_evlist *evlist, (pos) != NULL; \ (pos) = perf_evlist__next_mmap((evlist), (pos), overwrite)) +LIBPERF_API void perf_evlist__set_leader(struct perf_evlist *evlist); #endif /* __LIBPERF_EVLIST_H */ diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map index c0c7ceb11060..71468606e8a7 100644 --- a/tools/lib/perf/libperf.map +++ b/tools/lib/perf/libperf.map @@ -45,6 +45,7 @@ LIBPERF_0.0.1 { perf_evlist__munmap; perf_evlist__filter_pollfd; perf_evlist__next_mmap; + perf_evlist__set_leader; perf_mmap__consume; perf_mmap__read_init; perf_mmap__read_done; diff --git a/tools/lib/perf/tests/Build b/tools/lib/perf/tests/Build new file mode 100644 index 000000000000..56e81378d443 --- /dev/null +++ b/tools/lib/perf/tests/Build @@ -0,0 +1,5 @@ +tests-y += main.o +tests-y += test-evsel.o +tests-y += test-evlist.o +tests-y += test-cpumap.o +tests-y += test-threadmap.o diff --git a/tools/lib/perf/tests/Makefile b/tools/lib/perf/tests/Makefile deleted file mode 100644 index b536cc9a26dd..000000000000 --- a/tools/lib/perf/tests/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) - -TESTS = test-cpumap test-threadmap test-evlist test-evsel - -TESTS_SO := $(addsuffix -so,$(TESTS)) -TESTS_A := $(addsuffix -a,$(TESTS)) - -TEST_ARGS := $(if $(V),-v) - -# Set compile option CFLAGS -ifdef EXTRA_CFLAGS - CFLAGS := $(EXTRA_CFLAGS) -else - CFLAGS := -g -Wall -endif - -all: - -include $(srctree)/tools/scripts/Makefile.include - -INCLUDE = -I$(srctree)/tools/lib/perf/include -I$(srctree)/tools/include -I$(srctree)/tools/lib - -$(TESTS_A): FORCE - $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -o $@ $(subst -a,.c,$@) ../libperf.a $(LIBAPI) - -$(TESTS_SO): FORCE - $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -L.. 
-o $@ $(subst -so,.c,$@) $(LIBAPI) -lperf - -all: $(TESTS_A) $(TESTS_SO) - -run: - @echo "running static:" - @for i in $(TESTS_A); do ./$$i $(TEST_ARGS); done - @echo "running dynamic:" - @for i in $(TESTS_SO); do LD_LIBRARY_PATH=../ ./$$i $(TEST_ARGS); done - -clean: - $(call QUIET_CLEAN, tests)$(RM) $(TESTS_A) $(TESTS_SO) - -.PHONY: all clean FORCE diff --git a/tools/lib/perf/tests/main.c b/tools/lib/perf/tests/main.c new file mode 100644 index 000000000000..56423fd4db19 --- /dev/null +++ b/tools/lib/perf/tests/main.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <internal/tests.h> +#include "tests.h" + +int tests_failed; +int tests_verbose; + +int main(int argc, char **argv) +{ + __T("test cpumap", !test_cpumap(argc, argv)); + __T("test threadmap", !test_threadmap(argc, argv)); + __T("test evlist", !test_evlist(argc, argv)); + __T("test evsel", !test_evsel(argc, argv)); + return 0; +} diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c index c70e9e03af3e..d39378eaf897 100644 --- a/tools/lib/perf/tests/test-cpumap.c +++ b/tools/lib/perf/tests/test-cpumap.c @@ -3,6 +3,7 @@ #include <stdio.h> #include <perf/cpumap.h> #include <internal/tests.h> +#include "tests.h" static int libperf_print(enum libperf_print_level level, const char *fmt, va_list ap) @@ -10,7 +11,7 @@ static int libperf_print(enum libperf_print_level level, return vfprintf(stderr, fmt, ap); } -int main(int argc, char **argv) +int test_cpumap(int argc, char **argv) { struct perf_cpu_map *cpus; diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c index e2ac0b7f432e..c67c83399170 100644 --- a/tools/lib/perf/tests/test-evlist.c +++ b/tools/lib/perf/tests/test-evlist.c @@ -18,6 +18,8 @@ #include <perf/event.h> #include <internal/tests.h> #include <api/fs/fs.h> +#include "tests.h" +#include <internal/evsel.h> static int libperf_print(enum libperf_print_level level, const char *fmt, va_list ap) @@ -29,7 +31,7 @@ static int test_stat_cpu(void) { struct perf_cpu_map *cpus; struct perf_evlist *evlist; - struct perf_evsel *evsel; + struct perf_evsel *evsel, *leader; struct perf_event_attr attr1 = { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_CLOCK, @@ -46,7 +48,7 @@ static int test_stat_cpu(void) evlist = perf_evlist__new(); __T("failed to create evlist", evlist); - evsel = perf_evsel__new(&attr1); + evsel = leader = perf_evsel__new(&attr1); __T("failed to create evsel1", evsel); perf_evlist__add(evlist, evsel); @@ -56,6 +58,10 @@ static int test_stat_cpu(void) perf_evlist__add(evlist, evsel); + perf_evlist__set_leader(evlist); + __T("failed to set leader", leader->leader == leader); + __T("failed to set leader", evsel->leader == leader); + perf_evlist__set_maps(evlist, cpus, NULL); err = perf_evlist__open(evlist); @@ -84,7 +90,7 @@ static int test_stat_thread(void) struct perf_counts_values counts = { .val = 0 }; struct perf_thread_map *threads; struct perf_evlist *evlist; - struct perf_evsel *evsel; + struct perf_evsel *evsel, *leader; struct perf_event_attr attr1 = { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_CLOCK, @@ -103,7 +109,7 @@ static int test_stat_thread(void) evlist = perf_evlist__new(); __T("failed to create evlist", evlist); - evsel = perf_evsel__new(&attr1); + evsel = leader = perf_evsel__new(&attr1); __T("failed to create evsel1", evsel); perf_evlist__add(evlist, evsel); @@ -113,6 +119,10 @@ static int test_stat_thread(void) perf_evlist__add(evlist, evsel); + perf_evlist__set_leader(evlist); + __T("failed to set 
leader", leader->leader == leader); + __T("failed to set leader", evsel->leader == leader); + perf_evlist__set_maps(evlist, NULL, threads); err = perf_evlist__open(evlist); @@ -135,7 +145,7 @@ static int test_stat_thread_enable(void) struct perf_counts_values counts = { .val = 0 }; struct perf_thread_map *threads; struct perf_evlist *evlist; - struct perf_evsel *evsel; + struct perf_evsel *evsel, *leader; struct perf_event_attr attr1 = { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_CLOCK, @@ -156,7 +166,7 @@ static int test_stat_thread_enable(void) evlist = perf_evlist__new(); __T("failed to create evlist", evlist); - evsel = perf_evsel__new(&attr1); + evsel = leader = perf_evsel__new(&attr1); __T("failed to create evsel1", evsel); perf_evlist__add(evlist, evsel); @@ -166,6 +176,10 @@ static int test_stat_thread_enable(void) perf_evlist__add(evlist, evsel); + perf_evlist__set_leader(evlist); + __T("failed to set leader", leader->leader == leader); + __T("failed to set leader", evsel->leader == leader); + perf_evlist__set_maps(evlist, NULL, threads); err = perf_evlist__open(evlist); @@ -253,6 +267,7 @@ static int test_mmap_thread(void) evsel = perf_evsel__new(&attr); __T("failed to create evsel1", evsel); + __T("failed to set leader", evsel->leader == evsel); perf_evlist__add(evlist, evsel); @@ -338,6 +353,7 @@ static int test_mmap_cpus(void) evsel = perf_evsel__new(&attr); __T("failed to create evsel1", evsel); + __T("failed to set leader", evsel->leader == evsel); perf_evlist__add(evlist, evsel); @@ -397,7 +413,7 @@ static int test_mmap_cpus(void) return 0; } -int main(int argc, char **argv) +int test_evlist(int argc, char **argv) { __T_START; diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c index 288b5feaefe2..a184e4861627 100644 --- a/tools/lib/perf/tests/test-evsel.c +++ b/tools/lib/perf/tests/test-evsel.c @@ -6,6 +6,7 @@ #include <perf/threadmap.h> #include <perf/evsel.h> #include <internal/tests.h> +#include "tests.h" static int libperf_print(enum libperf_print_level level, const char *fmt, va_list ap) @@ -184,7 +185,7 @@ static int test_stat_user_read(int event) return 0; } -int main(int argc, char **argv) +int test_evsel(int argc, char **argv) { __T_START; diff --git a/tools/lib/perf/tests/test-threadmap.c b/tools/lib/perf/tests/test-threadmap.c index 384471441b48..5e2a0291e94c 100644 --- a/tools/lib/perf/tests/test-threadmap.c +++ b/tools/lib/perf/tests/test-threadmap.c @@ -3,6 +3,7 @@ #include <stdio.h> #include <perf/threadmap.h> #include <internal/tests.h> +#include "tests.h" static int libperf_print(enum libperf_print_level level, const char *fmt, va_list ap) @@ -10,7 +11,7 @@ static int libperf_print(enum libperf_print_level level, return vfprintf(stderr, fmt, ap); } -int main(int argc, char **argv) +int test_threadmap(int argc, char **argv) { struct perf_thread_map *threads; diff --git a/tools/lib/perf/tests/tests.h b/tools/lib/perf/tests/tests.h new file mode 100644 index 000000000000..604838f21b2b --- /dev/null +++ b/tools/lib/perf/tests/tests.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TESTS_H +#define TESTS_H + +int test_cpumap(int argc, char **argv); +int test_threadmap(int argc, char **argv); +int test_evlist(int argc, char **argv); +int test_evsel(int argc, char **argv); + +#endif /* TESTS_H */ diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt index b0872c801866..3bb75c1f25e8 100644 --- a/tools/perf/Documentation/perf-config.txt +++ 
b/tools/perf/Documentation/perf-config.txt @@ -706,6 +706,12 @@ intel-pt.*:: If set, Intel PT decoder will set the mispred flag on all branches. + intel-pt.max-loops:: + If set and non-zero, the maximum number of unconditional + branches decoded without consuming any trace packets. If + the maximum is exceeded there will be a "Never-ending loop" + error. The default is 100000. + auxtrace.*:: auxtrace.dumpdir:: diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index c9e0de5b00c1..77e7f18c0bd0 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -923,7 +923,7 @@ install-tools: all install-gtk $(call QUIET_INSTALL, binaries) \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \ $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \ - $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(dir_SQ)/trace'; \ + $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'; \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(includedir_SQ)/perf'; \ $(INSTALL) util/perf_dlfilter.h -t '$(DESTDIR_SQ)$(includedir_SQ)/perf' ifndef NO_PERF_READ_VDSO32 @@ -1017,6 +1017,7 @@ SKEL_OUT := $(abspath $(OUTPUT)util/bpf_skel) SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp) SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h +SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h ifdef BUILD_BPF_SKEL BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool @@ -1030,7 +1031,21 @@ $(BPFTOOL): | $(SKEL_TMP_OUT) CFLAGS= $(MAKE) -C ../bpf/bpftool \ OUTPUT=$(SKEL_TMP_OUT)/ bootstrap -$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT) +VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ + $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ + ../../vmlinux \ + /sys/kernel/btf/vmlinux \ + /boot/vmlinux-$(shell uname -r) +VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) + +$(SKEL_OUT)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) +ifeq ($(VMLINUX_H),) + $(QUIET_GEN)$(BPFTOOL) btf dump file $< format c > $@ +else + $(Q)cp "$(VMLINUX_H)" $@ +endif + +$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) $(SKEL_OUT)/vmlinux.h | $(SKEL_TMP_OUT) $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) \ -c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ && $(LLVM_STRIP) -g $@ diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl index 9cd1c34f31b5..ac653d08b1ea 100644 --- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl +++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl @@ -357,7 +357,7 @@ 440 n64 process_madvise sys_process_madvise 441 n64 epoll_pwait2 sys_epoll_pwait2 442 n64 mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 n64 quotactl_fd sys_quotactl_fd 444 n64 landlock_create_ruleset sys_landlock_create_ruleset 445 n64 landlock_add_rule sys_landlock_add_rule 446 n64 landlock_restrict_self sys_landlock_restrict_self diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 8f052ff4058c..aef2a290e71a 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -522,7 +522,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common 
landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index 0690263df1dd..64d51ab5a8b4 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -445,7 +445,7 @@ 440 common process_madvise sys_process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index ce18119ea0d0..af973e400053 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -364,7 +364,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c index 8c6732cc7794..0b0951030a2f 100644 --- a/tools/perf/arch/x86/util/evlist.c +++ b/tools/perf/arch/x86/util/evlist.c @@ -5,11 +5,15 @@ #include "util/parse-events.h" #define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}" +#define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}" int arch_evlist__add_default_attrs(struct evlist *evlist) { if (!pmu_have_event("cpu", "slots")) return 0; - return parse_events(evlist, TOPDOWN_L1_EVENTS, NULL); + if (pmu_have_event("cpu", "topdown-heavy-ops")) + return parse_events(evlist, TOPDOWN_L2_EVENTS, NULL); + else + return parse_events(evlist, TOPDOWN_L1_EVENTS, NULL); } diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c index d63acb782b63..eeafe97b8105 100644 --- a/tools/perf/arch/x86/util/iostat.c +++ b/tools/perf/arch/x86/util/iostat.c @@ -322,7 +322,7 @@ static int iostat_event_group(struct evlist *evl, } evlist__for_each_entry(evl, evsel) { - evsel->priv = list->rps[evsel->idx / metrics_count]; + evsel->priv = list->rps[evsel->core.idx / metrics_count]; } list->nr_entries = 0; err: @@ -428,7 +428,7 @@ void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel, { double iostat_value = 0; u64 prev_count_val = 0; - const char *iostat_metric = iostat_metric_by_idx(evsel->idx); + const char *iostat_metric = iostat_metric_by_idx(evsel->core.idx); u8 die = ((struct iio_root_port *)evsel->priv)->die; struct perf_counts_values *count = perf_counts(evsel->counts, die, 0); diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index f52b3a799e76..80450c0e8f36 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -1031,12 +1031,12 @@ static int process_base_stream(struct 
data__file *data_base, continue; es_base = evsel_streams__entry(data_base->evlist_streams, - evsel_base->idx); + evsel_base->core.idx); if (!es_base) return -1; es_pair = evsel_streams__entry(data_pair->evlist_streams, - evsel_pair->idx); + evsel_pair->core.idx); if (!es_pair) return -1; diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 2bfd41df621c..e1dd51f2874b 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -31,7 +31,7 @@ #include <linux/zalloc.h> #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*" -#define DEFAULT_FUNC_FILTER "!_*" +#define DEFAULT_FUNC_FILTER "!_* & !*@plt" #define DEFAULT_LIST_FILTER "*" /* Session management structure */ diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 71efe6573ee7..671a21c9ee4d 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -891,11 +891,12 @@ static int record__open(struct record *rec) int rc = 0; /* - * For initial_delay or system wide, we need to add a dummy event so - * that we can track PERF_RECORD_MMAP to cover the delay of waiting or - * event synthesis. + * For initial_delay, system wide or a hybrid system, we need to add a + * dummy event so that we can track PERF_RECORD_MMAP to cover the delay + * of waiting or event synthesis. */ - if (opts->initial_delay || target__has_cpu(&opts->target)) { + if (opts->initial_delay || target__has_cpu(&opts->target) || + perf_pmu__has_hybrid()) { pos = evlist__get_tracking_event(evlist); if (!evsel__is_dummy_event(pos)) { /* Set up dummy event. */ @@ -926,7 +927,7 @@ try_again: goto try_again; } if ((errno == EINVAL || errno == EBADF) && - pos->leader != pos && + pos->core.leader != &pos->core && pos->weak_group) { pos = evlist__reset_weak_group(evlist, pos, true); goto try_again; @@ -1776,7 +1777,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) rec->tool.ordered_events = false; } - if (!rec->evlist->nr_groups) + if (!rec->evlist->core.nr_groups) perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); if (data->is_pipe) { diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index bc5c393021dc..6386af6a2612 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -332,7 +332,7 @@ static int process_read_event(struct perf_tool *tool, const char *name = evsel__name(evsel); int err = perf_read_values_add_value(&rep->show_threads_values, event->read.pid, event->read.tid, - evsel->idx, + evsel->core.idx, name, event->read.value); @@ -666,7 +666,7 @@ static int report__collapse_hists(struct report *rep) evlist__for_each_entry(rep->session->evlist, pos) { struct hists *hists = evsel__hists(pos); - if (pos->idx == 0) + if (pos->core.idx == 0) hists->symbol_filter_str = rep->symbol_filter_str; hists->socket_filter = rep->socket_filter; @@ -677,7 +677,7 @@ static int report__collapse_hists(struct report *rep) /* Non-group events are considered as leader */ if (symbol_conf.event_group && !evsel__is_group_leader(pos)) { - struct hists *leader_hists = evsel__hists(pos->leader); + struct hists *leader_hists = evsel__hists(evsel__leader(pos)); hists__match(leader_hists, hists); hists__link(leader_hists, hists); @@ -729,9 +729,14 @@ static int count_sample_event(struct perf_tool *tool __maybe_unused, return 0; } +static int process_attr(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct evlist **pevlist); + static void stats_setup(struct report *rep) { memset(&rep->tool, 0, sizeof(rep->tool)); + rep->tool.attr = 
process_attr; rep->tool.sample = count_sample_event; rep->tool.no_warn = true; } @@ -753,6 +758,7 @@ static void tasks_setup(struct report *rep) rep->tool.mmap = perf_event__process_mmap; rep->tool.mmap2 = perf_event__process_mmap2; } + rep->tool.attr = process_attr; rep->tool.comm = perf_event__process_comm; rep->tool.exit = perf_event__process_exit; rep->tool.fork = perf_event__process_fork; @@ -1426,7 +1432,7 @@ repeat: setup_forced_leader(&report, session->evlist); - if (symbol_conf.group_sort_idx && !session->evlist->nr_groups) { + if (symbol_conf.group_sort_idx && !session->evlist->core.nr_groups) { parse_options_usage(NULL, options, "group-sort-idx", 0); ret = -EINVAL; goto error; diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 2030936cc891..8c03a9862872 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1899,6 +1899,7 @@ static void perf_sample__fprint_metric(struct perf_script *script, struct perf_sample *sample, FILE *fp) { + struct evsel *leader = evsel__leader(evsel); struct perf_stat_output_ctx ctx = { .print_metric = script_print_metric, .new_line = script_new_line, @@ -1915,7 +1916,7 @@ static void perf_sample__fprint_metric(struct perf_script *script, if (!evsel->stats) evlist__alloc_stats(script->session->evlist, false); - if (evsel_script(evsel->leader)->gnum++ == 0) + if (evsel_script(leader)->gnum++ == 0) perf_stat__reset_shadow_stats(); val = sample->period * evsel->scale; perf_stat__update_shadow_stats(evsel, @@ -1923,8 +1924,8 @@ static void perf_sample__fprint_metric(struct perf_script *script, sample->cpu, &rt_stat); evsel_script(evsel)->val = val; - if (evsel_script(evsel->leader)->gnum == evsel->leader->core.nr_members) { - for_each_group_member (ev2, evsel->leader) { + if (evsel_script(leader)->gnum == leader->core.nr_members) { + for_each_group_member (ev2, leader) { perf_stat__print_shadow_stats(&stat_config, ev2, evsel_script(ev2)->val, sample->cpu, @@ -1932,7 +1933,7 @@ static void perf_sample__fprint_metric(struct perf_script *script, NULL, &rt_stat); } - evsel_script(evsel->leader)->gnum = 0; + evsel_script(leader)->gnum = 0; } } diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f9f74a514315..d25cb8088e8c 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -154,6 +154,8 @@ static const char *topdown_metric_L2_attrs[] = { NULL, }; +#define TOPDOWN_MAX_LEVEL 2 + static const char *smi_cost_attrs = { "{" "msr/aperf/," @@ -248,7 +250,7 @@ static void evlist__check_cpu_maps(struct evlist *evlist) evlist__warn_hybrid_group(evlist); evlist__for_each_entry(evlist, evsel) { - leader = evsel->leader; + leader = evsel__leader(evsel); /* Check that leader matches cpus with each member. */ if (leader == evsel) @@ -269,10 +271,10 @@ static void evlist__check_cpu_maps(struct evlist *evlist) } for_each_group_evsel(pos, leader) { - pos->leader = pos; + evsel__set_leader(pos, pos); pos->core.nr_members = 0; } - evsel->leader->core.nr_members = 0; + evsel->core.leader->nr_members = 0; } } @@ -745,8 +747,8 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) */ counter->errored = true; - if ((counter->leader != counter) || - !(counter->leader->core.nr_members > 1)) + if ((evsel__leader(counter) != counter) || + !(counter->core.leader->nr_members > 1)) return COUNTER_SKIP; } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { if (verbose > 0) @@ -839,7 +841,7 @@ try_again: * Don't close here because we're in the wrong affinity. 
*/ if ((errno == EINVAL || errno == EBADF) && - counter->leader != counter && + evsel__leader(counter) != counter && counter->weak_group) { evlist__reset_weak_group(evsel_list, counter, false); assert(counter->reset_group); @@ -1931,6 +1933,7 @@ setup_metrics: if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) return -1; + stat_config.topdown_level = TOPDOWN_MAX_LEVEL; if (arch_evlist__add_default_attrs(evsel_list) < 0) return -1; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 2d570bfe7a56..02f8bb5dbc0f 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -264,9 +264,9 @@ static void perf_top__show_details(struct perf_top *top) if (top->evlist->enabled) { if (top->zero) - symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx); + symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx); else - symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx); + symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx); } if (more != 0) printf("%d lines not displayed, maybe increase display entries [e]\n", more); @@ -301,7 +301,7 @@ static void perf_top__resort_hists(struct perf_top *t) /* Non-group events are considered as leader */ if (symbol_conf.event_group && !evsel__is_group_leader(pos)) { - struct hists *leader_hists = evsel__hists(pos->leader); + struct hists *leader_hists = evsel__hists(evsel__leader(pos)); hists__match(leader_hists, hists); hists__link(leader_hists, hists); @@ -530,7 +530,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c) fprintf(stderr, "\nAvailable events:"); evlist__for_each_entry(top->evlist, top->sym_evsel) - fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, evsel__name(top->sym_evsel)); + fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel)); prompt_integer(&counter, "Enter details event counter"); @@ -541,7 +541,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c) break; } evlist__for_each_entry(top->evlist, top->sym_evsel) - if (top->sym_evsel->idx == counter) + if (top->sym_evsel->core.idx == counter) break; } else top->sym_evsel = evlist__first(top->evlist); diff --git a/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json new file mode 100644 index 000000000000..8ba3e81c9808 --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json @@ -0,0 +1,424 @@ +[ + { + "MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P01", + "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P23", + "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "LOCAL_NODE_PUMP_RETRY_RATIO_P01", + "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_LNS_PUMP01\\,chip\\=?@) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "LOCAL_NODE_PUMP_RETRY_RATIO_P23", + "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_LNS_PUMP23\\,chip\\=?@) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "GROUP_PUMP_RETRY_RATIO_P01", + "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_GROUP_PUMP01\\,chip\\=?@) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + 
}, + { + "MetricName": "GROUP_PUMP_RETRY_RATIO_P23", + "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_GROUP_PUMP23\\,chip\\=?@) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_GROUP_PUMPS_P01", + "MetricExpr": "(hv_24x7@PM_PB_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_GROUP_PUMPS_P23", + "MetricExpr": "(hv_24x7@PM_PB_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_GROUP_PUMPS_RETRIES_P01", + "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_GROUP_PUMPS_RETRIES_P23", + "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P01", + "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P23", + "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_P01", + "MetricExpr": "(hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_P23", + "MetricExpr": "(hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_LOCAL_NODE_PUMPS_P01", + "MetricExpr": "(hv_24x7@PM_PB_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_LOCAL_NODE_PUMPS_P23", + "MetricExpr": "(hv_24x7@PM_PB_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_RETRIES_P01", + "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_RETRIES_P23", + "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_LOCAL_NODE_PUMPS_RETRIES_P01", + "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_LOCAL_NODE_PUMPS_RETRIES_P23", + "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_REMOTE_NODE_PUMPS_P01", + "MetricExpr": "(hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_REMOTE_NODE_PUMPS_P23", + "MetricExpr": "(hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_NEAR_NODE_PUMPS_P01", + 
"MetricExpr": "(hv_24x7@PM_PB_NNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_NEAR_NODE_PUMPS_P23", + "MetricExpr": "(hv_24x7@PM_PB_NNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)", + "ScaleUnit": "4", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_INT_PB_BW", + "MetricExpr": "(hv_24x7@PM_PB_INT_DATA_XFER\\,chip\\=?@)", + "ScaleUnit": "2.09MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK0_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK1_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK2_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK3_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK4_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK5_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK6_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK7_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK0_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + 
hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK1_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK2_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK3_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK4_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK5_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK6_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "XLINK7_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK0_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK1_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK2_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + 
"MetricName": "ALINK3_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK4_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK5_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK6_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK7_OUT_TOTAL_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK0_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK1_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK2_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK3_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK4_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK5_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_DATA\\,chip\\=?@ + 
hv_24x7@PM_ALINK5_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK6_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "ALINK7_OUT_DATA_UTILIZATION", + "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100", + "ScaleUnit": "1.063%", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_DATA_BANDWIDTH_TRANSFERRED_OVER_PB_PCI1", + "MetricExpr": "(hv_24x7@PM_PCI1_32B_INOUT\\,chip\\=?@)", + "ScaleUnit": "3.28e-2MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_DATA_BANDWIDTH_TRANSFERRED_OVER_PB_PCI0", + "MetricExpr": "(hv_24x7@PM_PCI0_32B_INOUT\\,chip\\=?@)", + "ScaleUnit": "3.28e-2MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_MCS_READ_BW_MC0_CHAN01", + "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@)", + "ScaleUnit": "5.24e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_MCS_READ_BW_MC1_CHAN01", + "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@)", + "ScaleUnit": "5.24e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_MCS_READ_BW_MC2_CHAN01", + "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@)", + "ScaleUnit": "5.24e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_MCS_READ_BW_MC3_CHAN01", + "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)", + "ScaleUnit": "5.24e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_MCS_WRITE_BW_MC0_CHAN01", + "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@)", + "ScaleUnit": "2.6e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_MCS_WRITE_BW_MC1_CHAN01", + "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@)", + "ScaleUnit": "2.6e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_MCS_WRITE_BW_MC2_CHAN01", + "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@)", + "ScaleUnit": "2.6e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "TOTAL_MCS_WRITE_BW_MC3_CHAN01", + "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)", + "ScaleUnit": "2.6e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "Memory_RD_BW_Chip", + "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)", + "MetricGroup": "Memory_BW", + "ScaleUnit": "5.24e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "Memory_WR_BW_Chip", + "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@ )", + "MetricGroup": 
"Memory_BW", + "ScaleUnit": "2.6e-1MB", + "AggregationMode": "PerChip" + }, + { + "MetricName": "PowerBUS_Frequency", + "MetricExpr": "(hv_24x7@PM_PAU_CYC\\,chip\\=?@ )", + "ScaleUnit": "2.56e-7GHz", + "AggregationMode": "PerChip" + } +] diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c index c72adbd67386..33bda9c26542 100644 --- a/tools/perf/tests/bpf.c +++ b/tools/perf/tests/bpf.c @@ -151,7 +151,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void), } evlist__splice_list_tail(evlist, &parse_state.list); - evlist->nr_groups = parse_state.nr_groups; + evlist->core.nr_groups = parse_state.nr_groups; evlist__config(evlist, &opts, NULL); diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 41e3cf6bb66c..5e6242576236 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -26,6 +26,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <subcmd/exec-cmd.h> +#include <linux/zalloc.h> static bool dont_fork; @@ -540,7 +541,7 @@ static int shell_tests__max_desc_width(void) { struct dirent **entlist; struct dirent *ent; - int n_dirs; + int n_dirs, e; char path_dir[PATH_MAX]; const char *path = shell_tests__dir(path_dir, sizeof(path_dir)); int width = 0; @@ -564,8 +565,9 @@ static int shell_tests__max_desc_width(void) } } + for (e = 0; e < n_dirs; e++) + zfree(&entlist[e]); free(entlist); - return width; } @@ -596,7 +598,7 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width) { struct dirent **entlist; struct dirent *ent; - int n_dirs; + int n_dirs, e; char path_dir[PATH_MAX]; struct shell_test st = { .dir = shell_tests__dir(path_dir, sizeof(path_dir)), @@ -629,6 +631,8 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width) test_and_print(&test, false, -1); } + for (e = 0; e < n_dirs; e++) + zfree(&entlist[e]); free(entlist); return 0; } @@ -730,7 +734,7 @@ static int perf_test__list_shell(int argc, const char **argv, int i) { struct dirent **entlist; struct dirent *ent; - int n_dirs; + int n_dirs, e; char path_dir[PATH_MAX]; const char *path = shell_tests__dir(path_dir, sizeof(path_dir)); @@ -752,8 +756,11 @@ static int perf_test__list_shell(int argc, const char **argv, int i) continue; pr_info("%2d: %s\n", i, t.desc); + } + for (e = 0; e < n_dirs; e++) + zfree(&entlist[e]); free(entlist); return 0; } diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c index b74cf80d1f10..5ebf56331904 100644 --- a/tools/perf/tests/evsel-roundtrip-name.c +++ b/tools/perf/tests/evsel-roundtrip-name.c @@ -44,7 +44,7 @@ static int perf_evsel__roundtrip_cache_name_test(void) for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name)); - if (evsel->idx != idx) + if (evsel->core.idx != idx) continue; ++idx; @@ -84,9 +84,9 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names, err = 0; evlist__for_each_entry(evlist, evsel) { - if (strcmp(evsel__name(evsel), names[evsel->idx / distance])) { + if (strcmp(evsel__name(evsel), names[evsel->core.idx / distance])) { --err; - pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->idx / distance]); + pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->core.idx / distance]); } } diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index 73ae8f7aa066..d38757db2dc2 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -139,7 +139,7 @@ int test__basic_mmap(struct 
test *test __maybe_unused, int subtest __maybe_unuse " doesn't map to an evsel\n", sample.id); goto out_delete_evlist; } - nr_events[evsel->idx]++; + nr_events[evsel->core.idx]++; perf_mmap__consume(&md->core); } perf_mmap__read_done(&md->core); @@ -147,10 +147,10 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse out_init: err = 0; evlist__for_each_entry(evlist, evsel) { - if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { + if (nr_events[evsel->core.idx] != expected_nr_events[evsel->core.idx]) { pr_debug("expected %d %s events, got %d\n", - expected_nr_events[evsel->idx], - evsel__name(evsel), nr_events[evsel->idx]); + expected_nr_events[evsel->core.idx], + evsel__name(evsel), nr_events[evsel->core.idx]); err = -1; goto out_delete_evlist; } diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 0f113b2b36a3..56a7b6a14195 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -49,7 +49,7 @@ static int test__checkevent_tracepoint(struct evlist *evlist) struct evsel *evsel = evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->core.nr_groups); TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type); TEST_ASSERT_VAL("wrong sample_type", PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type); @@ -62,7 +62,7 @@ static int test__checkevent_tracepoint_multi(struct evlist *evlist) struct evsel *evsel; TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1); - TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->core.nr_groups); evlist__for_each_entry(evlist, evsel) { TEST_ASSERT_VAL("wrong type", @@ -668,7 +668,7 @@ static int test__group1(struct evlist *evlist) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups); /* instructions:k */ evsel = leader = evlist__first(evlist); @@ -698,7 +698,7 @@ static int test__group1(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); @@ -710,7 +710,7 @@ static int test__group2(struct evlist *evlist) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups); /* faults + :ku modifier */ evsel = leader = evlist__first(evlist); @@ -739,7 +739,7 @@ static int test__group2(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong 
leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); @@ -765,7 +765,7 @@ static int test__group3(struct evlist *evlist __maybe_unused) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->core.nr_groups); /* group1 syscalls:sys_enter_openat:H */ evsel = leader = evlist__first(evlist); @@ -798,7 +798,7 @@ static int test__group3(struct evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 3); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); @@ -831,7 +831,7 @@ static int test__group3(struct evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); @@ -857,7 +857,7 @@ static int test__group4(struct evlist *evlist __maybe_unused) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups); /* cycles:u + p */ evsel = leader = evlist__first(evlist); @@ -889,7 +889,7 @@ static int test__group4(struct evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); @@ -901,7 +901,7 @@ static int test__group5(struct evlist *evlist __maybe_unused) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->core.nr_groups); /* cycles + G */ evsel = leader = evlist__first(evlist); @@ -931,7 +931,7 @@ static int test__group5(struct evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); 
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); @@ -963,7 +963,7 @@ static int test__group5(struct evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); /* cycles */ @@ -987,7 +987,7 @@ static int test__group_gh1(struct evlist *evlist) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups); /* cycles + :H group modifier */ evsel = leader = evlist__first(evlist); @@ -1016,7 +1016,7 @@ static int test__group_gh1(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); return 0; @@ -1027,7 +1027,7 @@ static int test__group_gh2(struct evlist *evlist) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups); /* cycles + :G group modifier */ evsel = leader = evlist__first(evlist); @@ -1056,7 +1056,7 @@ static int test__group_gh2(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); return 0; @@ -1067,7 +1067,7 @@ static int test__group_gh3(struct evlist *evlist) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups); /* cycles:G + :u group modifier */ evsel = leader = evlist__first(evlist); @@ -1096,7 +1096,7 @@ static int test__group_gh3(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); return 0; @@ -1107,7 +1107,7 @@ static int test__group_gh4(struct evlist *evlist) struct evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); - TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups); /* cycles:G + :uG group modifier */ 
evsel = leader = evlist__first(evlist); @@ -1136,7 +1136,7 @@ static int test__group_gh4(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1); return 0; @@ -1160,7 +1160,7 @@ static int test__leader_sample1(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); /* cache-misses - not sampling */ @@ -1174,7 +1174,7 @@ static int test__leader_sample1(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); /* branch-misses - not sampling */ @@ -1189,7 +1189,7 @@ static int test__leader_sample1(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); return 0; @@ -1213,7 +1213,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); /* branch-misses - not sampling */ @@ -1228,7 +1228,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); return 0; @@ -1259,7 +1259,7 @@ static int test__pinned_group(struct evlist *evlist) TEST_ASSERT_VAL("wrong config", PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned); /* cache-misses - can not be pinned, but will go on with the leader */ @@ -1303,7 +1303,7 @@ static int test__exclusive_group(struct evlist *evlist) 
TEST_ASSERT_VAL("wrong config", PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive); /* cache-misses - can not be pinned, but will go on with the leader */ @@ -1530,12 +1530,12 @@ static int test__hybrid_hw_group_event(struct evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); evsel = evsel__next(evsel); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); return 0; } @@ -1546,12 +1546,12 @@ static int test__hybrid_sw_hw_group_event(struct evlist *evlist) evsel = leader = evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); evsel = evsel__next(evsel); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); return 0; } @@ -1563,11 +1563,11 @@ static int test__hybrid_hw_sw_group_event(struct evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); evsel = evsel__next(evsel); TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); return 0; } @@ -1579,14 +1579,14 @@ static int test__hybrid_group_modifier1(struct evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); evsel = evsel__next(evsel); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config); - TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); return 0; diff 
--git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c index acd50944f6af..e8fd0da0762b 100644 --- a/tools/perf/tests/pfm.c +++ b/tools/perf/tests/pfm.c @@ -96,7 +96,7 @@ static int test__pfm_events(void) count_pfm_events(&evlist->core), table[i].nr_events); TEST_ASSERT_EQUAL(table[i].events, - evlist->nr_groups, + evlist->core.nr_groups, 0); evlist__delete(evlist); @@ -180,7 +180,7 @@ static int test__pfm_group(void) count_pfm_events(&evlist->core), table[i].nr_events); TEST_ASSERT_EQUAL(table[i].events, - evlist->nr_groups, + evlist->core.nr_groups, table[i].nr_groups); evlist__delete(evlist); diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index f5509a958e38..701130ad43a2 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -350,14 +350,14 @@ static struct annotation_line *annotate_browser__find_next_asm_line( struct annotation_line *it = al; /* find next asm line */ - list_for_each_entry_continue(it, browser->b.top, node) { + list_for_each_entry_continue(it, browser->b.entries, node) { if (it->idx_asm >= 0) return it; } /* no asm line found forwards, try backwards */ it = al; - list_for_each_entry_continue_reverse(it, browser->b.top, node) { + list_for_each_entry_continue_reverse(it, browser->b.entries, node) { if (it->idx_asm >= 0) return it; } @@ -749,7 +749,7 @@ static int annotate_browser__run(struct annotate_browser *browser, hbt->timer(hbt->arg); if (delay_secs != 0) { - symbol__annotate_decay_histogram(sym, evsel->idx); + symbol__annotate_decay_histogram(sym, evsel->core.idx); hists__scnprintf_title(hists, title, sizeof(title)); annotate_browser__show(&browser->b, title, help); } diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c index a7dff77f2018..94167bfed722 100644 --- a/tools/perf/ui/gtk/annotate.c +++ b/tools/perf/ui/gtk/annotate.c @@ -135,12 +135,12 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms, ret += perf_gtk__get_percent(s + ret, sizeof(s) - ret, sym, pos, - evsel->idx + i); + evsel->core.idx + i); ret += scnprintf(s + ret, sizeof(s) - ret, " "); } } else { ret = perf_gtk__get_percent(s, sizeof(s), sym, pos, - evsel->idx); + evsel->core.idx); } if (ret) diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 1a909b53dc15..2d4fa1304178 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -141,6 +141,7 @@ perf-y += clockid.o perf-$(CONFIG_LIBBPF) += bpf-loader.o perf-$(CONFIG_LIBBPF) += bpf_map.o perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o +perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o perf-$(CONFIG_LIBELF) += symbol-elf.o perf-$(CONFIG_LIBELF) += probe-file.o diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index abe1499a9164..aa04a3655236 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -961,7 +961,7 @@ static int symbol__inc_addr_samples(struct map_symbol *ms, if (sym == NULL) return 0; src = symbol__hists(sym, evsel->evlist->core.nr_entries); - return src ? __symbol__inc_addr_samples(ms, src, evsel->idx, addr, sample) : 0; + return src ? 
__symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0; } static int symbol__account_cycles(u64 addr, u64 start, @@ -2159,7 +2159,7 @@ static void annotation__calc_percent(struct annotation *notes, BUG_ON(i >= al->data_nr); - sym_hist = annotation__histogram(notes, evsel->idx); + sym_hist = annotation__histogram(notes, evsel->core.idx); data = &al->data[i++]; calc_percent(sym_hist, hists, data, al->offset, end); @@ -2340,7 +2340,7 @@ static void print_summary(struct rb_root *root, const char *filename) static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel) { struct annotation *notes = symbol__annotation(sym); - struct sym_hist *h = annotation__histogram(notes, evsel->idx); + struct sym_hist *h = annotation__histogram(notes, evsel->core.idx); u64 len = symbol__size(sym), offset; for (offset = 0; offset < len; ++offset) @@ -2373,7 +2373,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel, const char *d_filename; const char *evsel_name = evsel__name(evsel); struct annotation *notes = symbol__annotation(sym); - struct sym_hist *h = annotation__histogram(notes, evsel->idx); + struct sym_hist *h = annotation__histogram(notes, evsel->core.idx); struct annotation_line *pos, *queue = NULL; u64 start = map__rip_2objdump(map, sym->start); int printed = 2, queue_len = 0, addr_fmt_width; diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 9350eeb3a3fc..cb19669d2a5b 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -73,8 +73,8 @@ static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct e grp = false; evlist__for_each_entry(evlist, evsel) { if (grp) { - if (!(evsel->leader == leader || - (evsel->leader == evsel && + if (!(evsel__leader(evsel) == leader || + (evsel__leader(evsel) == evsel && evsel->core.nr_members <= 1))) return -EINVAL; } else if (evsel == leader) { @@ -87,8 +87,8 @@ static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct e grp = false; evlist__for_each_entry(evlist, evsel) { if (grp) { - if (evsel->leader != leader) { - evsel->leader = leader; + if (!evsel__has_leader(evsel, leader)) { + evsel__set_leader(evsel, leader); if (leader->core.nr_members < 1) leader->core.nr_members = 1; leader->core.nr_members += 1; @@ -1231,11 +1231,11 @@ static void unleader_evsel(struct evlist *evlist, struct evsel *leader) /* Find new leader for the group */ evlist__for_each_entry(evlist, evsel) { - if (evsel->leader != leader || evsel == leader) + if (!evsel__has_leader(evsel, leader) || evsel == leader) continue; if (!new_leader) new_leader = evsel; - evsel->leader = new_leader; + evsel__set_leader(evsel, new_leader); } /* Update group information */ diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c index 21c8e71162b1..8150e03367bb 100644 --- a/tools/perf/util/bpf_counter.c +++ b/tools/perf/util/bpf_counter.c @@ -18,6 +18,7 @@ #include "evsel.h" #include "evlist.h" #include "target.h" +#include "cgroup.h" #include "cpumap.h" #include "thread_map.h" @@ -352,7 +353,7 @@ static int bperf_check_target(struct evsel *evsel, enum bperf_filter_type *filter_type, __u32 *filter_entry_cnt) { - if (evsel->leader->core.nr_members > 1) { + if (evsel->core.leader->nr_members > 1) { pr_err("bpf managed perf events do not yet support groups.\n"); return -1; } @@ -742,6 +743,8 @@ struct bpf_counter_ops bperf_ops = { .destroy = bperf__destroy, }; +extern struct bpf_counter_ops bperf_cgrp_ops; + static inline bool bpf_counter_skip(struct evsel 
*evsel) { return list_empty(&evsel->bpf_counter_list) && @@ -759,6 +762,8 @@ int bpf_counter__load(struct evsel *evsel, struct target *target) { if (target->bpf_str) evsel->bpf_counter_ops = &bpf_program_profiler_ops; + else if (cgrp_event_expanded && target->use_bpf) + evsel->bpf_counter_ops = &bperf_cgrp_ops; else if (target->use_bpf || evsel->bpf_counter || evsel__match_bpf_counter_events(evsel->name)) evsel->bpf_counter_ops = &bperf_ops; diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c new file mode 100644 index 000000000000..89aa5e71db1a --- /dev/null +++ b/tools/perf/util/bpf_counter_cgroup.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2021 Facebook */ +/* Copyright (c) 2021 Google */ + +#include <assert.h> +#include <limits.h> +#include <unistd.h> +#include <sys/file.h> +#include <sys/time.h> +#include <sys/resource.h> +#include <linux/err.h> +#include <linux/zalloc.h> +#include <linux/perf_event.h> +#include <api/fs/fs.h> +#include <perf/bpf_perf.h> + +#include "affinity.h" +#include "bpf_counter.h" +#include "cgroup.h" +#include "counts.h" +#include "debug.h" +#include "evsel.h" +#include "evlist.h" +#include "target.h" +#include "cpumap.h" +#include "thread_map.h" + +#include "bpf_skel/bperf_cgroup.skel.h" + +static struct perf_event_attr cgrp_switch_attr = { + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CGROUP_SWITCHES, + .size = sizeof(cgrp_switch_attr), + .sample_period = 1, + .disabled = 1, +}; + +static struct evsel *cgrp_switch; +static struct bperf_cgroup_bpf *skel; + +#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0)) + +static int bperf_load_program(struct evlist *evlist) +{ + struct bpf_link *link; + struct evsel *evsel; + struct cgroup *cgrp, *leader_cgrp; + __u32 i, cpu; + __u32 nr_cpus = evlist->core.all_cpus->nr; + int total_cpus = cpu__max_cpu(); + int map_size, map_fd; + int prog_fd, err; + + skel = bperf_cgroup_bpf__open(); + if (!skel) { + pr_err("Failed to open cgroup skeleton\n"); + return -1; + } + + skel->rodata->num_cpus = total_cpus; + skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups; + + BUG_ON(evlist->core.nr_entries % nr_cgroups != 0); + + /* we need one copy of events per cpu for reading */ + map_size = total_cpus * evlist->core.nr_entries / nr_cgroups; + bpf_map__resize(skel->maps.events, map_size); + bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups); + /* previous result is saved in a per-cpu array */ + map_size = evlist->core.nr_entries / nr_cgroups; + bpf_map__resize(skel->maps.prev_readings, map_size); + /* cgroup result needs all events (per-cpu) */ + map_size = evlist->core.nr_entries; + bpf_map__resize(skel->maps.cgrp_readings, map_size); + + set_max_rlimit(); + + err = bperf_cgroup_bpf__load(skel); + if (err) { + pr_err("Failed to load cgroup skeleton\n"); + goto out; + } + + if (cgroup_is_v2("perf_event") > 0) + skel->bss->use_cgroup_v2 = 1; + + err = -1; + + cgrp_switch = evsel__new(&cgrp_switch_attr); + if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) { + pr_err("Failed to open cgroup switches event\n"); + goto out; + } + + for (i = 0; i < nr_cpus; i++) { + link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch, + FD(cgrp_switch, i)); + if (IS_ERR(link)) { + pr_err("Failed to attach cgroup program\n"); + err = PTR_ERR(link); + goto out; + } + } + + /* + * Update cgrp_idx map from cgroup-id to event index. 
+ */ + cgrp = NULL; + i = 0; + + evlist__for_each_entry(evlist, evsel) { + if (cgrp == NULL || evsel->cgrp == leader_cgrp) { + leader_cgrp = evsel->cgrp; + evsel->cgrp = NULL; + + /* open single copy of the events w/o cgroup */ + err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1); + if (err) { + pr_err("Failed to open first cgroup events\n"); + goto out; + } + + map_fd = bpf_map__fd(skel->maps.events); + for (cpu = 0; cpu < nr_cpus; cpu++) { + int fd = FD(evsel, cpu); + __u32 idx = evsel->core.idx * total_cpus + + evlist->core.all_cpus->map[cpu]; + + err = bpf_map_update_elem(map_fd, &idx, &fd, + BPF_ANY); + if (err < 0) { + pr_err("Failed to update perf_event fd\n"); + goto out; + } + } + + evsel->cgrp = leader_cgrp; + } + evsel->supported = true; + + if (evsel->cgrp == cgrp) + continue; + + cgrp = evsel->cgrp; + + if (read_cgroup_id(cgrp) < 0) { + pr_err("Failed to get cgroup id\n"); + err = -1; + goto out; + } + + map_fd = bpf_map__fd(skel->maps.cgrp_idx); + err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY); + if (err < 0) { + pr_err("Failed to update cgroup index map\n"); + goto out; + } + + i++; + } + + /* + * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check + * whether the kernel supports it + */ + prog_fd = bpf_program__fd(skel->progs.trigger_read); + err = bperf_trigger_reading(prog_fd, 0); + if (err) { + pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n" + "Therefore, --for-each-cgroup might show inaccurate readings\n"); + err = 0; + } + +out: + return err; +} + +static int bperf_cgrp__load(struct evsel *evsel, + struct target *target __maybe_unused) +{ + static bool bperf_loaded = false; + + evsel->bperf_leader_prog_fd = -1; + evsel->bperf_leader_link_fd = -1; + + if (!bperf_loaded && bperf_load_program(evsel->evlist)) + return -1; + + bperf_loaded = true; + /* just to bypass bpf_counter_skip() */ + evsel->follower_skel = (struct bperf_follower_bpf *)skel; + + return 0; +} + +static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused, + int cpu __maybe_unused, int fd __maybe_unused) +{ + /* nothing to do */ + return 0; +} + +/* + * trigger the leader prog on each cpu, so the cgrp_readings map could get + * the latest results.
+ */ +static int bperf_cgrp__sync_counters(struct evlist *evlist) +{ + int i, cpu; + int nr_cpus = evlist->core.all_cpus->nr; + int prog_fd = bpf_program__fd(skel->progs.trigger_read); + + for (i = 0; i < nr_cpus; i++) { + cpu = evlist->core.all_cpus->map[i]; + bperf_trigger_reading(prog_fd, cpu); + } + + return 0; +} + +static int bperf_cgrp__enable(struct evsel *evsel) +{ + if (evsel->core.idx) + return 0; + + bperf_cgrp__sync_counters(evsel->evlist); + + skel->bss->enabled = 1; + return 0; +} + +static int bperf_cgrp__disable(struct evsel *evsel) +{ + if (evsel->core.idx) + return 0; + + bperf_cgrp__sync_counters(evsel->evlist); + + skel->bss->enabled = 0; + return 0; +} + +static int bperf_cgrp__read(struct evsel *evsel) +{ + struct evlist *evlist = evsel->evlist; + int i, cpu, nr_cpus = evlist->core.all_cpus->nr; + int total_cpus = cpu__max_cpu(); + struct perf_counts_values *counts; + struct bpf_perf_event_value *values; + int reading_map_fd, err = 0; + __u32 idx; + + if (evsel->core.idx) + return 0; + + bperf_cgrp__sync_counters(evsel->evlist); + + values = calloc(total_cpus, sizeof(*values)); + if (values == NULL) + return -ENOMEM; + + reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings); + + evlist__for_each_entry(evlist, evsel) { + idx = evsel->core.idx; + err = bpf_map_lookup_elem(reading_map_fd, &idx, values); + if (err) { + pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n", + idx, evsel__name(evsel), evsel->cgrp->name); + goto out; + } + + for (i = 0; i < nr_cpus; i++) { + cpu = evlist->core.all_cpus->map[i]; + + counts = perf_counts(evsel->counts, i, 0); + counts->val = values[cpu].counter; + counts->ena = values[cpu].enabled; + counts->run = values[cpu].running; + } + } + +out: + free(values); + return err; +} + +static int bperf_cgrp__destroy(struct evsel *evsel) +{ + if (evsel->core.idx) + return 0; + + bperf_cgroup_bpf__destroy(skel); + evsel__delete(cgrp_switch); // it'll destroy on_switch progs too + + return 0; +} + +struct bpf_counter_ops bperf_cgrp_ops = { + .load = bperf_cgrp__load, + .enable = bperf_cgrp__enable, + .disable = bperf_cgrp__disable, + .read = bperf_cgrp__read, + .install_pe = bperf_cgrp__install_pe, + .destroy = bperf_cgrp__destroy, +}; diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c new file mode 100644 index 000000000000..292c430768b5 --- /dev/null +++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +// Copyright (c) 2021 Facebook +// Copyright (c) 2021 Google +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> + +#define MAX_LEVELS 10 // max cgroup hierarchy level: arbitrary +#define MAX_EVENTS 32 // max events per cgroup: arbitrary + +// NOTE: many of the maps and global data will be modified before loading +// from the userspace (perf tool) using the skeleton helpers.
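The maps declared just below are sized from user space in bperf_load_program() above; both the "events" and "cgrp_readings" arrays flatten a two-dimensional index into a scalar key. A minimal sketch of that key scheme, with assumed example sizes (NUM_EVENTS and NUM_CPUS here are illustrative stand-ins for the skeleton's num_events and num_cpus constants, not values from the patch):

#include <stdio.h>

#define NUM_EVENTS 4	/* assumed: events per cgroup (num_events) */
#define NUM_CPUS   8	/* assumed: cpu__max_cpu() (num_cpus) */

/* events map: one perf_event fd per (event, cpu) pair */
static unsigned int events_key(unsigned int evt, unsigned int cpu)
{
	return evt * NUM_CPUS + cpu;
}

/* cgrp_readings map: one accumulated value per (cgroup, event) pair */
static unsigned int cgrp_readings_key(unsigned int cgrp, unsigned int evt)
{
	return cgrp * NUM_EVENTS + evt;
}

int main(void)
{
	/* event 2 on cpu 5 -> slot 21; event 2 of cgroup 3 -> slot 14 */
	printf("%u %u\n", events_key(2, 5), cgrp_readings_key(3, 2));
	return 0;
}

This mirrors the lookups in bperf_cgroup_count() below ("key = idx * num_cpus + cpu" and "key = cgrp * num_events + idx") and the fd updates in bperf_load_program() above.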
+ +// single set of global perf events to measure +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(int)); + __uint(max_entries, 1); +} events SEC(".maps"); + +// from cgroup id to event index +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, sizeof(__u64)); + __uint(value_size, sizeof(__u32)); + __uint(max_entries, 1); +} cgrp_idx SEC(".maps"); + +// per-cpu event snapshots to calculate delta +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); +} prev_readings SEC(".maps"); + +// aggregated event values for each cgroup (per-cpu) +// will be read from the user-space +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); +} cgrp_readings SEC(".maps"); + +const volatile __u32 num_events = 1; +const volatile __u32 num_cpus = 1; + +int enabled = 0; +int use_cgroup_v2 = 0; + +static inline int get_cgroup_v1_idx(__u32 *cgrps, int size) +{ + struct task_struct *p = (void *)bpf_get_current_task(); + struct cgroup *cgrp; + register int i = 0; + __u32 *elem; + int level; + int cnt; + + cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_event_cgrp_id], cgroup); + level = BPF_CORE_READ(cgrp, level); + + for (cnt = 0; i < MAX_LEVELS; i++) { + __u64 cgrp_id; + + if (i > level) + break; + + // convert cgroup-id to a map index + cgrp_id = BPF_CORE_READ(cgrp, ancestor_ids[i]); + elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id); + if (!elem) + continue; + + cgrps[cnt++] = *elem; + if (cnt == size) + break; + } + + return cnt; +} + +static inline int get_cgroup_v2_idx(__u32 *cgrps, int size) +{ + register int i = 0; + __u32 *elem; + int cnt; + + for (cnt = 0; i < MAX_LEVELS; i++) { + __u64 cgrp_id = bpf_get_current_ancestor_cgroup_id(i); + + if (cgrp_id == 0) + break; + + // convert cgroup-id to a map index + elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id); + if (!elem) + continue; + + cgrps[cnt++] = *elem; + if (cnt == size) + break; + } + + return cnt; +} + +static int bperf_cgroup_count(void) +{ + register __u32 idx = 0; // to have it in a register to pass BPF verifier + register int c = 0; + struct bpf_perf_event_value val, delta, *prev_val, *cgrp_val; + __u32 cpu = bpf_get_smp_processor_id(); + __u32 cgrp_idx[MAX_LEVELS]; + int cgrp_cnt; + __u32 key, cgrp; + long err; + + if (use_cgroup_v2) + cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, MAX_LEVELS); + else + cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, MAX_LEVELS); + + for ( ; idx < MAX_EVENTS; idx++) { + if (idx == num_events) + break; + + // XXX: do not pass idx directly (for verifier) + key = idx; + // this is per-cpu array for diff + prev_val = bpf_map_lookup_elem(&prev_readings, &key); + if (!prev_val) { + val.counter = val.enabled = val.running = 0; + bpf_map_update_elem(&prev_readings, &key, &val, BPF_ANY); + + prev_val = bpf_map_lookup_elem(&prev_readings, &key); + if (!prev_val) + continue; + } + + // read from global perf_event array + key = idx * num_cpus + cpu; + err = bpf_perf_event_read_value(&events, key, &val, sizeof(val)); + if (err) + continue; + + if (enabled) { + delta.counter = val.counter - prev_val->counter; + delta.enabled = val.enabled - prev_val->enabled; + delta.running = val.running - prev_val->running; + + for (c = 0; c < MAX_LEVELS; c++) { + if (c == cgrp_cnt) + break; + + cgrp = cgrp_idx[c]; + + // aggregate the result by cgroup + key = cgrp * num_events + idx; + cgrp_val = 
bpf_map_lookup_elem(&cgrp_readings, &key); + if (cgrp_val) { + cgrp_val->counter += delta.counter; + cgrp_val->enabled += delta.enabled; + cgrp_val->running += delta.running; + } else { + bpf_map_update_elem(&cgrp_readings, &key, + &delta, BPF_ANY); + } + } + } + + *prev_val = val; + } + return 0; +} + +// This will be attached to cgroup-switches event for each cpu +SEC("perf_events") +int BPF_PROG(on_cgrp_switch) +{ + return bperf_cgroup_count(); +} + +SEC("raw_tp/sched_switch") +int BPF_PROG(trigger_read) +{ + return bperf_cgroup_count(); +} + +char LICENSE[] SEC("license") = "Dual BSD/GPL"; diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index e819a4f30fc2..e99b41f9be45 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c @@ -18,6 +18,7 @@ #include <regex.h> int nr_cgroups; +bool cgrp_event_expanded; /* used to match cgroup name with patterns */ struct cgroup_name { @@ -458,7 +459,7 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str, if (evsel__is_group_leader(pos)) leader = evsel; - evsel->leader = leader; + evsel__set_leader(evsel, leader); evlist__add(tmp_list, evsel); } @@ -484,6 +485,7 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str, } ret = 0; + cgrp_event_expanded = true; out_err: evlist__delete(orig_list); diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h index de5b272560ab..12256b78608c 100644 --- a/tools/perf/util/cgroup.h +++ b/tools/perf/util/cgroup.h @@ -18,6 +18,7 @@ struct cgroup { }; extern int nr_cgroups; /* number of explicit cgroups defined */ +extern bool cgrp_event_expanded; struct cgroup *cgroup__get(struct cgroup *cgroup); void cgroup__put(struct cgroup *cgroup); diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 6ba9664089bd..47581a237c7a 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -165,11 +165,9 @@ void evlist__delete(struct evlist *evlist) void evlist__add(struct evlist *evlist, struct evsel *entry) { - entry->evlist = evlist; - entry->idx = evlist->core.nr_entries; - entry->tracking = !entry->idx; - perf_evlist__add(&evlist->core, &entry->core); + entry->evlist = evlist; + entry->tracking = !entry->core.idx; if (evlist->core.nr_entries == 1) evlist__set_id_pos(evlist); @@ -194,7 +192,7 @@ void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list) } __evlist__for_each_entry_safe(list, temp, evsel) { - if (evsel->leader == leader) { + if (evsel__has_leader(evsel, leader)) { list_del_init(&evsel->core.node); evlist__add(evlist, evsel); } @@ -225,26 +223,9 @@ out: return err; } -void __evlist__set_leader(struct list_head *list) -{ - struct evsel *evsel, *leader; - - leader = list_entry(list->next, struct evsel, core.node); - evsel = list_entry(list->prev, struct evsel, core.node); - - leader->core.nr_members = evsel->idx - leader->idx + 1; - - __evlist__for_each_entry(list, evsel) { - evsel->leader = leader; - } -} - void evlist__set_leader(struct evlist *evlist) { - if (evlist->core.nr_entries) { - evlist->nr_groups = evlist->core.nr_entries > 1 ? 
1 : 0; - __evlist__set_leader(&evlist->core.entries); - } + perf_evlist__set_leader(&evlist->core); } int __evlist__add_default(struct evlist *evlist, bool precise) @@ -1626,7 +1607,7 @@ void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel) return; evlist__for_each_entry_safe(evlist, n, evsel) { - if (evsel->leader == move_evsel->leader) + if (evsel__leader(evsel) == evsel__leader(move_evsel)) list_move_tail(&evsel->core.node, &move); } @@ -1750,7 +1731,7 @@ bool evlist__exclude_kernel(struct evlist *evlist) */ void evlist__force_leader(struct evlist *evlist) { - if (!evlist->nr_groups) { + if (!evlist->core.nr_groups) { struct evsel *leader = evlist__first(evlist); evlist__set_leader(evlist); @@ -1763,7 +1744,8 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel * struct evsel *c2, *leader; bool is_open = true; - leader = evsel->leader; + leader = evsel__leader(evsel); + pr_debug("Weak group for %s/%d failed\n", leader->name, leader->core.nr_members); @@ -1774,10 +1756,10 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel * evlist__for_each_entry(evsel_list, c2) { if (c2 == evsel) is_open = false; - if (c2->leader == leader) { + if (evsel__has_leader(c2, leader)) { if (is_open && close) perf_evsel__close(&c2->core); - c2->leader = c2; + evsel__set_leader(c2, c2); c2->core.nr_members = 0; /* * Set this for all former members of the group @@ -2137,7 +2119,7 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx) struct evsel *evsel; evlist__for_each_entry(evlist, evsel) { - if (evsel->idx == idx) + if (evsel->core.idx == idx) return evsel; } return NULL; @@ -2174,13 +2156,13 @@ void evlist__check_mem_load_aux(struct evlist *evlist) * any valid memory load information. 
*/ evlist__for_each_entry(evlist, evsel) { - leader = evsel->leader; + leader = evsel__leader(evsel); if (leader == evsel) continue; if (leader->name && strstr(leader->name, "mem-loads-aux")) { for_each_group_evsel(pos, leader) { - pos->leader = pos; + evsel__set_leader(pos, pos); pos->core.nr_members = 0; } } diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 2073cfa79f79..5c22383489ae 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -50,7 +50,6 @@ enum bkw_mmap_state { struct evlist { struct perf_evlist core; - int nr_groups; bool enabled; int id_pos; int is_pos; @@ -202,7 +201,6 @@ void evlist__set_selected(struct evlist *evlist, struct evsel *evsel); int evlist__create_maps(struct evlist *evlist, struct target *target); int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel); -void __evlist__set_leader(struct list_head *list); void evlist__set_leader(struct evlist *evlist); u64 __evlist__combined_sample_type(struct evlist *evlist); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index b1c930eca40f..f61e5dd53f5d 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -239,10 +239,8 @@ bool evsel__is_function_event(struct evsel *evsel) void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx) { - perf_evsel__init(&evsel->core, attr); - evsel->idx = idx; + perf_evsel__init(&evsel->core, attr, idx); evsel->tracking = !idx; - evsel->leader = evsel; evsel->unit = ""; evsel->scale = 1.0; evsel->max_events = ULONG_MAX; @@ -410,7 +408,7 @@ struct evsel *evsel__clone(struct evsel *orig) evsel->cgrp = cgroup__get(orig->cgrp); evsel->tp_format = orig->tp_format; evsel->handler = orig->handler; - evsel->leader = orig->leader; + evsel->core.leader = orig->core.leader; evsel->max_events = orig->max_events; evsel->tool_event = orig->tool_event; @@ -1075,7 +1073,7 @@ void __weak arch_evsel__set_sample_weight(struct evsel *evsel) void evsel__config(struct evsel *evsel, struct record_opts *opts, struct callchain_param *callchain) { - struct evsel *leader = evsel->leader; + struct evsel *leader = evsel__leader(evsel); struct perf_event_attr *attr = &evsel->core.attr; int track = evsel->tracking; bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread; @@ -1593,7 +1591,7 @@ static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other, static int evsel__hybrid_group_cpu(struct evsel *evsel, int cpu) { - struct evsel *leader = evsel->leader; + struct evsel *leader = evsel__leader(evsel); if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) || (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) { @@ -1605,7 +1603,7 @@ static int evsel__hybrid_group_cpu(struct evsel *evsel, int cpu) static int get_group_fd(struct evsel *evsel, int cpu, int thread) { - struct evsel *leader = evsel->leader; + struct evsel *leader = evsel__leader(evsel); int fd; if (evsel__is_group_leader(evsel)) @@ -2851,3 +2849,23 @@ bool evsel__is_hybrid(struct evsel *evsel) { return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name); } + +struct evsel *evsel__leader(struct evsel *evsel) +{ + return container_of(evsel->core.leader, struct evsel, core); +} + +bool evsel__has_leader(struct evsel *evsel, struct evsel *leader) +{ + return evsel->core.leader == &leader->core; +} + +bool evsel__is_leader(struct evsel *evsel) +{ + return evsel__has_leader(evsel, evsel); +} + +void evsel__set_leader(struct evsel *evsel, struct evsel *leader) +{ + evsel->core.leader = &leader->core; +} diff --git 
a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index bdad52a06438..80383096d51c 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -49,7 +49,6 @@ struct evsel { struct perf_evsel core; struct evlist *evlist; off_t id_offset; - int idx; int id_pos; int is_pos; unsigned int sample_size; @@ -119,7 +118,6 @@ struct evsel { bool reset_group; bool errored; struct hashmap *per_pkg_mask; - struct evsel *leader; int err; int cpu_iter; struct { @@ -368,7 +366,7 @@ static inline struct evsel *evsel__prev(struct evsel *evsel) */ static inline bool evsel__is_group_leader(const struct evsel *evsel) { - return evsel->leader == evsel; + return evsel->core.leader == &evsel->core; } /** @@ -406,19 +404,19 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target, static inline int evsel__group_idx(struct evsel *evsel) { - return evsel->idx - evsel->leader->idx; + return evsel->core.idx - evsel->core.leader->idx; } /* Iterates group WITHOUT the leader. */ #define for_each_group_member(_evsel, _leader) \ for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \ - (_evsel) && (_evsel)->leader == (_leader); \ + (_evsel) && (_evsel)->core.leader == (&_leader->core); \ (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node)) /* Iterates group WITH the leader. */ #define for_each_group_evsel(_evsel, _leader) \ for ((_evsel) = _leader; \ - (_evsel) && (_evsel)->leader == (_leader); \ + (_evsel) && (_evsel)->core.leader == (&_leader->core); \ (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node)) static inline bool evsel__has_branch_callstack(const struct evsel *evsel) @@ -463,4 +461,8 @@ int evsel__store_ids(struct evsel *evsel, struct evlist *evlist); void evsel__zero_per_pkg(struct evsel *evsel); bool evsel__is_hybrid(struct evsel *evsel); +struct evsel *evsel__leader(struct evsel *evsel); +bool evsel__has_leader(struct evsel *evsel, struct evsel *leader); +bool evsel__is_leader(struct evsel *evsel); +void evsel__set_leader(struct evsel *evsel, struct evsel *leader); #endif /* __PERF_EVSEL_H */ diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 0158d2945bab..44249027507a 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -778,7 +778,7 @@ static int write_pmu_mappings(struct feat_fd *ff, static int write_group_desc(struct feat_fd *ff, struct evlist *evlist) { - u32 nr_groups = evlist->nr_groups; + u32 nr_groups = evlist->core.nr_groups; struct evsel *evsel; int ret; @@ -789,7 +789,7 @@ static int write_group_desc(struct feat_fd *ff, evlist__for_each_entry(evlist, evsel) { if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) { const char *name = evsel->group_name ?: "{anon_group}"; - u32 leader_idx = evsel->idx; + u32 leader_idx = evsel->core.idx; u32 nr_members = evsel->core.nr_members; ret = do_write_string(ff, name); @@ -1844,7 +1844,7 @@ static struct evsel *read_event_desc(struct feat_fd *ff) msz = sz; for (i = 0, evsel = events; i < nre; evsel++, i++) { - evsel->idx = i; + evsel->core.idx = i; /* * must read entire on-file attr struct to @@ -2379,7 +2379,7 @@ static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx) struct evsel *evsel; evlist__for_each_entry(evlist, evsel) { - if (evsel->idx == idx) + if (evsel->core.idx == idx) return evsel; } @@ -2393,7 +2393,7 @@ static void evlist__set_event_name(struct evlist *evlist, struct evsel *event) if (!event->name) return; - evsel = evlist__find_by_index(evlist, event->idx); + evsel = 
evlist__find_by_index(evlist, event->core.idx); if (!evsel) return; @@ -2735,12 +2735,12 @@ static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) * Rebuild group relationship based on the group_desc */ session = container_of(ff->ph, struct perf_session, header); - session->evlist->nr_groups = nr_groups; + session->evlist->core.nr_groups = nr_groups; i = nr = 0; evlist__for_each_entry(session->evlist, evsel) { - if (evsel->idx == (int) desc[i].leader_idx) { - evsel->leader = evsel; + if (evsel->core.idx == (int) desc[i].leader_idx) { + evsel__set_leader(evsel, evsel); /* {anon_group} is a dummy name */ if (strcmp(desc[i].name, "{anon_group}")) { evsel->group_name = desc[i].name; @@ -2758,7 +2758,7 @@ static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) i++; } else if (nr) { /* This is a group member */ - evsel->leader = leader; + evsel__set_leader(evsel, leader); nr--; } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index cb2520abf261..5ab631702769 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -41,8 +41,11 @@ #define INTEL_PT_RETURN 1 -/* Maximum number of loops with no packets consumed i.e. stuck in a loop */ -#define INTEL_PT_MAX_LOOPS 10000 +/* + * Default maximum number of loops with no packets consumed i.e. stuck in a + * loop. + */ +#define INTEL_PT_MAX_LOOPS 100000 struct intel_pt_blk { struct intel_pt_blk *prev; @@ -220,6 +223,7 @@ struct intel_pt_decoder { uint64_t timestamp_insn_cnt; uint64_t sample_insn_cnt; uint64_t stuck_ip; + int max_loops; int no_progress; int stuck_ip_prd; int stuck_ip_cnt; @@ -315,6 +319,7 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) decoder->vm_tm_corr_dry_run = params->vm_tm_corr_dry_run; decoder->first_timestamp = params->first_timestamp; decoder->last_reliable_timestamp = params->first_timestamp; + decoder->max_loops = params->max_loops ? 
params->max_loops : INTEL_PT_MAX_LOOPS; decoder->flags = params->flags; @@ -483,7 +488,7 @@ static const char *intel_pt_err_msgs[] = { [INTEL_PT_ERR_OVR] = "Overflow packet", [INTEL_PT_ERR_LOST] = "Lost trace data", [INTEL_PT_ERR_UNK] = "Unknown error!", - [INTEL_PT_ERR_NELOOP] = "Never-ending loop", + [INTEL_PT_ERR_NELOOP] = "Never-ending loop (refer perf config intel-pt.max-loops)", }; int intel_pt__strerror(int code, char *buf, size_t buflen) @@ -1168,7 +1173,7 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder, decoder->stuck_ip = decoder->state.to_ip; decoder->stuck_ip_prd = 1; decoder->stuck_ip_cnt = 1; - } else if (cnt > INTEL_PT_MAX_LOOPS || + } else if (cnt > decoder->max_loops || decoder->state.to_ip == decoder->stuck_ip) { intel_pt_log_at("ERROR: Never-ending loop", decoder->state.to_ip); diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h index 714c475808c0..4b5e79fcf557 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h @@ -270,6 +270,7 @@ struct intel_pt_params { uint32_t tsc_ctc_ratio_d; enum intel_pt_param_flags flags; unsigned int quick; + int max_loops; }; struct intel_pt_decoder; diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 154a1077f22e..6f852b305e92 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -123,6 +123,7 @@ struct intel_pt { u64 noretcomp_bit; unsigned max_non_turbo_ratio; unsigned cbr2khz; + int max_loops; unsigned long num_events; @@ -1200,6 +1201,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, params.vm_time_correlation = pt->synth_opts.vm_time_correlation; params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run; params.first_timestamp = pt->first_timestamp; + params.max_loops = pt->max_loops; if (pt->filts.cnt > 0) params.pgd_ip = intel_pt_pgd_ip; @@ -3431,6 +3433,9 @@ static int intel_pt_perf_config(const char *var, const char *value, void *data) if (!strcmp(var, "intel-pt.mispred-all")) pt->mispred_all = perf_config_bool(var, value); + if (!strcmp(var, "intel-pt.max-loops")) + perf_config_int(&pt->max_loops, var, value); + return 0; } diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index d3cf2dee36c8..99d047c5ead0 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -219,9 +219,9 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, if (has_constraint && ev->weak_group) continue; /* Ignore event if already used and merging is disabled. */ - if (metric_no_merge && test_bit(ev->idx, evlist_used)) + if (metric_no_merge && test_bit(ev->core.idx, evlist_used)) continue; - if (!has_constraint && ev->leader != current_leader) { + if (!has_constraint && !evsel__has_leader(ev, current_leader)) { /* * Start of a new group, discard the whole match and * start again. @@ -229,7 +229,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, matched_events = 0; memset(metric_events, 0, sizeof(struct evsel *) * idnum); - current_leader = ev->leader; + current_leader = evsel__leader(ev); } /* * Check for duplicate events with the same name. For example, @@ -269,7 +269,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, for (i = 0; i < idnum; i++) { ev = metric_events[i]; /* Don't free the used events. 
*/ - set_bit(ev->idx, evlist_used); + set_bit(ev->core.idx, evlist_used); /* * The metric leader points to the identically named event in * metric_events. @@ -287,11 +287,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, * when then group is left. */ if (!has_constraint && - ev->leader != metric_events[i]->leader && - evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader)) + ev->core.leader != metric_events[i]->core.leader && + evsel_same_pmu_or_none(evsel__leader(ev), evsel__leader(metric_events[i]))) break; if (!strcmp(metric_events[i]->name, ev->name)) { - set_bit(ev->idx, evlist_used); + set_bit(ev->core.idx, evlist_used); ev->metric_leader = metric_events[i]; } } @@ -391,7 +391,7 @@ static int metricgroup__setup_events(struct list_head *groups, } evlist__for_each_entry_safe(perf_evlist, tmp, evsel) { - if (!test_bit(evsel->idx, evlist_used)) { + if (!test_bit(evsel->core.idx, evlist_used)) { evlist__remove(perf_evlist, evsel); evsel__delete(evsel); } @@ -1312,7 +1312,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp, nd = rblist__entry(old_metric_events, i); old_me = container_of(nd, struct metric_event, nd); - evsel = evlist__find_evsel(evlist, old_me->evsel->idx); + evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx); if (!evsel) return -EINVAL; new_me = metricgroup__lookup(new_metric_events, evsel, true); @@ -1320,7 +1320,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp, return -ENOMEM; pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n", - cgrp ? cgrp->name : "root", evsel->name, evsel->idx); + cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx); list_for_each_entry(old_expr, &old_me->head, nd) { new_expr = malloc(sizeof(*new_expr)); @@ -1363,7 +1363,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp, /* copy evsel in the same position */ for (idx = 0; idx < nr; idx++) { evsel = old_expr->metric_events[idx]; - evsel = evlist__find_evsel(evlist, evsel->idx); + evsel = evlist__find_evsel(evlist, evsel->core.idx); if (evsel == NULL) { free(new_expr->metric_events); free(new_expr->metric_refs); diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 84108c17f48d..e5eae23cfceb 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1740,7 +1740,7 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list, leader = list_first_entry(list, struct evsel, core.node); evsel = list_last_entry(list, struct evsel, core.node); - total_members = evsel->idx - leader->idx + 1; + total_members = evsel->core.idx - leader->core.idx + 1; leaders = calloc(total_members, sizeof(uintptr_t)); if (WARN_ON(!leaders)) @@ -1800,7 +1800,7 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list, __evlist__for_each_entry(list, evsel) { if (i >= nr_pmu) i = 0; - evsel->leader = (struct evsel *) leaders[i++]; + evsel__set_leader(evsel, (struct evsel *) leaders[i++]); } /* The number of members and group name are same for each group */ @@ -1833,7 +1833,7 @@ void parse_events__set_leader(char *name, struct list_head *list, if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state)) return; - __evlist__set_leader(list); + __perf_evlist__set_leader(list); leader = list_entry(list->next, struct evsel, core.node); leader->group_name = name ? 
strdup(name) : NULL; } @@ -2285,7 +2285,7 @@ int __parse_events(struct evlist *evlist, const char *str, if (!ret) { struct evsel *last; - evlist->nr_groups += parse_state.nr_groups; + evlist->core.nr_groups += parse_state.nr_groups; last = evlist__last(evlist); last->cmdline_group_boundary = true; diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index aba12a4d488e..9321bd0e2f76 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -316,7 +316,7 @@ event_pmu_name opt_pmu_config if (!strncmp(name, "uncore_", 7) && strncmp($1, "uncore_", 7)) name += 7; - if (!fnmatch(pattern, name, 0)) { + if (!perf_pmu__match(pattern, name, $1)) { if (parse_events_copy_term_list(orig_terms, &terms)) CLEANUP_YYABORT; if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true, false)) diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c index 6eef6dfeaa57..dd9ed56e0504 100644 --- a/tools/perf/util/pfm.c +++ b/tools/perf/util/pfm.c @@ -110,7 +110,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str, "cannot close a non-existing event group\n"); goto error; } - evlist->nr_groups++; + evlist->core.nr_groups++; grp_leader = NULL; grp_evt = -1; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 88c8ecdc60b0..44b90d638ad5 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -3,6 +3,7 @@ #include <linux/compiler.h> #include <linux/string.h> #include <linux/zalloc.h> +#include <linux/ctype.h> #include <subcmd/pager.h> #include <sys/types.h> #include <errno.h> @@ -17,6 +18,7 @@ #include <locale.h> #include <regex.h> #include <perf/cpumap.h> +#include <fnmatch.h> #include "debug.h" #include "evsel.h" #include "pmu.h" @@ -740,6 +742,27 @@ struct pmu_events_map *__weak pmu_events_map__find(void) return perf_pmu__find_map(NULL); } +static bool perf_pmu__valid_suffix(char *pmu_name, char *tok) +{ + char *p; + + if (strncmp(pmu_name, tok, strlen(tok))) + return false; + + p = pmu_name + strlen(tok); + if (*p == 0) + return true; + + if (*p != '_') + return false; + + ++p; + if (*p == 0 || !isdigit(*p)) + return false; + + return true; +} + bool pmu_uncore_alias_match(const char *pmu_name, const char *name) { char *tmp = NULL, *tok, *str; @@ -768,7 +791,7 @@ bool pmu_uncore_alias_match(const char *pmu_name, const char *name) */ for (; tok; name += strlen(tok), tok = strtok_r(NULL, ",", &tmp)) { name = strstr(name, tok); - if (!name) { + if (!name || !perf_pmu__valid_suffix((char *)name, tok)) { res = false; goto out; } @@ -1872,3 +1895,14 @@ bool perf_pmu__has_hybrid(void) return !list_empty(&perf_pmu__hybrid_pmus); } + +int perf_pmu__match(char *pattern, char *name, char *tok) +{ + if (fnmatch(pattern, name, 0)) + return -1; + + if (tok && !perf_pmu__valid_suffix(name, tok)) + return -1; + + return 0; +} diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index a790ef758171..926da483a141 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -133,5 +133,6 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, char *name); bool perf_pmu__has_hybrid(void); +int perf_pmu__match(char *pattern, char *name, char *tok); #endif /* __PMU_H */ diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index b029c29ce227..02ef0d78053b 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -118,12 +118,17 @@ struct debuginfo *debuginfo__new(const char *path) char buf[PATH_MAX], nil = '\0'; struct dso *dso; struct debuginfo 
*dinfo = NULL; + struct build_id bid; /* Try to open distro debuginfo files */ dso = dso__new(path); if (!dso) goto out; + /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */ + if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0) + dso__set_build_id(dso, &bid); + for (type = distro_dwarf_types; !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND; type++) { diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 412f8e79e409..8feef3a05af7 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -1032,7 +1032,7 @@ static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, Py_INCREF(pevsel); evsel = &((struct pyrf_evsel *)pevsel)->evsel; - evsel->idx = evlist->core.nr_entries; + evsel->core.idx = evlist->core.nr_entries; evlist__add(evlist, evsel); return Py_BuildValue("i", evlist->core.nr_entries); diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c index 43e5b563dee8..bff669b615ee 100644 --- a/tools/perf/util/record.c +++ b/tools/perf/util/record.c @@ -25,12 +25,12 @@ */ static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist) { - struct evsel *leader = evsel->leader; + struct evsel *leader = evsel__leader(evsel); if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader) || is_mem_loads_aux_event(leader)) { evlist__for_each_entry(evlist, evsel) { - if (evsel->leader == leader && evsel != evsel->leader) + if (evsel__leader(evsel) == leader && evsel != evsel__leader(evsel)) return evsel; } } @@ -53,7 +53,7 @@ static u64 evsel__config_term_mask(struct evsel *evsel) static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist) { struct perf_event_attr *attr = &evsel->core.attr; - struct evsel *leader = evsel->leader; + struct evsel *leader = evsel__leader(evsel); struct evsel *read_sampler; u64 term_types, freq_mask; diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 164d2f45028c..69129e2aa7a1 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -687,7 +687,7 @@ static void set_sample_datasrc_in_dict(PyObject *dict, _PyUnicode_FromString(decode)); } -static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size) +static void regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size) { unsigned int i = 0, r; int printed = 0; @@ -695,7 +695,7 @@ static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size) bf[0] = 0; if (!regs || !regs->regs) - return 0; + return; for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) { u64 val = regs->regs[i++]; @@ -704,8 +704,6 @@ static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size) "%5s:0x%" PRIx64 " ", perf_reg_name(r), val); } - - return printed; } static void set_regs_in_dict(PyObject *dict, @@ -713,7 +711,16 @@ static void set_regs_in_dict(PyObject *dict, struct evsel *evsel) { struct perf_event_attr *attr = &evsel->core.attr; - char bf[512]; + + /* + * Here value 28 is a constant size which can be used to print + * one register value and it corresponds to: + * 16 chars is to specify a 64 bit register in hexadecimal. + * 2 chars is for appending "0x" to the hexadecimal value and + * 10 chars is for the register name.
+ */ + int size = __sw_hweight64(attr->sample_regs_intr) * 28; + char bf[size]; regs_map(&sample->intr_regs, attr->sample_regs_intr, bf, sizeof(bf)); diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index c588a6b7a8db..83a2bc02df15 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -19,6 +19,7 @@ #include "util.h" #include "iostat.h" #include "pmu-hybrid.h" +#include "evlist-hybrid.h" #define CNTR_NOT_SUPPORTED "<not supported>" #define CNTR_NOT_COUNTED "<not counted>" @@ -465,9 +466,11 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int config->csv_sep); if (counter->supported) { - config->print_free_counters_hint = 1; - if (is_mixed_hw_group(counter)) - config->print_mixed_hw_group_error = 1; + if (!evlist__has_hybrid(counter->evlist)) { + config->print_free_counters_hint = 1; + if (is_mixed_hw_group(counter)) + config->print_mixed_hw_group_error = 1; + } } fprintf(config->output, "%-*s%s", diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 39967a45f55b..34a7f5c1fff7 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -379,7 +379,7 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list) evlist__for_each_entry(evsel_list, counter) { bool invalid = false; - leader = counter->leader; + leader = evsel__leader(counter); if (!counter->metric_expr) continue; diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index d3ec2624e036..09ea334586f2 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -534,7 +534,7 @@ int create_perf_stat_counter(struct evsel *evsel, int cpu) { struct perf_event_attr *attr = &evsel->core.attr; - struct evsel *leader = evsel->leader; + struct evsel *leader = evsel__leader(evsel); attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; diff --git a/tools/perf/util/stream.c b/tools/perf/util/stream.c index 4bd5e5a00aa5..545e44981a27 100644 --- a/tools/perf/util/stream.c +++ b/tools/perf/util/stream.c @@ -139,7 +139,7 @@ static int evlist__init_callchain_streams(struct evlist *evlist, hists__output_resort(hists, NULL); init_hot_callchain(hists, &es[i]); - es[i].evsel_idx = pos->idx; + es[i].evsel_idx = pos->core.idx; i++; } diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index a73345730ba9..31cd59a2b66e 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1074,14 +1074,15 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, return 0; } -int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, - struct symsrc *runtime_ss, int kmodule) +static int +dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, + struct symsrc *runtime_ss, int kmodule, int dynsym) { struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; struct maps *kmaps = kmap ? map__kmaps(map) : NULL; struct map *curr_map = map; struct dso *curr_dso = dso; - Elf_Data *symstrs, *secstrs; + Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym; uint32_t nr_syms; int err = -1; uint32_t idx; @@ -1098,34 +1099,15 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, if (kmap && !kmaps) return -1; - dso->symtab_type = syms_ss->type; - dso->is_64_bit = syms_ss->is_64_bit; - dso->rel = syms_ss->ehdr.e_type == ET_REL; - - /* - * Modules may already have symbols from kallsyms, but those symbols - * have the wrong values for the dso maps, so remove them. 
- */ - if (kmodule && syms_ss->symtab) - symbols__delete(&dso->symbols); - - if (!syms_ss->symtab) { - /* - * If the vmlinux is stripped, fail so we will fall back - * to using kallsyms. The vmlinux runtime symbols aren't - * of much use. - */ - if (dso->kernel) - goto out_elf_end; - - syms_ss->symtab = syms_ss->dynsym; - syms_ss->symshdr = syms_ss->dynshdr; - } - elf = syms_ss->elf; ehdr = syms_ss->ehdr; - sec = syms_ss->symtab; - shdr = syms_ss->symshdr; + if (dynsym) { + sec = syms_ss->dynsym; + shdr = syms_ss->dynshdr; + } else { + sec = syms_ss->symtab; + shdr = syms_ss->symshdr; + } if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr, ".text", NULL)) @@ -1150,8 +1132,16 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, if (sec_strndx == NULL) goto out_elf_end; - secstrs = elf_getdata(sec_strndx, NULL); - if (secstrs == NULL) + secstrs_run = elf_getdata(sec_strndx, NULL); + if (secstrs_run == NULL) + goto out_elf_end; + + sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); + if (sec_strndx == NULL) + goto out_elf_end; + + secstrs_sym = elf_getdata(sec_strndx, NULL); + if (secstrs_sym == NULL) goto out_elf_end; nr_syms = shdr.sh_size / shdr.sh_entsize; @@ -1237,6 +1227,8 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, gelf_getshdr(sec, &shdr); + secstrs = secstrs_sym; + /* * We have to fallback to runtime when syms' section header has * NOBITS set. NOBITS results in file offset (sh_offset) not @@ -1249,6 +1241,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, goto out_elf_end; gelf_getshdr(sec, &shdr); + secstrs = secstrs_run; } if (is_label && !elf_sec__filter(&shdr, secstrs)) @@ -1312,6 +1305,50 @@ out_elf_end: return err; } +int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, + struct symsrc *runtime_ss, int kmodule) +{ + int nr = 0; + int err = -1; + + dso->symtab_type = syms_ss->type; + dso->is_64_bit = syms_ss->is_64_bit; + dso->rel = syms_ss->ehdr.e_type == ET_REL; + + /* + * Modules may already have symbols from kallsyms, but those symbols + * have the wrong values for the dso maps, so remove them. + */ + if (kmodule && syms_ss->symtab) + symbols__delete(&dso->symbols); + + if (!syms_ss->symtab) { + /* + * If the vmlinux is stripped, fail so we will fall back + * to using kallsyms. The vmlinux runtime symbols aren't + * of much use. + */ + if (dso->kernel) + return err; + } else { + err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss, + kmodule, 0); + if (err < 0) + return err; + nr = err; + } + + if (syms_ss->dynsym) { + err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss, + kmodule, 1); + if (err < 0) + return err; + err += nr; + } + + return err; +} + static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data) { GElf_Phdr phdr;
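A closing note on the pmu.c hunk above: perf_pmu__match() only reports a wildcard match when the matched token is followed by nothing or by an underscore plus a digit, which is what keeps a pattern for uncore_imc from also matching uncore_imc_free_running. Below is a self-contained restatement of that suffix rule, with the same logic as perf_pmu__valid_suffix(); the harness around it is hypothetical:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same rule as perf_pmu__valid_suffix(): "tok" must be a prefix of
 * pmu_name, and what follows must be empty or "_" plus a digit. */
static bool valid_suffix(const char *pmu_name, const char *tok)
{
	const char *p;

	if (strncmp(pmu_name, tok, strlen(tok)))
		return false;

	p = pmu_name + strlen(tok);
	if (*p == 0)
		return true;
	if (*p != '_')
		return false;

	++p;
	return *p != 0 && isdigit((unsigned char)*p);
}

int main(void)
{
	/* uncore_imc matches itself and numbered instances, but not
	 * the separate uncore_imc_free_running PMU */
	printf("%d\n", valid_suffix("uncore_imc", "uncore_imc"));              /* 1 */
	printf("%d\n", valid_suffix("uncore_imc_0", "uncore_imc"));            /* 1 */
	printf("%d\n", valid_suffix("uncore_imc_free_running", "uncore_imc")); /* 0 */
	return 0;
}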