author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-18 11:26:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-18 11:26:46 -0400
commit     96b90f27bcf22f1d06cc16d9475cefa6ea4c4718 (patch)
tree       a886ad5f611dea36c6d4b615dfdcdbbcf5bd3135
parent     396c9df2231865ef55aa031e3f5df9d99e036869 (diff)
parent     0c99241c93b8060441f3c8434848e54b5338f922 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"This update has mostly fixes, but also other bits:
- perf tooling fixes
- PMU driver fixes
- Intel Broadwell PMU driver HW-enablement for LBR callstacks
 - a late-coming 'perf kmem' tool update that enables it to also
   analyze page allocation data. Note, this comes with MM tracepoint
   changes that we believe do not break anything: they change the
   formerly opaque 'struct page *' field that uniquely identifies
   pages to a 'pfn', which also identifies pages uniquely but isn't
   as opaque and can be used for other purposes as well"
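To make that tracepoint change concrete: a page frame number and a 'struct page *' are mutually recoverable (the kernel converts between them with page_to_pfn()/pfn_to_page()), so switching the recorded field loses no information. Below is a minimal userspace model of that round trip; the vmemmap base, 'struct page' size, and arithmetic are illustrative assumptions, not the kernel's actual layout.

        /* Userspace model of the page -> pfn tracepoint change. The
         * constants below (vmemmap base, struct page size) are assumptions
         * for illustration only; the real conversion lives in the kernel's
         * page_to_pfn()/pfn_to_page() helpers. */
        #include <stdint.h>
        #include <stdio.h>

        #define STRUCT_PAGE_SIZE 64ULL                  /* assumed sizeof(struct page) */
        static const uint64_t vmemmap = 0xffffea0000000000ULL; /* assumed map base */

        static uint64_t page_to_pfn(uint64_t page)      /* pointer -> frame number */
        {
                return (page - vmemmap) / STRUCT_PAGE_SIZE;
        }

        static uint64_t pfn_to_page(uint64_t pfn)       /* frame number -> pointer */
        {
                return vmemmap + pfn * STRUCT_PAGE_SIZE;
        }

        int main(void)
        {
                uint64_t page = vmemmap + 42 * STRUCT_PAGE_SIZE;
                uint64_t pfn = page_to_pfn(page);

                /* lossless round trip: the trace field can carry either form */
                printf("page=%#llx pfn=%llu back=%#llx\n",
                       (unsigned long long)page, (unsigned long long)pfn,
                       (unsigned long long)pfn_to_page(pfn));
                return 0;
        }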
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86/intel/pt: Fix and clean up error handling in pt_event_add()
perf/x86/intel: Add Broadwell support for the LBR callstack
perf/x86/intel/rapl: Fix energy counter measurements by supporting per domain energy units
perf/x86/intel: Fix Core2,Atom,NHM,WSM cycles:pp events
perf/x86: Fix hw_perf_event::flags collision
perf probe: Fix segfault when probe with lazy_line to file
perf probe: Find compilation directory path for lazy matching
perf probe: Set retprobe flag when probe in address-based alternative mode
perf kmem: Analyze page allocator events also
tracing, mm: Record pfn instead of pointer to struct page
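The 'perf kmem' entry in that list is the user-visible piece: with --page the tool consumes kmem:mm_page_alloc / kmem:mm_page_free and must cope with both kernel generations, since a failed allocation is traced as pfn == -1UL on pfn-recording kernels but as a NULL pointer on older ones. A standalone sketch of that check, mirroring valid_page() in the builtin-kmem.c hunk below (the main() driver is hypothetical test scaffolding):

        /* Standalone sketch of the sentinel test used by perf kmem --page;
         * mirrors valid_page() from the builtin-kmem.c change below. */
        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        static bool use_pfn;    /* true when the tracepoint carries a "pfn" field */

        static bool valid_page(uint64_t pfn_or_page)
        {
                if (use_pfn && pfn_or_page == (uint64_t)-1)
                        return false;   /* pfn-based kernels trace failure as -1 */
                if (!use_pfn && pfn_or_page == 0)
                        return false;   /* pointer-based kernels trace NULL */
                return true;
        }

        int main(void)
        {
                use_pfn = true;
                printf("pfn -1  -> %svalid\n", valid_page((uint64_t)-1) ? "" : "in");
                use_pfn = false;
                printf("page 0  -> %svalid\n", valid_page(0) ? "" : "in");
                printf("page 42 -> %svalid\n", valid_page(42) ? "" : "in");
                return 0;
        }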
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h            |  18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c      |   2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c   |   8
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_pt.c   |  33
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c |  94
-rw-r--r--  include/trace/events/filemap.h              |   8
-rw-r--r--  include/trace/events/kmem.h                 |  42
-rw-r--r--  include/trace/events/vmscan.h               |   8
-rw-r--r--  tools/perf/Documentation/perf-kmem.txt      |   8
-rw-r--r--  tools/perf/builtin-kmem.c                   | 500
-rw-r--r--  tools/perf/util/probe-event.c               |  60
-rw-r--r--  tools/perf/util/probe-finder.c              |  73
-rw-r--r--  tools/perf/util/probe-finder.h              |   4
13 files changed, 702 insertions(+), 156 deletions(-)
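One detail worth calling out before the diff: the RAPL changes keep exporting counts normalized to 2^-32 Joules even though the hardware unit now varies per domain, so the consumer-side conversion stays the same. A sketch of that conversion, following the comment in rapl_scale() (the sample numbers are made up):

        /* Convert a RAPL PMU count (normalized to units of 2^-32 J, per the
         * rapl_scale() comment) into Joules and Watts. Sample values are
         * illustrative only. Build with -lm. */
        #include <math.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t count = 6442450944ULL; /* raw reading, units of 2^-32 J */
                double seconds = 1.5;           /* measurement interval */

                double joules = ldexp((double)count, -32);  /* count * 2^-32 */
                double watts  = joules / seconds;

                printf("%.3f J over %.1f s = %.3f W\n", joules, seconds, watts);
                return 0;   /* prints: 1.500 J over 1.5 s = 1.000 W */
        }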
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 329f0356ad4a..6ac5cb7a9e14 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -65,15 +65,15 @@ struct event_constraint {
 /*
  * struct hw_perf_event.flags flags
  */
-#define PERF_X86_EVENT_PEBS_LDLAT	0x1 /* ld+ldlat data address sampling */
-#define PERF_X86_EVENT_PEBS_ST		0x2 /* st data address sampling */
-#define PERF_X86_EVENT_PEBS_ST_HSW	0x4 /* haswell style datala, store */
-#define PERF_X86_EVENT_COMMITTED	0x8 /* event passed commit_txn */
-#define PERF_X86_EVENT_PEBS_LD_HSW	0x10 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW	0x20 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL		0x40 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC		0x80 /* dynamic alloc'd constraint */
-#define PERF_X86_EVENT_RDPMC_ALLOWED	0x40 /* grant rdpmc permission */
+#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
+#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
+#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
+#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
+#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
+#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
+#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
 
 struct amd_nb {
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 9da2400c2ec3..219d3fb423a1 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -3275,7 +3275,7 @@ __init int intel_pmu_init(void)
 		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
 									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
 
-		intel_pmu_lbr_init_snb();
+		intel_pmu_lbr_init_hsw();
 
 		x86_pmu.event_constraints = intel_bdw_event_constraints;
 		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index ca69ea56c712..813f75d71175 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -558,6 +558,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
+	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
 	EVENT_CONSTRAINT_END
 };
 
@@ -565,6 +567,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
+	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
 	EVENT_CONSTRAINT_END
 };
 
@@ -588,6 +592,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
+	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
 	EVENT_CONSTRAINT_END
 };
 
@@ -603,6 +609,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
+	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
 	EVENT_CONSTRAINT_END
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
index f2770641c0fd..ffe666c2c6b5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
@@ -988,39 +988,36 @@ static int pt_event_add(struct perf_event *event, int mode)
 	int ret = -EBUSY;
 
 	if (pt->handle.event)
-		goto out;
+		goto fail;
 
 	buf = perf_aux_output_begin(&pt->handle, event);
-	if (!buf) {
-		ret = -EINVAL;
-		goto out;
-	}
+	ret = -EINVAL;
+	if (!buf)
+		goto fail_stop;
 
 	pt_buffer_reset_offsets(buf, pt->handle.head);
 	if (!buf->snapshot) {
 		ret = pt_buffer_reset_markers(buf, &pt->handle);
-		if (ret) {
-			perf_aux_output_end(&pt->handle, 0, true);
-			goto out;
-		}
+		if (ret)
+			goto fail_end_stop;
 	}
 
 	if (mode & PERF_EF_START) {
 		pt_event_start(event, 0);
-		if (hwc->state == PERF_HES_STOPPED) {
-			pt_event_del(event, 0);
-			ret = -EBUSY;
-		}
+		ret = -EBUSY;
+		if (hwc->state == PERF_HES_STOPPED)
+			goto fail_end_stop;
 	} else {
 		hwc->state = PERF_HES_STOPPED;
 	}
 
-	ret = 0;
-out:
-
-	if (ret)
-		hwc->state = PERF_HES_STOPPED;
+	return 0;
 
+fail_end_stop:
+	perf_aux_output_end(&pt->handle, 0, true);
+fail_stop:
+	hwc->state = PERF_HES_STOPPED;
+fail:
 	return ret;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index c4bb8b8e5017..999289b94025 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -62,6 +62,14 @@
 #define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
 #define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
 
+#define NR_RAPL_DOMAINS 0x4
+static const char *rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
+	"pp0-core",
+	"package",
+	"dram",
+	"pp1-gpu",
+};
+
 /* Clients have PP0, PKG */
 #define RAPL_IDX_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
 			 1<<RAPL_IDX_PKG_NRG_STAT|\
@@ -112,7 +120,6 @@ static struct perf_pmu_events_attr event_attr_##v = {	\
 
 struct rapl_pmu {
 	spinlock_t	 lock;
-	int		 hw_unit;    /* 1/2^hw_unit Joule */
 	int		 n_active;   /* number of active events */
 	struct list_head active_list;
 	struct pmu	 *pmu;	     /* pointer to rapl_pmu_class */
@@ -120,6 +127,7 @@ struct rapl_pmu {
 	struct hrtimer   hrtimer;
 };
 
+static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;  /* 1/2^hw_unit Joule */
 static struct pmu rapl_pmu_class;
 static cpumask_t rapl_cpu_mask;
 static int rapl_cntr_mask;
@@ -127,6 +135,7 @@ static int rapl_cntr_mask;
 static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu);
 static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free);
 
+static struct x86_pmu_quirk *rapl_quirks;
 static inline u64 rapl_read_counter(struct perf_event *event)
 {
 	u64 raw;
@@ -134,15 +143,28 @@ static inline u64 rapl_read_counter(struct perf_event *event)
 	return raw;
 }
 
-static inline u64 rapl_scale(u64 v)
+#define rapl_add_quirk(func_)						\
+do {									\
+	static struct x86_pmu_quirk __quirk __initdata = {		\
+		.func = func_,						\
+	};								\
+	__quirk.next = rapl_quirks;					\
+	rapl_quirks = &__quirk;						\
+} while (0)
+
+static inline u64 rapl_scale(u64 v, int cfg)
 {
+	if (cfg > NR_RAPL_DOMAINS) {
+		pr_warn("invalid domain %d, failed to scale data\n", cfg);
+		return v;
+	}
 	/*
 	 * scale delta to smallest unit (1/2^32)
 	 * users must then scale back: count * 1/(1e9*2^32) to get Joules
 	 * or use ldexp(count, -32).
	 * Watts = Joules/Time delta
 	 */
-	return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
+	return v << (32 - rapl_hw_unit[cfg - 1]);
 }
 
 static u64 rapl_event_update(struct perf_event *event)
@@ -173,7 +195,7 @@ again:
 	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 	delta >>= shift;
 
-	sdelta = rapl_scale(delta);
+	sdelta = rapl_scale(delta, event->hw.config);
 
 	local64_add(sdelta, &event->count);
 
@@ -546,12 +568,22 @@ static void rapl_cpu_init(int cpu)
 	cpumask_set_cpu(cpu, &rapl_cpu_mask);
 }
 
+static __init void rapl_hsw_server_quirk(void)
+{
+	/*
+	 * DRAM domain on HSW server has fixed energy unit which can be
+	 * different than the unit from power unit MSR.
+	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
+	 * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
+	 */
+	rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;
+}
+
 static int rapl_cpu_prepare(int cpu)
 {
 	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
 	int phys_id = topology_physical_package_id(cpu);
 	u64 ms;
-	u64 msr_rapl_power_unit_bits;
 
 	if (pmu)
 		return 0;
@@ -559,24 +591,13 @@ static int rapl_cpu_prepare(int cpu)
 	if (phys_id < 0)
 		return -1;
 
-	/* protect rdmsrl() to handle virtualization */
-	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
-		return -1;
-
 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
 	if (!pmu)
 		return -1;
-
 	spin_lock_init(&pmu->lock);
 
 	INIT_LIST_HEAD(&pmu->active_list);
 
-	/*
-	 * grab power unit as: 1/2^unit Joules
-	 *
-	 * we cache in local PMU instance
-	 */
-	pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
 	pmu->pmu = &rapl_pmu_class;
 
 	/*
@@ -586,8 +607,8 @@ static int rapl_cpu_prepare(int cpu)
 	 * divide interval by 2 to avoid lockstep (2 * 100)
 	 * if hw unit is 32, then we use 2 ms 1/200/2
 	 */
-	if (pmu->hw_unit < 32)
-		ms = (1000 / (2 * 100)) * (1ULL << (32 - pmu->hw_unit - 1));
+	if (rapl_hw_unit[0] < 32)
+		ms = (1000 / (2 * 100)) * (1ULL << (32 - rapl_hw_unit[0] - 1));
 	else
 		ms = 2;
 
@@ -655,6 +676,20 @@ static int rapl_cpu_notifier(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
+static int rapl_check_hw_unit(void)
+{
+	u64 msr_rapl_power_unit_bits;
+	int i;
+
+	/* protect rdmsrl() to handle virtualization */
+	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+		return -1;
+	for (i = 0; i < NR_RAPL_DOMAINS; i++)
+		rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
+
+	return 0;
+}
+
 static const struct x86_cpu_id rapl_cpu_match[] = {
 	[0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
 	[1] = {},
@@ -664,6 +699,8 @@ static int __init rapl_pmu_init(void)
 {
 	struct rapl_pmu *pmu;
 	int cpu, ret;
+	struct x86_pmu_quirk *quirk;
+	int i;
 
 	/*
 	 * check for Intel processor family 6
@@ -678,6 +715,11 @@ static int __init rapl_pmu_init(void)
 		rapl_cntr_mask = RAPL_IDX_CLN;
 		rapl_pmu_events_group.attrs = rapl_events_cln_attr;
 		break;
+	case 63: /* Haswell-Server */
+		rapl_add_quirk(rapl_hsw_server_quirk);
+		rapl_cntr_mask = RAPL_IDX_SRV;
+		rapl_pmu_events_group.attrs = rapl_events_srv_attr;
+		break;
 	case 60: /* Haswell */
 	case 69: /* Haswell-Celeron */
 		rapl_cntr_mask = RAPL_IDX_HSW;
@@ -693,7 +735,13 @@ static int __init rapl_pmu_init(void)
 		/* unsupported */
 		return 0;
 	}
 
+	ret = rapl_check_hw_unit();
+	if (ret)
+		return ret;
+	/* run cpu model quirks */
+	for (quirk = rapl_quirks; quirk; quirk = quirk->next)
+		quirk->func();
 	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
@@ -714,14 +762,18 @@ static int __init rapl_pmu_init(void)
 
 	pmu = __this_cpu_read(rapl_pmu);
 
-	pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
+	pr_info("RAPL PMU detected,"
 		" API unit is 2^-32 Joules,"
 		" %d fixed counters"
 		" %llu ms ovfl timer\n",
-		pmu->hw_unit,
 		hweight32(rapl_cntr_mask),
 		ktime_to_ms(pmu->timer_interval));
-
+	for (i = 0; i < NR_RAPL_DOMAINS; i++) {
+		if (rapl_cntr_mask & (1 << i)) {
+			pr_info("hw unit of domain %s 2^-%d Joules\n",
+				rapl_domain_names[i], rapl_hw_unit[i]);
+		}
+	}
 out:
 	cpu_notifier_register_done();
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 0421f49a20f7..42febb6bc1d5 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -18,14 +18,14 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
 	TP_ARGS(page),
 
 	TP_STRUCT__entry(
-		__field(struct page *, page)
+		__field(unsigned long, pfn)
 		__field(unsigned long, i_ino)
 		__field(unsigned long, index)
 		__field(dev_t, s_dev)
 	),
 
 	TP_fast_assign(
-		__entry->page = page;
+		__entry->pfn = page_to_pfn(page);
 		__entry->i_ino = page->mapping->host->i_ino;
 		__entry->index = page->index;
 		if (page->mapping->host->i_sb)
@@ -37,8 +37,8 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
 	TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
 		MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
 		__entry->i_ino,
-		__entry->page,
-		page_to_pfn(__entry->page),
+		pfn_to_page(__entry->pfn),
+		__entry->pfn,
 		__entry->index << PAGE_SHIFT)
 );
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 4ad10baecd4d..81ea59812117 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -154,18 +154,18 @@ TRACE_EVENT(mm_page_free,
 	TP_ARGS(page, order),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page		)
+		__field(	unsigned long,	pfn		)
 		__field(	unsigned int,	order		)
 	),
 
 	TP_fast_assign(
-		__entry->page		= page;
+		__entry->pfn		= page_to_pfn(page);
 		__entry->order		= order;
 	),
 
 	TP_printk("page=%p pfn=%lu order=%d",
-			__entry->page,
-			page_to_pfn(__entry->page),
+			pfn_to_page(__entry->pfn),
+			__entry->pfn,
 			__entry->order)
 );
 
@@ -176,18 +176,18 @@ TRACE_EVENT(mm_page_free_batched,
 	TP_ARGS(page, cold),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page		)
+		__field(	unsigned long,	pfn		)
 		__field(	int,		cold		)
 	),
 
 	TP_fast_assign(
-		__entry->page		= page;
+		__entry->pfn		= page_to_pfn(page);
 		__entry->cold		= cold;
 	),
 
 	TP_printk("page=%p pfn=%lu order=0 cold=%d",
-			__entry->page,
-			page_to_pfn(__entry->page),
+			pfn_to_page(__entry->pfn),
+			__entry->pfn,
 			__entry->cold)
 );
 
@@ -199,22 +199,22 @@ TRACE_EVENT(mm_page_alloc,
 	TP_ARGS(page, order, gfp_flags, migratetype),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page		)
+		__field(	unsigned long,	pfn		)
 		__field(	unsigned int,	order		)
 		__field(	gfp_t,		gfp_flags	)
 		__field(	int,		migratetype	)
 	),
 
 	TP_fast_assign(
-		__entry->page		= page;
+		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
 		__entry->order		= order;
 		__entry->gfp_flags	= gfp_flags;
 		__entry->migratetype	= migratetype;
 	),
 
 	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
-		__entry->page,
-		__entry->page ? page_to_pfn(__entry->page) : 0,
+		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+		__entry->pfn != -1UL ? __entry->pfn : 0,
 		__entry->order,
 		__entry->migratetype,
 		show_gfp_flags(__entry->gfp_flags))
@@ -227,20 +227,20 @@ DECLARE_EVENT_CLASS(mm_page,
 	TP_ARGS(page, order, migratetype),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page		)
+		__field(	unsigned long,	pfn		)
 		__field(	unsigned int,	order		)
 		__field(	int,		migratetype	)
 	),
 
 	TP_fast_assign(
-		__entry->page		= page;
+		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
 		__entry->order		= order;
 		__entry->migratetype	= migratetype;
 	),
 
 	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
-		__entry->page,
-		__entry->page ? page_to_pfn(__entry->page) : 0,
+		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+		__entry->pfn != -1UL ? __entry->pfn : 0,
 		__entry->order,
 		__entry->migratetype,
 		__entry->order == 0)
@@ -260,7 +260,7 @@ DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
 	TP_ARGS(page, order, migratetype),
 
 	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
-		__entry->page, page_to_pfn(__entry->page),
+		pfn_to_page(__entry->pfn), __entry->pfn,
 		__entry->order, __entry->migratetype)
 );
 
@@ -275,7 +275,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 		alloc_migratetype, fallback_migratetype),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page			)
+		__field(	unsigned long,	pfn			)
 		__field(	int,		alloc_order		)
 		__field(	int,		fallback_order		)
 		__field(	int,		alloc_migratetype	)
@@ -284,7 +284,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 	),
 
 	TP_fast_assign(
-		__entry->page			= page;
+		__entry->pfn			= page_to_pfn(page);
 		__entry->alloc_order		= alloc_order;
 		__entry->fallback_order		= fallback_order;
 		__entry->alloc_migratetype	= alloc_migratetype;
@@ -294,8 +294,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 	),
 
 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
-		__entry->page,
-		page_to_pfn(__entry->page),
+		pfn_to_page(__entry->pfn),
+		__entry->pfn,
 		__entry->alloc_order,
 		__entry->fallback_order,
 		pageblock_order,
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 69590b6ffc09..f66476b96264 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -336,18 +336,18 @@ TRACE_EVENT(mm_vmscan_writepage,
 	TP_ARGS(page, reclaim_flags),
 
 	TP_STRUCT__entry(
-		__field(struct page *, page)
+		__field(unsigned long, pfn)
 		__field(int, reclaim_flags)
 	),
 
 	TP_fast_assign(
-		__entry->page = page;
+		__entry->pfn = page_to_pfn(page);
 		__entry->reclaim_flags = reclaim_flags;
 	),
 
 	TP_printk("page=%p pfn=%lu flags=%s",
-		__entry->page,
-		page_to_pfn(__entry->page),
+		pfn_to_page(__entry->pfn),
+		__entry->pfn,
 		show_reclaim_flags(__entry->reclaim_flags))
 );
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index 150253cc3c97..23219c65c16f 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -3,7 +3,7 @@ perf-kmem(1)
 
 NAME
 ----
-perf-kmem - Tool to trace/measure kernel memory(slab) properties
+perf-kmem - Tool to trace/measure kernel memory properties
 
 SYNOPSIS
 --------
@@ -46,6 +46,12 @@ OPTIONS
 --raw-ip::
 	Print raw ip instead of symbol
 
+--slab::
+	Analyze SLAB allocator events.
+
+--page::
+	Analyze page allocator events
+
 SEE ALSO
 --------
 linkperf:perf-record[1]
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 4ebf65c79434..63ea01349b6e 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -22,6 +22,11 @@
 #include <linux/string.h>
 #include <locale.h>
 
+static int kmem_slab;
+static int kmem_page;
+
+static long kmem_page_size;
+
 struct alloc_stat;
 typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
 
@@ -226,6 +231,244 @@ static int perf_evsel__process_free_event(struct perf_evsel *evsel,
 	return 0;
 }
 
+static u64 total_page_alloc_bytes;
+static u64 total_page_free_bytes;
+static u64 total_page_nomatch_bytes;
+static u64 total_page_fail_bytes;
+static unsigned long nr_page_allocs;
+static unsigned long nr_page_frees;
+static unsigned long nr_page_fails;
+static unsigned long nr_page_nomatch;
+
+static bool use_pfn;
+
+#define MAX_MIGRATE_TYPES 6
+#define MAX_PAGE_ORDER 11
+
+static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
+
+struct page_stat {
+	struct rb_node	node;
+	u64		page;
+	int		order;
+	unsigned	gfp_flags;
+	unsigned	migrate_type;
+	u64		alloc_bytes;
+	u64		free_bytes;
+	int		nr_alloc;
+	int		nr_free;
+};
+
+static struct rb_root page_tree;
+static struct rb_root page_alloc_tree;
+static struct rb_root page_alloc_sorted;
+
+static struct page_stat *search_page(unsigned long page, bool create)
+{
+	struct rb_node **node = &page_tree.rb_node;
+	struct rb_node *parent = NULL;
+	struct page_stat *data;
+
+	while (*node) {
+		s64 cmp;
+
+		parent = *node;
+		data = rb_entry(*node, struct page_stat, node);
+
+		cmp = data->page - page;
+		if (cmp < 0)
+			node = &parent->rb_left;
+		else if (cmp > 0)
+			node = &parent->rb_right;
+		else
+			return data;
+	}
+
+	if (!create)
+		return NULL;
+
+	data = zalloc(sizeof(*data));
+	if (data != NULL) {
+		data->page = page;
+
+		rb_link_node(&data->node, parent, node);
+		rb_insert_color(&data->node, &page_tree);
+	}
+
+	return data;
+}
+
+static int page_stat_cmp(struct page_stat *a, struct page_stat *b)
+{
+	if (a->page > b->page)
+		return -1;
+	if (a->page < b->page)
+		return 1;
+	if (a->order > b->order)
+		return -1;
+	if (a->order < b->order)
+		return 1;
+	if (a->migrate_type > b->migrate_type)
+		return -1;
+	if (a->migrate_type < b->migrate_type)
+		return 1;
+	if (a->gfp_flags > b->gfp_flags)
+		return -1;
+	if (a->gfp_flags < b->gfp_flags)
+		return 1;
+	return 0;
+}
+
+static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool create)
+{
+	struct rb_node **node = &page_alloc_tree.rb_node;
+	struct rb_node *parent = NULL;
+	struct page_stat *data;
+
+	while (*node) {
+		s64 cmp;
+
+		parent = *node;
+		data = rb_entry(*node, struct page_stat, node);
+
+		cmp = page_stat_cmp(data, stat);
+		if (cmp < 0)
+			node = &parent->rb_left;
+		else if (cmp > 0)
+			node = &parent->rb_right;
+		else
+			return data;
+	}
+
+	if (!create)
+		return NULL;
+
+	data = zalloc(sizeof(*data));
+	if (data != NULL) {
+		data->page = stat->page;
+		data->order = stat->order;
+		data->gfp_flags = stat->gfp_flags;
+		data->migrate_type = stat->migrate_type;
+
+		rb_link_node(&data->node, parent, node);
+		rb_insert_color(&data->node, &page_alloc_tree);
+	}
+
+	return data;
+}
+
+static bool valid_page(u64 pfn_or_page)
+{
+	if (use_pfn && pfn_or_page == -1UL)
+		return false;
+	if (!use_pfn && pfn_or_page == 0)
+		return false;
+	return true;
+}
+
+static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
+						struct perf_sample *sample)
+{
+	u64 page;
+	unsigned int order = perf_evsel__intval(evsel, sample, "order");
+	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
+	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
+						       "migratetype");
+	u64 bytes = kmem_page_size << order;
+	struct page_stat *stat;
+	struct page_stat this = {
+		.order = order,
+		.gfp_flags = gfp_flags,
+		.migrate_type = migrate_type,
+	};
+
+	if (use_pfn)
+		page = perf_evsel__intval(evsel, sample, "pfn");
+	else
+		page = perf_evsel__intval(evsel, sample, "page");
+
+	nr_page_allocs++;
+	total_page_alloc_bytes += bytes;
+
+	if (!valid_page(page)) {
+		nr_page_fails++;
+		total_page_fail_bytes += bytes;
+
+		return 0;
+	}
+
+	/*
+	 * This is to find the current page (with correct gfp flags and
+	 * migrate type) at free event.
+	 */
+	stat = search_page(page, true);
+	if (stat == NULL)
+		return -ENOMEM;
+
+	stat->order = order;
+	stat->gfp_flags = gfp_flags;
+	stat->migrate_type = migrate_type;
+
+	this.page = page;
+	stat = search_page_alloc_stat(&this, true);
+	if (stat == NULL)
+		return -ENOMEM;
+
+	stat->nr_alloc++;
+	stat->alloc_bytes += bytes;
+
+	order_stats[order][migrate_type]++;
+
+	return 0;
+}
+
+static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
+					       struct perf_sample *sample)
+{
+	u64 page;
+	unsigned int order = perf_evsel__intval(evsel, sample, "order");
+	u64 bytes = kmem_page_size << order;
+	struct page_stat *stat;
+	struct page_stat this = {
+		.order = order,
+	};
+
+	if (use_pfn)
+		page = perf_evsel__intval(evsel, sample, "pfn");
+	else
+		page = perf_evsel__intval(evsel, sample, "page");
+
+	nr_page_frees++;
+	total_page_free_bytes += bytes;
+
+	stat = search_page(page, false);
+	if (stat == NULL) {
+		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
+			  page, order);
+
+		nr_page_nomatch++;
+		total_page_nomatch_bytes += bytes;
+
+		return 0;
+	}
+
+	this.page = page;
+	this.gfp_flags = stat->gfp_flags;
+	this.migrate_type = stat->migrate_type;
+
+	rb_erase(&stat->node, &page_tree);
+	free(stat);
+
+	stat = search_page_alloc_stat(&this, false);
+	if (stat == NULL)
+		return -ENOENT;
+
+	stat->nr_free++;
+	stat->free_bytes += bytes;
+
+	return 0;
+}
+
 typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
 				  struct perf_sample *sample);
 
@@ -270,8 +513,9 @@ static double fragmentation(unsigned long n_req, unsigned long n_alloc)
 	return 100.0 - (100.0 * n_req / n_alloc);
 }
 
-static void __print_result(struct rb_root *root, struct perf_session *session,
-			   int n_lines, int is_caller)
+static void __print_slab_result(struct rb_root *root,
+				struct perf_session *session,
+				int n_lines, int is_caller)
 {
 	struct rb_node *next;
 	struct machine *machine = &session->machines.host;
@@ -323,9 +567,56 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
 	printf("%.105s\n", graph_dotted_line);
 }
 
-static void print_summary(void)
+static const char * const migrate_type_str[] = {
+	"UNMOVABL",
+	"RECLAIM",
+	"MOVABLE",
+	"RESERVED",
+	"CMA/ISLT",
+	"UNKNOWN",
+};
+
+static void __print_page_result(struct rb_root *root,
+				struct perf_session *session __maybe_unused,
+				int n_lines)
+{
+	struct rb_node *next = rb_first(root);
+	const char *format;
+
+	printf("\n%.80s\n", graph_dotted_line);
+	printf(" %-16s | Total alloc (KB) | Hits | Order | Mig.type | GFP flags\n",
+	       use_pfn ? "PFN" : "Page");
+	printf("%.80s\n", graph_dotted_line);
+
+	if (use_pfn)
+		format = " %16llu | %'16llu | %'9d | %5d | %8s | %08lx\n";
+	else
+		format = " %016llx | %'16llu | %'9d | %5d | %8s | %08lx\n";
+
+	while (next && n_lines--) {
+		struct page_stat *data;
+
+		data = rb_entry(next, struct page_stat, node);
+
+		printf(format, (unsigned long long)data->page,
+		       (unsigned long long)data->alloc_bytes / 1024,
+		       data->nr_alloc, data->order,
+		       migrate_type_str[data->migrate_type],
+		       (unsigned long)data->gfp_flags);
+
+		next = rb_next(next);
+	}
+
+	if (n_lines == -1)
+		printf(" ... | ... | ... | ... | ... | ... \n");
+
+	printf("%.80s\n", graph_dotted_line);
+}
+
+static void print_slab_summary(void)
 {
-	printf("\nSUMMARY\n=======\n");
+	printf("\nSUMMARY (SLAB allocator)");
+	printf("\n========================\n");
 	printf("Total bytes requested: %'lu\n", total_requested);
 	printf("Total bytes allocated: %'lu\n", total_allocated);
 	printf("Total bytes wasted on internal fragmentation: %'lu\n",
@@ -335,13 +626,73 @@ static void print_summary(void)
 	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
 }
 
-static void print_result(struct perf_session *session)
+static void print_page_summary(void)
+{
+	int o, m;
+	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
+	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
+
+	printf("\nSUMMARY (page allocator)");
+	printf("\n========================\n");
+	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
+	       nr_page_allocs, total_page_alloc_bytes / 1024);
+	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
+	       nr_page_frees, total_page_free_bytes / 1024);
+	printf("\n");
+
+	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
+	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
+	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
+	       nr_page_allocs - nr_alloc_freed,
+	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
+	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
+	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
+	printf("\n");
+
+	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
+	       nr_page_fails, total_page_fail_bytes / 1024);
+	printf("\n");
+
+	printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
+	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
+	printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
+	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
+	       graph_dotted_line, graph_dotted_line);
+
+	for (o = 0; o < MAX_PAGE_ORDER; o++) {
+		printf("%5d", o);
+		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
+			if (order_stats[o][m])
+				printf(" %'12d", order_stats[o][m]);
+			else
+				printf(" %12c", '.');
+		}
+		printf("\n");
+	}
+}
+
+static void print_slab_result(struct perf_session *session)
 {
 	if (caller_flag)
-		__print_result(&root_caller_sorted, session, caller_lines, 1);
+		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
+	if (alloc_flag)
+		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
+	print_slab_summary();
+}
+
+static void print_page_result(struct perf_session *session)
+{
 	if (alloc_flag)
-		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
-	print_summary();
+		__print_page_result(&page_alloc_sorted, session, alloc_lines);
+	print_page_summary();
+}
+
+static void print_result(struct perf_session *session)
+{
+	if (kmem_slab)
+		print_slab_result(session);
+	if (kmem_page)
+		print_page_result(session);
 }
 
 struct sort_dimension {
@@ -353,8 +704,8 @@ struct sort_dimension {
 static LIST_HEAD(caller_sort);
 static LIST_HEAD(alloc_sort);
 
-static void sort_insert(struct rb_root *root, struct alloc_stat *data,
-			struct list_head *sort_list)
+static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
+			     struct list_head *sort_list)
 {
 	struct rb_node **new = &(root->rb_node);
 	struct rb_node *parent = NULL;
@@ -383,8 +734,8 @@ static void sort_insert(struct rb_root *root, struct alloc_stat *data,
 	rb_insert_color(&data->node, root);
 }
 
-static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
-			  struct list_head *sort_list)
+static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
+			       struct list_head *sort_list)
 {
 	struct rb_node *node;
 	struct alloc_stat *data;
@@ -396,26 +747,79 @@ static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
 		rb_erase(node, root);
 		data = rb_entry(node, struct alloc_stat, node);
-		sort_insert(root_sorted, data, sort_list);
+		sort_slab_insert(root_sorted, data, sort_list);
+	}
+}
+
+static void sort_page_insert(struct rb_root *root, struct page_stat *data)
+{
+	struct rb_node **new = &root->rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		struct page_stat *this;
+		int cmp = 0;
+
+		this = rb_entry(*new, struct page_stat, node);
+		parent = *new;
+
+		/* TODO: support more sort key */
+		cmp = data->alloc_bytes - this->alloc_bytes;
+
+		if (cmp > 0)
+			new = &parent->rb_left;
+		else
+			new = &parent->rb_right;
+	}
+
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted)
+{
+	struct rb_node *node;
+	struct page_stat *data;
+
+	for (;;) {
+		node = rb_first(root);
+		if (!node)
+			break;
+
+		rb_erase(node, root);
+		data = rb_entry(node, struct page_stat, node);
+		sort_page_insert(root_sorted, data);
 	}
 }
 
 static void sort_result(void)
 {
-	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
-	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
+	if (kmem_slab) {
+		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
+				   &alloc_sort);
+		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
+				   &caller_sort);
+	}
+	if (kmem_page) {
+		__sort_page_result(&page_alloc_tree, &page_alloc_sorted);
+	}
 }
 
 static int __cmd_kmem(struct perf_session *session)
 {
 	int err = -EINVAL;
+	struct perf_evsel *evsel;
 	const struct perf_evsel_str_handler kmem_tracepoints[] = {
+		/* slab allocator */
 		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
 		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
 		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
 		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
 		{ "kmem:kfree",			perf_evsel__process_free_event, },
 		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
+		/* page allocator */
+		{ "kmem:mm_page_alloc",		perf_evsel__process_page_alloc_event, },
+		{ "kmem:mm_page_free",		perf_evsel__process_page_free_event, },
 	};
 
 	if (!perf_session__has_traces(session, "kmem record"))
@@ -426,10 +830,20 @@ static int __cmd_kmem(struct perf_session *session)
 		goto out;
 	}
 
+	evlist__for_each(session->evlist, evsel) {
+		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
+		    perf_evsel__field(evsel, "pfn")) {
+			use_pfn = true;
+			break;
+		}
+	}
+
 	setup_pager();
 	err = perf_session__process_events(session);
-	if (err != 0)
+	if (err != 0) {
+		pr_err("error during process events: %d\n", err);
 		goto out;
+	}
 	sort_result();
 	print_result(session);
 out:
@@ -612,6 +1026,22 @@ static int parse_alloc_opt(const struct option *opt __maybe_unused,
 	return 0;
 }
 
+static int parse_slab_opt(const struct option *opt __maybe_unused,
+			  const char *arg __maybe_unused,
+			  int unset __maybe_unused)
+{
+	kmem_slab = (kmem_page + 1);
+	return 0;
+}
+
+static int parse_page_opt(const struct option *opt __maybe_unused,
+			  const char *arg __maybe_unused,
+			  int unset __maybe_unused)
+{
+	kmem_page = (kmem_slab + 1);
+	return 0;
+}
+
 static int parse_line_opt(const struct option *opt __maybe_unused,
 			  const char *arg, int unset __maybe_unused)
 {
@@ -634,6 +1064,8 @@ static int __cmd_record(int argc, const char **argv)
 {
 	const char * const record_args[] = {
 	"record", "-a", "-R", "-c", "1",
+	};
+	const char * const slab_events[] = {
 	"-e", "kmem:kmalloc",
 	"-e", "kmem:kmalloc_node",
 	"-e", "kmem:kfree",
@@ -641,10 +1073,19 @@ static int __cmd_record(int argc, const char **argv)
 	"-e", "kmem:kmem_cache_alloc_node",
 	"-e", "kmem:kmem_cache_free",
 	};
+	const char * const page_events[] = {
+	"-e", "kmem:mm_page_alloc",
+	"-e", "kmem:mm_page_free",
+	};
 	unsigned int rec_argc, i, j;
 	const char **rec_argv;
 
 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+	if (kmem_slab)
+		rec_argc += ARRAY_SIZE(slab_events);
+	if (kmem_page)
+		rec_argc += ARRAY_SIZE(page_events);
+
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
 	if (rec_argv == NULL)
@@ -653,6 +1094,15 @@ static int __cmd_record(int argc, const char **argv)
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
 		rec_argv[i] = strdup(record_args[i]);
 
+	if (kmem_slab) {
+		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
+			rec_argv[i] = strdup(slab_events[j]);
+	}
+	if (kmem_page) {
+		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
+			rec_argv[i] = strdup(page_events[j]);
+	}
+
 	for (j = 1; j < (unsigned int)argc; j++, i++)
 		rec_argv[i] = argv[j];
 
@@ -679,6 +1129,10 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
 	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
 	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
 	OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
+			   parse_slab_opt),
+	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
+			   parse_page_opt),
 	OPT_END()
 	};
 	const char *const kmem_subcommands[] = { "record", "stat", NULL };
@@ -695,6 +1149,9 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
 	if (!argc)
 		usage_with_options(kmem_usage, kmem_options);
 
+	if (kmem_slab == 0 && kmem_page == 0)
+		kmem_slab = 1;  /* for backward compatibility */
+
 	if (!strncmp(argv[0], "rec", 3)) {
 		symbol__init(NULL);
 		return __cmd_record(argc, argv);
@@ -706,6 +1163,17 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
 	if (session == NULL)
 		return -1;
 
+	if (kmem_page) {
+		struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+
+		if (evsel == NULL || evsel->tp_format == NULL) {
+			pr_err("invalid event found.. aborting\n");
+			return -1;
+		}
+
+		kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
+	}
+
 	symbol__init(&session->header.env);
 
 	if (!strcmp(argv[0], "stat")) {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 30545ce2c712..d8bb616ff57c 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -332,6 +332,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
 	else {
 		result->offset += pp->offset;
 		result->line += pp->line;
+		result->retprobe = pp->retprobe;
 		ret = 0;
 	}
 
@@ -654,65 +655,6 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
 	return ntevs;
 }
 
-/*
- * Find a src file from a DWARF tag path. Prepend optional source path prefix
- * and chop off leading directories that do not exist. Result is passed back as
- * a newly allocated path on success.
- * Return 0 if file was found and readable, -errno otherwise.
- */
-static int get_real_path(const char *raw_path, const char *comp_dir,
-			 char **new_path)
-{
-	const char *prefix = symbol_conf.source_prefix;
-
-	if (!prefix) {
-		if (raw_path[0] != '/' && comp_dir)
-			/* If not an absolute path, try to use comp_dir */
-			prefix = comp_dir;
-		else {
-			if (access(raw_path, R_OK) == 0) {
-				*new_path = strdup(raw_path);
-				return *new_path ? 0 : -ENOMEM;
-			} else
-				return -errno;
-		}
-	}
-
-	*new_path = malloc((strlen(prefix) + strlen(raw_path) + 2));
-	if (!*new_path)
-		return -ENOMEM;
-
-	for (;;) {
-		sprintf(*new_path, "%s/%s", prefix, raw_path);
-
-		if (access(*new_path, R_OK) == 0)
-			return 0;
-
-		if (!symbol_conf.source_prefix) {
-			/* In case of searching comp_dir, don't retry */
-			zfree(new_path);
-			return -errno;
-		}
-
-		switch (errno) {
-		case ENAMETOOLONG:
-		case ENOENT:
-		case EROFS:
-		case EFAULT:
-			raw_path = strchr(++raw_path, '/');
-			if (!raw_path) {
-				zfree(new_path);
-				return -ENOENT;
-			}
-			continue;
-
-		default:
-			zfree(new_path);
-			return -errno;
-		}
-	}
-}
-
 #define LINEBUF_SIZE 256
 #define NR_ADDITIONAL_LINES 2
 
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index e3074230f236..b5bf9d5efeaf 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -855,11 +855,22 @@ static int probe_point_lazy_walker(const char *fname, int lineno,
 static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
 {
 	int ret = 0;
+	char *fpath;
 
 	if (intlist__empty(pf->lcache)) {
+		const char *comp_dir;
+
+		comp_dir = cu_get_comp_dir(&pf->cu_die);
+		ret = get_real_path(pf->fname, comp_dir, &fpath);
+		if (ret < 0) {
+			pr_warning("Failed to find source file path.\n");
+			return ret;
+		}
+
 		/* Matching lazy line pattern */
-		ret = find_lazy_match_lines(pf->lcache, pf->fname,
+		ret = find_lazy_match_lines(pf->lcache, fpath,
 					    pf->pev->point.lazy_line);
+		free(fpath);
 		if (ret <= 0)
 			return ret;
 	}
@@ -1055,7 +1066,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
 		if (pp->function)
 			ret = find_probe_point_by_func(pf);
 		else if (pp->lazy_line)
-			ret = find_probe_point_lazy(NULL, pf);
+			ret = find_probe_point_lazy(&pf->cu_die, pf);
 		else {
 			pf->lno = pp->line;
 			ret = find_probe_point_by_line(pf);
@@ -1622,3 +1633,61 @@ found:
 	return (ret < 0) ? ret : lf.found;
 }
 
+/*
+ * Find a src file from a DWARF tag path. Prepend optional source path prefix
+ * and chop off leading directories that do not exist. Result is passed back as
+ * a newly allocated path on success.
+ * Return 0 if file was found and readable, -errno otherwise.
+ */
+int get_real_path(const char *raw_path, const char *comp_dir,
+		  char **new_path)
+{
+	const char *prefix = symbol_conf.source_prefix;
+
+	if (!prefix) {
+		if (raw_path[0] != '/' && comp_dir)
+			/* If not an absolute path, try to use comp_dir */
+			prefix = comp_dir;
+		else {
+			if (access(raw_path, R_OK) == 0) {
+				*new_path = strdup(raw_path);
+				return *new_path ? 0 : -ENOMEM;
+			} else
+				return -errno;
+		}
+	}
+
+	*new_path = malloc((strlen(prefix) + strlen(raw_path) + 2));
+	if (!*new_path)
+		return -ENOMEM;
+
+	for (;;) {
+		sprintf(*new_path, "%s/%s", prefix, raw_path);
+
+		if (access(*new_path, R_OK) == 0)
+			return 0;
+
+		if (!symbol_conf.source_prefix) {
+			/* In case of searching comp_dir, don't retry */
+			zfree(new_path);
+			return -errno;
+		}
+
+		switch (errno) {
+		case ENAMETOOLONG:
+		case ENOENT:
+		case EROFS:
+		case EFAULT:
+			raw_path = strchr(++raw_path, '/');
+			if (!raw_path) {
+				zfree(new_path);
+				return -ENOENT;
+			}
+			continue;
+
+		default:
+			zfree(new_path);
+			return -errno;
+		}
+	}
+}
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 92590b2c7e1c..ebf8c8c81453 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -55,6 +55,10 @@ extern int debuginfo__find_available_vars_at(struct debuginfo *dbg,
 					     struct variable_list **vls,
 					     int max_points, bool externs);
 
+/* Find a src file from a DWARF tag path */
+int get_real_path(const char *raw_path, const char *comp_dir,
+		  char **new_path);
+
 struct probe_finder {
 	struct perf_probe_event	*pev;		/* Target probe event */
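A closing note on the pt_event_add() rework above: it replaces a single catch-all 'out:' label with a ladder of labels, so each failure path unwinds exactly the state set up before it. The same shape in plain C, with hypothetical resource names standing in for the PT buffer and event state (not kernel APIs):

        /* Generic form of the goto-ladder error handling pt_event_add() now
         * uses: later labels fall through to earlier ones, undoing setup in
         * reverse order. All names below are illustrative. */
        #include <stdio.h>
        #include <stdlib.h>

        static int setup_pair(char **pa, char **pb)
        {
                char *a, *b;

                a = malloc(16);
                if (!a)
                        goto fail;              /* nothing acquired yet */

                b = malloc(16);
                if (!b)
                        goto fail_free_a;       /* undo only the first step */

                *pa = a;
                *pb = b;
                return 0;

        fail_free_a:
                free(a);
        fail:
                return -1;
        }

        int main(void)
        {
                char *a, *b;

                if (setup_pair(&a, &b) == 0) {
                        puts("setup ok");
                        free(b);
                        free(a);
                }
                return 0;
        }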