author      Linus Torvalds <torvalds@linux-foundation.org>    2016-05-25 17:05:40 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2016-05-25 17:05:40 -0700
commit      bdc6b758e443c21c39a14c075e5b7e01f095b37b (patch)
tree        40b98b5abd501cc232f41af03eb078282d7a6327 /arch/x86
parent      c4a346002bc06046bc51910a7ade3a0c650c3d34 (diff)
parent      0c9f790fcbdaf8cfb6dd7fb4e88fadf55082e37e (diff)
download    linux-stable-bdc6b758e443c21c39a14c075e5b7e01f095b37b.tar.gz
            linux-stable-bdc6b758e443c21c39a14c075e5b7e01f095b37b.tar.bz2
            linux-stable-bdc6b758e443c21c39a14c075e5b7e01f095b37b.zip
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"Mostly tooling and PMU driver fixes, but also a number of late updates
such as the reworking of the call-chain size limiting logic to make
call-graph recording more robust, plus tooling side changes for the
new 'backwards ring-buffer' extension to the perf ring-buffer"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
perf record: Read from backward ring buffer
perf record: Rename variable to make code clear
perf record: Prevent reading invalid data in record__mmap_read
perf evlist: Add API to pause/resume
perf trace: Use the ptr->name beautifier as default for "filename" args
perf trace: Use the fd->name beautifier as default for "fd" args
perf report: Add srcline_from/to branch sort keys
perf evsel: Record fd into perf_mmap
perf evsel: Add overwrite attribute and check write_backward
perf tools: Set buildid dir under symfs when --symfs is provided
perf trace: Only auto set call-graph to "dwarf" when syscalls are being traced
perf annotate: Sort list of recognised instructions
perf annotate: Fix identification of ARM blt and bls instructions
perf tools: Fix usage of max_stack sysctl
perf callchain: Stop validating callchains by the max_stack sysctl
perf trace: Fix exit_group() formatting
perf top: Use machine->kptr_restrict_warned
perf trace: Warn when trying to resolve kernel addresses with kptr_restrict=1
perf machine: Do not bail out if not managing to read ref reloc symbol
perf/x86/intel/p4: Trival indentation fix, remove space
...
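For context on the call-chain size limiting rework mentioned in the pull message above: the arch/x86 hunks below switch the unwinders from the global sysctl_perf_event_max_stack to a per-sample bound carried in struct perf_callchain_entry_ctx. The following is a minimal user-space C sketch of that idea, not the kernel's exact definitions (those live in include/linux/perf_event.h); the fixed SKETCH_MAX_IPS size and the simplified field layout are illustrative assumptions.

/*
 * Sketch only: the rework threads a per-sample context through the
 * unwinders so the depth limit travels with the sample instead of being
 * re-read from the global sysctl_perf_event_max_stack on every frame.
 */
#include <stdint.h>

#define SKETCH_MAX_IPS 127   /* fixed size here; the kernel sizes ip[] from the sysctl */

struct perf_callchain_entry {
        uint64_t nr;                     /* number of instruction pointers captured */
        uint64_t ip[SKETCH_MAX_IPS];     /* captured return addresses */
};

struct perf_callchain_entry_ctx {
        struct perf_callchain_entry *entry;   /* backing storage for this sample */
        uint32_t max_stack;                   /* per-sample depth cap */
        uint32_t nr;                          /* frames stored so far */
};

/* Store one frame only while this sample's own cap has not been reached. */
static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx,
                                       uint64_t ip)
{
        if (ctx->nr < ctx->max_stack) {
                ctx->entry->ip[ctx->entry->nr++] = ip;
                ctx->nr++;
                return 0;
        }
        return -1;
}

With the bound stored in the context, the x86 frame-pointer walkers simply test entry->nr < entry->max_stack on each iteration, which is exactly the change made in the arch/x86/events/core.c hunks of this merge.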
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/events/core.c          | 14
-rw-r--r--   arch/x86/events/intel/p4.c      |  2
-rw-r--r--   arch/x86/events/intel/uncore.c  |  2
3 files changed, 9 insertions, 9 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 73a75aa5a66d..33787ee817f0 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)
 
 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         return perf_callchain_store(entry, addr);
 }
@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                 /* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
 #include <asm/compat.h>
 
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
         /* 32-bit process in 64-bit kernel. */
         unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 
         fp = compat_ptr(ss_base + regs->bp);
         pagefault_disable();
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->nr < entry->max_stack) {
                 unsigned long bytes;
                 frame.next_frame = 0;
                 frame.return_address = 0;
@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
         return 0;
 }
 #endif
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct stack_frame frame;
         const void __user *fp;
@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
                 return;
 
         pagefault_disable();
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->nr < entry->max_stack) {
                 unsigned long bytes;
                 frame.next_frame = NULL;
                 frame.return_address = 0;
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index 0a5ede187d9c..eb0533558c2b 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -826,7 +826,7 @@ static int p4_hw_config(struct perf_event *event)
          * Clear bits we reserve to be managed by kernel itself
          * and never allowed from a user space
          */
-         event->attr.config &= P4_CONFIG_MASK;
+        event->attr.config &= P4_CONFIG_MASK;
 
         rc = p4_validate_raw_event(event);
         if (rc)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 16c178916412..fce74062d981 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -891,7 +891,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                 return -ENODEV;
 
         pkg = topology_phys_to_logical_pkg(phys_id);
-        if (WARN_ON_ONCE(pkg < 0))
+        if (pkg < 0)
                 return -EINVAL;
 
         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {