Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apb_timer.c                  |  4
-rw-r--r--  arch/x86/kernel/apic/apic.c                  |  4
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c        |  2
-rw-r--r--  arch/x86/kernel/cpu/common.c                 |  6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c      |  6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c             | 46
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c         |  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c       | 22
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c             | 22
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c         |  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c       | 18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c    | 20
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c   | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c  | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_knc.c         |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c          |  6
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c              |  8
-rw-r--r--  arch/x86/kernel/irq_64.c                     |  6
-rw-r--r--  arch/x86/kernel/kvm.c                        | 22
19 files changed, 112 insertions(+), 112 deletions(-)
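
For context before the hunks: every change below is the same mechanical conversion from the deprecated __get_cpu_var()/__raw_get_cpu_var() accessors to the this_cpu_* per-cpu API. A minimal sketch of the two recurring patterns follows; the per-cpu variables my_state and my_count are made up for illustration and are not part of this commit.

#include <linux/percpu.h>

struct my_state {
	int busy;
};

static DEFINE_PER_CPU(struct my_state, my_state);
static DEFINE_PER_CPU(unsigned long, my_count);

static void percpu_accessor_example(void)
{
	struct my_state *s;

	/* Old style: &__get_cpu_var(var) yielded a pointer to this CPU's copy.
	 * New style: this_cpu_ptr() takes the address of the per-cpu symbol. */
	s = this_cpu_ptr(&my_state);
	s->busy = 1;

	/* Old style: __get_cpu_var(var) was used as an lvalue for scalars.
	 * New style: dedicated read/write accessors, which x86 can lower to a
	 * single %gs-relative instruction instead of computing a pointer first. */
	__this_cpu_write(my_count, __this_cpu_read(my_count) + 1);
}
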
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index af5b08ab3b71..5972b108f15a 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -146,7 +146,7 @@ static inline int is_apbt_capable(void) static int __init apbt_clockevent_register(void) { struct sfi_timer_table_entry *mtmr; - struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev); + struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev); mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); if (mtmr == NULL) { @@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void) if (!cpu) return; - adev = &__get_cpu_var(cpu_apbt_dev); + adev = this_cpu_ptr(&cpu_apbt_dev); if (!adev->timer) { adev->timer = dw_apb_clockevent_init(cpu, adev->name, APBT_CLOCKEVENT_RATING, adev_virt_addr(adev), diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 67760275544b..00853b254ab0 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); */ static void setup_APIC_timer(void) { - struct clock_event_device *levt = &__get_cpu_var(lapic_events); + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); if (this_cpu_has(X86_FEATURE_ARAT)) { lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; @@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) static int __init calibrate_APIC_clock(void) { - struct clock_event_device *levt = &__get_cpu_var(lapic_events); + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); void (*real_handler)(struct clock_event_device *dev); unsigned long deltaj; long delta, deltatsc; diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 6ce600f9bc78..e658f21681c8 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -42,7 +42,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) * We are to modify mask, so we need an own copy * and be sure it's manipulated with irq off. 
*/ - ipi_mask_ptr = __raw_get_cpu_var(ipi_mask); + ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask); cpumask_copy(ipi_mask_ptr, mask); /* diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3eff36f719fb..4b4f78c9ba19 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1200,9 +1200,9 @@ DEFINE_PER_CPU(int, debug_stack_usage); int is_debug_stack(unsigned long addr) { - return __get_cpu_var(debug_stack_usage) || - (addr <= __get_cpu_var(debug_stack_addr) && - addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); + return __this_cpu_read(debug_stack_usage) || + (addr <= __this_cpu_read(debug_stack_addr) && + addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); } NOKPROBE_SYMBOL(is_debug_stack); diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 5ac2d1fb28bc..4cfba4371a71 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex); static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) { int cpu = smp_processor_id(); - struct mce *m = &__get_cpu_var(injectm); + struct mce *m = this_cpu_ptr(&injectm); if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) return NMI_DONE; cpumask_clear_cpu(cpu, mce_inject_cpumask); @@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) static void mce_irq_ipi(void *info) { int cpu = smp_processor_id(); - struct mce *m = &__get_cpu_var(injectm); + struct mce *m = this_cpu_ptr(&injectm); if (cpumask_test_cpu(cpu, mce_inject_cpumask) && m->inject_flags & MCJ_EXCEPTION) { @@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info) /* Inject mce on current CPU */ static int raise_local(void) { - struct mce *m = &__get_cpu_var(injectm); + struct mce *m = this_cpu_ptr(&injectm); int context = MCJ_CTX(m->inject_flags); int ret = 0; int cpu = m->extcpu; diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index bd9ccda8087f..61a9668cebfd 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr) if (offset < 0) return 0; - return *(u64 *)((char *)&__get_cpu_var(injectm) + offset); + return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); } if (rdmsrl_safe(msr, &v)) { @@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v) int offset = msr_to_offset(msr); if (offset >= 0) - *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v; + *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; return; } wrmsrl(msr, v); @@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring); /* Runs with CPU affinity in workqueue */ static int mce_ring_empty(void) { - struct mce_ring *r = &__get_cpu_var(mce_ring); + struct mce_ring *r = this_cpu_ptr(&mce_ring); return r->start == r->end; } @@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn) *pfn = 0; get_cpu(); - r = &__get_cpu_var(mce_ring); + r = this_cpu_ptr(&mce_ring); if (r->start == r->end) goto out; *pfn = r->ring[r->start]; @@ -504,7 +504,7 @@ out: /* Always runs in MCE context with preempt off */ static int mce_ring_add(unsigned long pfn) { - struct mce_ring *r = &__get_cpu_var(mce_ring); + struct mce_ring *r = this_cpu_ptr(&mce_ring); unsigned next; next = (r->end + 1) % MCE_RING_SIZE; @@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c) static void mce_schedule_work(void) { if (!mce_ring_empty()) - schedule_work(&__get_cpu_var(mce_work)); + 
schedule_work(this_cpu_ptr(&mce_work)); } DEFINE_PER_CPU(struct irq_work, mce_irq_work); @@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs) return; } - irq_work_queue(&__get_cpu_var(mce_irq_work)); + irq_work_queue(this_cpu_ptr(&mce_irq_work)); } /* @@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) mce_gather_info(&m, regs); - final = &__get_cpu_var(mces_seen); + final = this_cpu_ptr(&mces_seen); *final = m; memset(valid_banks, 0, sizeof(valid_banks)); @@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) = static int cmc_error_seen(void) { - unsigned long *v = &__get_cpu_var(mce_polled_error); + unsigned long *v = this_cpu_ptr(&mce_polled_error); return test_and_clear_bit(0, v); } static void mce_timer_fn(unsigned long data) { - struct timer_list *t = &__get_cpu_var(mce_timer); + struct timer_list *t = this_cpu_ptr(&mce_timer); unsigned long iv; int notify; WARN_ON(smp_processor_id() != data); - if (mce_available(__this_cpu_ptr(&cpu_info))) { + if (mce_available(this_cpu_ptr(&cpu_info))) { machine_check_poll(MCP_TIMESTAMP, - &__get_cpu_var(mce_poll_banks)); + this_cpu_ptr(&mce_poll_banks)); mce_intel_cmci_poll(); } @@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data) */ void mce_timer_kick(unsigned long interval) { - struct timer_list *t = &__get_cpu_var(mce_timer); + struct timer_list *t = this_cpu_ptr(&mce_timer); unsigned long when = jiffies + interval; unsigned long iv = __this_cpu_read(mce_next_interval); @@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t) static void __mcheck_cpu_init_timer(void) { - struct timer_list *t = &__get_cpu_var(mce_timer); + struct timer_list *t = this_cpu_ptr(&mce_timer); unsigned int cpu = smp_processor_id(); setup_timer(t, mce_timer_fn, cpu); @@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) __mcheck_cpu_init_generic(); __mcheck_cpu_init_vendor(c); __mcheck_cpu_init_timer(); - INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); - init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb); + INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work); + init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb); } /* @@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = { static void __mce_disable_bank(void *arg) { int bank = *((int *)arg); - __clear_bit(bank, __get_cpu_var(mce_poll_banks)); + __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); cmci_disable_bank(bank); } @@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void) static void mce_syscore_resume(void) { __mcheck_cpu_init_generic(); - __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); + __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); } static struct syscore_ops mce_syscore_ops = { @@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = { static void mce_cpu_restart(void *data) { - if (!mce_available(__this_cpu_ptr(&cpu_info))) + if (!mce_available(raw_cpu_ptr(&cpu_info))) return; __mcheck_cpu_init_generic(); __mcheck_cpu_init_timer(); @@ -2096,14 +2096,14 @@ static void mce_restart(void) /* Toggle features for corrected errors */ static void mce_disable_cmci(void *data) { - if (!mce_available(__this_cpu_ptr(&cpu_info))) + if (!mce_available(raw_cpu_ptr(&cpu_info))) return; cmci_clear(); } static void mce_enable_ce(void *all) { - if (!mce_available(__this_cpu_ptr(&cpu_info))) + if (!mce_available(raw_cpu_ptr(&cpu_info))) return; cmci_reenable(); cmci_recheck(); @@ -2336,7 +2336,7 @@ static void 
mce_disable_cpu(void *h) unsigned long action = *(unsigned long *)h; int i; - if (!mce_available(__this_cpu_ptr(&cpu_info))) + if (!mce_available(raw_cpu_ptr(&cpu_info))) return; if (!(action & CPU_TASKS_FROZEN)) @@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h) unsigned long action = *(unsigned long *)h; int i; - if (!mce_available(__this_cpu_ptr(&cpu_info))) + if (!mce_available(raw_cpu_ptr(&cpu_info))) return; if (!(action & CPU_TASKS_FROZEN)) diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 1e49f8f41276..5d4999f95aec 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void) * event. */ machine_check_poll(MCP_TIMESTAMP, - &__get_cpu_var(mce_poll_banks)); + this_cpu_ptr(&mce_poll_banks)); if (high & MASK_OVERFLOW_HI) { rdmsrl(address, m.misc); diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 3bdb95ae8c43..b3c97bafc123 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void) { if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) return; - machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); + machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); } void mce_intel_hcpu_update(unsigned long cpu) @@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void) u64 val; raw_spin_lock_irqsave(&cmci_discover_lock, flags); - owned = __get_cpu_var(mce_banks_owned); + owned = this_cpu_ptr(mce_banks_owned); for_each_set_bit(bank, owned, MAX_NR_BANKS) { rdmsrl(MSR_IA32_MCx_CTL2(bank), val); val &= ~MCI_CTL2_CMCI_EN; @@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void) { if (cmci_storm_detect()) return; - machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); + machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); mce_notify_irq(); } @@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void) */ static void cmci_discover(int banks) { - unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); + unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned); unsigned long flags; int i; int bios_wrong_thresh = 0; @@ -228,7 +228,7 @@ static void cmci_discover(int banks) /* Already owned by someone else? */ if (val & MCI_CTL2_CMCI_EN) { clear_bit(i, owned); - __clear_bit(i, __get_cpu_var(mce_poll_banks)); + __clear_bit(i, this_cpu_ptr(mce_poll_banks)); continue; } @@ -252,7 +252,7 @@ static void cmci_discover(int banks) /* Did the enable bit stick? -- the bank supports CMCI */ if (val & MCI_CTL2_CMCI_EN) { set_bit(i, owned); - __clear_bit(i, __get_cpu_var(mce_poll_banks)); + __clear_bit(i, this_cpu_ptr(mce_poll_banks)); /* * We are able to set thresholds for some banks that * had a threshold of 0. 
This means the BIOS has not @@ -263,7 +263,7 @@ static void cmci_discover(int banks) (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) bios_wrong_thresh = 1; } else { - WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); + WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks))); } } raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); @@ -284,10 +284,10 @@ void cmci_recheck(void) unsigned long flags; int banks; - if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) + if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) return; local_irq_save(flags); - machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); + machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); local_irq_restore(flags); } @@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank) { u64 val; - if (!test_bit(bank, __get_cpu_var(mce_banks_owned))) + if (!test_bit(bank, this_cpu_ptr(mce_banks_owned))) return; rdmsrl(MSR_IA32_MCx_CTL2(bank), val); val &= ~MCI_CTL2_CMCI_EN; wrmsrl(MSR_IA32_MCx_CTL2(bank), val); - __clear_bit(bank, __get_cpu_var(mce_banks_owned)); + __clear_bit(bank, this_cpu_ptr(mce_banks_owned)); } /* diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 16c73022306e..1b8299dd3d91 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -494,7 +494,7 @@ static int __x86_pmu_event_init(struct perf_event *event) void x86_pmu_disable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { @@ -512,7 +512,7 @@ void x86_pmu_disable_all(void) static void x86_pmu_disable(struct pmu *pmu) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (!x86_pmu_initialized()) return; @@ -529,7 +529,7 @@ static void x86_pmu_disable(struct pmu *pmu) void x86_pmu_enable_all(int added) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { @@ -876,7 +876,7 @@ static void x86_pmu_start(struct perf_event *event, int flags); static void x86_pmu_enable(struct pmu *pmu) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct perf_event *event; struct hw_perf_event *hwc; int i, added = cpuc->n_added; @@ -1030,7 +1030,7 @@ void x86_pmu_enable_event(struct perf_event *event) */ static int x86_pmu_add(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc; int assign[X86_PMC_IDX_MAX]; int n, n0, ret; @@ -1081,7 +1081,7 @@ out: static void x86_pmu_start(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx = event->hw.idx; if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) @@ -1160,7 +1160,7 @@ void perf_event_print_debug(void) void x86_pmu_stop(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { @@ -1182,7 +1182,7 @@ void x86_pmu_stop(struct perf_event 
*event, int flags) static void x86_pmu_del(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int i; /* @@ -1237,7 +1237,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - cpuc = &__get_cpu_var(cpu_hw_events); + cpuc = this_cpu_ptr(&cpu_hw_events); /* * Some chipsets need to unmask the LVTPC in a particular spot @@ -1646,7 +1646,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) */ static int x86_pmu_commit_txn(struct pmu *pmu) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int assign[X86_PMC_IDX_MAX]; int n, ret; @@ -2005,7 +2005,7 @@ static unsigned long get_segment_base(unsigned int segment) if (idx > GDT_ENTRIES) return 0; - desc = __this_cpu_ptr(&gdt_page.gdt[0]); + desc = raw_cpu_ptr(gdt_page.gdt); } return get_desc_base(desc + idx); diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index beeb7cc07044..28926311aac1 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -699,7 +699,7 @@ __init int amd_pmu_init(void) void amd_pmu_enable_virt(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); cpuc->perf_ctr_virt_mask = 0; @@ -711,7 +711,7 @@ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); void amd_pmu_disable_virt(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); /* * We only mask out the Host-only bit so that host-only counting works diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3851def5057c..a73947c53b65 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1174,7 +1174,7 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) static void intel_pmu_disable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); @@ -1187,7 +1187,7 @@ static void intel_pmu_disable_all(void) static void intel_pmu_enable_all(int added) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); intel_pmu_pebs_enable_all(); intel_pmu_lbr_enable_all(); @@ -1221,7 +1221,7 @@ static void intel_pmu_enable_all(int added) */ static void intel_pmu_nhm_workaround(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); static const unsigned long nhm_magic[4] = { 0x4300B5, 0x4300D2, @@ -1320,7 +1320,7 @@ static inline bool event_is_checkpointed(struct perf_event *event) static void intel_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { intel_pmu_disable_bts(); @@ -1384,7 +1384,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) static void intel_pmu_enable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (unlikely(hwc->idx == 
INTEL_PMC_IDX_FIXED_BTS)) { if (!__this_cpu_read(cpu_hw_events.enabled)) @@ -1478,7 +1478,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) u64 status; int handled; - cpuc = &__get_cpu_var(cpu_hw_events); + cpuc = this_cpu_ptr(&cpu_hw_events); /* * No known reason to not always do late ACK, @@ -1910,7 +1910,7 @@ EXPORT_SYMBOL_GPL(perf_guest_get_msrs); static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; @@ -1931,7 +1931,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; int idx; @@ -1965,7 +1965,7 @@ static void core_pmu_enable_event(struct perf_event *event) static void core_pmu_enable_all(int added) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b1553d05a5cb..46211bcc813e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -474,7 +474,7 @@ void intel_pmu_enable_bts(u64 config) void intel_pmu_disable_bts(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); unsigned long debugctlmsr; if (!cpuc->ds) @@ -491,7 +491,7 @@ void intel_pmu_disable_bts(void) int intel_pmu_drain_bts_buffer(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct debug_store *ds = cpuc->ds; struct bts_record { u64 from; @@ -669,7 +669,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event) void intel_pmu_pebs_enable(struct perf_event *event) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; @@ -684,7 +684,7 @@ void intel_pmu_pebs_enable(struct perf_event *event) void intel_pmu_pebs_disable(struct perf_event *event) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; cpuc->pebs_enabled &= ~(1ULL << hwc->idx); @@ -702,7 +702,7 @@ void intel_pmu_pebs_disable(struct perf_event *event) void intel_pmu_pebs_enable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (cpuc->pebs_enabled) wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); @@ -710,7 +710,7 @@ void intel_pmu_pebs_enable_all(void) void intel_pmu_pebs_disable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (cpuc->pebs_enabled) wrmsrl(MSR_IA32_PEBS_ENABLE, 0); @@ -718,7 +718,7 @@ void intel_pmu_pebs_disable_all(void) static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct 
cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); unsigned long from = cpuc->lbr_entries[0].from; unsigned long old_to, to = cpuc->lbr_entries[0].to; unsigned long ip = regs->ip; @@ -829,7 +829,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, * We cast to the biggest pebs_record but are careful not to * unconditionally access the 'extra' entries. */ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct pebs_record_hsw *pebs = __pebs; struct perf_sample_data data; struct pt_regs regs; @@ -916,7 +916,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct debug_store *ds = cpuc->ds; struct perf_event *event = cpuc->events[0]; /* PMC0 only */ struct pebs_record_core *at, *top; @@ -957,7 +957,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct debug_store *ds = cpuc->ds; struct perf_event *event = NULL; void *at, *top; diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 4af10617de33..45fa730a5283 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -133,7 +133,7 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc); static void __intel_pmu_lbr_enable(void) { u64 debugctl; - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (cpuc->lbr_sel) wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); @@ -183,7 +183,7 @@ void intel_pmu_lbr_reset(void) void intel_pmu_lbr_enable(struct perf_event *event) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (!x86_pmu.lbr_nr) return; @@ -203,7 +203,7 @@ void intel_pmu_lbr_enable(struct perf_event *event) void intel_pmu_lbr_disable(struct perf_event *event) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (!x86_pmu.lbr_nr) return; @@ -220,7 +220,7 @@ void intel_pmu_lbr_disable(struct perf_event *event) void intel_pmu_lbr_enable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (cpuc->lbr_users) __intel_pmu_lbr_enable(); @@ -228,7 +228,7 @@ void intel_pmu_lbr_enable_all(void) void intel_pmu_lbr_disable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (cpuc->lbr_users) __intel_pmu_lbr_disable(); @@ -332,7 +332,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) void intel_pmu_lbr_read(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (!cpuc->lbr_users) return; diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 619f7699487a..d64f275fe274 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v) * or 
use ldexp(count, -32). * Watts = Joules/Time delta */ - return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); + return v << (32 - __this_cpu_read(rapl_pmu->hw_unit)); } static u64 rapl_event_update(struct perf_event *event) @@ -187,7 +187,7 @@ static void rapl_stop_hrtimer(struct rapl_pmu *pmu) static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) { - struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); struct perf_event *event; unsigned long flags; @@ -234,7 +234,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu, static void rapl_pmu_event_start(struct perf_event *event, int mode) { - struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); unsigned long flags; spin_lock_irqsave(&pmu->lock, flags); @@ -244,7 +244,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode) static void rapl_pmu_event_stop(struct perf_event *event, int mode) { - struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); struct hw_perf_event *hwc = &event->hw; unsigned long flags; @@ -278,7 +278,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode) static int rapl_pmu_event_add(struct perf_event *event, int mode) { - struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); struct hw_perf_event *hwc = &event->hw; unsigned long flags; @@ -696,7 +696,7 @@ static int __init rapl_pmu_init(void) return -1; } - pmu = __get_cpu_var(rapl_pmu); + pmu = __this_cpu_read(rapl_pmu); pr_info("RAPL PMU detected, hw unit 2^-%d Joules," " API unit is 2^-32 Joules," diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c index 838fa8772c62..5b0c232d1ee6 100644 --- a/arch/x86/kernel/cpu/perf_event_knc.c +++ b/arch/x86/kernel/cpu/perf_event_knc.c @@ -217,7 +217,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs) int bit, loops; u64 status; - cpuc = &__get_cpu_var(cpu_hw_events); + cpuc = this_cpu_ptr(&cpu_hw_events); knc_pmu_disable_all(); diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 5d466b7d8609..f2e56783af3d 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -915,7 +915,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) static void p4_pmu_disable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { @@ -984,7 +984,7 @@ static void p4_pmu_enable_event(struct perf_event *event) static void p4_pmu_enable_all(int added) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { @@ -1004,7 +1004,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - cpuc = &__get_cpu_var(cpu_hw_events); + cpuc = this_cpu_ptr(&cpu_hw_events); for (idx = 0; idx < x86_pmu.num_counters; idx++) { int overflow; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 5f9cf20cdb68..3d5fb509bdeb 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -108,7 +108,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) int i; for (i = 0; i < HBP_NUM; i++) { - struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); + struct 
perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); if (!*slot) { *slot = bp; @@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) set_debugreg(info->address, i); __this_cpu_write(cpu_debugreg[i], info->address); - dr7 = &__get_cpu_var(cpu_dr7); + dr7 = this_cpu_ptr(&cpu_dr7); *dr7 |= encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); @@ -146,7 +146,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) int i; for (i = 0; i < HBP_NUM; i++) { - struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); + struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); if (*slot == bp) { *slot = NULL; @@ -157,7 +157,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) return; - dr7 = &__get_cpu_var(cpu_dr7); + dr7 = this_cpu_ptr(&cpu_dr7); *dr7 &= ~__encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 4d1c746892eb..e4b503d5558c 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -52,13 +52,13 @@ static inline void stack_overflow_check(struct pt_regs *regs) regs->sp <= curbase + THREAD_SIZE) return; - irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) + + irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) + STACK_TOP_MARGIN; - irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr); + irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) return; - oist = &__get_cpu_var(orig_ist); + oist = this_cpu_ptr(&orig_ist); estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; if (regs->sp >= estack_top && regs->sp <= estack_bottom) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 95c3cb16af3e..f6945bef2cd1 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -244,9 +244,9 @@ u32 kvm_read_and_reset_pf_reason(void) { u32 reason = 0; - if (__get_cpu_var(apf_reason).enabled) { - reason = __get_cpu_var(apf_reason).reason; - __get_cpu_var(apf_reason).reason = 0; + if (__this_cpu_read(apf_reason.enabled)) { + reason = __this_cpu_read(apf_reason.reason); + __this_cpu_write(apf_reason.reason, 0); } return reason; @@ -319,7 +319,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val) * there's no need for lock or memory barriers. * An optimization barrier is implied in apic write. */ - if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi))) + if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) return; apic_write(APIC_EOI, APIC_EOI_ACK); } @@ -330,13 +330,13 @@ void kvm_guest_cpu_init(void) return; if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { - u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason)); + u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); #ifdef CONFIG_PREEMPT pa |= KVM_ASYNC_PF_SEND_ALWAYS; #endif wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); - __get_cpu_var(apf_reason).enabled = 1; + __this_cpu_write(apf_reason.enabled, 1); printk(KERN_INFO"KVM setup async PF for cpu %d\n", smp_processor_id()); } @@ -345,8 +345,8 @@ void kvm_guest_cpu_init(void) unsigned long pa; /* Size alignment is implied but just to make it explicit. 
*/ BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); - __get_cpu_var(kvm_apic_eoi) = 0; - pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi)) + __this_cpu_write(kvm_apic_eoi, 0); + pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) | KVM_MSR_ENABLED; wrmsrl(MSR_KVM_PV_EOI_EN, pa); } @@ -357,11 +357,11 @@ void kvm_guest_cpu_init(void) static void kvm_pv_disable_apf(void) { - if (!__get_cpu_var(apf_reason).enabled) + if (!__this_cpu_read(apf_reason.enabled)) return; wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); - __get_cpu_var(apf_reason).enabled = 0; + __this_cpu_write(apf_reason.enabled, 0); printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", smp_processor_id()); @@ -724,7 +724,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) if (in_nmi()) return; - w = &__get_cpu_var(klock_waiting); + w = this_cpu_ptr(&klock_waiting); cpu = smp_processor_id(); start = spin_time_start(); |
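
Two less common variants also appear in the hunks above: __this_cpu_ptr() becomes raw_cpu_ptr() (same pointer computation, minus the preemption sanity checking), and a per-cpu cpumask_var_t is accessed through this_cpu_cpumask_var_ptr(), which returns a struct cpumask * whether or not CONFIG_CPUMASK_OFFSTACK is set. A minimal sketch, again with made-up per-cpu variables (example_info, example_mask), not taken from the commit itself:

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct my_info {
	int node;
};

static DEFINE_PER_CPU(struct my_info, example_info);
static DEFINE_PER_CPU(cpumask_var_t, example_mask);

static void percpu_variant_example(const struct cpumask *src)
{
	/* Replaces __this_cpu_ptr(&example_info); no debug check that
	 * preemption is disabled. */
	struct my_info *info = raw_cpu_ptr(&example_info);

	/* Replaces __raw_get_cpu_var(example_mask); works whether
	 * cpumask_var_t is an inline array or an off-stack pointer. */
	struct cpumask *mask = this_cpu_cpumask_var_ptr(example_mask);

	info->node = 0;
	cpumask_copy(mask, src);
}
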