author     David S. Miller <davem@davemloft.net>    2017-11-12 09:17:05 +0900
committer  David S. Miller <davem@davemloft.net>    2017-11-12 09:17:05 +0900
commit     fdae5f37a88caed9d2105f5a1ff609322f9e5416 (patch)
tree       086808d640803b16286a318eb4d0895006de5c19 /arch
parent     7c5556decd0a629e9ee02e93653f75ba7b7da03c (diff)
parent     b39545684a90ef3374abc0969d64c7bc540d128d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/traps.c              28
-rw-r--r--  arch/mips/ar7/platform.c              5
-rw-r--r--  arch/mips/ar7/prom.c                  2
-rw-r--r--  arch/mips/kernel/smp-bmips.c          4
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  10
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c         29
-rw-r--r--  arch/x86/kernel/cpu/Makefile          2
-rw-r--r--  arch/x86/kernel/cpu/aperfmperf.c     11
-rw-r--r--  arch/x86/kernel/cpu/proc.c            4
9 files changed, 60 insertions, 35 deletions
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 948c648fea00..0fcd82f01388 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 	set_fs(fs);
 }
 
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
 {
 	unsigned long addr = instruction_pointer(regs);
 	const int thumb = thumb_mode(regs);
 	const int width = thumb ? 4 : 8;
-	mm_segment_t fs;
 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
 	int i;
 
 	/*
-	 * We need to switch to kernel mode so that we can use __get_user
-	 * to safely read from kernel space. Note that we now dump the
-	 * code first, just in case the backtrace kills us.
+	 * Note that we now dump the code first, just in case the backtrace
+	 * kills us.
 	 */
-	fs = get_fs();
-	set_fs(KERNEL_DS);
 
 	for (i = -4; i < 1 + !!thumb; i++) {
 		unsigned int val, bad;
 
 		if (thumb)
-			bad = __get_user(val, &((u16 *)addr)[i]);
+			bad = get_user(val, &((u16 *)addr)[i]);
 		else
-			bad = __get_user(val, &((u32 *)addr)[i]);
+			bad = get_user(val, &((u32 *)addr)[i]);
 
 		if (!bad)
 			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -188,8 +184,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 		}
 	}
 	printk("%sCode: %s\n", lvl, str);
+}
 
-	set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+	mm_segment_t fs;
+
+	if (!user_mode(regs)) {
+		fs = get_fs();
+		set_fs(KERNEL_DS);
+		__dump_instr(lvl, regs);
+		set_fs(fs);
+	} else {
+		__dump_instr(lvl, regs);
+	}
 }
 
 #ifdef CONFIG_ARM_UNWIND
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index df7acea3747a..4674f1efbe7a 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -575,6 +575,7 @@ static int __init ar7_register_uarts(void)
 	uart_port.type = PORT_AR7;
 	uart_port.uartclk = clk_get_rate(bus_clk) / 2;
 	uart_port.iotype = UPIO_MEM32;
+	uart_port.flags = UPF_FIXED_TYPE;
 	uart_port.regshift = 2;
 
 	uart_port.line = 0;
@@ -653,6 +654,10 @@ static int __init ar7_register_devices(void)
 	u32 val;
 	int res;
 
+	res = ar7_gpio_init();
+	if (res)
+		pr_warn("unable to register gpios: %d\n", res);
+
 	res = ar7_register_uarts();
 	if (res)
 		pr_err("unable to setup uart(s): %d\n", res);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index 4fd83336131a..dd53987a690f 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -246,8 +246,6 @@ void __init prom_init(void)
 	ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
 	ar7_init_env((struct env_var *)fw_arg2);
 	console_config();
-
-	ar7_gpio_init();
 }
 
 #define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 406072e26752..87dcac2447c8 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -591,11 +591,11 @@ void __init bmips_cpu_setup(void)
 
 		/* Flush and enable RAC */
 		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
-		__raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+		__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
 		__raw_readl(cbr + BMIPS_RAC_CONFIG);
 
 		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
-		__raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+		__raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
 		__raw_readl(cbr + BMIPS_RAC_CONFIG);
 
 		cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7c62967d672c..59247af5fd45 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -646,6 +646,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
 		hnow_r = hpte_new_to_old_r(hnow_r);
 	}
+
+	/*
+	 * If the HPT is being resized, don't update the HPTE,
+	 * instead let the guest retry after the resize operation is complete.
+	 * The synchronization for hpte_setup_done test vs. set is provided
+	 * by the HPTE lock.
+	 */
+	if (!kvm->arch.hpte_setup_done)
+		goto out_unlock;
+
 	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
 	    rev->guest_rpte != hpte[2])
 		/* HPTE has been changed under us; let the guest retry */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 73bf1ebfa78f..8d43cf205d34 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2705,11 +2705,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 * Hard-disable interrupts, and check resched flag and signals.
 	 * If we need to reschedule or deliver a signal, clean up
 	 * and return without going into the guest(s).
+	 * If the hpte_setup_done flag has been cleared, don't go into the
+	 * guest because that means a HPT resize operation is in progress.
 	 */
 	local_irq_disable();
 	hard_irq_disable();
 	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info)) {
+	    recheck_signals(&core_info) ||
+	    (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
 		local_irq_enable();
 		vc->vcore_state = VCORE_INACTIVE;
 		/* Unlock all except the primary vcore */
@@ -3078,7 +3081,7 @@ out:
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int n_ceded, i;
+	int n_ceded, i, r;
 	struct kvmppc_vcore *vc;
 	struct kvm_vcpu *v;
 
@@ -3132,6 +3135,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
 	       !signal_pending(current)) {
+		/* See if the HPT and VRMA are ready to go */
+		if (!kvm_is_radix(vcpu->kvm) &&
+		    !vcpu->kvm->arch.hpte_setup_done) {
+			spin_unlock(&vc->lock);
+			r = kvmppc_hv_setup_htab_rma(vcpu);
+			spin_lock(&vc->lock);
+			if (r) {
+				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+				vcpu->arch.ret = r;
+				break;
+			}
+		}
+
 		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
 			kvmppc_vcore_end_preempt(vc);
 
@@ -3249,13 +3266,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	/* On the first time here, set up HTAB and VRMA */
-	if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
-		r = kvmppc_hv_setup_htab_rma(vcpu);
-		if (r)
-			goto out;
-	}
-
 	flush_all_to_thread(current);
 
 	/* Save userspace EBB and other register values */
@@ -3303,7 +3313,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 	mtspr(SPRN_VRSAVE, user_vrsave);
 
- out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
 	return r;
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 236999c54edc..c60922a66385 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -22,7 +22,7 @@ obj-y += common.o
 obj-y += rdrand.o
 obj-y += match.o
 obj-y += bugs.o
-obj-y += aperfmperf.o
+obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
 
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 957813e0180d..0ee83321a313 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -42,6 +42,10 @@ static void aperfmperf_snapshot_khz(void *dummy)
 	s64 time_delta = ktime_ms_delta(now, s->time);
 	unsigned long flags;
 
+	/* Don't bother re-computing within the cache threshold time. */
+	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+		return;
+
 	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
@@ -70,7 +74,6 @@
 
 unsigned int arch_freq_get_on_cpu(int cpu)
 {
-	s64 time_delta;
 	unsigned int khz;
 
 	if (!cpu_khz)
@@ -79,12 +82,6 @@ unsigned int arch_freq_get_on_cpu(int cpu)
 	if (!static_cpu_has(X86_FEATURE_APERFMPERF))
 		return 0;
 
-	/* Don't bother re-computing within the cache threshold time. */
-	time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
-	khz = per_cpu(samples.khz, cpu);
-	if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
-		return khz;
-
 	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
 	khz = per_cpu(samples.khz, cpu);
 	if (khz)
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 4378a729b933..6b7e17bf0b71 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -78,11 +78,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
 
 	if (cpu_has(c, X86_FEATURE_TSC)) {
-		unsigned int freq = arch_freq_get_on_cpu(cpu);
+		unsigned int freq = cpufreq_quick_get(cpu);
 
 		if (!freq)
-			freq = cpufreq_quick_get(cpu);
-		if (!freq)
 			freq = cpu_khz;
 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
 			   freq / 1000, (freq % 1000));