Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/acpi/sleep.c   |   4 +++-
-rw-r--r--  arch/x86_64/kernel/early-quirks.c |   2 +-
-rw-r--r--  arch/x86_64/kernel/functionlist   |   1 -
-rw-r--r--  arch/x86_64/kernel/hpet.c         |   9 ++++++---
-rw-r--r--  arch/x86_64/kernel/i8259.c        |   6 +++---
-rw-r--r--  arch/x86_64/kernel/k8.c           |   4 ++--
-rw-r--r--  arch/x86_64/kernel/nmi.c          | 130 +++++++++++++++++++-------
-rw-r--r--  arch/x86_64/kernel/pci-gart.c     |   6 +++++-
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S  |   2 +-
9 files changed, 119 insertions(+), 45 deletions(-)
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 23178ce6c783..e1548fbe95ae 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -66,8 +66,10 @@ static void init_low_mapping(void)
{
pgd_t *slot0 = pgd_offset(current->mm, 0UL);
low_ptr = *slot0;
+ /* FIXME: We're playing with the current task's page tables here, which
+ * is potentially dangerous on SMP systems.
+ */
set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
- WARN_ON(num_online_cpus() != 1);
local_flush_tlb();
}
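
The hunk above saves the current task's top-level entry for virtual address 0 in low_ptr, aliases it to the kernel mapping, and restores it from low_ptr later (not shown in this hunk). A minimal userspace sketch of that save/replace/restore shape; the pgd_t and table here are this sketch's own stand-ins, not the kernel's types:

/* Model of the save/replace/restore done around the low identity
 * mapping: slot 0 temporarily aliases another entry, and the saved
 * copy is put back afterwards.  The FIXME above applies: nothing
 * stops a concurrent user of the table from seeing the alias. */
#include <stdio.h>

typedef struct { unsigned long val; } pgd_t;    /* stand-in type */

static pgd_t table[2] = { { 0x1111 }, { 0x2222 } };
static pgd_t low_ptr;                           /* saved slot 0 */

static void init_low_mapping_model(void)
{
    low_ptr = table[0];     /* save the original entry */
    table[0] = table[1];    /* alias slot 0 to the "high" mapping */
}

static void restore_low_mapping_model(void)
{
    table[0] = low_ptr;     /* undo the temporary aliasing */
}

int main(void)
{
    init_low_mapping_model();
    printf("slot0 while remapped: %#lx\n", table[0].val);
    restore_low_mapping_model();
    printf("slot0 after restore:  %#lx\n", table[0].val);
    return 0;
}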
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 148c6bcf5bb4..fede55a53995 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -88,7 +88,7 @@ struct chipset {
void (*f)(void);
};
-static struct __initdata chipset early_qrk[] = {
+static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
{ PCI_VENDOR_ID_VIA, via_bugs },
{ PCI_VENDOR_ID_ATI, ati_bugs },
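
The fix above is purely about where the attribute sits: written between "struct" and the tag, __initdata (a section attribute) attaches to the type, where gcc warns and drops it, so the array silently stayed out of .init.data; written after the declarator it marks the variable itself. A standalone sketch with the attribute spelled out by hand (MY_INITDATA is this sketch's own name; the kernel's __initdata expands to a similar section attribute):

/* Attribute placement: on the variable, not the struct type. */
#define MY_INITDATA __attribute__((__section__(".init.data")))

struct chipset {
    int vendor;
    void (*f)(void);
};

/* Wrong: "static struct MY_INITDATA chipset early_qrk[] = ..." --
 * gcc warns that the section attribute does not apply to types and
 * ignores it.  Right: attribute after the declarator, as below. */
static struct chipset early_qrk[] MY_INITDATA = {
    { 0x10de, 0 },    /* PCI_VENDOR_ID_NVIDIA */
    { 0x1106, 0 },    /* PCI_VENDOR_ID_VIA */
    { 0x1002, 0 },    /* PCI_VENDOR_ID_ATI */
    {}
};

int main(void)
{
    return early_qrk[0].vendor != 0x10de;   /* exercises the array */
}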
diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
index 01fa23580c85..7ae18ec12454 100644
--- a/arch/x86_64/kernel/functionlist
+++ b/arch/x86_64/kernel/functionlist
@@ -514,7 +514,6 @@
*(.text.dentry_open)
*(.text.dentry_iput)
*(.text.bio_alloc)
-*(.text.alloc_skb_from_cache)
*(.text.wait_on_page_bit)
*(.text.vfs_readdir)
*(.text.vfs_lstat)
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c
index 8cf0b8a13778..b8286968662d 100644
--- a/arch/x86_64/kernel/hpet.c
+++ b/arch/x86_64/kernel/hpet.c
@@ -191,6 +191,7 @@ int hpet_reenable(void)
#define TICK_COUNT 100000000
#define TICK_MIN 5000
+#define MAX_TRIES 5
/*
* Some platforms take periodic SMI interrupts with 5ms duration. Make sure none
@@ -198,13 +199,15 @@ int hpet_reenable(void)
*/
static void __init read_hpet_tsc(int *hpet, int *tsc)
{
- int tsc1, tsc2, hpet1;
+ int tsc1, tsc2, hpet1, i;
- do {
+ for (i = 0; i < MAX_TRIES; i++) {
tsc1 = get_cycles_sync();
hpet1 = hpet_readl(HPET_COUNTER);
tsc2 = get_cycles_sync();
- } while (tsc2 - tsc1 > TICK_MIN);
+ if (tsc2 - tsc1 <= TICK_MIN)
+ break;
+ }
*hpet = hpet1;
*tsc = tsc2;
}
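
The conversion above bounds the retry: a clean sample (timestamps at most TICK_MIN apart) breaks out early, exactly as the old do/while exited, while a platform whose SMI disturbs every attempt no longer loops forever. A userspace model of the same shape; clock_gettime stands in for get_cycles_sync and the threshold is an arbitrary stand-in for TICK_MIN:

/* Bounded retry of a bracketed read: retry while the two timestamps
 * around the sample are too far apart, but give up after MAX_TRIES. */
#include <stdio.h>
#include <time.h>

#define MAX_TRIES      5
#define BRACKET_MAX_NS 2000     /* stand-in for TICK_MIN */

static long long now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
    long long t1 = 0, t2 = 0;
    volatile int counter = 0;
    int i;

    for (i = 0; i < MAX_TRIES; i++) {
        t1 = now_ns();
        counter = counter + 1;          /* the read being bracketed */
        t2 = now_ns();
        if (t2 - t1 <= BRACKET_MAX_NS)  /* clean sample: stop retrying */
            break;
    }
    printf("counter=%d bracket=%lldns\n", counter, t2 - t1);
    return 0;
}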
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 21d95b747437..489426682772 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -45,7 +45,7 @@
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x20-0x2f)
+ * (these are usually mapped to vectors 0x30-0x3f)
*/
/*
@@ -299,7 +299,7 @@ void init_8259A(int auto_eoi)
* outb_p - this has to work on a wide range of PC hardware.
*/
outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */
- outb_p(IRQ0_VECTOR, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
+ outb_p(IRQ0_VECTOR, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */
if (auto_eoi)
outb_p(0x03, 0x21); /* master does Auto EOI */
@@ -307,7 +307,7 @@ void init_8259A(int auto_eoi)
outb_p(0x01, 0x21); /* master expects normal EOI */
outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */
- outb_p(IRQ8_VECTOR, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
+ outb_p(IRQ8_VECTOR, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */
outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode
is to be investigated) */
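
ICW2 only programs each 8259A's vector base; input n then raises vector base + n, which is all the corrected comments say. A small sketch of that arithmetic; the 0x30/0x38 bases are inferred from the comments above, not taken from the header that defines IRQ0_VECTOR/IRQ8_VECTOR:

/* Legacy IRQ number to CPU vector under the bases in the comments. */
#include <stdio.h>

#define IRQ0_VECTOR 0x30    /* master 8259A base (per comment) */
#define IRQ8_VECTOR 0x38    /* slave 8259A base (per comment) */

static int irq_to_vector(int irq)
{
    return irq < 8 ? IRQ0_VECTOR + irq : IRQ8_VECTOR + (irq - 8);
}

int main(void)
{
    int irq;
    for (irq = 0; irq < 16; irq++)
        printf("IRQ%2d -> vector 0x%02x\n", irq, irq_to_vector(irq));
    return 0;
}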
diff --git a/arch/x86_64/kernel/k8.c b/arch/x86_64/kernel/k8.c
index 6416682d33d0..bc11b32e8b4d 100644
--- a/arch/x86_64/kernel/k8.c
+++ b/arch/x86_64/kernel/k8.c
@@ -61,8 +61,8 @@ int cache_k8_northbridges(void)
dev = NULL;
i = 0;
while ((dev = next_k8_northbridge(dev)) != NULL) {
- k8_northbridges[i++] = dev;
- pci_read_config_dword(dev, 0x9c, &flush_words[i]);
+ k8_northbridges[i] = dev;
+ pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
}
k8_northbridges[i] = NULL;
return 0;
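
With the old post-increment in the first statement, device k's flush word was read into flush_words[k+1]: flush_words[0] was never written and the final read landed one slot past the last device. A standalone demonstration of the two pairings, with plain ints standing in for the PCI devices and their config words:

/* Off-by-one in paired array fills: increment after BOTH uses. */
#include <stdio.h>

#define N 3

int main(void)
{
    int dev_word[N] = { 0xa, 0xb, 0xc };   /* word belonging to device k */
    int fw_old[N + 1] = { 0 };
    int fw_new[N + 1] = { 0 };
    int i, k;

    i = 0;                          /* old: nb[i++] = dev; read &fw[i] */
    for (k = 0; k < N; k++) {
        i++;                        /* index already advanced...       */
        fw_old[i] = dev_word[k];    /* ...so the word lands too far    */
    }

    i = 0;                          /* new: nb[i] = dev; read &fw[i++] */
    for (k = 0; k < N; k++) {
        fw_new[i] = dev_word[k];    /* word stays with its device      */
        i++;
    }

    for (k = 0; k < N; k++)
        printf("slot %d: old=%#x new=%#x\n", k, fw_old[k], fw_new[k]);
    return 0;   /* old: slot 0 empty, words shifted; new: aligned */
}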
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 82d9d85d5270..dfab9f167366 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -39,15 +39,17 @@ int panic_on_unrecovered_nmi;
* different subsystems; this reservation system just tries to coordinate
* things a little
*/
-static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
-static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
* offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
*/
#define NMI_MAX_COUNTER_BITS 66
+#define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS)
+
+static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* nmi_active:
* >0: the lapic NMI watchdog is active, but can be disabled
@@ -108,64 +110,128 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* checks a counter bit for availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
+ int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
+ int cpu;
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1;
return 0;
}
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+ clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_perfctr_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_perfctr_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu)
+ __release_perfctr_nmi(cpu, msr);
+}
+
+static int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1;
return 0;
}
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+ clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_evntsel_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_evntsel_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu) {
+ __release_evntsel_nmi(cpu, msr);
+ }
}
static __cpuinit inline int nmi_known_cpu(void)
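
The reservation bits now live per CPU, and the public reserve loops over every possible CPU, releasing the CPUs already taken when one of them refuses, so the operation stays all-or-nothing. A userspace model of that reserve-with-rollback shape; plain arrays replace the per-cpu storage, there are no atomics, and all names are this sketch's own:

/* All-or-nothing reservation of one bit across all "cpus". */
#include <stdio.h>

#define NCPUS 4

static unsigned long owner[NCPUS];          /* one bitmap per cpu */

static int reserve_on(int cpu, int bit)
{
    if (owner[cpu] & (1UL << bit))
        return 0;                           /* already owned there */
    owner[cpu] |= 1UL << bit;
    return 1;
}

static void release_on(int cpu, int bit)
{
    owner[cpu] &= ~(1UL << bit);
}

static int reserve_all(int bit)
{
    int cpu, i;

    for (cpu = 0; cpu < NCPUS; cpu++) {
        if (!reserve_on(cpu, bit)) {
            for (i = 0; i < cpu; i++)       /* roll back earlier cpus */
                release_on(i, bit);
            return 0;
        }
    }
    return 1;
}

int main(void)
{
    owner[2] = 1UL << 5;                    /* cpu 2 already owns bit 5 */
    printf("reserve bit 5: %d\n", reserve_all(5));  /* 0: rolled back */
    printf("cpu 0 bitmap:  %#lx\n", owner[0]);      /* 0 again */
    printf("reserve bit 3: %d\n", reserve_all(3));  /* 1: taken everywhere */
    return 0;
}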
@@ -253,7 +319,7 @@ int __init check_nmi_watchdog (void)
for (cpu = 0; cpu < NR_CPUS; cpu++)
counts[cpu] = cpu_pda(cpu)->__nmi_count;
local_irq_enable();
- mdelay((10*1000)/nmi_hz); // wait 10 ticks
+ mdelay((20*1000)/nmi_hz); // wait 20 ticks
for_each_online_cpu(cpu) {
if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
@@ -472,10 +538,10 @@ static int setup_k7_watchdog(void)
perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
/* Simulator may not support it */
@@ -501,9 +567,9 @@ static int setup_k7_watchdog(void)
wd->check_bit = 1ULL<<63;
return 1;
fail2:
- release_evntsel_nmi(evntsel_msr);
+ __release_evntsel_nmi(-1, evntsel_msr);
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
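
The fail/fail1/fail2 labels are the usual kernel unwinding idiom: each label releases exactly what was acquired before the failing step, in reverse order, so every early exit leaves no reservation behind. A compact standalone sketch with stand-in acquire/release pairs:

/* Stacked error unwinding with gotos, as in the watchdog setup. */
#include <stdio.h>

static int  acquire_a(void) { puts("A taken");    return 1; }
static int  acquire_b(void) { puts("B taken");    return 1; }
static void release_a(void) { puts("A released"); }
static void release_b(void) { puts("B released"); }

static int setup(void)
{
    if (!acquire_a())
        goto fail;
    if (!acquire_b())
        goto fail1;
    if (0)                  /* a later programming step failing... */
        goto fail2;         /* ...would release B, then A */
    return 1;

fail2:
    release_b();
fail1:
    release_a();
fail:
    return 0;
}

int main(void)
{
    return !setup();        /* 0 on success, mirroring the kernel's 1/0 */
}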
@@ -514,8 +580,8 @@ static void stop_k7_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
@@ -581,10 +647,10 @@ static int setup_p4_watchdog(void)
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
}
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -609,7 +675,7 @@ static int setup_p4_watchdog(void)
wd->check_bit = 1ULL<<39;
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
@@ -621,8 +687,8 @@ static void stop_p4_watchdog(void)
wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -650,10 +716,10 @@ static int setup_intel_arch_watchdog(void)
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
@@ -680,7 +746,7 @@ static int setup_intel_arch_watchdog(void)
wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
@@ -704,8 +770,8 @@ static void stop_intel_arch_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 2bac8c60ad61..0bae862e9a55 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -519,7 +519,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
if (!gatt)
- panic("Cannot allocate GATT table");
+ panic("Cannot allocate GATT table");
+ if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
+ panic("Could not set GART PTEs to uncacheable pages");
+ global_flush_tlb();
+
memset(gatt, 0, gatt_size);
agp_gatt_table = gatt;
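
get_order() turns gatt_size into the smallest power-of-two number of pages that __get_free_pages() can return. A standalone equivalent of that computation, assuming the usual 4 KB pages (PAGE_SHIFT of 12); the 64 MB aperture is just an example value:

/* Smallest order n such that 2^n pages cover the requested size. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order_model(unsigned long size)
{
    unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int order = 0;

    while ((1UL << order) < pages)
        order++;
    return order;
}

int main(void)
{
    unsigned long aper_size = 64UL << 20;   /* example 64 MB aperture */
    unsigned long gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(unsigned int);

    printf("gatt_size=%lu bytes -> order %d\n",
           gatt_size, get_order_model(gatt_size));
    return 0;
}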
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b73212c0a550..5176ecf006ee 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -13,7 +13,7 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
-_proxy_pda = 0;
+_proxy_pda = 1;
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(7); /* RWE */
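
_proxy_pda exists only in this linker script: C code can take its address, but no storage is ever allocated for it, and moving the value from 0 to 1 presumably sidesteps toolchains that treat a symbol at absolute address 0 specially (an assumption; the diff itself gives no rationale). A sketch of referencing such a linker-provided symbol, using ld's --defsym in place of a script:

/* Build with:  gcc proxy.c -Wl,--defsym,_proxy_pda=1
 * (add -no-pie on default-PIE toolchains, since the symbol is an
 * absolute address).  Only the symbol's address is meaningful; it
 * must never be dereferenced. */
#include <stdio.h>

extern char _proxy_pda;     /* declaration only; ld supplies the value */

int main(void)
{
    printf("_proxy_pda is at %p\n", (void *)&_proxy_pda);
    return 0;
}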