Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                        |  8
-rw-r--r--  arch/x86/Makefile                       |  5
-rw-r--r--  arch/x86/boot/header.S                  | 41
-rw-r--r--  arch/x86/kernel/acpi/processor.c        |  3
-rw-r--r--  arch/x86/kernel/acpi/sleep_64.c         |  3
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S        |  4
-rw-r--r--  arch/x86/kernel/apic_32.c               |  6
-rw-r--r--  arch/x86/kernel/apic_64.c               |  2
-rw-r--r--  arch/x86/kernel/apm_32.c                | 10
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c   | 27
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c     |  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 30
-rw-r--r--  arch/x86/kernel/cpuid.c                 |  6
-rw-r--r--  arch/x86/kernel/crash.c                 |  4
-rw-r--r--  arch/x86/kernel/head_32.S               | 12
-rw-r--r--  arch/x86/kernel/hpet.c                  | 16
-rw-r--r--  arch/x86/kernel/i386_ksyms_32.c         |  2
-rw-r--r--  arch/x86/kernel/i8237.c                 |  2
-rw-r--r--  arch/x86/kernel/i8259_32.c              |  2
-rw-r--r--  arch/x86/kernel/i8259_64.c              |  2
-rw-r--r--  arch/x86/kernel/io_apic_32.c            | 39
-rw-r--r--  arch/x86/kernel/io_apic_64.c            | 42
-rw-r--r--  arch/x86/kernel/kprobes_32.c            |  4
-rw-r--r--  arch/x86/kernel/kprobes_64.c            | 49
-rw-r--r--  arch/x86/kernel/mfgpt_32.c              | 15
-rw-r--r--  arch/x86/kernel/msr.c                   |  6
-rw-r--r--  arch/x86/kernel/nmi_32.c                |  5
-rw-r--r--  arch/x86/kernel/nmi_64.c                |  5
-rw-r--r--  arch/x86/kernel/paravirt_32.c           |  4
-rw-r--r--  arch/x86/kernel/pci-dma_64.c            |  2
-rw-r--r--  arch/x86/kernel/process_32.c            | 13
-rw-r--r--  arch/x86/kernel/process_64.c            | 11
-rw-r--r--  arch/x86/kernel/reboot_32.c             |  4
-rw-r--r--  arch/x86/kernel/reboot_64.c             |  4
-rw-r--r--  arch/x86/kernel/setup_32.c              |  2
-rw-r--r--  arch/x86/kernel/smpboot_32.c            |  6
-rw-r--r--  arch/x86/kernel/smpboot_64.c            |  2
-rw-r--r--  arch/x86/kernel/suspend_64.c            | 39
-rw-r--r--  arch/x86/kernel/topology.c              |  5
-rw-r--r--  arch/x86/kernel/traps_32.c              | 15
-rw-r--r--  arch/x86/kernel/traps_64.c              | 10
-rw-r--r--  arch/x86/lguest/Kconfig                 |  1
-rw-r--r--  arch/x86/mm/init_32.c                   |  9
-rw-r--r--  arch/x86/mm/init_64.c                   |  2
-rw-r--r--  arch/x86/oprofile/nmi_int.c             |  4
-rw-r--r--  arch/x86/oprofile/op_model_athlon.c     | 22
-rw-r--r--  arch/x86/pci/acpi.c                     |  2
-rw-r--r--  arch/x86/pci/common.c                   | 16
-rw-r--r--  arch/x86/xen/enlighten.c                |  4
-rw-r--r--  arch/x86/xen/mmu.c                      |  4
50 files changed, 321 insertions, 212 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 368864dfe6eb..80b7ba4056db 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,8 +112,9 @@ config GENERIC_TIME_VSYSCALL
bool
default X86_64
-
-
+config ARCH_SUPPORTS_OPROFILE
+ bool
+ default y
config ZONE_DMA32
@@ -148,7 +149,8 @@ config X86_SMP
config X86_HT
bool
- depends on SMP && !(X86_VISWS || X86_VOYAGER || MK8)
+ depends on SMP
+ depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || (X86_64 && !MK8)
default y
config X86_BIOS_REBOOT
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 116b03a45636..7aa1dc6d67c8 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -11,10 +11,9 @@ endif
$(srctree)/arch/x86/Makefile%: ;
ifeq ($(CONFIG_X86_32),y)
+ UTS_MACHINE := i386
include $(srctree)/arch/x86/Makefile_32
else
+ UTS_MACHINE := x86_64
include $(srctree)/arch/x86/Makefile_64
endif
-
-
-
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 6ef5a060fa11..4cc5b0411db5 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -236,39 +236,30 @@ start_of_setup:
movw %ax, %es
cld
-# Apparently some ancient versions of LILO invoked the kernel
-# with %ss != %ds, which happened to work by accident for the
-# old code. If the CAN_USE_HEAP flag is set in loadflags, or
-# %ss != %ds, then adjust the stack pointer.
+# Apparently some ancient versions of LILO invoked the kernel with %ss != %ds,
+# which happened to work by accident for the old code. Recalculate the stack
+# pointer if %ss is invalid. Otherwise leave it alone, LOADLIN sets up the
+# stack behind its own code, so we can't blindly put it directly past the heap.
- # Smallest possible stack we can tolerate
- movw $(_end+STACK_SIZE), %cx
-
- movw heap_end_ptr, %dx
- addw $512, %dx
- jnc 1f
- xorw %dx, %dx # Wraparound - whole segment available
-1: testb $CAN_USE_HEAP, loadflags
- jnz 2f
-
- # No CAN_USE_HEAP
movw %ss, %dx
cmpw %ax, %dx # %ds == %ss?
movw %sp, %dx
- # If so, assume %sp is reasonably set, otherwise use
- # the smallest possible stack.
- jne 4f # -> Smallest possible stack...
+ je 2f # -> assume %sp is reasonably set
+
+ # Invalid %ss, make up a new stack
+ movw $_end, %dx
+ testb $CAN_USE_HEAP, loadflags
+ jz 1f
+ movw heap_end_ptr, %dx
+1: addw $STACK_SIZE, %dx
+ jnc 2f
+ xorw %dx, %dx # Prevent wraparound
- # Make sure the stack is at least minimum size. Take a value
- # of zero to mean "full segment."
-2:
+2: # Now %dx should point to the end of our stack space
andw $~3, %dx # dword align (might as well...)
jnz 3f
movw $0xfffc, %dx # Make sure we're not zero
-3: cmpw %cx, %dx
- jnb 5f
-4: movw %cx, %dx # Minimum value we can possibly use
-5: movw %ax, %ss
+3: movw %ax, %ss
movzwl %dx, %esp # Clear upper half of %esp
sti # Now we should have a working stack
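For illustration only (not part of the patch), the stack-selection policy the rewritten setup code implements can be sketched in C; CAN_USE_HEAP is the boot-protocol loadflags bit, and the _end/STACK_SIZE values from the assembly are passed in here as plain parameters:

	/* Sketch of the 16-bit setup stack policy above.  If %ss already
	 * matches %ds, the boot loader's %sp is trusted as-is; otherwise a
	 * fresh stack is placed just past _end (or past the heap when
	 * CAN_USE_HEAP is set), clamped so it cannot wrap the 64K segment. */
	unsigned int pick_stack_end(unsigned int ss, unsigned int ds,
				    unsigned int sp, unsigned int end_of_image,
				    unsigned int heap_end, unsigned int stack_size,
				    unsigned int can_use_heap)
	{
		unsigned int end;

		if (ss == ds)
			return sp;		/* assume %sp is reasonably set */

		end = can_use_heap ? heap_end : end_of_image;
		end += stack_size;
		if (end > 0xffff)
			end = 0;		/* prevent wraparound */
		end &= ~3u;			/* dword align */
		if (!end)
			end = 0xfffc;		/* make sure we're not zero */
		return end;
	}
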
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index f63e5ff0aca1..a25db514c719 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -49,6 +49,9 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
+ if (cpu_has(c, X86_FEATURE_ACPI))
+ buf[2] |= ACPI_PDC_T_FFH;
+
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
diff --git a/arch/x86/kernel/acpi/sleep_64.c b/arch/x86/kernel/acpi/sleep_64.c
index 79475d237071..da42de261ba8 100644
--- a/arch/x86/kernel/acpi/sleep_64.c
+++ b/arch/x86/kernel/acpi/sleep_64.c
@@ -115,6 +115,3 @@ static int __init acpi_sleep_setup(char *str)
__setup("acpi_sleep=", acpi_sleep_setup);
-void acpi_pci_link_exit(void)
-{
-}
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index a97313b1270e..1e931aaf2ef6 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -35,10 +35,6 @@ wakeup_code:
wakeup_code_start = .
.code16
- movw $0xb800, %ax
- movw %ax,%fs
- movw $0x0e00 + 'L', %fs:(0x10)
-
cli
cld
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 08b07c176962..a56c782653be 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -789,7 +789,7 @@ void __init sync_Arb_IDs(void)
* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
* needed on AMD.
*/
- if (modern_apic())
+ if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
return;
/*
* Wait for idle.
@@ -849,7 +849,7 @@ void __init init_bsp_APIC(void)
/**
* setup_local_APIC - setup the local APIC
*/
-void __devinit setup_local_APIC(void)
+void __cpuinit setup_local_APIC(void)
{
unsigned long oldvalue, value, maxlvt, integrated;
int i, j;
@@ -1530,7 +1530,7 @@ static int lapic_resume(struct sys_device *dev)
*/
static struct sysdev_class lapic_sysclass = {
- set_kset_name("lapic"),
+ .name = "lapic",
.resume = lapic_resume,
.suspend = lapic_suspend,
};
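The set_kset_name() to .name conversion seen here repeats for every sysdev class touched by this patch (lapic, lapic_nmi, i8237, i8259, ioapic, machinecheck, oprofile). The general shape, with example_* as placeholder names, is:

	/* Old style (removed by this patch): the class name lived in the
	 * embedded kset via set_kset_name("example").  New style: struct
	 * sysdev_class carries a plain .name field. */
	static struct sysdev_class example_sysclass = {
		.name    = "example",
		.suspend = example_suspend,
		.resume  = example_resume,
	};
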
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index f28ccb588fba..fa6cdee6d303 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -639,7 +639,7 @@ static int lapic_resume(struct sys_device *dev)
}
static struct sysdev_class lapic_sysclass = {
- set_kset_name("lapic"),
+ .name = "lapic",
.resume = lapic_resume,
.suspend = lapic_suspend,
};
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 17089a041028..af045ca0f653 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -2256,14 +2256,12 @@ static int __init apm_init(void)
apm_info.disabled = 1;
return -ENODEV;
}
- if (PM_IS_ACTIVE()) {
+ if (pm_flags & PM_ACPI) {
printk(KERN_NOTICE "apm: overridden by ACPI.\n");
apm_info.disabled = 1;
return -ENODEV;
}
-#ifdef CONFIG_PM_LEGACY
- pm_active = 1;
-#endif
+ pm_flags |= PM_APM;
/*
* Set up a segment that references the real mode segment 0x40
@@ -2366,9 +2364,7 @@ static void __exit apm_exit(void)
kthread_stop(kapmd_task);
kapmd_task = NULL;
}
-#ifdef CONFIG_PM_LEGACY
- pm_active = 0;
-#endif
+ pm_flags &= ~PM_APM;
}
module_init(apm_init);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9921b01fe199..8b4507b8469b 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -49,6 +49,7 @@ static struct _cache_table cache_table[] __cpuinitdata =
{ 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
{ 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
{ 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
{ 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
{ 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
{ 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
@@ -497,7 +498,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
-static void free_cache_attributes(unsigned int cpu)
+static void __cpuinit free_cache_attributes(unsigned int cpu)
{
int i;
@@ -732,10 +733,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
if (unlikely(retval < 0))
return retval;
- cache_kobject[cpu]->parent = &sys_dev->kobj;
- kobject_set_name(cache_kobject[cpu], "%s", "cache");
- cache_kobject[cpu]->ktype = &ktype_percpu_entry;
- retval = kobject_register(cache_kobject[cpu]);
+ retval = kobject_init_and_add(cache_kobject[cpu], &ktype_percpu_entry,
+ &sys_dev->kobj, "%s", "cache");
if (retval < 0) {
cpuid4_cache_sysfs_exit(cpu);
return retval;
@@ -745,23 +744,23 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_object = INDEX_KOBJECT_PTR(cpu,i);
this_object->cpu = cpu;
this_object->index = i;
- this_object->kobj.parent = cache_kobject[cpu];
- kobject_set_name(&(this_object->kobj), "index%1lu", i);
- this_object->kobj.ktype = &ktype_cache;
- retval = kobject_register(&(this_object->kobj));
+ retval = kobject_init_and_add(&(this_object->kobj),
+ &ktype_cache, cache_kobject[cpu],
+ "index%1lu", i);
if (unlikely(retval)) {
for (j = 0; j < i; j++) {
- kobject_unregister(
- &(INDEX_KOBJECT_PTR(cpu,j)->kobj));
+ kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
}
- kobject_unregister(cache_kobject[cpu]);
+ kobject_put(cache_kobject[cpu]);
cpuid4_cache_sysfs_exit(cpu);
break;
}
+ kobject_uevent(&(this_object->kobj), KOBJ_ADD);
}
if (!retval)
cpu_set(cpu, cache_dev_map);
+ kobject_uevent(cache_kobject[cpu], KOBJ_ADD);
return retval;
}
@@ -777,8 +776,8 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
cpu_clear(cpu, cache_dev_map);
for (i = 0; i < num_cache_leaves; i++)
- kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
- kobject_unregister(cache_kobject[cpu]);
+ kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+ kobject_put(cache_kobject[cpu]);
cpuid4_cache_sysfs_exit(cpu);
}
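For reference, the kobject conversion used above (and again in mce_amd_64.c below) replaces open-coded parent/ktype/name setup plus kobject_register() with kobject_init_and_add(), an explicit KOBJ_ADD uevent, and kobject_put() for teardown. A minimal sketch, with example_kobj and example_ktype as stand-ins:

	static struct kobject example_kobj;

	static int example_sysfs_add(struct kobject *parent)
	{
		int err;

		/* Initializes the kobject and adds it to sysfs under parent
		 * in one call; no uevent is sent automatically. */
		err = kobject_init_and_add(&example_kobj, &example_ktype,
					   parent, "%s", "example");
		if (err)
			return err;
		kobject_uevent(&example_kobj, KOBJ_ADD);
		return 0;
	}

	static void example_sysfs_remove(void)
	{
		/* kobject_unregister() is gone; dropping the last reference
		 * removes the kobject from sysfs and releases it. */
		kobject_put(&example_kobj);
	}
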
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 4b21d29fb5aa..242e8668dbeb 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -745,7 +745,7 @@ static void mce_restart(void)
static struct sysdev_class mce_sysclass = {
.resume = mce_resume,
- set_kset_name("machinecheck"),
+ .name = "machinecheck",
};
DEFINE_PER_CPU(struct sys_device, device_mce);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 752fb16a817d..753588755fee 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -65,7 +65,7 @@ static struct threshold_block threshold_defaults = {
};
struct threshold_bank {
- struct kobject kobj;
+ struct kobject *kobj;
struct threshold_block *blocks;
cpumask_t cpus;
};
@@ -432,10 +432,9 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
else
per_cpu(threshold_banks, cpu)[bank]->blocks = b;
- kobject_set_name(&b->kobj, "misc%i", block);
- b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
- b->kobj.ktype = &threshold_ktype;
- err = kobject_register(&b->kobj);
+ err = kobject_init_and_add(&b->kobj, &threshold_ktype,
+ per_cpu(threshold_banks, cpu)[bank]->kobj,
+ "misc%i", block);
if (err)
goto out_free;
recurse:
@@ -451,11 +450,13 @@ recurse:
if (err)
goto out_free;
+ kobject_uevent(&b->kobj, KOBJ_ADD);
+
return err;
out_free:
if (b) {
- kobject_unregister(&b->kobj);
+ kobject_put(&b->kobj);
kfree(b);
}
return err;
@@ -489,7 +490,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
- &b->kobj, name);
+ b->kobj, name);
if (err)
goto out;
@@ -505,16 +506,15 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
}
- kobject_set_name(&b->kobj, "threshold_bank%i", bank);
- b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
+ b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
+ if (!b->kobj)
+ goto out_free;
+
#ifndef CONFIG_SMP
b->cpus = CPU_MASK_ALL;
#else
b->cpus = per_cpu(cpu_core_map, cpu);
#endif
- err = kobject_register(&b->kobj);
- if (err)
- goto out_free;
per_cpu(threshold_banks, cpu)[bank] = b;
@@ -531,7 +531,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
continue;
err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
- &b->kobj, name);
+ b->kobj, name);
if (err)
goto out;
@@ -581,7 +581,7 @@ static void deallocate_threshold_block(unsigned int cpu,
return;
list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
- kobject_unregister(&pos->kobj);
+ kobject_put(&pos->kobj);
list_del(&pos->miscj);
kfree(pos);
}
@@ -627,7 +627,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
deallocate_threshold_block(cpu, bank);
free_out:
- kobject_unregister(&b->kobj);
+ kobject_put(b->kobj);
kfree(b);
per_cpu(threshold_banks, cpu)[bank] = NULL;
}
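The threshold_bank change also switches from an embedded struct kobject to a pointer obtained from kobject_create_and_add(), which allocates a kobject with a default ktype and adds it under the given parent in one step. Roughly, with example_* as placeholder names:

	struct example_bank {
		struct kobject *kobj;		/* was: struct kobject kobj; */
	};

	static int example_bank_add(struct example_bank *b, struct kobject *parent)
	{
		b->kobj = kobject_create_and_add("example_bank", parent);
		if (!b->kobj)
			return -ENOMEM;		/* returns NULL, not an errno */
		return 0;
	}

	static void example_bank_remove(struct example_bank *b)
	{
		kobject_put(b->kobj);		/* replaces kobject_unregister() */
	}
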
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 05c9936a16cc..d387c770c518 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -157,15 +157,15 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
err = cpuid_device_create(cpu);
break;
case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
- case CPU_DEAD_FROZEN:
cpuid_device_destroy(cpu);
break;
+ case CPU_UP_CANCELED_FROZEN:
+ destroy_suspended_device(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ break;
}
return err ? NOTIFY_BAD : NOTIFY_OK;
}
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 8bb482ff091b..9a5fa0abfcc7 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -22,6 +22,7 @@
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
+#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/smp.h>
@@ -140,5 +141,8 @@ void machine_crash_shutdown(struct pt_regs *regs)
#if defined(CONFIG_X86_IO_APIC)
disable_IO_APIC();
#endif
+#ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+#endif
crash_save_cpu(regs, safe_smp_processor_id());
}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 374b7ece8961..fbad51fce672 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -193,6 +193,12 @@ default_entry:
jb 10b
movl %edi,(init_pg_tables_end - __PAGE_OFFSET)
+ /* Do an early initialization of the fixmap area */
+ movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
+ movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax
+ addl $0x67, %eax /* 0x67 == _PAGE_TABLE */
+ movl %eax, 4092(%edx)
+
xorl %ebx,%ebx /* This is the boot CPU (BSP) */
jmp 3f
/*
@@ -208,12 +214,6 @@ default_entry:
.section .init.text,"ax",@progbits
#endif
- /* Do an early initialization of the fixmap area */
- movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
- movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax
- addl $0x007, %eax /* 0x007 = PRESENT+RW+USER */
- movl %eax, 4092(%edx)
-
#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
cld
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 53303f2e5475..2f99ee206b95 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -446,6 +446,20 @@ static __init int hpet_late_init(void)
}
fs_initcall(hpet_late_init);
+void hpet_disable(void)
+{
+ if (is_hpet_capable()) {
+ unsigned long cfg = hpet_readl(HPET_CFG);
+
+ if (hpet_legacy_int_enabled) {
+ cfg &= ~HPET_CFG_LEGACY;
+ hpet_legacy_int_enabled = 0;
+ }
+ cfg &= ~HPET_CFG_ENABLE;
+ hpet_writel(cfg, HPET_CFG);
+ }
+}
+
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET
@@ -643,7 +657,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
hpet_pie_count = 0;
}
- if (hpet_rtc_flags & RTC_PIE &&
+ if (hpet_rtc_flags & RTC_AIE &&
(curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
(curr_time.tm_min == hpet_alarm_time.tm_min) &&
(curr_time.tm_hour == hpet_alarm_time.tm_hour))
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index edd39ccf139e..02112fcc0de7 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -2,6 +2,7 @@
#include <asm/semaphore.h>
#include <asm/checksum.h>
#include <asm/desc.h>
+#include <asm/pgtable.h>
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
@@ -29,3 +30,4 @@ EXPORT_SYMBOL(__read_lock_failed);
#endif
EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c
index 29313832df0c..dbd6c1d1b638 100644
--- a/arch/x86/kernel/i8237.c
+++ b/arch/x86/kernel/i8237.c
@@ -51,7 +51,7 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
}
static struct sysdev_class i8237_sysdev_class = {
- set_kset_name("i8237"),
+ .name = "i8237",
.suspend = i8237A_suspend,
.resume = i8237A_resume,
};
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index f634fc715c99..5f3496d01984 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -258,7 +258,7 @@ static int i8259A_shutdown(struct sys_device *dev)
}
static struct sysdev_class i8259_sysdev_class = {
- set_kset_name("i8259"),
+ .name = "i8259",
.suspend = i8259A_suspend,
.resume = i8259A_resume,
.shutdown = i8259A_shutdown,
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
index 3f27ea0b9816..ba6d57286f56 100644
--- a/arch/x86/kernel/i8259_64.c
+++ b/arch/x86/kernel/i8259_64.c
@@ -370,7 +370,7 @@ static int i8259A_shutdown(struct sys_device *dev)
}
static struct sysdev_class i8259_sysdev_class = {
- set_kset_name("i8259"),
+ .name = "i8259",
.suspend = i8259A_suspend,
.resume = i8259A_resume,
.shutdown = i8259A_shutdown,
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index f35c6eb33da9..ab77f1905469 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -962,7 +962,7 @@ static int EISA_ELCR(unsigned int irq)
#define default_MCA_trigger(idx) (1)
#define default_MCA_polarity(idx) (0)
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity;
@@ -1882,13 +1882,16 @@ __setup("no_timer_check", notimercheck);
static int __init timer_irq_works(void)
{
unsigned long t1 = jiffies;
+ unsigned long flags;
if (no_timer_check)
return 1;
+ local_save_flags(flags);
local_irq_enable();
/* Let ten ticks pass... */
mdelay((10 * 1000) / HZ);
+ local_irq_restore(flags);
/*
* Expect a few ticks at least, to be sure some possible
@@ -2166,6 +2169,9 @@ static inline void __init check_timer(void)
{
int apic1, pin1, apic2, pin2;
int vector;
+ unsigned long flags;
+
+ local_irq_save(flags);
/*
* get/set the timer IRQ vector:
@@ -2211,7 +2217,7 @@ static inline void __init check_timer(void)
}
if (disable_timer_pin_1 > 0)
clear_IO_APIC_pin(0, pin1);
- return;
+ goto out;
}
clear_IO_APIC_pin(apic1, pin1);
printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
@@ -2234,7 +2240,7 @@ static inline void __init check_timer(void)
if (nmi_watchdog == NMI_IO_APIC) {
setup_nmi();
}
- return;
+ goto out;
}
/*
* Cleanup, just in case ...
@@ -2258,7 +2264,7 @@ static inline void __init check_timer(void)
if (timer_irq_works()) {
printk(" works.\n");
- return;
+ goto out;
}
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
printk(" failed.\n");
@@ -2274,11 +2280,13 @@ static inline void __init check_timer(void)
if (timer_irq_works()) {
printk(" works.\n");
- return;
+ goto out;
}
printk(" failed :(.\n");
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option");
+out:
+ local_irq_restore(flags);
}
/*
@@ -2393,7 +2401,7 @@ static int ioapic_resume(struct sys_device *dev)
}
static struct sysdev_class ioapic_sysdev_class = {
- set_kset_name("ioapic"),
+ .name = "ioapic",
.suspend = ioapic_suspend,
.resume = ioapic_resume,
};
@@ -2830,6 +2838,25 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
return 0;
}
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+ int i;
+
+ if (skip_ioapic_setup)
+ return -1;
+
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_irqtype == mp_INT &&
+ mp_irqs[i].mpc_srcbusirq == bus_irq)
+ break;
+ if (i >= mp_irq_entries)
+ return -1;
+
+ *trigger = irq_trigger(i);
+ *polarity = irq_polarity(i);
+ return 0;
+}
+
#endif /* CONFIG_ACPI */
static int __init parse_disable_timer_pin_1(char *arg)
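The timer_irq_works()/check_timer() changes here (and the matching ones in io_apic_64.c below) all follow one discipline: save the current interrupt state, enable interrupts only for the measurement window, and restore the saved state on every exit path. Schematically, as a sketch rather than the exact kernel code:

	static int example_timer_probe(void)
	{
		unsigned long flags;
		unsigned long t1 = jiffies;

		local_save_flags(flags);	/* remember current IRQ state */
		local_irq_enable();		/* let the timer interrupt fire */

		mdelay((10 * 1000) / HZ);	/* let roughly ten ticks pass */

		local_irq_restore(flags);	/* put the IRQ state back */

		return time_after(jiffies, t1 + 4);	/* did jiffies advance? */
	}
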
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 953328b55a30..23a3ac06a23e 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -546,7 +546,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
#define default_PCI_trigger(idx) (1)
#define default_PCI_polarity(idx) (1)
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity;
@@ -1281,10 +1281,13 @@ void disable_IO_APIC(void)
static int __init timer_irq_works(void)
{
unsigned long t1 = jiffies;
+ unsigned long flags;
+ local_save_flags(flags);
local_irq_enable();
/* Let ten ticks pass... */
mdelay((10 * 1000) / HZ);
+ local_irq_restore(flags);
/*
* Expect a few ticks at least, to be sure some possible
@@ -1655,6 +1658,9 @@ static inline void check_timer(void)
{
struct irq_cfg *cfg = irq_cfg + 0;
int apic1, pin1, apic2, pin2;
+ unsigned long flags;
+
+ local_irq_save(flags);
/*
* get/set the timer IRQ vector:
@@ -1696,7 +1702,7 @@ static inline void check_timer(void)
}
if (disable_timer_pin_1 > 0)
clear_IO_APIC_pin(0, pin1);
- return;
+ goto out;
}
clear_IO_APIC_pin(apic1, pin1);
apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
@@ -1718,7 +1724,7 @@ static inline void check_timer(void)
if (nmi_watchdog == NMI_IO_APIC) {
setup_nmi();
}
- return;
+ goto out;
}
/*
* Cleanup, just in case ...
@@ -1741,7 +1747,7 @@ static inline void check_timer(void)
if (timer_irq_works()) {
apic_printk(APIC_VERBOSE," works.\n");
- return;
+ goto out;
}
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
apic_printk(APIC_VERBOSE," failed.\n");
@@ -1756,10 +1762,12 @@ static inline void check_timer(void)
if (timer_irq_works()) {
apic_printk(APIC_VERBOSE," works.\n");
- return;
+ goto out;
}
apic_printk(APIC_VERBOSE," failed :(.\n");
panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
+out:
+ local_irq_restore(flags);
}
static int __init notimercheck(char *s)
@@ -1842,7 +1850,7 @@ static int ioapic_resume(struct sys_device *dev)
}
static struct sysdev_class ioapic_sysdev_class = {
- set_kset_name("ioapic"),
+ .name = "ioapic",
.suspend = ioapic_suspend,
.resume = ioapic_resume,
};
@@ -2222,8 +2230,27 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
return 0;
}
-#endif /* CONFIG_ACPI */
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+ int i;
+
+ if (skip_ioapic_setup)
+ return -1;
+
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_irqtype == mp_INT &&
+ mp_irqs[i].mpc_srcbusirq == bus_irq)
+ break;
+ if (i >= mp_irq_entries)
+ return -1;
+
+ *trigger = irq_trigger(i);
+ *polarity = irq_polarity(i);
+ return 0;
+}
+
+#endif /* CONFIG_ACPI */
/*
* This function currently is only a helper for the i386 smp boot process where
@@ -2260,3 +2287,4 @@ void __init setup_ioapic_dest(void)
}
}
#endif
+
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index d87a523070d1..3a020f79f82b 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -727,9 +727,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
if (&regs->esp != kcb->jprobe_saved_esp) {
- struct pt_regs *saved_regs =
- container_of(kcb->jprobe_saved_esp,
- struct pt_regs, esp);
+ struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
printk("current esp %p does not match saved esp %p\n",
&regs->esp, kcb->jprobe_saved_esp);
printk("Saved registers for jprobe %p\n", jp);
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 3db3611933d8..5df19a9f9239 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -58,7 +58,7 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
-static __always_inline int is_IF_modifier(kprobe_opcode_t *insn)
+static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
switch (*insn) {
case 0xfa: /* cli */
@@ -485,7 +485,6 @@ static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
unsigned long *tos = (unsigned long *)regs->rsp;
- unsigned long next_rip = 0;
unsigned long copy_rip = (unsigned long)p->ainsn.insn;
unsigned long orig_rip = (unsigned long)p->addr;
kprobe_opcode_t *insn = p->ainsn.insn;
@@ -494,46 +493,42 @@ static void __kprobes resume_execution(struct kprobe *p,
if (*insn >= 0x40 && *insn <= 0x4f)
insn++;
+ regs->eflags &= ~TF_MASK;
switch (*insn) {
- case 0x9c: /* pushfl */
+ case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
*tos |= kcb->kprobe_old_rflags;
break;
- case 0xc3: /* ret/lret */
- case 0xcb:
- case 0xc2:
+ case 0xc2: /* iret/ret/lret */
+ case 0xc3:
case 0xca:
- regs->eflags &= ~TF_MASK;
- /* rip is already adjusted, no more changes required*/
- return;
- case 0xe8: /* call relative - Fix return addr */
+ case 0xcb:
+ case 0xcf:
+ case 0xea: /* jmp absolute -- ip is correct */
+ /* ip is already adjusted, no more changes required */
+ goto no_change;
+ case 0xe8: /* call relative - Fix return addr */
*tos = orig_rip + (*tos - copy_rip);
break;
case 0xff:
if ((insn[1] & 0x30) == 0x10) {
/* call absolute, indirect */
- /* Fix return addr; rip is correct. */
- next_rip = regs->rip;
+ /* Fix return addr; ip is correct. */
*tos = orig_rip + (*tos - copy_rip);
+ goto no_change;
} else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
- /* rip is correct. */
- next_rip = regs->rip;
+ /* ip is correct. */
+ goto no_change;
}
- break;
- case 0xea: /* jmp absolute -- rip is correct */
- next_rip = regs->rip;
- break;
default:
break;
}
- regs->eflags &= ~TF_MASK;
- if (next_rip) {
- regs->rip = next_rip;
- } else {
- regs->rip = orig_rip + (regs->rip - copy_rip);
- }
+ regs->rip = orig_rip + (regs->rip - copy_rip);
+no_change:
+
+ return;
}
int __kprobes post_kprobe_handler(struct pt_regs *regs)
@@ -716,10 +711,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct jprobe *jp = container_of(p, struct jprobe, kp);
if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
- if ((long *)regs->rsp != kcb->jprobe_saved_rsp) {
- struct pt_regs *saved_regs =
- container_of(kcb->jprobe_saved_rsp,
- struct pt_regs, rsp);
+ if ((unsigned long *)regs->rsp != kcb->jprobe_saved_rsp) {
+ struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
printk("current rsp %p does not match saved rsp %p\n",
(long *)regs->rsp, kcb->jprobe_saved_rsp);
printk("Saved registers for jprobe %p\n", jp);
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 0ab680f2d9db..3960ab7e1497 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -278,12 +278,12 @@ static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
{
+ /* Turn off the clock (and clear the event) */
+ mfgpt_disable_timer(mfgpt_event_clock);
+
if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
return IRQ_HANDLED;
- /* Turn off the clock */
- mfgpt_disable_timer(mfgpt_event_clock);
-
/* Clear the counter */
geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
@@ -319,10 +319,6 @@ static int __init mfgpt_timer_setup(void)
}
mfgpt_event_clock = timer;
- /* Set the clock scale and enable the event mode for CMP2 */
- val = MFGPT_SCALE | (3 << 8);
-
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
/* Set up the IRQ on the MFGPT side */
if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) {
@@ -339,6 +335,11 @@ static int __init mfgpt_timer_setup(void)
goto err;
}
+ /* Set the clock scale and enable the event mode for CMP2 */
+ val = MFGPT_SCALE | (3 << 8);
+
+ geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
+
/* Set up the clock event */
mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, 32);
mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index ee6eba4ecfea..21f6e3c0be18 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -155,15 +155,15 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
err = msr_device_create(cpu);
break;
case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
- case CPU_DEAD_FROZEN:
msr_device_destroy(cpu);
break;
+ case CPU_UP_CANCELED_FROZEN:
+ destroy_suspended_device(msr_class, MKDEV(MSR_MAJOR, cpu));
+ break;
}
return err ? NOTIFY_BAD : NOTIFY_OK;
}
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 600fd404e440..4f4bfd3a88b6 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -105,7 +105,8 @@ static int __init check_nmi_watchdog(void)
if (!per_cpu(wd_enabled, cpu))
continue;
if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
- printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+ printk(KERN_WARNING "WARNING: CPU#%d: NMI "
+ "appears to be stuck (%d->%d)!\n",
cpu,
prev_nmi_count[cpu],
nmi_count(cpu));
@@ -175,7 +176,7 @@ static int lapic_nmi_resume(struct sys_device *dev)
static struct sysdev_class nmi_sysclass = {
- set_kset_name("lapic_nmi"),
+ .name = "lapic_nmi",
.resume = lapic_nmi_resume,
.suspend = lapic_nmi_suspend,
};
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index a576fd740062..c3d1476b6a11 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -109,7 +109,8 @@ int __init check_nmi_watchdog (void)
if (!per_cpu(wd_enabled, cpu))
continue;
if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
- printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+ printk(KERN_WARNING "WARNING: CPU#%d: NMI "
+ "appears to be stuck (%d->%d)!\n",
cpu,
counts[cpu],
cpu_pda(cpu)->__nmi_count);
@@ -210,7 +211,7 @@ static int lapic_nmi_resume(struct sys_device *dev)
}
static struct sysdev_class nmi_sysclass = {
- set_kset_name("lapic_nmi"),
+ .name = "lapic_nmi",
.resume = lapic_nmi_resume,
.suspend = lapic_nmi_suspend,
};
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
index 6a80d67c2121..f5000799f8ef 100644
--- a/arch/x86/kernel/paravirt_32.c
+++ b/arch/x86/kernel/paravirt_32.c
@@ -465,8 +465,8 @@ struct pv_mmu_ops pv_mmu_ops = {
};
EXPORT_SYMBOL_GPL(pv_time_ops);
-EXPORT_SYMBOL_GPL(pv_cpu_ops);
-EXPORT_SYMBOL_GPL(pv_mmu_ops);
+EXPORT_SYMBOL (pv_cpu_ops);
+EXPORT_SYMBOL (pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_apic_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL (pv_irq_ops);
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index aa805b11b24f..5552d23d23c2 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -12,7 +12,7 @@
#include <asm/gart.h>
#include <asm/calgary.h>
-int iommu_merge __read_mostly = 1;
+int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);
dma_addr_t bad_dma_address __read_mostly;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7b899584d290..46d391d49de8 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -204,6 +204,10 @@ void cpu_idle(void)
}
}
+static void do_nothing(void *unused)
+{
+}
+
void cpu_idle_wait(void)
{
unsigned int cpu, this_cpu = get_cpu();
@@ -228,6 +232,13 @@ void cpu_idle_wait(void)
cpu_clear(cpu, map);
}
cpus_and(map, map, cpu_online_map);
+ /*
+ * We waited 1 sec; if a CPU still did not call idle,
+ * it may be because it is in idle and not waking up
+ * because it has nothing to do.
+ * Give all the remaining CPUs a kick.
+ */
+ smp_call_function_mask(map, do_nothing, 0, 0);
} while (!cpus_empty(map));
set_cpus_allowed(current, tmp);
@@ -261,7 +272,7 @@ static void mwait_idle(void)
mwait_idle_with_hints(0, 0);
}
-void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_MWAIT)) {
printk("monitor/mwait feature present.\n");
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6309b275cb9c..ab79e1dfa023 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -135,6 +135,10 @@ static void poll_idle (void)
cpu_relax();
}
+static void do_nothing(void *unused)
+{
+}
+
void cpu_idle_wait(void)
{
unsigned int cpu, this_cpu = get_cpu();
@@ -160,6 +164,13 @@ void cpu_idle_wait(void)
cpu_clear(cpu, map);
}
cpus_and(map, map, cpu_online_map);
+ /*
+ * We waited 1 sec; if a CPU still did not call idle,
+ * it may be because it is in idle and not waking up
+ * because it has nothing to do.
+ * Give all the remaining CPUs a kick.
+ */
+ smp_call_function_mask(map, do_nothing, 0, 0);
} while (!cpus_empty(map));
set_cpus_allowed(current, tmp);
diff --git a/arch/x86/kernel/reboot_32.c b/arch/x86/kernel/reboot_32.c
index 9e2269d00918..bb1a0f889c5e 100644
--- a/arch/x86/kernel/reboot_32.c
+++ b/arch/x86/kernel/reboot_32.c
@@ -11,6 +11,7 @@
#include <linux/reboot.h>
#include <asm/uaccess.h>
#include <asm/apic.h>
+#include <asm/hpet.h>
#include <asm/desc.h>
#include "mach_reboot.h"
#include <asm/reboot_fixups.h>
@@ -326,6 +327,9 @@ static void native_machine_shutdown(void)
#ifdef CONFIG_X86_IO_APIC
disable_IO_APIC();
#endif
+#ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+#endif
}
void __attribute__((weak)) mach_reboot_fixups(void)
diff --git a/arch/x86/kernel/reboot_64.c b/arch/x86/kernel/reboot_64.c
index 71b13c5f5817..53620a92a8fd 100644
--- a/arch/x86/kernel/reboot_64.c
+++ b/arch/x86/kernel/reboot_64.c
@@ -17,6 +17,7 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
+#include <asm/hpet.h>
#include <asm/gart.h>
/*
@@ -113,6 +114,9 @@ void machine_shutdown(void)
disable_IO_APIC();
+#ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+#endif
local_irq_restore(flags);
pci_iommu_shutdown();
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index e1e18c34c821..9c24b45b513c 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -67,7 +67,7 @@
address, and must not be in the .bss segment! */
unsigned long init_pg_tables_end __initdata = ~0UL;
-int disable_pse __devinitdata = 0;
+int disable_pse __cpuinitdata = 0;
/*
* Machine setup..
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index ef0f34ede1ab..4ea80cbe52e5 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -60,7 +60,7 @@
#include <asm/mtrr.h>
/* Set if we find a B stepping CPU */
-static int __devinitdata smp_b_stepping;
+static int __cpuinitdata smp_b_stepping;
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -745,8 +745,8 @@ static inline int alloc_cpu_id(void)
}
#ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS];
-static inline struct task_struct * alloc_idle_task(int cpu)
+static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
+static inline struct task_struct * __cpuinit alloc_idle_task(int cpu)
{
struct task_struct *idle;
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 500670c93d81..aaf4e1291217 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -526,7 +526,7 @@ struct create_idle {
int cpu;
};
-void do_fork_idle(struct work_struct *work)
+static void __cpuinit do_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index db284ef44d53..2e5efaaf8800 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -192,42 +192,25 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en
return 0;
}
-static int res_kernel_text_pud_init(pud_t *pud, unsigned long start)
-{
- pmd_t *pmd;
- unsigned long paddr;
-
- pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
- if (!pmd)
- return -ENOMEM;
- set_pud(pud + pud_index(start), __pud(__pa(pmd) | _KERNPG_TABLE));
- for (paddr = 0; paddr < KERNEL_TEXT_SIZE; pmd++, paddr += PMD_SIZE) {
- unsigned long pe;
-
- pe = __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL | paddr;
- pe &= __supported_pte_mask;
- set_pmd(pmd, __pmd(pe));
- }
-
- return 0;
-}
-
static int set_up_temporary_mappings(void)
{
unsigned long start, end, next;
- pud_t *pud;
int error;
temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!temp_level4_pgt)
return -ENOMEM;
+ /* It is safe to reuse the original kernel mapping */
+ set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
+ init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+
/* Set up the direct mapping from scratch */
start = (unsigned long)pfn_to_kaddr(0);
end = (unsigned long)pfn_to_kaddr(end_pfn);
for (; start < end; start = next) {
- pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
next = start + PGDIR_SIZE;
@@ -238,17 +221,7 @@ static int set_up_temporary_mappings(void)
set_pgd(temp_level4_pgt + pgd_index(start),
mk_kernel_pgd(__pa(pud)));
}
-
- /* Set up the kernel text mapping from scratch */
- pud = (pud_t *)get_safe_page(GFP_ATOMIC);
- if (!pud)
- return -ENOMEM;
- error = res_kernel_text_pud_init(pud, __START_KERNEL_map);
- if (!error)
- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
- __pgd(__pa(pud) | _PAGE_TABLE));
-
- return error;
+ return 0;
}
int swsusp_arch_resume(void)
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 8caa0b777466..7e16d675eb85 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -33,7 +33,7 @@
static struct i386_cpu cpu_devices[NR_CPUS];
-int arch_register_cpu(int num)
+int __cpuinit arch_register_cpu(int num)
{
/*
* CPU0 cannot be offlined due to several
@@ -53,7 +53,8 @@ int arch_register_cpu(int num)
}
#ifdef CONFIG_HOTPLUG_CPU
-void arch_unregister_cpu(int num) {
+void arch_unregister_cpu(int num)
+{
return unregister_cpu(&cpu_devices[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 298d13ed3ab3..02d1e1e58e81 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -283,6 +283,11 @@ void dump_stack(void)
{
unsigned long stack;
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+ current->pid, current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
show_trace(current, NULL, &stack);
}
@@ -368,14 +373,13 @@ void die(const char * str, struct pt_regs * regs, long err)
if (die.lock_owner != raw_smp_processor_id()) {
console_verbose();
+ raw_local_irq_save(flags);
__raw_spin_lock(&die.lock);
- raw_local_save_flags(flags);
die.lock_owner = smp_processor_id();
die.lock_owner_depth = 0;
bust_spinlocks(1);
- }
- else
- raw_local_save_flags(flags);
+ } else
+ raw_local_irq_save(flags);
if (++die.lock_owner_depth < 3) {
unsigned long esp;
@@ -537,6 +541,7 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
info.si_errno = 0; \
info.si_code = sicode; \
info.si_addr = (void __user *)siaddr; \
+ trace_hardirqs_fixup(); \
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
== NOTIFY_STOP) \
return; \
@@ -828,6 +833,8 @@ fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
unsigned int condition;
struct task_struct *tsk = current;
+ trace_hardirqs_fixup();
+
get_debugreg(condition, 6);
if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 4a6bd4965f56..cc68b92316cd 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -31,6 +31,7 @@
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
+#include <linux/utsname.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
@@ -400,6 +401,12 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
void dump_stack(void)
{
unsigned long dummy;
+
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+ current->pid, current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
show_trace(NULL, NULL, &dummy);
}
@@ -628,6 +635,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
info.si_errno = 0; \
info.si_code = sicode; \
info.si_addr = (void __user *)siaddr; \
+ trace_hardirqs_fixup(); \
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
== NOTIFY_STOP) \
return; \
@@ -846,6 +854,8 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
struct task_struct *tsk = current;
siginfo_t info;
+ trace_hardirqs_fixup();
+
get_debugreg(condition, 6);
if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index c4dffbeea5e1..19626ace0f50 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,6 +2,7 @@ config LGUEST_GUEST
bool "Lguest guest support"
select PARAVIRT
depends on !X86_PAE
+ depends on !(X86_VISWS || X86_VOYAGER)
select VIRTIO
select VIRTIO_RING
select VIRTIO_CONSOLE
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c7d19471261d..3c76d194fd2c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -321,8 +321,13 @@ extern void set_highmem_pages_init(int);
static void __init set_highmem_pages_init(int bad_ppro)
{
int pfn;
- for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
- add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
+ /*
+ * Holes under sparsemem might not have mem_map[]:
+ */
+ if (pfn_valid(pfn))
+ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+ }
totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_FLATMEM */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a7308b2cd058..0f9c8c890658 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -345,7 +345,7 @@ static void __init find_early_table_space(unsigned long end)
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
This runs before bootmem is initialized and gets pages directly from the
physical memory. To access them they are temporarily mapped. */
-void __meminit init_memory_mapping(unsigned long start, unsigned long end)
+void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
unsigned long next;
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2d0eeac7251f..c8ab79ef4276 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -51,7 +51,7 @@ static int nmi_resume(struct sys_device *dev)
static struct sysdev_class oprofile_sysclass = {
- set_kset_name("oprofile"),
+ .name = "oprofile",
.resume = nmi_resume,
.suspend = nmi_suspend,
};
@@ -380,7 +380,7 @@ static int __init ppro_init(char ** cpu_type)
if (cpu_model == 14)
*cpu_type = "i386/core";
- else if (cpu_model == 15)
+ else if (cpu_model == 15 || cpu_model == 23)
*cpu_type = "i386/core_2";
else if (cpu_model > 0xd)
return 0;
diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c
index 3057a19e4641..c3ee43333f26 100644
--- a/arch/x86/oprofile/op_model_athlon.c
+++ b/arch/x86/oprofile/op_model_athlon.c
@@ -1,6 +1,6 @@
/**
* @file op_model_athlon.h
- * athlon / K7 model-specific MSR operations
+ * athlon / K7 / K8 / Family 10h model-specific MSR operations
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
@@ -31,12 +31,16 @@
#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR(x) (x &= (1<<21))
+#define CTRL_CLEAR_LO(x) (x &= (1<<21))
+#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
#define CTRL_SET_ENABLE(val) (val |= 1<<20)
#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT(val, e) (val |= e)
+#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
+#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
+#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
+#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
static unsigned long reset_value[NUM_COUNTERS];
@@ -70,7 +74,8 @@ static void athlon_setup_ctrs(struct op_msrs const * const msrs)
if (unlikely(!CTRL_IS_RESERVED(msrs,i)))
continue;
CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low);
+ CTRL_CLEAR_LO(low);
+ CTRL_CLEAR_HI(high);
CTRL_WRITE(low, high, msrs, i);
}
@@ -89,12 +94,17 @@ static void athlon_setup_ctrs(struct op_msrs const * const msrs)
CTR_WRITE(counter_config[i].count, msrs, i);
CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low);
+ CTRL_CLEAR_LO(low);
+ CTRL_CLEAR_HI(high);
CTRL_SET_ENABLE(low);
CTRL_SET_USR(low, counter_config[i].user);
CTRL_SET_KERN(low, counter_config[i].kernel);
CTRL_SET_UM(low, counter_config[i].unit_mask);
- CTRL_SET_EVENT(low, counter_config[i].event);
+ CTRL_SET_EVENT_LOW(low, counter_config[i].event);
+ CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
+ CTRL_SET_HOST_ONLY(high, 0);
+ CTRL_SET_GUEST_ONLY(high, 0);
+
CTRL_WRITE(low, high, msrs, i);
} else {
reset_value[i] = 0;
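The new CTRL_SET_EVENT_LOW/HIGH macros exist because AMD Family 10h event select codes are 12 bits wide: bits 7:0 stay in the low half of the event-select MSR as before, while bits 11:8 land in bits 35:32, i.e. bits 3:0 of the high word. A small illustration of that split, mirroring the macros above:

	static void split_event_code(unsigned int event,
				     unsigned int *low, unsigned int *high)
	{
		*low  |= event & 0xff;		/* event[7:0]  -> EvtSel[7:0]   */
		*high |= (event >> 8) & 0xf;	/* event[11:8] -> EvtSel[35:32] */
	}
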
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 7e35078673a4..0234f2831bf3 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -13,7 +13,7 @@ static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
return 0;
}
-static struct dmi_system_id acpi_pciprobe_dmi_table[] = {
+static struct dmi_system_id acpi_pciprobe_dmi_table[] __devinitdata = {
/*
* Systems where PCI IO resource ISA alignment can be skipped
* when the ISA enable bit in the bridge control is not set
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index f4386990b150..862746390666 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -315,6 +315,22 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
},
},
#endif
+ {
+ .callback = set_bf_sort,
+ .ident = "HP ProLiant DL385 G2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
+ },
+ },
+ {
+ .callback = set_bf_sort,
+ .ident = "HP ProLiant DL585 G2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
+ },
+ },
{}
};
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 94c39aaf695f..79ad15252150 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -95,7 +95,7 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
*
* 0: not available, 1: available
*/
-static int have_vcpu_info_placement = 1;
+static int have_vcpu_info_placement = 0;
static void __init xen_vcpu_setup(int cpu)
{
@@ -1131,7 +1131,7 @@ asmlinkage void __init xen_start_kernel(void)
if (!xen_start_info)
return;
- BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);
+ BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);
/* Install Xen paravirt ops */
pv_info = xen_info;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b2e32f9d0071..0ac6c5dc49ba 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -244,6 +244,8 @@ pte_t xen_make_pte(unsigned long long pte)
if (pte & 1)
pte = phys_to_machine(XPADDR(pte)).maddr;
+ pte &= ~_PAGE_PCD;
+
return (pte_t){ pte, pte >> 32 };
}
@@ -291,6 +293,8 @@ pte_t xen_make_pte(unsigned long pte)
if (pte & _PAGE_PRESENT)
pte = phys_to_machine(XPADDR(pte)).maddr;
+ pte &= ~_PAGE_PCD;
+
return (pte_t){ pte };
}