Diffstat (limited to 'arch/x86')
70 files changed, 1038 insertions, 276 deletions
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index ad8ec356fb36..0e103236b754 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -14,3 +14,4 @@ obj-y += crypto/ obj-y += vdso/ obj-$(CONFIG_IA32_EMULATION) += ia32/ +obj-y += platform/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 299fbc86f570..e8327686d3c5 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1,6 +1,3 @@ -# x86 configuration -mainmenu "Linux Kernel Configuration for x86" - # Select 32 or 64 bit config 64BIT bool "64-bit kernel" if ARCH = "x86" @@ -1896,6 +1893,11 @@ config PCI_OLPC def_bool y depends on PCI && OLPC && (PCI_GOOLPC || PCI_GOANY) +config PCI_XEN + def_bool y + depends on PCI && XEN + select SWIOTLB_XEN + config PCI_DOMAINS def_bool y depends on PCI diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 92091de11113..55d106b5e31b 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -93,6 +93,9 @@ extern u8 acpi_sci_flags; extern int acpi_sci_override_gsi; void acpi_pic_sci_set_trigger(unsigned int, u16); +extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi, + int trigger, int polarity); + static inline void disable_acpi(void) { acpi_disabled = 1; diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 286de34b0ed6..f6ce0bda3b98 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v) static inline u32 native_apic_msr_read(u32 reg) { - u32 low, high; + u64 msr; if (reg == APIC_DFR) return -1; - rdmsr(APIC_BASE_MSR + (reg >> 4), low, high); - return low; + rdmsrl(APIC_BASE_MSR + (reg >> 4), msr); + return (u32)msr; } static inline void native_x2apic_wait_icr_idle(void) @@ -181,12 +181,12 @@ extern void enable_x2apic(void); extern void x2apic_icr_write(u32 low, u32 id); static inline int x2apic_enabled(void) { - int msr, msr2; + u64 msr; if (!cpu_has_x2apic) return 0; - rdmsr(MSR_IA32_APICBASE, msr, msr2); + rdmsrl(MSR_IA32_APICBASE, msr); if (msr & X2APIC_ENABLE) return 1; return 0; diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index f0203f4791a8..072273082528 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -41,6 +41,8 @@ #include <asm-generic/int-ll64.h> #include <asm/page.h> +#include <xen/xen.h> + #define build_mmio_read(name, size, type, reg, barrier) \ static inline type name(const volatile void __iomem *addr) \ { type ret; asm volatile("mov" size " %1,%0":reg (ret) \ @@ -351,6 +353,17 @@ extern void early_iounmap(void __iomem *addr, unsigned long size); extern void fixup_early_ioremap(void); extern bool is_early_ioremap_ptep(pte_t *ptep); +#ifdef CONFIG_XEN +struct bio_vec; + +extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, + const struct bio_vec *vec2); + +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ + (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) +#endif /* CONFIG_XEN */ + #define IO_SPACE_LIMIT 0xffff #endif /* _ASM_X86_IO_H */ diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index c8be4566c3d2..a6b28d017c2f 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -169,6 +169,7 @@ extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern void probe_nr_irqs_gsi(void); +extern int get_nr_irqs_gsi(void); extern void 
setup_ioapic_ids_from_mpc(void); diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 18e3b8a8709f..ef9975812c77 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) #define __PV_IS_CALLEE_SAVE(func) \ ((struct paravirt_callee_save) { func }) -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long arch_local_save_flags(void) { return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); } -static inline void arch_local_irq_restore(unsigned long f) +static inline notrace void arch_local_irq_restore(unsigned long f) { PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); } -static inline void arch_local_irq_disable(void) +static inline notrace void arch_local_irq_disable(void) { PVOP_VCALLEE0(pv_irq_ops.irq_disable); } -static inline void arch_local_irq_enable(void) +static inline notrace void arch_local_irq_enable(void) { PVOP_VCALLEE0(pv_irq_ops.irq_enable); } -static inline unsigned long arch_local_irq_save(void) +static inline notrace unsigned long arch_local_irq_save(void) { unsigned long f; diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index d395540ff894..ca0437c714b2 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -7,6 +7,7 @@ #include <linux/string.h> #include <asm/scatterlist.h> #include <asm/io.h> +#include <asm/x86_init.h> #ifdef __KERNEL__ @@ -94,8 +95,36 @@ static inline void early_quirks(void) { } extern void pci_iommu_alloc(void); -/* MSI arch hook */ -#define arch_setup_msi_irqs arch_setup_msi_irqs +#ifdef CONFIG_PCI_MSI +/* MSI arch specific hooks */ +static inline int x86_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + return x86_msi.setup_msi_irqs(dev, nvec, type); +} + +static inline void x86_teardown_msi_irqs(struct pci_dev *dev) +{ + x86_msi.teardown_msi_irqs(dev); +} + +static inline void x86_teardown_msi_irq(unsigned int irq) +{ + x86_msi.teardown_msi_irq(irq); +} +#define arch_setup_msi_irqs x86_setup_msi_irqs +#define arch_teardown_msi_irqs x86_teardown_msi_irqs +#define arch_teardown_msi_irq x86_teardown_msi_irq +/* implemented in arch/x86/kernel/apic/io_apic. */ +int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); +void native_teardown_msi_irq(unsigned int irq); +/* default to the implementation in drivers/lib/msi.c */ +#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS +void default_teardown_msi_irqs(struct pci_dev *dev); +#else +#define native_setup_msi_irqs NULL +#define native_teardown_msi_irq NULL +#define default_teardown_msi_irqs NULL +#endif #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 49c7219826f9..704526734bef 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -47,6 +47,7 @@ enum pci_bf_sort_state { extern unsigned int pcibios_max_latency; void pcibios_resource_survey(void); +void pcibios_set_cache_line_size(void); /* pci-pc.c */ diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index bf6b88ef8eeb..e969f691cbfd 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -5,7 +5,7 @@ * * SGI UV architectural definitions * - * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 
*/ #ifndef _ASM_X86_UV_UV_HUB_H @@ -77,7 +77,8 @@ * * 1111110000000000 * 5432109876543210 - * pppppppppplc0cch + * pppppppppplc0cch Nehalem-EX + * ppppppppplcc0cch Westmere-EX * sssssssssss * * p = pnode bits @@ -148,12 +149,25 @@ struct uv_hub_info_s { unsigned char m_val; unsigned char n_val; struct uv_scir_s scir; + unsigned char apic_pnode_shift; }; DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) +union uvh_apicid { + unsigned long v; + struct uvh_apicid_s { + unsigned long local_apic_mask : 24; + unsigned long local_apic_shift : 5; + unsigned long unused1 : 3; + unsigned long pnode_mask : 24; + unsigned long pnode_shift : 5; + unsigned long unused2 : 3; + } s; +}; + /* * Local & Global MMR space macros. * Note: macros are intended to be used ONLY by inline functions @@ -182,6 +196,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); #define UV_GLOBAL_MMR64_PNODE_BITS(p) \ (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT) +#define UVH_APICID 0x002D0E00L #define UV_APIC_PNODE_SHIFT 6 /* Local Bus from cpu's perspective */ @@ -280,7 +295,7 @@ static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset) */ static inline int uv_apicid_to_pnode(int apicid) { - return (apicid >> UV_APIC_PNODE_SHIFT); + return (apicid >> uv_hub_info->apic_pnode_shift); } /* diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index b2f2d2e05cec..6d90adf4428a 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h @@ -806,6 +806,78 @@ union uvh_node_present_table_u { }; /* ========================================================================= */ +/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */ +/* ========================================================================= */ +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL + +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL + +union uvh_rh_gam_alias210_overlay_config_0_mmr_u { + unsigned long v; + struct uvh_rh_gam_alias210_overlay_config_0_mmr_s { + unsigned long rsvd_0_23: 24; /* */ + unsigned long base : 8; /* RW */ + unsigned long rsvd_32_47: 16; /* */ + unsigned long m_alias : 5; /* RW */ + unsigned long rsvd_53_62: 10; /* */ + unsigned long enable : 1; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */ +/* ========================================================================= */ +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL + +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL + +union uvh_rh_gam_alias210_overlay_config_1_mmr_u { + unsigned long v; + 
struct uvh_rh_gam_alias210_overlay_config_1_mmr_s { + unsigned long rsvd_0_23: 24; /* */ + unsigned long base : 8; /* RW */ + unsigned long rsvd_32_47: 16; /* */ + unsigned long m_alias : 5; /* RW */ + unsigned long rsvd_53_62: 10; /* */ + unsigned long enable : 1; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */ +/* ========================================================================= */ +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL + +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63 +#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL + +union uvh_rh_gam_alias210_overlay_config_2_mmr_u { + unsigned long v; + struct uvh_rh_gam_alias210_overlay_config_2_mmr_s { + unsigned long rsvd_0_23: 24; /* */ + unsigned long base : 8; /* RW */ + unsigned long rsvd_32_47: 16; /* */ + unsigned long m_alias : 5; /* RW */ + unsigned long rsvd_53_62: 10; /* */ + unsigned long enable : 1; /* RW */ + } s; +}; + +/* ========================================================================= */ /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ /* ========================================================================= */ #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL @@ -857,6 +929,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u { }; /* ========================================================================= */ +/* UVH_RH_GAM_CONFIG_MMR */ +/* ========================================================================= */ +#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL + +#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 +#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL +#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 +#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL +#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12 +#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL + +union uvh_rh_gam_config_mmr_u { + unsigned long v; + struct uvh_rh_gam_config_mmr_s { + unsigned long m_skt : 6; /* RW */ + unsigned long n_skt : 4; /* RW */ + unsigned long rsvd_10_11: 2; /* */ + unsigned long mmiol_cfg : 1; /* RW */ + unsigned long rsvd_13_63: 51; /* */ + } s; +}; + +/* ========================================================================= */ /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ /* ========================================================================= */ #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL @@ -987,97 +1082,5 @@ union uvh_rtc1_int_config_u { } s; }; -/* ========================================================================= */ -/* UVH_SI_ADDR_MAP_CONFIG */ -/* ========================================================================= */ -#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL - -#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0 -#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL -#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8 -#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL - -union uvh_si_addr_map_config_u { - unsigned long v; - struct uvh_si_addr_map_config_s { - unsigned long m_skt : 6; /* RW */ - unsigned long rsvd_6_7: 2; /* */ - unsigned long n_skt : 4; /* RW */ - unsigned long 
rsvd_12_63: 52; /* */ - } s; -}; - -/* ========================================================================= */ -/* UVH_SI_ALIAS0_OVERLAY_CONFIG */ -/* ========================================================================= */ -#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL - -#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24 -#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL -#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48 -#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL -#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63 -#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL - -union uvh_si_alias0_overlay_config_u { - unsigned long v; - struct uvh_si_alias0_overlay_config_s { - unsigned long rsvd_0_23: 24; /* */ - unsigned long base : 8; /* RW */ - unsigned long rsvd_32_47: 16; /* */ - unsigned long m_alias : 5; /* RW */ - unsigned long rsvd_53_62: 10; /* */ - unsigned long enable : 1; /* RW */ - } s; -}; - -/* ========================================================================= */ -/* UVH_SI_ALIAS1_OVERLAY_CONFIG */ -/* ========================================================================= */ -#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL - -#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24 -#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL -#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48 -#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL -#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63 -#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL - -union uvh_si_alias1_overlay_config_u { - unsigned long v; - struct uvh_si_alias1_overlay_config_s { - unsigned long rsvd_0_23: 24; /* */ - unsigned long base : 8; /* RW */ - unsigned long rsvd_32_47: 16; /* */ - unsigned long m_alias : 5; /* RW */ - unsigned long rsvd_53_62: 10; /* */ - unsigned long enable : 1; /* RW */ - } s; -}; - -/* ========================================================================= */ -/* UVH_SI_ALIAS2_OVERLAY_CONFIG */ -/* ========================================================================= */ -#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL - -#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24 -#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL -#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48 -#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL -#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63 -#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL - -union uvh_si_alias2_overlay_config_u { - unsigned long v; - struct uvh_si_alias2_overlay_config_s { - unsigned long rsvd_0_23: 24; /* */ - unsigned long base : 8; /* RW */ - unsigned long rsvd_32_47: 16; /* */ - unsigned long m_alias : 5; /* RW */ - unsigned long rsvd_53_62: 10; /* */ - unsigned long enable : 1; /* RW */ - } s; -}; - -#endif /* _ASM_X86_UV_UV_MMRS_H */ +#endif /* __ASM_UV_MMRS_X86_H__ */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index baa579c8e038..64642ad019fb 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -154,9 +154,18 @@ struct x86_platform_ops { int (*i8042_detect)(void); }; +struct pci_dev; + +struct x86_msi_ops { + int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); + void (*teardown_msi_irq)(unsigned int irq); + void (*teardown_msi_irqs)(struct pci_dev *dev); +}; + extern struct x86_init_ops x86_init; extern struct x86_cpuinit_ops x86_cpuinit; extern 
struct x86_platform_ops x86_platform; +extern struct x86_msi_ops x86_msi; extern void x86_init_noop(void); extern void x86_init_uint_noop(unsigned int unused); diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h new file mode 100644 index 000000000000..2329b3eaf8d3 --- /dev/null +++ b/arch/x86/include/asm/xen/pci.h @@ -0,0 +1,65 @@ +#ifndef _ASM_X86_XEN_PCI_H +#define _ASM_X86_XEN_PCI_H + +#if defined(CONFIG_PCI_XEN) +extern int __init pci_xen_init(void); +extern int __init pci_xen_hvm_init(void); +#define pci_xen 1 +#else +#define pci_xen 0 +#define pci_xen_init (0) +static inline int pci_xen_hvm_init(void) +{ + return -1; +} +#endif +#if defined(CONFIG_XEN_DOM0) +void __init xen_setup_pirqs(void); +#else +static inline void __init xen_setup_pirqs(void) +{ +} +#endif + +#if defined(CONFIG_PCI_MSI) +#if defined(CONFIG_PCI_XEN) +/* The drivers/pci/xen-pcifront.c sets this structure to + * its own functions. + */ +struct xen_pci_frontend_ops { + int (*enable_msi)(struct pci_dev *dev, int **vectors); + void (*disable_msi)(struct pci_dev *dev); + int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec); + void (*disable_msix)(struct pci_dev *dev); +}; + +extern struct xen_pci_frontend_ops *xen_pci_frontend; + +static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev, + int **vectors) +{ + if (xen_pci_frontend && xen_pci_frontend->enable_msi) + return xen_pci_frontend->enable_msi(dev, vectors); + return -ENODEV; +} +static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev) +{ + if (xen_pci_frontend && xen_pci_frontend->disable_msi) + xen_pci_frontend->disable_msi(dev); +} +static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev, + int **vectors, int nvec) +{ + if (xen_pci_frontend && xen_pci_frontend->enable_msix) + return xen_pci_frontend->enable_msix(dev, vectors, nvec); + return -ENODEV; +} +static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev) +{ + if (xen_pci_frontend && xen_pci_frontend->disable_msix) + xen_pci_frontend->disable_msix(dev); +} +#endif /* CONFIG_PCI_XEN */ +#endif /* CONFIG_PCI_MSI */ + +#endif /* _ASM_X86_XEN_PCI_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 2c833d8c4141..9e13763b6092 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -36,7 +36,6 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o ldt.o dumpstack.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o -obj-$(CONFIG_X86_VISWS) += visws_quirks.o obj-$(CONFIG_X86_32) += probe_roms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o @@ -58,7 +57,6 @@ obj-$(CONFIG_INTEL_TXT) += tboot.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += cpu/ obj-y += acpi/ -obj-$(CONFIG_SFI) += sfi.o obj-y += reboot.o obj-$(CONFIG_MCA) += mca_32.o obj-$(CONFIG_X86_MSR) += msr.o @@ -82,7 +80,6 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_VM86) += vm86_32.o @@ -104,14 +101,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o -obj-$(CONFIG_SCx200) += scx200.o -scx200-y += scx200_32.o - -obj-$(CONFIG_OLPC) += olpc.o -obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o 
-obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o -obj-$(CONFIG_X86_MRST) += mrst.o - microcode-y := microcode_core.o microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o @@ -124,7 +113,6 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) - obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o obj-$(CONFIG_AUDIT) += audit_64.o obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c05872aa3ce0..71232b941b6c 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -513,35 +513,62 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) return 0; } -/* - * success: return IRQ number (>=0) - * failure: return < 0 - */ -int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) +static int acpi_register_gsi_pic(struct device *dev, u32 gsi, + int trigger, int polarity) { - unsigned int irq; - unsigned int plat_gsi = gsi; - #ifdef CONFIG_PCI /* * Make sure all (legacy) PCI IRQs are set as level-triggered. */ - if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { - if (trigger == ACPI_LEVEL_SENSITIVE) - eisa_set_level_irq(gsi); - } + if (trigger == ACPI_LEVEL_SENSITIVE) + eisa_set_level_irq(gsi); #endif + return gsi; +} + +static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, + int trigger, int polarity) +{ #ifdef CONFIG_X86_IO_APIC - if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) { - plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity); - } + gsi = mp_register_gsi(dev, gsi, trigger, polarity); #endif + + return gsi; +} + +int (*__acpi_register_gsi)(struct device *dev, u32 gsi, + int trigger, int polarity) = acpi_register_gsi_pic; + +/* + * success: return IRQ number (>=0) + * failure: return < 0 + */ +int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) +{ + unsigned int irq; + unsigned int plat_gsi = gsi; + + plat_gsi = (*__acpi_register_gsi)(dev, gsi, trigger, polarity); irq = gsi_to_irq(plat_gsi); return irq; } +void __init acpi_set_irq_model_pic(void) +{ + acpi_irq_model = ACPI_IRQ_MODEL_PIC; + __acpi_register_gsi = acpi_register_gsi_pic; + acpi_ioapic = 0; +} + +void __init acpi_set_irq_model_ioapic(void) +{ + acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; + __acpi_register_gsi = acpi_register_gsi_ioapic; + acpi_ioapic = 1; +} + /* * ACPI based hotplug support for CPU */ @@ -1259,8 +1286,7 @@ static void __init acpi_process_madt(void) */ error = acpi_parse_madt_ioapic_entries(); if (!error) { - acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; - acpi_ioapic = 1; + acpi_set_irq_model_ioapic(); smp_found_config = 1; } diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 0b30214282a8..5079f24c955a 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -638,7 +638,7 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len) atomic_set(&stop_machine_first, 1); wrote_text = 0; /* Use __stop_machine() because the caller already got online_cpus. 
*/ - __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); + __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask); return addr; } diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 850657d1b0ed..3f838d537392 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -52,7 +52,6 @@ #include <asm/mce.h> #include <asm/kvm_para.h> #include <asm/tsc.h> -#include <asm/atomic.h> unsigned int num_processors; diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index cefd6942f0e9..62f6e1e55b90 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -17,15 +17,16 @@ #include <linux/nmi.h> #include <linux/module.h> -/* For reliability, we're prepared to waste bits here. */ -static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; - u64 hw_nmi_get_sample_period(void) { return (u64)(cpu_khz) * 1000 * 60; } #ifdef ARCH_HAS_NMI_WATCHDOG + +/* For reliability, we're prepared to waste bits here. */ +static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; + void arch_trigger_all_cpu_backtrace(void) { int i; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 8ae808d110f4..7cc0a721f628 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -3109,7 +3109,7 @@ void destroy_irq(unsigned int irq) irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); - if (intr_remapping_enabled) + if (irq_remapped(cfg)) free_irte(irq); raw_spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq, cfg); @@ -3331,7 +3331,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) return 0; } -int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { int node, ret, sub_handle, index = 0; unsigned int irq, irq_want; @@ -3389,7 +3389,7 @@ error: return ret; } -void arch_teardown_msi_irq(unsigned int irq) +void native_teardown_msi_irq(unsigned int irq) { destroy_irq(irq); } @@ -3650,6 +3650,11 @@ void __init probe_nr_irqs_gsi(void) printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); } +int get_nr_irqs_gsi(void) +{ + return nr_irqs_gsi; +} + #ifdef CONFIG_SPARSE_IRQ int __init arch_probe_nr_irqs(void) { diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index f744f54cb248..194539aea175 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -5,7 +5,7 @@ * * SGI UV APIC functions (note: not an Intel compatible APIC) * - * Copyright (C) 2007-2009 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 
*/ #include <linux/cpumask.h> #include <linux/hardirq.h> @@ -41,6 +41,7 @@ DEFINE_PER_CPU(int, x2apic_extra_bits); static enum uv_system_type uv_system_type; static u64 gru_start_paddr, gru_end_paddr; +static union uvh_apicid uvh_apicid; int uv_min_hub_revision_id; EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); static DEFINE_SPINLOCK(uv_nmi_lock); @@ -70,12 +71,27 @@ static int early_get_nodeid(void) return node_id.s.node_id; } +static void __init early_get_apic_pnode_shift(void) +{ + unsigned long *mmr; + + mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_APICID, sizeof(*mmr)); + uvh_apicid.v = *mmr; + early_iounmap(mmr, sizeof(*mmr)); + if (!uvh_apicid.v) + /* + * Old bios, use default value + */ + uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT; +} + static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { int nodeid; if (!strcmp(oem_id, "SGI")) { nodeid = early_get_nodeid(); + early_get_apic_pnode_shift(); x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; x86_platform.nmi_init = uv_nmi_init; if (!strcmp(oem_table_id, "UVL")) @@ -84,7 +100,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) uv_system_type = UV_X2APIC; else if (!strcmp(oem_table_id, "UVH")) { __get_cpu_var(x2apic_extra_bits) = - nodeid << (UV_APIC_PNODE_SHIFT - 1); + nodeid << (uvh_apicid.s.pnode_shift - 1); uv_system_type = UV_NON_UNIQUE_APIC; return 1; } @@ -363,14 +379,14 @@ struct redir_addr { #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT static __initdata struct redir_addr redir_addrs[] = { - {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, - {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, - {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, + {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR}, + {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR}, + {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR}, }; static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) { - union uvh_si_alias0_overlay_config_u alias; + union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias; union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; int i; @@ -644,7 +660,7 @@ void uv_nmi_init(void) void __init uv_system_init(void) { - union uvh_si_addr_map_config_u m_n_config; + union uvh_rh_gam_config_mmr_u m_n_config; union uvh_node_id_u node_id; unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; @@ -654,7 +670,7 @@ void __init uv_system_init(void) map_low_mmrs(); - m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); + m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); m_val = m_n_config.s.m_skt; n_val = m_n_config.s.n_skt; mmr_base = @@ -716,6 +732,10 @@ void __init uv_system_init(void) int apicid = per_cpu(x86_cpu_to_apicid, cpu); nid = cpu_to_node(cpu); + /* + * apic_pnode_shift must be set before calling uv_apicid_to_pnode(); + */ + uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift; pnode = uv_apicid_to_pnode(apicid); blade = boot_pnode_to_blade(pnode); lcpu = uv_blade_info[blade].nr_possible_cpus; diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 46d58448c3af..e421b8cd6944 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -280,11 +280,11 @@ static struct amd_nb 
*amd_alloc_nb(int cpu, int nb_id) struct amd_nb *nb; int i; - nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL); + nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO, + cpu_to_node(cpu)); if (!nb) return NULL; - memset(nb, 0, sizeof(*nb)); nb->nb_id = nb_id; /* diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 64668dbf00a4..96656f207751 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -17,6 +17,7 @@ #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/percpu.h> +#include <linux/mm.h> #include <asm/apic.h> @@ -125,7 +126,9 @@ void __cpuinit irq_ctx_init(int cpu) if (per_cpu(hardirq_ctx, cpu)) return; - irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER); + irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), + THREAD_FLAGS, + THREAD_ORDER)); irqctx->tinfo.task = NULL; irqctx->tinfo.exec_domain = NULL; irqctx->tinfo.cpu = cpu; @@ -134,7 +137,9 @@ void __cpuinit irq_ctx_init(int cpu) per_cpu(hardirq_ctx, cpu) = irqctx; - irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER); + irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), + THREAD_FLAGS, + THREAD_ORDER)); irqctx->tinfo.task = NULL; irqctx->tinfo.exec_domain = NULL; irqctx->tinfo.cpu = cpu; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index d81cfebb848f..ec592caac4b4 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -387,7 +387,7 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) * disable hardware debugging while it is processing gdb packets or * handling exception. */ -void kgdb_disable_hw_debug(struct pt_regs *regs) +static void kgdb_disable_hw_debug(struct pt_regs *regs) { int i; int cpu = raw_smp_processor_id(); @@ -724,6 +724,7 @@ struct kgdb_arch arch_kgdb_ops = { .flags = KGDB_HW_BREAKPOINT, .set_hw_breakpoint = kgdb_set_hw_break, .remove_hw_breakpoint = kgdb_remove_hw_break, + .disable_hw_break = kgdb_disable_hw_debug, .remove_all_hw_break = kgdb_remove_all_hw_break, .correct_hw_break = kgdb_correct_hw_break, }; diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index e1af7c055c7d..ce0cb4721c9a 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf) return 0; } - equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); + equiv_cpu_table = vmalloc(size); if (!equiv_cpu_table) { pr_err("failed to allocate equivalent CPU table\n"); return 0; diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index 71825806cd44..6da143c2a6b8 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c @@ -217,13 +217,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void) wrmsrl(address, val); } -static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d) +static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d) { pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF; return 0; } -static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = { +static const struct dmi_system_id __initconst mmconf_dmi_table[] = { { .callback = set_check_enable_amd_mmconf, .ident = "Sun Microsystems Machine", @@ -234,7 +234,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = { {} }; -void __cpuinit check_enable_amd_mmconf_dmi(void) +/* Called from a __cpuinit function, but only on the BSP. 
*/ +void __ref check_enable_amd_mmconf_dmi(void) { dmi_check_system(mmconf_dmi_table); } diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index bab3b9e6f66d..008b91eefa18 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags) valid_flags = flags; } -/* - * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, - * yielding a 64-bit result. - */ -static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift) -{ - u64 product; -#ifdef __i386__ - u32 tmp1, tmp2; -#endif - - if (shift < 0) - delta >>= -shift; - else - delta <<= shift; - -#ifdef __i386__ - __asm__ ( - "mul %5 ; " - "mov %4,%%eax ; " - "mov %%edx,%4 ; " - "mul %5 ; " - "xor %5,%5 ; " - "add %4,%%eax ; " - "adc %5,%%edx ; " - : "=A" (product), "=r" (tmp1), "=r" (tmp2) - : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); -#elif defined(__x86_64__) - __asm__ ( - "mul %%rdx ; shrd $32,%%rdx,%%rax" - : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) ); -#else -#error implement me! -#endif - - return product; -} - static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow) { u64 delta = native_read_tsc() - shadow->tsc_timestamp; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 95a32746fbf9..21c6746338af 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -769,6 +769,8 @@ void __init setup_arch(char **cmdline_p) x86_init.oem.arch_setup(); + resource_alloc_from_bottom = 0; + iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; setup_memory_map(); parse_setup_data(); /* update the e820_saved too */ diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index cd6da6bf3eca..ceb2911aa439 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -6,10 +6,12 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/module.h> +#include <linux/pci.h> #include <asm/bios_ebda.h> #include <asm/paravirt.h> #include <asm/pci_x86.h> +#include <asm/pci.h> #include <asm/mpspec.h> #include <asm/setup.h> #include <asm/apic.h> @@ -99,3 +101,8 @@ struct x86_platform_ops x86_platform = { }; EXPORT_SYMBOL_GPL(x86_platform); +struct x86_msi_ops x86_msi = { + .setup_msi_irqs = native_setup_msi_irqs, + .teardown_msi_irq = native_teardown_msi_irq, + .teardown_msi_irqs = default_teardown_msi_irqs, +}; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 908ea5464a51..fb8b376bf28c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) } } -static void set_spte_track_bits(u64 *sptep, u64 new_spte) +static int set_spte_track_bits(u64 *sptep, u64 new_spte) { pfn_t pfn; u64 old_spte = *sptep; @@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte) old_spte = __xchg_spte(sptep, new_spte); if (!is_rmap_spte(old_spte)) - return; + return 0; pfn = spte_to_pfn(old_spte); if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) kvm_set_pfn_accessed(pfn); if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) kvm_set_pfn_dirty(pfn); + return 1; } static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte) { - set_spte_track_bits(sptep, new_spte); - rmap_remove(kvm, sptep); + if (set_spte_track_bits(sptep, new_spte)) + rmap_remove(kvm, sptep); } static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2288ad829b32..cdac9e592aa5 100644 --- a/arch/x86/kvm/x86.c +++ 
b/arch/x86/kvm/x86.c @@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; + events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = @@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); + events->nmi.pad = 0; events->sipi_vector = vcpu->arch.sipi_vector; events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW); + memset(&events->reserved, 0, sizeof(events->reserved)); } static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, @@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, dbgregs->dr6 = vcpu->arch.dr6; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; + memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, @@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); + memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } @@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_memslots *slots, *old_slots; unsigned long *dirty_bitmap; - spin_lock(&kvm->mmu_lock); - kvm_mmu_slot_remove_write_access(kvm, log->slot); - spin_unlock(&kvm->mmu_lock); - r = -ENOMEM; dirty_bitmap = vmalloc(n); if (!dirty_bitmap) @@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; kfree(old_slots); + spin_lock(&kvm->mmu_lock); + kvm_mmu_slot_remove_write_access(kvm, log->slot); + spin_unlock(&kvm->mmu_lock); + r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { vfree(dirty_bitmap); @@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp, user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; + memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) @@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { + preempt_disable(); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); + preempt_enable(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); } wbinvd(); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 60f498511dd6..7ffc9b727efd 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -178,11 +178,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start, /* extend the search scope */ end = max_pfn_mapped << PAGE_SHIFT; - if (end > (MAX_DMA32_PFN<<PAGE_SHIFT)) - start = MAX_DMA32_PFN<<PAGE_SHIFT; - else - start = MAX_DMA_PFN<<PAGE_SHIFT; - mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align); + start = MAX_DMA_PFN << PAGE_SHIFT; + mem = memblock_find_in_range(start, end, size, align); if (mem != MEMBLOCK_ERROR) return __va(mem); diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 49358481c733..12cdbb17ad18 100644 --- a/arch/x86/mm/tlb.c +++ 
b/arch/x86/mm/tlb.c @@ -251,7 +251,7 @@ static void __cpuinit calculate_tlb_offset(void) } } -static int tlb_cpuhp_notify(struct notifier_block *n, +static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n, unsigned long action, void *hcpu) { switch (action & 0xf) { diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile index a0207a7fdf39..effd96e33f16 100644 --- a/arch/x86/pci/Makefile +++ b/arch/x86/pci/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o obj-$(CONFIG_PCI_DIRECT) += direct.o obj-$(CONFIG_PCI_OLPC) += olpc.o +obj-$(CONFIG_PCI_XEN) += xen.o obj-y += fixup.o obj-$(CONFIG_ACPI) += acpi.o diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 15466c096ba5..0972315c3860 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data) struct acpi_resource_address64 addr; acpi_status status; unsigned long flags; - struct resource *root, *conflict; u64 start, end; status = resource_to_addr(acpi_res, &addr); @@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data) return AE_OK; if (addr.resource_type == ACPI_MEMORY_RANGE) { - root = &iomem_resource; flags = IORESOURCE_MEM; if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) flags |= IORESOURCE_PREFETCH; } else if (addr.resource_type == ACPI_IO_RANGE) { - root = &ioport_resource; flags = IORESOURCE_IO; } else return AE_OK; @@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data) return AE_OK; } - conflict = insert_resource_conflict(root, res); - if (conflict) { - dev_err(&info->bridge->dev, - "address space collision: host bridge window %pR " - "conflicts with %s %pR\n", - res, conflict->name, conflict); - } else { - pci_bus_add_resource(info->bus, res, 0); - info->res_num++; - if (addr.translation_offset) - dev_info(&info->bridge->dev, "host bridge window %pR " - "(PCI address [%#llx-%#llx])\n", - res, res->start - addr.translation_offset, - res->end - addr.translation_offset); + info->res_num++; + if (addr.translation_offset) + dev_info(&info->bridge->dev, "host bridge window %pR " + "(PCI address [%#llx-%#llx])\n", + res, res->start - addr.translation_offset, + res->end - addr.translation_offset); + else + dev_info(&info->bridge->dev, "host bridge window %pR\n", res); + + return AE_OK; +} + +static bool resource_contains(struct resource *res, resource_size_t point) +{ + if (res->start <= point && point <= res->end) + return true; + return false; +} + +static void coalesce_windows(struct pci_root_info *info, int type) +{ + int i, j; + struct resource *res1, *res2; + + for (i = 0; i < info->res_num; i++) { + res1 = &info->res[i]; + if (!(res1->flags & type)) + continue; + + for (j = i + 1; j < info->res_num; j++) { + res2 = &info->res[j]; + if (!(res2->flags & type)) + continue; + + /* + * I don't like throwing away windows because then + * our resources no longer match the ACPI _CRS, but + * the kernel resource tree doesn't allow overlaps. 
+ */ + if (resource_contains(res1, res2->start) || + resource_contains(res1, res2->end) || + resource_contains(res2, res1->start) || + resource_contains(res2, res1->end)) { + res1->start = min(res1->start, res2->start); + res1->end = max(res1->end, res2->end); + dev_info(&info->bridge->dev, + "host bridge window expanded to %pR; %pR ignored\n", + res1, res2); + res2->flags = 0; + } + } + } +} + +static void add_resources(struct pci_root_info *info) +{ + int i; + struct resource *res, *root, *conflict; + + if (!pci_use_crs) + return; + + coalesce_windows(info, IORESOURCE_MEM); + coalesce_windows(info, IORESOURCE_IO); + + for (i = 0; i < info->res_num; i++) { + res = &info->res[i]; + + if (res->flags & IORESOURCE_MEM) + root = &iomem_resource; + else if (res->flags & IORESOURCE_IO) + root = &ioport_resource; else - dev_info(&info->bridge->dev, - "host bridge window %pR\n", res); + continue; + + conflict = insert_resource_conflict(root, res); + if (conflict) + dev_err(&info->bridge->dev, + "address space collision: host bridge window %pR " + "conflicts with %s %pR\n", + res, conflict->name, conflict); + else + pci_bus_add_resource(info->bus, res, 0); } - return AE_OK; } static void @@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum, acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, &info); + add_resources(&info); return; name_alloc_fail: diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index a0772af64efb..f7c8a399978c 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -421,16 +421,10 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum) return bus; } - -int __init pcibios_init(void) +void __init pcibios_set_cache_line_size(void) { struct cpuinfo_x86 *c = &boot_cpu_data; - if (!raw_pci_ops) { - printk(KERN_WARNING "PCI: System does not support PCI\n"); - return 0; - } - /* * Set PCI cacheline size to that of the CPU if the CPU has reported it. * (For older CPUs that don't support cpuid, we se it to 32 bytes @@ -445,7 +439,16 @@ int __init pcibios_init(void) pci_dfl_cache_line_size = 32 >> 2; printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n"); } +} + +int __init pcibios_init(void) +{ + if (!raw_pci_ops) { + printk(KERN_WARNING "PCI: System does not support PCI\n"); + return 0; + } + pcibios_set_cache_line_size(); pcibios_resource_survey(); if (pci_bf_sort >= pci_force_bf) diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 55253095be84..c4bb261c106e 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -65,16 +65,21 @@ pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; - resource_size_t start = res->start; + resource_size_t start = round_down(res->end - size + 1, align); if (res->flags & IORESOURCE_IO) { - if (skip_isa_ioresource_align(dev)) - return start; - if (start & 0x300) - start = (start + 0x3ff) & ~0x3ff; + + /* + * If we're avoiding ISA aliases, the largest contiguous I/O + * port space is 256 bytes. Clearing bits 9 and 10 preserves + * all 256-byte and smaller alignments, so the result will + * still be correctly aligned. 
+ */ + if (!skip_isa_ioresource_align(dev)) + start &= ~0x300; } else if (res->flags & IORESOURCE_MEM) { if (start < BIOS_END) - start = BIOS_END; + start = res->end; /* fail; no space */ } return start; } @@ -311,6 +316,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, */ prot |= _PAGE_CACHE_UC_MINUS; + prot |= _PAGE_IOMAP; /* creating a mapping for IO */ + vma->vm_page_prot = __pgprot(prot); if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index f547ee05f715..9f9bfb705cf9 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -584,27 +584,28 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route case PCI_DEVICE_ID_INTEL_ICH9_3: case PCI_DEVICE_ID_INTEL_ICH9_4: case PCI_DEVICE_ID_INTEL_ICH9_5: - case PCI_DEVICE_ID_INTEL_TOLAPAI_0: + case PCI_DEVICE_ID_INTEL_EP80579_0: case PCI_DEVICE_ID_INTEL_ICH10_0: case PCI_DEVICE_ID_INTEL_ICH10_1: case PCI_DEVICE_ID_INTEL_ICH10_2: case PCI_DEVICE_ID_INTEL_ICH10_3: + case PCI_DEVICE_ID_INTEL_PATSBURG_LPC: r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; return 1; } - if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) && - (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) { + if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) && + (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) { r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; return 1; } - if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) && - (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) { + if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) && + (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) { r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index a918553ebc75..e282886616a0 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -65,7 +65,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, int end, u64 addr) { struct pci_mmcfg_region *new; - int num_buses; struct resource *res; if (addr == 0) @@ -82,10 +81,9 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, list_add_sorted(new); - num_buses = end - start + 1; res = &new->res; res->start = addr + PCI_MMCFG_BUS_OFFSET(start); - res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1; + res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c new file mode 100644 index 000000000000..d7b5109f7a9c --- /dev/null +++ b/arch/x86/pci/xen.c @@ -0,0 +1,416 @@ +/* + * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux + * x86 PCI core to support the Xen PCI Frontend + * + * Author: Ryan Wilson <hap9@epoch.ncsc.mil> + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/acpi.h> + +#include <linux/io.h> +#include <asm/io_apic.h> +#include <asm/pci_x86.h> + +#include <asm/xen/hypervisor.h> + +#include <xen/features.h> +#include <xen/events.h> +#include <asm/xen/pci.h> + +#ifdef CONFIG_ACPI +static int xen_hvm_register_pirq(u32 gsi, int triggering) +{ + int rc, irq; + struct physdev_map_pirq map_irq; + int shareable = 0; + char *name; + + if (!xen_hvm_domain()) + return -1; + + map_irq.domid = DOMID_SELF; + map_irq.type = MAP_PIRQ_TYPE_GSI; + 
map_irq.index = gsi; + map_irq.pirq = -1; + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); + if (rc) { + printk(KERN_WARNING "xen map irq failed %d\n", rc); + return -1; + } + + if (triggering == ACPI_EDGE_SENSITIVE) { + shareable = 0; + name = "ioapic-edge"; + } else { + shareable = 1; + name = "ioapic-level"; + } + + irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name); + + printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); + + return irq; +} + +static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, + int trigger, int polarity) +{ + return xen_hvm_register_pirq(gsi, trigger); +} +#endif + +#if defined(CONFIG_PCI_MSI) +#include <linux/msi.h> +#include <asm/msidef.h> + +struct xen_pci_frontend_ops *xen_pci_frontend; +EXPORT_SYMBOL_GPL(xen_pci_frontend); + +static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq, + struct msi_msg *msg) +{ + /* We set vector == 0 to tell the hypervisor we don't care about it, + * but we want a pirq setup instead. + * We use the dest_id field to pass the pirq that we want. */ + msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq); + msg->address_lo = + MSI_ADDR_BASE_LO | + MSI_ADDR_DEST_MODE_PHYSICAL | + MSI_ADDR_REDIRECTION_CPU | + MSI_ADDR_DEST_ID(pirq); + + msg->data = + MSI_DATA_TRIGGER_EDGE | + MSI_DATA_LEVEL_ASSERT | + /* delivery mode reserved */ + (3 << 8) | + MSI_DATA_VECTOR(0); +} + +static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int irq, pirq, ret = 0; + struct msi_desc *msidesc; + struct msi_msg msg; + + list_for_each_entry(msidesc, &dev->msi_list, list) { + xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ? + "msi-x" : "msi", &irq, &pirq); + if (irq < 0 || pirq < 0) + goto error; + printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq); + xen_msi_compose_msg(dev, pirq, &msg); + ret = set_irq_msi(irq, msidesc); + if (ret < 0) + goto error_while; + write_msi_msg(irq, &msg); + } + return 0; + +error_while: + unbind_from_irqhandler(irq, NULL); +error: + if (ret == -ENODEV) + dev_err(&dev->dev, "Xen PCI frontend has not registered" \ + " MSI/MSI-X support!\n"); + + return ret; +} + +/* + * For MSI interrupts we have to use drivers/xen/event.s functions to + * allocate an irq_desc and setup the right */ + + +static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int irq, ret, i; + struct msi_desc *msidesc; + int *v; + + v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); + if (!v) + return -ENOMEM; + + if (type == PCI_CAP_ID_MSIX) + ret = xen_pci_frontend_enable_msix(dev, &v, nvec); + else + ret = xen_pci_frontend_enable_msi(dev, &v); + if (ret) + goto error; + i = 0; + list_for_each_entry(msidesc, &dev->msi_list, list) { + irq = xen_allocate_pirq(v[i], 0, /* not sharable */ + (type == PCI_CAP_ID_MSIX) ? 
+ "pcifront-msi-x" : "pcifront-msi"); + if (irq < 0) { + ret = -1; + goto free; + } + + ret = set_irq_msi(irq, msidesc); + if (ret) + goto error_while; + i++; + } + kfree(v); + return 0; + +error_while: + unbind_from_irqhandler(irq, NULL); +error: + if (ret == -ENODEV) + dev_err(&dev->dev, "Xen PCI frontend has not registered" \ + " MSI/MSI-X support!\n"); +free: + kfree(v); + return ret; +} + +static void xen_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *msidesc; + + msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); + if (msidesc->msi_attrib.is_msix) + xen_pci_frontend_disable_msix(dev); + else + xen_pci_frontend_disable_msi(dev); +} + +static void xen_teardown_msi_irq(unsigned int irq) +{ + xen_destroy_irq(irq); +} + +static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int irq, ret; + struct msi_desc *msidesc; + + list_for_each_entry(msidesc, &dev->msi_list, list) { + irq = xen_create_msi_irq(dev, msidesc, type); + if (irq < 0) + return -1; + + ret = set_irq_msi(irq, msidesc); + if (ret) + goto error; + } + return 0; + +error: + xen_destroy_irq(irq); + return ret; +} +#endif + +static int xen_pcifront_enable_irq(struct pci_dev *dev) +{ + int rc; + int share = 1; + + dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq); + + if (dev->irq < 0) + return -EINVAL; + + if (dev->irq < NR_IRQS_LEGACY) + share = 0; + + rc = xen_allocate_pirq(dev->irq, share, "pcifront"); + if (rc < 0) { + dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n", + dev->irq, rc); + return rc; + } + return 0; +} + +int __init pci_xen_init(void) +{ + if (!xen_pv_domain() || xen_initial_domain()) + return -ENODEV; + + printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n"); + + pcibios_set_cache_line_size(); + + pcibios_enable_irq = xen_pcifront_enable_irq; + pcibios_disable_irq = NULL; + +#ifdef CONFIG_ACPI + /* Keep ACPI out of the picture */ + acpi_noirq = 1; +#endif + +#ifdef CONFIG_PCI_MSI + x86_msi.setup_msi_irqs = xen_setup_msi_irqs; + x86_msi.teardown_msi_irq = xen_teardown_msi_irq; + x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; +#endif + return 0; +} + +int __init pci_xen_hvm_init(void) +{ + if (!xen_feature(XENFEAT_hvm_pirqs)) + return 0; + +#ifdef CONFIG_ACPI + /* + * We don't want to change the actual ACPI delivery model, + * just how GSIs get registered. 
+ */ + __acpi_register_gsi = acpi_register_gsi_xen_hvm; +#endif + +#ifdef CONFIG_PCI_MSI + x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; + x86_msi.teardown_msi_irq = xen_teardown_msi_irq; +#endif + return 0; +} + +#ifdef CONFIG_XEN_DOM0 +static int xen_register_pirq(u32 gsi, int triggering) +{ + int rc, irq; + struct physdev_map_pirq map_irq; + int shareable = 0; + char *name; + + if (!xen_pv_domain()) + return -1; + + if (triggering == ACPI_EDGE_SENSITIVE) { + shareable = 0; + name = "ioapic-edge"; + } else { + shareable = 1; + name = "ioapic-level"; + } + + irq = xen_allocate_pirq(gsi, shareable, name); + + printk(KERN_DEBUG "xen: --> irq=%d\n", irq); + + if (irq < 0) + goto out; + + map_irq.domid = DOMID_SELF; + map_irq.type = MAP_PIRQ_TYPE_GSI; + map_irq.index = gsi; + map_irq.pirq = irq; + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); + if (rc) { + printk(KERN_WARNING "xen map irq failed %d\n", rc); + return -1; + } + +out: + return irq; +} + +static int xen_register_gsi(u32 gsi, int triggering, int polarity) +{ + int rc, irq; + struct physdev_setup_gsi setup_gsi; + + if (!xen_pv_domain()) + return -1; + + printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n", + gsi, triggering, polarity); + + irq = xen_register_pirq(gsi, triggering); + + setup_gsi.gsi = gsi; + setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1); + setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1); + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi); + if (rc == -EEXIST) + printk(KERN_INFO "Already setup the GSI :%d\n", gsi); + else if (rc) { + printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n", + gsi, rc); + } + + return irq; +} + +static __init void xen_setup_acpi_sci(void) +{ + int rc; + int trigger, polarity; + int gsi = acpi_sci_override_gsi; + + if (!gsi) + return; + + rc = acpi_get_override_irq(gsi, &trigger, &polarity); + if (rc) { + printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi" + " sci, rc=%d\n", rc); + return; + } + trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; + polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; + + printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d " + "polarity=%d\n", gsi, trigger, polarity); + + gsi = xen_register_gsi(gsi, trigger, polarity); + printk(KERN_INFO "xen: acpi sci %d\n", gsi); + + return; +} + +static int acpi_register_gsi_xen(struct device *dev, u32 gsi, + int trigger, int polarity) +{ + return xen_register_gsi(gsi, trigger, polarity); +} + +static int __init pci_xen_initial_domain(void) +{ +#ifdef CONFIG_PCI_MSI + x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; + x86_msi.teardown_msi_irq = xen_teardown_msi_irq; +#endif + xen_setup_acpi_sci(); + __acpi_register_gsi = acpi_register_gsi_xen; + + return 0; +} + +void __init xen_setup_pirqs(void) +{ + int irq; + + pci_xen_initial_domain(); + + if (0 == nr_ioapics) { + for (irq = 0; irq < NR_IRQS_LEGACY; irq++) + xen_allocate_pirq(irq, 0, "xt-pic"); + return; + } + + /* Pre-allocate legacy irqs */ + for (irq = 0; irq < NR_IRQS_LEGACY; irq++) { + int trigger, polarity; + + if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) + continue; + + xen_register_pirq(irq, + trigger ? 
ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE); + } +} +#endif diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile new file mode 100644 index 000000000000..7bf70b812fa2 --- /dev/null +++ b/arch/x86/platform/Makefile @@ -0,0 +1,8 @@ +# Platform specific code goes here +obj-y += efi/ +obj-y += mrst/ +obj-y += olpc/ +obj-y += scx200/ +obj-y += sfi/ +obj-y += visws/ +obj-y += uv/ diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile new file mode 100644 index 000000000000..73b8be0f3675 --- /dev/null +++ b/arch/x86/platform/efi/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o diff --git a/arch/x86/kernel/efi.c b/arch/x86/platform/efi/efi.c index 0fe27d7c6258..0fe27d7c6258 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/platform/efi/efi.c diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/platform/efi/efi_32.c index 5cab48ee61a4..5cab48ee61a4 100644 --- a/arch/x86/kernel/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/platform/efi/efi_64.c index ac0621a7ac3d..ac0621a7ac3d 100644 --- a/arch/x86/kernel/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index fbe66e626c09..fbe66e626c09 100644 --- a/arch/x86/kernel/efi_stub_32.S +++ b/arch/x86/platform/efi/efi_stub_32.S diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 4c07ccab8146..4c07ccab8146 100644 --- a/arch/x86/kernel/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S diff --git a/arch/x86/platform/mrst/Makefile b/arch/x86/platform/mrst/Makefile new file mode 100644 index 000000000000..efbbc552fa95 --- /dev/null +++ b/arch/x86/platform/mrst/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_X86_MRST) += mrst.o diff --git a/arch/x86/kernel/mrst.c b/arch/x86/platform/mrst/mrst.c index 79ae68154e87..79ae68154e87 100644 --- a/arch/x86/kernel/mrst.c +++ b/arch/x86/platform/mrst/mrst.c diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile new file mode 100644 index 000000000000..c31b8fcb5a86 --- /dev/null +++ b/arch/x86/platform/olpc/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_OLPC) += olpc.o +obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o +obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o diff --git a/arch/x86/kernel/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c index f5442c03abc3..f5442c03abc3 100644 --- a/arch/x86/kernel/olpc-xo1.c +++ b/arch/x86/platform/olpc/olpc-xo1.c diff --git a/arch/x86/kernel/olpc.c b/arch/x86/platform/olpc/olpc.c index edaf3fe8dc5e..edaf3fe8dc5e 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/platform/olpc/olpc.c diff --git a/arch/x86/kernel/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c index 787320464379..787320464379 100644 --- a/arch/x86/kernel/olpc_ofw.c +++ b/arch/x86/platform/olpc/olpc_ofw.c diff --git a/arch/x86/platform/scx200/Makefile b/arch/x86/platform/scx200/Makefile new file mode 100644 index 000000000000..762b4c7f4314 --- /dev/null +++ b/arch/x86/platform/scx200/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_SCx200) += scx200.o +scx200-y += scx200_32.o diff --git a/arch/x86/kernel/scx200_32.c b/arch/x86/platform/scx200/scx200_32.c index 7e004acbe526..7e004acbe526 100644 --- a/arch/x86/kernel/scx200_32.c +++ b/arch/x86/platform/scx200/scx200_32.c diff --git a/arch/x86/platform/sfi/Makefile b/arch/x86/platform/sfi/Makefile new file mode 100644 index 000000000000..cc5db1168a5e --- /dev/null +++ b/arch/x86/platform/sfi/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SFI) += sfi.o diff --git 
a/arch/x86/kernel/sfi.c b/arch/x86/platform/sfi/sfi.c index dd4c281ffe57..dd4c281ffe57 100644 --- a/arch/x86/kernel/sfi.c +++ b/arch/x86/platform/sfi/sfi.c diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile new file mode 100644 index 000000000000..6c40995fefb8 --- /dev/null +++ b/arch/x86/platform/uv/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 8bc57baaa9ad..8bc57baaa9ad 100644 --- a/arch/x86/kernel/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 20ea20a39e2a..a318194002b5 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode) * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub */ - bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* - UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); + bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE + * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); BUG_ON(!bau_desc); pa = uv_gpa(bau_desc); /* need the real nasid*/ @@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode) struct bau_payload_queue_entry *pqp_malloc; struct bau_control *bcp; - pqp = (struct bau_payload_queue_entry *) kmalloc_node( - (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), - GFP_KERNEL, node); + pqp = kmalloc_node((DEST_Q_SIZE + 1) + * sizeof(struct bau_payload_queue_entry), + GFP_KERNEL, node); BUG_ON(!pqp); pqp_malloc = pqp; @@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs) timeout_us = calculate_destination_timeout(); - uvhub_descs = (struct uvhub_desc *) - kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); + uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); for_each_present_cpu(cpu) { diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index 7b24460917d5..7b24460917d5 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c index 309c70fb7759..309c70fb7759 100644 --- a/arch/x86/kernel/uv_sysfs.c +++ b/arch/x86/platform/uv/uv_sysfs.c diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/platform/uv/uv_time.c index 56e421bc379b..56e421bc379b 100644 --- a/arch/x86/kernel/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c diff --git a/arch/x86/platform/visws/Makefile b/arch/x86/platform/visws/Makefile new file mode 100644 index 000000000000..91bc17ab2fd5 --- /dev/null +++ b/arch/x86/platform/visws/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_X86_VISWS) += visws_quirks.o diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c index 3371bd053b89..3371bd053b89 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/platform/visws/visws_quirks.c diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 90a7f5ad6916..5b54892e4bc3 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -13,6 +13,16 @@ config XEN kernel to boot in a paravirtualized environment under the Xen hypervisor. 
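The HVM and initial-domain paths above both reroute GSI registration by assigning __acpi_register_gsi rather than patching the ACPI interrupt code. A minimal sketch of the call site they hook, assuming acpi_register_gsi() simply forwards through the pointer (that forwarding body is not part of this diff):

#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/device.h>

/* Declared in <asm/acpi.h>; defaults to the native x86 implementation. */
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
				  int trigger, int polarity);

int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
	/* Xen repoints this to acpi_register_gsi_xen{,_hvm}() at boot. */
	return __acpi_register_gsi(dev, gsi, trigger, polarity);
}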
+config XEN_DOM0 + def_bool y + depends on XEN && PCI_XEN && SWIOTLB_XEN + depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI + +# Dummy symbol since people have come to rely on the PRIVILEGED_GUEST +# name in tools. +config XEN_PRIVILEGED_GUEST + def_bool XEN_DOM0 + config XEN_PVHVM def_bool y depends on XEN diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 70ddeaeb1ef3..235c0f4d3861 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -46,6 +46,7 @@ #include <asm/paravirt.h> #include <asm/apic.h> #include <asm/page.h> +#include <asm/xen/pci.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include <asm/fixmap.h> @@ -236,6 +237,7 @@ static __init void xen_init_cpuid_mask(void) cpuid_leaf1_edx_mask = ~((1 << X86_FEATURE_MCE) | /* disable MCE */ (1 << X86_FEATURE_MCA) | /* disable MCA */ + (1 << X86_FEATURE_MTRR) | /* disable MTRR */ (1 << X86_FEATURE_ACC)); /* thermal monitoring */ if (!xen_initial_domain()) @@ -1184,6 +1186,7 @@ asmlinkage void __init xen_start_kernel(void) xen_raw_console_write("mapping kernel into physical memory\n"); pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); + xen_ident_map_ISA(); /* Allocate and initialize top and mid mfn levels for p2m structure */ xen_build_mfn_list_list(); @@ -1222,6 +1225,8 @@ asmlinkage void __init xen_start_kernel(void) add_preferred_console("xenboot", 0, NULL); add_preferred_console("tty", 0, NULL); add_preferred_console("hvc", 0, NULL); + if (pci_xen) + x86_init.pci.arch_init = pci_xen_init; } else { /* Make sure ACS will be enabled */ pci_request_acs(); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 9631c90907eb..21ed8d7f75a5 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1975,6 +1975,7 @@ static void *m2v(phys_addr_t maddr) return __ka(m2p(maddr)); } +/* Set the page permissions on an identity-mapped pages */ static void set_page_prot(void *addr, pgprot_t prot) { unsigned long pfn = __pa(addr) >> PAGE_SHIFT; @@ -2125,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, { pmd_t *kernel_pmd; - level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE); + level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + xen_start_info->nr_pt_frames * PAGE_SIZE + @@ -2159,6 +2160,8 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, } #endif /* CONFIG_X86_64 */ +static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; + static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) { pte_t pte; @@ -2179,15 +2182,28 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #else case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: #endif -#ifdef CONFIG_X86_LOCAL_APIC - case FIX_APIC_BASE: /* maps dummy local APIC */ -#endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: /* All local page mappings */ pte = pfn_pte(phys, prot); break; +#ifdef CONFIG_X86_LOCAL_APIC + case FIX_APIC_BASE: /* maps dummy local APIC */ + pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); + break; +#endif + +#ifdef CONFIG_X86_IO_APIC + case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: + /* + * We just don't map the IO APIC - all access is via + * hypercalls. Keep the address in the pte for reference. 
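The APIC and IO-APIC fixmap slots above are deliberately backed by dummy_mapping, a page that xen_init_mmu_ops() below fills with 0xff, so stray MMIO-style accesses to those windows read back all-ones instead of faulting in a PV guest. A hypothetical illustration of what a caller then observes (the register offset is chosen only as an example):

#include <linux/types.h>
#include <asm/fixmap.h>
#include <asm/io.h>

static u32 peek_fake_apic(void)
{
	/* Resolves to the dummy page installed by xen_set_fixmap() above. */
	void __iomem *apic = (void __iomem *)fix_to_virt(FIX_APIC_BASE);

	/* Every "register" reads as 0xffffffff; no fault is taken. */
	return readl(apic + 0x30);
}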
+ */ + pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); + break; +#endif + case FIX_PARAVIRT_BOOTMAP: /* This is an MFN, but it isn't an IO mapping from the IO domain */ @@ -2212,6 +2228,29 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #endif } +__init void xen_ident_map_ISA(void) +{ + unsigned long pa; + + /* + * If we're dom0, then linear map the ISA machine addresses into + * the kernel's address space. + */ + if (!xen_initial_domain()) + return; + + xen_raw_printk("Xen: setup ISA identity maps\n"); + + for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) { + pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO); + + if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0)) + BUG(); + } + + xen_flush_tlb(); +} + static __init void xen_post_allocator_init(void) { pv_mmu_ops.set_pte = xen_set_pte; @@ -2320,6 +2359,8 @@ void __init xen_init_mmu_ops(void) pv_mmu_ops = xen_mmu_ops; vmap_lazy_unmap = false; + + memset(dummy_mapping, 0xff, PAGE_SIZE); } /* Protected by xen_reservation_lock. */ diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index 22471001b74c..bfd0632fe65e 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -1,6 +1,7 @@ /* Glue code to lib/swiotlb-xen.c */ #include <linux/dma-mapping.h> +#include <linux/pci.h> #include <xen/swiotlb-xen.h> #include <asm/xen/hypervisor.h> @@ -55,6 +56,9 @@ void __init pci_xen_swiotlb_init(void) if (xen_swiotlb) { xen_swiotlb_init(1); dma_ops = &xen_swiotlb_dma_ops; + + /* Make sure ACS will be enabled */ + pci_request_acs(); } } IOMMU_INIT_FINISH(pci_xen_swiotlb_detect, diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 105db2501050..769c4b01fa32 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, const struct e820map *e820) { phys_addr_t max_addr = PFN_PHYS(max_pfn); - phys_addr_t last_end = 0; + phys_addr_t last_end = ISA_END_ADDRESS; unsigned long released = 0; int i; + /* Free any unused memory above the low 1Mbyte. */ for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { phys_addr_t end = e820->map[i].addr; end = min(max_addr, end); - released += xen_release_chunk(last_end, end); - last_end = e820->map[i].addr + e820->map[i].size; + if (last_end < end) + released += xen_release_chunk(last_end, end); + last_end = max(last_end, e820->map[i].addr + e820->map[i].size); } if (last_end < max_addr) @@ -164,6 +166,7 @@ char * __init xen_memory_setup(void) XENMEM_memory_map; rc = HYPERVISOR_memory_op(op, &memmap); if (rc == -ENOSYS) { + BUG_ON(xen_initial_domain()); memmap.nr_entries = 1; map[0].addr = 0ULL; map[0].size = mem_end; @@ -201,9 +204,13 @@ char * __init xen_memory_setup(void) } /* - * Even though this is normal, usable memory under Xen, reserve - * ISA memory anyway because too many things think they can poke + * In domU, the ISA region is normal, usable memory, but we + * reserve ISA memory anyway because too many things poke * about in there. + * + * In Dom0, the host E820 information can leave gaps in the + * ISA range, which would cause us to release those pages. To + * avoid this, we unconditionally reserve them here. 
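The xen_return_unused_memory() changes above serve the same end as the unconditional ISA reservation described here: nothing below 1 MiB is ever released back to the hypervisor. A worked example over a hypothetical dom0 host map (addresses invented for illustration) shows the effect of starting last_end at ISA_END_ADDRESS:

/*
 *   0x000000000 - 0x00009fc00   E820_RAM
 *   0x0000f0000 - 0x000100000   E820_RESERVED
 *   0x000100000 - 0x080000000   E820_RAM
 *
 * Old loop: last_end starts at 0, so the 0x9fc00-0xf0000 hole looks
 * unused and xen_release_chunk() hands those ISA-range pages back.
 * New loop: last_end starts at ISA_END_ADDRESS (1 MiB) and only moves
 * forward via max(), so the first gap that can be released is the
 * first one above 1 MiB; the legacy region stays with the domain.
 */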
*/ e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RESERVED); @@ -367,7 +374,5 @@ void __init xen_arch_setup(void) pm_idle = xen_idle; - paravirt_disable_iospace(); - fiddle_vdso(); } diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index f4d010031465..72a4c7959045 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -28,6 +28,7 @@ #include <asm/xen/interface.h> #include <asm/xen/hypercall.h> +#include <xen/xen.h> #include <xen/page.h> #include <xen/events.h> @@ -156,11 +157,35 @@ static void __init xen_fill_possible_map(void) { int i, rc; + if (xen_initial_domain()) + return; + + for (i = 0; i < nr_cpu_ids; i++) { + rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); + if (rc >= 0) { + num_processors++; + set_cpu_possible(i, true); + } + } +} + +static void __init xen_filter_cpu_maps(void) +{ + int i, rc; + + if (!xen_initial_domain()) + return; + + num_processors = 0; + disabled_cpus = 0; for (i = 0; i < nr_cpu_ids; i++) { rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); if (rc >= 0) { num_processors++; set_cpu_possible(i, true); + } else { + set_cpu_possible(i, false); + set_cpu_present(i, false); } } } @@ -174,6 +199,7 @@ static void __init xen_smp_prepare_boot_cpu(void) old memory can be recycled */ make_lowmem_page_readwrite(xen_initial_gdt); + xen_filter_cpu_maps(); xen_setup_vcpu_info_placement(); } |
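Both xen_fill_possible_map() and the new xen_filter_cpu_maps() above rest on the same hypervisor probe, sketched below with a hypothetical helper name. The presumed reason dom0 needs the extra filtering pass is that it parses the machine's ACPI tables, which can advertise more CPUs than the hypervisor actually grants it as VCPUs, so the possible/present masks are trimmed back to what VCPUOP_is_up reports.

#include <linux/types.h>
#include <xen/interface/vcpu.h>
#include <asm/xen/hypercall.h>

/* Hypothetical helper: does VCPU 'cpu' exist for this domain? */
static bool xen_vcpu_exists(int cpu)
{
	/* VCPUOP_is_up returns >= 0 for a valid VCPU, an error code otherwise. */
	return HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL) >= 0;
}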