author     Linus Torvalds <torvalds@linux-foundation.org>  2013-08-21 16:38:33 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-08-21 16:38:33 -0700
commit     d936d2d452ca1848cc4b397bdfb96d4278b9f934 (patch)
tree       205a6d386d363d94208a3340f911009d74d2aa29
parent     0903391acbc1b509a60a6cb682344c748a69821a (diff)
parent     fc78d343fa74514f6fd117b5ef4cd27e4ac30236 (diff)
download   linux-d936d2d452ca1848cc4b397bdfb96d4278b9f934.tar.gz
           linux-d936d2d452ca1848cc4b397bdfb96d4278b9f934.tar.bz2
           linux-d936d2d452ca1848cc4b397bdfb96d4278b9f934.zip
Merge tag 'stable/for-linus-3.11-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull Xen bug-fixes from Konrad Rzeszutek Wilk:
- On ARM, calls to get_cpu()/put_cpu() were not balanced (see the sketch after this list).
- Fix to make tboot + Xen + Linux work together correctly.
- Fix event-channel VCPU binding issues.
- Fix a vCPU online race where IPIs are sent to a not-yet-online vCPU.
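For context on the first item above, here is a minimal sketch (hypothetical names, not the actual arch/arm/xen/enlighten.c code) of the balanced pattern the ARM patch restores: get_cpu() disables preemption and returns the current CPU id, so every path out of the function must call put_cpu().

#include <linux/smp.h>     /* get_cpu(), put_cpu() */
#include <linux/percpu.h>  /* DEFINE_PER_CPU(), per_cpu() */

/* Hypothetical per-CPU slot, standing in for xen_vcpu in enlighten.c. */
static DEFINE_PER_CPU(void *, example_vcpu);

static void example_percpu_init(void *vcpup)
{
        int cpu = get_cpu();    /* disables preemption, returns this CPU's id */

        per_cpu(example_vcpu, cpu) = vcpup;

        put_cpu();              /* the pairing call the merged ARM patch adds */
}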
* tag 'stable/for-linus-3.11-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/smp: initialize IPI vectors before marking CPU online
xen/events: mask events when changing their VCPU binding
xen/events: initialize local per-cpu mask for all possible events
x86/xen: do not identity map UNUSABLE regions in the machine E820
xen/arm: missing put_cpu in xen_percpu_init
-rw-r--r--  arch/arm/xen/enlighten.c |  1
-rw-r--r--  arch/x86/xen/setup.c     | 22
-rw-r--r--  arch/x86/xen/smp.c       | 11
-rw-r--r--  drivers/xen/events.c     | 13
4 files changed, 44 insertions(+), 3 deletions(-)
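Before the full diff, a hedged sketch of the sequence the "xen/events: mask events when changing their VCPU binding" patch introduces in rebind_irq_to_cpu(): the channel is masked across the EVTCHNOP_bind_vcpu hypercall and is unmasked afterwards only if it was not already masked. The helper name below is hypothetical, and the snippet assumes the internal helpers of drivers/xen/events.c (BM(), bind_evtchn_to_cpu(), unmask_evtchn()) are in scope, as they are in the patch itself.

/* Hypothetical condensation of the rebind_irq_to_cpu() changes. */
static int example_rebind_masked(int evtchn, unsigned int tcpu)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        struct evtchn_bind_vcpu bind_vcpu = {
                .port = evtchn,
                .vcpu = tcpu,
        };
        int masked;

        /*
         * Mask the event while the binding changes so it cannot be
         * delivered on an unexpected VCPU; remember the prior state.
         */
        masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));

        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        /* Only unmask if the channel was unmasked before we started. */
        if (!masked)
                unmask_evtchn(evtchn);

        return 0;
}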
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c9770ba5c7df..8a6295c86209 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
         per_cpu(xen_vcpu, cpu) = vcpup;
 
         enable_percpu_irq(xen_events_irq, 0);
+        put_cpu();
 }
 
 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 056d11faef21..8f3eea6b80c5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
         e820_add_region(start, end - start, type);
 }
 
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+        struct e820entry *entry;
+        unsigned int i;
+
+        for (i = 0, entry = list; i < map_size; i++, entry++) {
+                if (entry->type == E820_UNUSABLE)
+                        entry->type = E820_RAM;
+        }
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
         }
         BUG_ON(rc);
 
+        /*
+         * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+         * regions, so if we're using the machine memory map leave the
+         * region as RAM as it is in the pseudo-physical map.
+         *
+         * UNUSABLE regions in domUs are not handled and will need
+         * a patch in the future.
+         */
+        if (xen_initial_domain())
+                xen_ignore_unusable(map, memmap.nr_entries);
+
         /* Make sure the Xen-supplied memory map is well-ordered. */
         sanitize_e820_map(map, memmap.nr_entries,
                           &memmap.nr_entries);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index ca92754eb846..b81c88e51daa 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         int rc;
-        rc = native_cpu_up(cpu, tidle);
-        WARN_ON (xen_smp_intr_init(cpu));
+        /*
+         * xen_smp_intr_init() needs to run before native_cpu_up()
+         * so that IPI vectors are set up on the booting CPU before
+         * it is marked online in native_cpu_up().
+         */
+        rc = xen_smp_intr_init(cpu);
+        WARN_ON(rc);
+        if (!rc)
+                rc = native_cpu_up(cpu, tidle);
         return rc;
 }
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a58ac435a9a4..5e8be462aed5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void)
 
         for_each_possible_cpu(i)
                 memset(per_cpu(cpu_evtchn_mask, i),
-                       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+                       (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
 }
 
 static inline void clear_evtchn(int port)
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
+        struct shared_info *s = HYPERVISOR_shared_info;
         struct evtchn_bind_vcpu bind_vcpu;
         int evtchn = evtchn_from_irq(irq);
+        int masked;
 
         if (!VALID_EVTCHN(evtchn))
                 return -1;
@@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
         bind_vcpu.vcpu = tcpu;
 
         /*
+         * Mask the event while changing the VCPU binding to prevent
+         * it being delivered on an unexpected VCPU.
+         */
+        masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
+        /*
          * If this fails, it usually just indicates that we're dealing with a
          * virq or IPI channel, which don't actually need to be rebound. Ignore
          * it, but don't do the xenlinux-level rebind in that case.
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
         if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                 bind_evtchn_to_cpu(evtchn, tcpu);
 
+        if (!masked)
+                unmask_evtchn(evtchn);
+
         return 0;
 }
 
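As a closing illustration of the setup.c change, a self-contained userspace sketch (stub types and constants for illustration, not the kernel's e820 headers) of what xen_ignore_unusable() does for dom0: every E820_UNUSABLE entry in the machine map is rewritten to E820_RAM before sanitize_e820_map() runs, so Xen is never asked to create the 1:1 mapping it refuses for UNUSABLE regions.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stub e820 types and values for illustration only. */
#define E820_RAM      1
#define E820_UNUSABLE 5

struct e820entry {
        uint64_t addr;
        uint64_t size;
        uint32_t type;
};

/* Mirrors the loop added to arch/x86/xen/setup.c: mark UNUSABLE as RAM. */
static void ignore_unusable(struct e820entry *list, size_t map_size)
{
        size_t i;

        for (i = 0; i < map_size; i++) {
                if (list[i].type == E820_UNUSABLE)
                        list[i].type = E820_RAM;
        }
}

int main(void)
{
        struct e820entry map[] = {
                { 0x0000000000000000ULL, 0x9fc00ULL, E820_RAM },
                { 0x00000000bf780000ULL, 0x10000ULL, E820_UNUSABLE },
        };

        ignore_unusable(map, sizeof(map) / sizeof(map[0]));
        printf("entry 1 type after rewrite: %u (E820_RAM)\n", map[1].type);
        return 0;
}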