author     Gleb Natapov <gleb@redhat.com>           2011-03-07 14:55:06 +0200
committer  Marcelo Tosatti <mtosatti@redhat.com>    2011-03-17 13:08:33 -0300
commit     5601d05b8c340ee2643febc146099325eff187eb (patch)
tree       61fb3298bb267eecfd899621cc852114d636de52 /arch/x86
parent     831ca6093ca486060721f5c3c74f97b10f3172b9 (diff)
KVM: emulator: Fix io permission checking for 64bit guest
The current implementation truncates the upper 32 bits of the TR base address
during the I/O permission bitmap check. This patch fixes that.
Reported-and-tested-by: Francis Moreau <francis.moro@gmail.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
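For illustration only, a minimal, self-contained sketch (not kernel code; the variable names below are hypothetical) of the arithmetic at the heart of the fix in emulator_io_port_access_allowed(): in 64-bit mode the TR base is a 64-bit address whose upper half is reported separately from the descriptor (the new base3 argument), so the emulator must recombine both halves before reading the TSS I/O permission bitmap.

/*
 * Illustrative sketch only: shows why keeping only the low 32 bits of a
 * 64-bit TR base yields the wrong address for the TSS I/O bitmap pointer,
 * and how recombining the separately reported upper half (base3) fixes it.
 */
#include <stdint.h>
#include <stdio.h>

/* Offset of the I/O bitmap base pointer within the 64-bit TSS. */
#define IO_BITMAP_PTR_OFFSET 102

int main(void)
{
	/* A TR base above 4 GiB, as a 64-bit guest may legitimately use. */
	uint64_t tr_base      = 0x0000000123456000ULL;
	uint32_t desc_base_lo = (uint32_t)tr_base;         /* bits 31:0 from the descriptor */
	uint32_t base3        = (uint32_t)(tr_base >> 32);  /* bits 63:32, reported separately */

	/* Buggy behaviour: only the low 32 bits of the base are used. */
	uint64_t truncated = (uint64_t)desc_base_lo;

	/* Fixed behaviour: recombine both halves before adding the offset. */
	uint64_t full = (uint64_t)desc_base_lo | ((uint64_t)base3 << 32);

	printf("bitmap pointer read from (buggy): 0x%llx\n",
	       (unsigned long long)(truncated + IO_BITMAP_PTR_OFFSET));
	printf("bitmap pointer read from (fixed): 0x%llx\n",
	       (unsigned long long)(full + IO_BITMAP_PTR_OFFSET));
	return 0;
}

In the patch below, the same recombined base is used for both TSS reads: the fetch of the I/O bitmap pointer at base + 102 and the fetch of the permission byte at base + io_bitmap_ptr + port/8.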
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h |  4
-rw-r--r--  arch/x86/kvm/emulate.c             | 37
-rw-r--r--  arch/x86/kvm/x86.c                 | 15
3 files changed, 35 insertions, 21 deletions
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 50ebc327a368..0f5213564326 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -142,9 +142,9 @@ struct x86_emulate_ops {
 	int (*pio_out_emulated)(int size, unsigned short port, const void *val,
 				unsigned int count, struct kvm_vcpu *vcpu);
 
-	bool (*get_cached_descriptor)(struct desc_struct *desc,
+	bool (*get_cached_descriptor)(struct desc_struct *desc, u32 *base3,
 				      int seg, struct kvm_vcpu *vcpu);
-	void (*set_cached_descriptor)(struct desc_struct *desc,
+	void (*set_cached_descriptor)(struct desc_struct *desc, u32 base3,
 				      int seg, struct kvm_vcpu *vcpu);
 	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
 	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a90d7e033304..d6088b8686fb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -878,7 +878,8 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 	if (selector & 1 << 2) {
 		struct desc_struct desc;
 		memset (dt, 0, sizeof *dt);
-		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
+		if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
+						ctxt->vcpu))
 			return;
 
 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
@@ -930,6 +931,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	return ret;
 }
 
+/* Does not support long mode */
 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   struct x86_emulate_ops *ops,
 				   u16 selector, int seg)
@@ -1041,7 +1043,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	}
 load:
 	ops->set_segment_selector(selector, seg, ctxt->vcpu);
-	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
+	ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
 	return X86EMUL_CONTINUE;
 exception:
 	emulate_exception(ctxt, err_vec, err_code, true);
@@ -1561,7 +1563,7 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 			struct desc_struct *ss)
 {
 	memset(cs, 0, sizeof(struct desc_struct));
-	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
 	memset(ss, 0, sizeof(struct desc_struct));
 
 	cs->l = 0;		/* will be adjusted later */
@@ -1608,9 +1610,9 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		cs.d = 0;
 		cs.l = 1;
 	}
-	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
 	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
 	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	c->regs[VCPU_REGS_RCX] = c->eip;
@@ -1680,9 +1682,9 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		cs.l = 1;
 	}
 
-	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
 	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
 	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
@@ -1737,9 +1739,9 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	cs_sel |= SELECTOR_RPL_MASK;
 	ss_sel |= SELECTOR_RPL_MASK;
 
-	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
 	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
 	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	c->eip = c->regs[VCPU_REGS_RDX];
@@ -1765,24 +1767,29 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 					    u16 port, u16 len)
 {
 	struct desc_struct tr_seg;
+	u32 base3;
 	int r;
 	u16 io_bitmap_ptr;
 	u8 perm, bit_idx = port & 0x7;
 	unsigned mask = (1 << len) - 1;
+	unsigned long base;
 
-	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
+	ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
 	if (!tr_seg.p)
 		return false;
 	if (desc_limit_scaled(&tr_seg) < 103)
 		return false;
-	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
-			  ctxt->vcpu, NULL);
+	base = get_desc_base(&tr_seg);
+#ifdef CONFIG_X86_64
+	base |= ((u64)base3) << 32;
+#endif
+	r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
 		return false;
-	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
-			  &perm, 1, ctxt->vcpu, NULL);
+	r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 1, ctxt->vcpu,
+			  NULL);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if ((perm >> bit_idx) & mask)
@@ -2127,7 +2134,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	}
 
 	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
-	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
+	ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
 	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
 
 	if (has_error_code) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9c2b8e6c70c..01f08a65d09b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4162,8 +4162,8 @@ static unsigned long emulator_get_cached_segment_base(int seg,
 	return get_segment_base(vcpu, seg);
 }
 
-static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
-					   struct kvm_vcpu *vcpu)
+static bool emulator_get_cached_descriptor(struct desc_struct *desc, u32 *base3,
+					   int seg, struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment var;
 
@@ -4176,6 +4176,10 @@ static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
 		var.limit >>= 12;
 	set_desc_limit(desc, var.limit);
 	set_desc_base(desc, (unsigned long)var.base);
+#ifdef CONFIG_X86_64
+	if (base3)
+		*base3 = var.base >> 32;
+#endif
 	desc->type = var.type;
 	desc->s = var.s;
 	desc->dpl = var.dpl;
@@ -4188,8 +4192,8 @@ static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
 	return true;
 }
 
-static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
-					   struct kvm_vcpu *vcpu)
+static void emulator_set_cached_descriptor(struct desc_struct *desc, u32 base3,
+					   int seg, struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment var;
 
@@ -4197,6 +4201,9 @@ static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
 	kvm_get_segment(vcpu, &var, seg);
 
 	var.base = get_desc_base(desc);
+#ifdef CONFIG_X86_64
+	var.base |= ((u64)base3) << 32;
+#endif
 	var.limit = get_desc_limit(desc);
 	if (desc->g)
 		var.limit = (var.limit << 12) | 0xfff;