author     Marc Zyngier <marc.zyngier@arm.com>    2014-06-23 17:37:18 +0100
committer  Marc Zyngier <marc.zyngier@arm.com>    2015-08-12 11:28:25 +0100
commit     6c3d63c9a26ba56e2ca63a9f68d52f77ae551d91
tree       3a324cd188bc07c3978dfe64ed634beca9437d96 /virt/kvm
parent     7a67b4b7e04b30345584c1b76a80db628530bf21
KVM: arm/arm64: vgic: Allow dynamic mapping of physical/virtual interrupts
In order to be able to feed physical interrupts to a guest, we need
to be able to establish the virtual-physical mapping between the two
worlds.
The mappings are kept in a set of RCU lists, indexed by virtual interrupts.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
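
The new API surfaces as a pair of calls, kvm_vgic_map_phys_irq() and kvm_vgic_unmap_phys_irq(), whose implementations appear in the diff below (their declarations live in a header outside the 'virt/kvm' diffstat shown here). As an illustration only, not part of this commit, a caller wiring a host interrupt to a guest-visible one might use them roughly as in the sketch below; the helper names and the WARN_ON policy are assumptions, and only the two vgic calls and their error-pointer/return conventions come from the patch.

/*
 * Hypothetical usage sketch (not part of this patch). Assumes
 * <kvm/arm_vgic.h> provides the vgic mapping API declarations.
 * The example_* helper names are made up for illustration.
 */
#include <linux/kernel.h>
#include <linux/err.h>
#include <kvm/arm_vgic.h>

static int example_map_device_irq(struct kvm_vcpu *vcpu, int virt_irq,
				  int host_irq, struct irq_phys_map **out)
{
	struct irq_phys_map *map;

	/*
	 * Establish the virtual<->physical link. Per the kerneldoc in
	 * the patch, calling this again with identical parameters is
	 * fine and returns the already-established mapping.
	 */
	map = kvm_vgic_map_phys_irq(vcpu, virt_irq, host_irq);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*out = map;	/* keep the cookie for the later unmap */
	return 0;
}

static void example_unmap_device_irq(struct kvm_vcpu *vcpu,
				     struct irq_phys_map *map)
{
	/* Tear down the mapping using the cookie returned at map time. */
	WARN_ON(kvm_vgic_unmap_phys_irq(vcpu, map));
}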
Diffstat (limited to 'virt/kvm')
-rw-r--r--   virt/kvm/arm/vgic.c | 209
1 file changed, 208 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 5bd1695845a3..51c990075624 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -24,6 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/rculist.h>
 #include <linux/uaccess.h>
 
 #include <asm/kvm_emulate.h>
@@ -74,6 +75,28 @@
  * cause the interrupt to become inactive in such a situation.
  * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
  * inactive as long as the external input line is held high.
+ *
+ *
+ * Initialization rules: there are multiple stages to the vgic
+ * initialization, both for the distributor and the CPU interfaces.
+ *
+ * Distributor:
+ *
+ * - kvm_vgic_early_init(): initialization of static data that doesn't
+ *   depend on any sizing information or emulation type. No allocation
+ *   is allowed there.
+ *
+ * - vgic_init(): allocation and initialization of the generic data
+ *   structures that depend on sizing information (number of CPUs,
+ *   number of interrupts). Also initializes the vcpu specific data
+ *   structures. Can be executed lazily for GICv2.
+ *   [to be renamed to kvm_vgic_init??]
+ *
+ * CPU Interface:
+ *
+ * - kvm_vgic_cpu_early_init(): initialization of static data that
+ *   doesn't depend on any sizing information or emulation type. No
+ *   allocation is allowed there.
  */
 
 #include "vgic.h"
@@ -82,6 +105,8 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
+						int virt_irq);
 
 static const struct vgic_ops *vgic_ops;
 static const struct vgic_params *vgic;
@@ -1583,6 +1608,164 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
+						    int virt_irq)
+{
+	if (virt_irq < VGIC_NR_PRIVATE_IRQS)
+		return &vcpu->arch.vgic_cpu.irq_phys_map_list;
+	else
+		return &vcpu->kvm->arch.vgic.irq_phys_map_list;
+}
+
+/**
+ * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
+ * @vcpu: The VCPU pointer
+ * @virt_irq: The virtual irq number
+ * @irq: The Linux IRQ number
+ *
+ * Establish a mapping between a guest visible irq (@virt_irq) and a
+ * Linux irq (@irq). On injection, @virt_irq will be associated with
+ * the physical interrupt represented by @irq. This mapping can be
+ * established multiple times as long as the parameters are the same.
+ *
+ * Returns a valid pointer on success, and an error pointer otherwise
+ */
+struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
+					   int virt_irq, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
+	struct irq_phys_map *map;
+	struct irq_phys_map_entry *entry;
+	struct irq_desc *desc;
+	struct irq_data *data;
+	int phys_irq;
+
+	desc = irq_to_desc(irq);
+	if (!desc) {
+		kvm_err("%s: no interrupt descriptor\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	data = irq_desc_get_irq_data(desc);
+	while (data->parent_data)
+		data = data->parent_data;
+
+	phys_irq = data->hwirq;
+
+	/* Create a new mapping */
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock(&dist->irq_phys_map_lock);
+
+	/* Try to match an existing mapping */
+	map = vgic_irq_map_search(vcpu, virt_irq);
+	if (map) {
+		/* Make sure this mapping matches */
+		if (map->phys_irq != phys_irq ||
+		    map->irq != irq)
+			map = ERR_PTR(-EINVAL);
+
+		/* Found an existing, valid mapping */
+		goto out;
+	}
+
+	map = &entry->map;
+	map->virt_irq = virt_irq;
+	map->phys_irq = phys_irq;
+	map->irq = irq;
+
+	list_add_tail_rcu(&entry->entry, root);
+
+out:
+	spin_unlock(&dist->irq_phys_map_lock);
+	/* If we've found a hit in the existing list, free the useless
+	 * entry */
+	if (IS_ERR(map) || map != &entry->map)
+		kfree(entry);
+	return map;
+}
+
+static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
+						int virt_irq)
+{
+	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
+	struct irq_phys_map_entry *entry;
+	struct irq_phys_map *map;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(entry, root, entry) {
+		map = &entry->map;
+		if (map->virt_irq == virt_irq) {
+			rcu_read_unlock();
+			return map;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return NULL;
+}
+
+static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
+{
+	struct irq_phys_map_entry *entry;
+
+	entry = container_of(rcu, struct irq_phys_map_entry, rcu);
+	kfree(entry);
+}
+
+/**
+ * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
+ * @vcpu: The VCPU pointer
+ * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
+ *
+ * Remove an existing mapping between virtual and physical interrupts.
+ */
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	struct irq_phys_map_entry *entry;
+	struct list_head *root;
+
+	if (!map)
+		return -EINVAL;
+
+	root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
+
+	spin_lock(&dist->irq_phys_map_lock);
+
+	list_for_each_entry(entry, root, entry) {
+		if (&entry->map == map) {
+			list_del_rcu(&entry->entry);
+			call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
+			break;
+		}
+	}
+
+	spin_unlock(&dist->irq_phys_map_lock);
+
+	return 0;
+}
+
+static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct irq_phys_map_entry *entry;
+
+	spin_lock(&dist->irq_phys_map_lock);
+
+	list_for_each_entry(entry, root, entry) {
+		list_del_rcu(&entry->entry);
+		call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
+	}
+
+	spin_unlock(&dist->irq_phys_map_lock);
+}
+
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1591,6 +1774,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kfree(vgic_cpu->active_shared);
 	kfree(vgic_cpu->pend_act_shared);
 	kfree(vgic_cpu->vgic_irq_lr_map);
+	vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
 	vgic_cpu->pending_shared = NULL;
 	vgic_cpu->active_shared = NULL;
 	vgic_cpu->pend_act_shared = NULL;
@@ -1628,6 +1812,17 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 }
 
 /**
+ * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
+ *
+ * No memory allocation should be performed here, only static init.
+ */
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
+}
+
+/**
  * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
  *
  * The host's GIC naturally limits the maximum amount of VCPUs a guest
@@ -1664,6 +1859,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
 	kfree(dist->irq_spi_target);
 	kfree(dist->irq_pending_on_cpu);
 	kfree(dist->irq_active_on_cpu);
+	vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
 	dist->irq_sgi_sources = NULL;
 	dist->irq_spi_cpu = NULL;
 	dist->irq_spi_target = NULL;
@@ -1787,6 +1983,18 @@ static int init_vgic_model(struct kvm *kvm, int type)
 	return 0;
 }
 
+/**
+ * kvm_vgic_early_init - Earliest possible vgic initialization stage
+ *
+ * No memory allocation should be performed here, only static init.
+ */
+void kvm_vgic_early_init(struct kvm *kvm)
+{
+	spin_lock_init(&kvm->arch.vgic.lock);
+	spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
+	INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
+}
+
 int kvm_vgic_create(struct kvm *kvm, u32 type)
 {
 	int i, vcpu_lock_idx = -1, ret;
@@ -1832,7 +2040,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	if (ret)
 		goto out_unlock;
 
-	spin_lock_init(&kvm->arch.vgic.lock);
 	kvm->arch.vgic.in_kernel = true;
 	kvm->arch.vgic.vgic_model = type;
 	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
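
The diffstat above is limited to 'virt/kvm', so the definitions of struct irq_phys_map and struct irq_phys_map_entry, which live in a header touched elsewhere by the same commit, are not shown here. Purely as a sketch inferred from how the hunks above use them (container_of() on the rcu field, &entry->map, &entry->entry, map->virt_irq/phys_irq/irq), their shape is roughly the following; the real header may use different field types or carry additional fields.

/*
 * Inferred illustration only; not copied from the commit.
 */
struct irq_phys_map {
	int			virt_irq;	/* guest-visible interrupt number */
	int			phys_irq;	/* underlying hwirq number */
	int			irq;		/* Linux IRQ number */
};

struct irq_phys_map_entry {
	struct list_head	entry;		/* on the per-vcpu or per-distributor list */
	struct rcu_head		rcu;		/* deferred kfree() via call_rcu() */
	struct irq_phys_map	map;
};

The locking discipline visible in the diff is the usual RCU-list pattern: lookups in vgic_irq_map_search() walk the list under rcu_read_lock(), while insertions and removals are serialized by dist->irq_phys_map_lock, with frees deferred through call_rcu().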