author		Gleb Natapov <gleb@redhat.com>	2009-08-24 11:54:24 +0300
committer	Avi Kivity <avi@redhat.com>	2009-12-03 09:32:08 +0200
commit		280aa177dcd1edc718d8a92f17f235b783ec6307 (patch)
tree		63597a9ffd8cf72571986584aee82c067b5b684d /virt/kvm/irq_comm.c
parent		136bdfeee7b5bc986fc94af3a40d7d13ea37bb95 (diff)
KVM: Convert irq notifiers lists to RCU locking
Use RCU locking for the mask/ack notifier lists.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
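
The hunks below apply the standard RCU-protected list pattern: readers walk the notifier lists under rcu_read_lock() with the _rcu iterator, while writers still serialize against each other with kvm->irq_lock, publish with the _rcu add/del helpers, and wait out readers with synchronize_rcu() after unlinking. The following is a minimal, self-contained sketch of that pattern (not part of the commit); the struct and function names (my_notifier, my_fire_notifiers, my_lock, ...) are invented for illustration, while the list and RCU helpers are the real <linux/rculist.h> / <linux/rcupdate.h> APIs, using the four-argument hlist_for_each_entry_rcu() form of kernels from this era.

/*
 * Sketch of the locking scheme this patch adopts (illustrative names,
 * real kernel APIs).
 */
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct my_notifier {
	int irq;
	void (*func)(struct my_notifier *n);
	struct hlist_node link;
};

static HLIST_HEAD(my_notifier_list);
static DEFINE_MUTEX(my_lock);

/* Reader: may run concurrently with registration/unregistration. */
static void my_fire_notifiers(int irq)
{
	struct my_notifier *n;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(n, pos, &my_notifier_list, link)
		if (n->irq == irq)
			n->func(n);
	rcu_read_unlock();
}

/* Writers: serialized by the mutex, publish/retire entries with RCU. */
static void my_register_notifier(struct my_notifier *n)
{
	mutex_lock(&my_lock);
	hlist_add_head_rcu(&n->link, &my_notifier_list);
	mutex_unlock(&my_lock);
}

static void my_unregister_notifier(struct my_notifier *n)
{
	mutex_lock(&my_lock);
	hlist_del_rcu(&n->link);
	mutex_unlock(&my_lock);
	synchronize_rcu();	/* no reader still sees n; caller may free it */
}

The synchronize_rcu() after unlinking is what lets the caller safely free or reuse the notifier, and moving the read side from the mutex to rcu_read_lock() keeps the hot notification paths (fired on interrupt ack/mask) free of lock contention.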
Diffstat (limited to 'virt/kvm/irq_comm.c')
-rw-r--r--	virt/kvm/irq_comm.c	22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index f01972595938..6c946141dbcc 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -183,19 +183,19 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 
 	rcu_read_lock();
 	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
-	rcu_read_unlock();
-
 	if (gsi != -1)
-		hlist_for_each_entry(kian, n, &kvm->irq_ack_notifier_list, link)
+		hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+					 link)
 			if (kian->gsi == gsi)
 				kian->irq_acked(kian);
+	rcu_read_unlock();
 }
 
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian)
 {
 	mutex_lock(&kvm->irq_lock);
-	hlist_add_head(&kian->link, &kvm->irq_ack_notifier_list);
+	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
 	mutex_unlock(&kvm->irq_lock);
 }
 
@@ -203,8 +203,9 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 				     struct kvm_irq_ack_notifier *kian)
 {
 	mutex_lock(&kvm->irq_lock);
-	hlist_del_init(&kian->link);
+	hlist_del_init_rcu(&kian->link);
 	mutex_unlock(&kvm->irq_lock);
+	synchronize_rcu();
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
@@ -257,7 +258,7 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
 {
 	mutex_lock(&kvm->irq_lock);
 	kimn->irq = irq;
-	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
+	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
 	mutex_unlock(&kvm->irq_lock);
 }
 
@@ -265,8 +266,9 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn)
 {
 	mutex_lock(&kvm->irq_lock);
-	hlist_del(&kimn->link);
+	hlist_del_rcu(&kimn->link);
 	mutex_unlock(&kvm->irq_lock);
+	synchronize_rcu();
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
@@ -274,11 +276,11 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 	struct kvm_irq_mask_notifier *kimn;
 	struct hlist_node *n;
 
-	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
-
-	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
 		if (kimn->irq == irq)
 			kimn->func(kimn, mask);
+	rcu_read_unlock();
 }
 
 void kvm_free_irq_routing(struct kvm *kvm)