Diffstat (limited to 'drivers/s390/crypto/vfio_ap_ops.c')
-rw-r--r-- | drivers/s390/crypto/vfio_ap_ops.c | 140
1 file changed, 109 insertions, 31 deletions
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index a52c2690933f..766557547f83 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -650,13 +650,22 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
         matrix->adm_max = info->apxa ? info->nd : 15;
 }
 
+static void signal_guest_ap_cfg_changed(struct ap_matrix_mdev *matrix_mdev)
+{
+        if (matrix_mdev->cfg_chg_trigger)
+                eventfd_signal(matrix_mdev->cfg_chg_trigger);
+}
+
 static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
 {
-        if (matrix_mdev->kvm)
+        if (matrix_mdev->kvm) {
                 kvm_arch_crypto_set_masks(matrix_mdev->kvm,
                                           matrix_mdev->shadow_apcb.apm,
                                           matrix_mdev->shadow_apcb.aqm,
                                           matrix_mdev->shadow_apcb.adm);
+
+                signal_guest_ap_cfg_changed(matrix_mdev);
+        }
 }
 
 static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
@@ -792,6 +801,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
         if (ret)
                 goto err_put_vdev;
         matrix_mdev->req_trigger = NULL;
+        matrix_mdev->cfg_chg_trigger = NULL;
         dev_set_drvdata(&mdev->dev, matrix_mdev);
         mutex_lock(&matrix_dev->mdevs_lock);
         list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
@@ -863,48 +873,66 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
         vfio_put_device(&matrix_mdev->vdev);
 }
 
-#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
-                         "already assigned to %s"
+#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s"
+
+#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev"
 
-static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
-                                         unsigned long *apm,
-                                         unsigned long *aqm)
+static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee,
+                                         struct ap_matrix_mdev *assigned_to,
+                                         unsigned long *apm, unsigned long *aqm)
 {
         unsigned long apid, apqi;
-        const struct device *dev = mdev_dev(matrix_mdev->mdev);
-        const char *mdev_name = dev_name(dev);
 
-        for_each_set_bit_inv(apid, apm, AP_DEVICES)
+        for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+                for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+                        dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR,
+                                 apid, apqi, dev_name(mdev_dev(assigned_to->mdev)));
+                }
+        }
+}
+
+static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee,
+                                        unsigned long *apm, unsigned long *aqm)
+{
+        unsigned long apid, apqi;
+
+        for_each_set_bit_inv(apid, apm, AP_DEVICES) {
                 for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
-                        dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
+                        dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi);
+        }
 }
 
 /**
  * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
  *
+ * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being
+ *            assigned; or, NULL if this function was called by the AP bus
+ *            driver in_use callback to verify none of the APQNs being reserved
+ *            for the host device driver are in use by a vfio_ap mediated device
  * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
  * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
  *
- * Verifies that each APQN derived from the Cartesian product of a bitmap of
- * AP adapter IDs and AP queue indexes is not configured for any matrix
- * mediated device. AP queue sharing is not allowed.
+ * Verifies that each APQN derived from the Cartesian product of APIDs
+ * represented by the bits set in @mdev_apm and the APQIs of the bits set in
+ * @mdev_aqm is not assigned to a mediated device other than the mdev to which
+ * the APQN is being assigned (@assignee). AP queue sharing is not allowed.
  *
  * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
  */
-static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
+static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee,
+                                          unsigned long *mdev_apm,
                                           unsigned long *mdev_aqm)
 {
-        struct ap_matrix_mdev *matrix_mdev;
+        struct ap_matrix_mdev *assigned_to;
         DECLARE_BITMAP(apm, AP_DEVICES);
         DECLARE_BITMAP(aqm, AP_DOMAINS);
 
-        list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+        list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) {
                 /*
-                 * If the input apm and aqm are fields of the matrix_mdev
-                 * object, then move on to the next matrix_mdev.
+                 * If the mdev to which the mdev_apm and mdev_aqm is being
+                 * assigned is the same as the mdev being verified
                  */
-                if (mdev_apm == matrix_mdev->matrix.apm &&
-                    mdev_aqm == matrix_mdev->matrix.aqm)
+                if (assignee == assigned_to)
                         continue;
 
                 memset(apm, 0, sizeof(apm));
@@ -914,15 +942,16 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
                  * We work on full longs, as we can only exclude the leftover
                  * bits in non-inverse order. The leftover is all zeros.
                  */
-                if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
-                                AP_DEVICES))
+                if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES))
                         continue;
 
-                if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
-                                AP_DOMAINS))
+                if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS))
                         continue;
 
-                vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
+                if (assignee)
+                        vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm);
+                else
+                        vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm);
 
                 return -EADDRINUSE;
         }
@@ -951,7 +980,8 @@ static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
                                                matrix_mdev->matrix.aqm))
                 return -EADDRNOTAVAIL;
 
-        return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
+        return vfio_ap_mdev_verify_no_sharing(matrix_mdev,
+                                              matrix_mdev->matrix.apm,
                                               matrix_mdev->matrix.aqm);
 }
 
@@ -2046,6 +2076,13 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
 
         matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);
 
+        get_update_locks_for_mdev(matrix_mdev);
+
+        if (matrix_mdev->kvm) {
+                kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+                signal_guest_ap_cfg_changed(matrix_mdev);
+        }
+
         if (matrix_mdev->req_trigger) {
                 if (!(count % 10))
                         dev_notice_ratelimited(dev,
@@ -2057,6 +2094,8 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
                 dev_notice(dev,
                            "No device request registered, blocked until released by user\n");
         }
+
+        release_update_locks_for_mdev(matrix_mdev);
 }
 
 static int vfio_ap_mdev_get_device_info(unsigned long arg)
@@ -2097,6 +2136,10 @@ static ssize_t vfio_ap_get_irq_info(unsigned long arg)
                 info.count = 1;
                 info.flags = VFIO_IRQ_INFO_EVENTFD;
                 break;
+        case VFIO_AP_CFG_CHG_IRQ_INDEX:
+                info.count = 1;
+                info.flags = VFIO_IRQ_INFO_EVENTFD;
+                break;
         default:
                 return -EINVAL;
         }
@@ -2160,6 +2203,39 @@ static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
         return 0;
 }
 
+static int vfio_ap_set_cfg_change_irq(struct ap_matrix_mdev *matrix_mdev, unsigned long arg)
+{
+        s32 fd;
+        void __user *data;
+        unsigned long minsz;
+        struct eventfd_ctx *cfg_chg_trigger;
+
+        minsz = offsetofend(struct vfio_irq_set, count);
+        data = (void __user *)(arg + minsz);
+
+        if (get_user(fd, (s32 __user *)data))
+                return -EFAULT;
+
+        if (fd == -1) {
+                if (matrix_mdev->cfg_chg_trigger)
+                        eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
+                matrix_mdev->cfg_chg_trigger = NULL;
+        } else if (fd >= 0) {
+                cfg_chg_trigger = eventfd_ctx_fdget(fd);
+                if (IS_ERR(cfg_chg_trigger))
+                        return PTR_ERR(cfg_chg_trigger);
+
+                if (matrix_mdev->cfg_chg_trigger)
+                        eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
+
+                matrix_mdev->cfg_chg_trigger = cfg_chg_trigger;
+        } else {
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
 static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
                             unsigned long arg)
 {
@@ -2175,6 +2251,8 @@ static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
         switch (irq_set.index) {
         case VFIO_AP_REQ_IRQ_INDEX:
                 return vfio_ap_set_request_irq(matrix_mdev, arg);
+        case VFIO_AP_CFG_CHG_IRQ_INDEX:
+                return vfio_ap_set_cfg_change_irq(matrix_mdev, arg);
         default:
                 return -EINVAL;
         }
@@ -2199,8 +2277,8 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
                 ret = vfio_ap_mdev_reset_queues(matrix_mdev);
                 break;
         case VFIO_DEVICE_GET_IRQ_INFO:
-                        ret = vfio_ap_get_irq_info(arg);
-                        break;
+                ret = vfio_ap_get_irq_info(arg);
+                break;
         case VFIO_DEVICE_SET_IRQS:
                 ret = vfio_ap_set_irqs(matrix_mdev, arg);
                 break;
@@ -2316,10 +2394,10 @@ int vfio_ap_mdev_register(void)
 
         matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
         matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
-        matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
+        matrix_dev->mdev_types = &matrix_dev->mdev_type;
         ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
                                    &vfio_ap_matrix_driver,
-                                   matrix_dev->mdev_types, 1);
+                                   &matrix_dev->mdev_types, 1);
         if (ret)
                 goto err_driver;
         return 0;
@@ -2458,7 +2536,7 @@ int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
 
         mutex_lock(&matrix_dev->guests_lock);
         mutex_lock(&matrix_dev->mdevs_lock);
-        ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
+        ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm);
         mutex_unlock(&matrix_dev->mdevs_lock);
         mutex_unlock(&matrix_dev->guests_lock);
 
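
The new VFIO_AP_CFG_CHG_IRQ_INDEX trigger only does anything once userspace registers an eventfd for it. The sketch below is not part of this diff: register_cfg_chg_eventfd() and device_fd are hypothetical names, and VFIO_AP_CFG_CHG_IRQ_INDEX is assumed to come from the companion uapi header change (not shown in this file's diff). It follows the layout vfio_ap_set_cfg_change_irq() above expects: a single s32 file descriptor at the start of the vfio_irq_set data payload, with -1 tearing the trigger back down.

/*
 * Minimal userspace sketch (assumptions noted above): create an eventfd and
 * register it as the AP config-change trigger via VFIO_DEVICE_SET_IRQS on an
 * open vfio-ap mdev device fd. Returns the eventfd on success, -errno on error.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

static int register_cfg_chg_eventfd(int device_fd)
{
        struct vfio_irq_set *irq_set;
        size_t argsz = sizeof(*irq_set) + sizeof(int32_t);
        int32_t fd;
        int efd, ret, err;

        efd = eventfd(0, EFD_CLOEXEC);
        if (efd < 0)
                return -errno;

        irq_set = calloc(1, argsz);
        if (!irq_set) {
                close(efd);
                return -ENOMEM;
        }

        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_AP_CFG_CHG_IRQ_INDEX;     /* from the companion uapi change */
        irq_set->start = 0;
        irq_set->count = 1;
        fd = efd;                                       /* -1 here would clear the trigger */
        memcpy(irq_set->data, &fd, sizeof(fd));

        ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
        err = errno;
        free(irq_set);
        if (ret < 0) {
                close(efd);
                return -err;
        }

        return efd;     /* poll()/read() this fd to learn about AP config changes */
}

With the eventfd registered, every call to vfio_ap_mdev_update_guest_apcb(), as well as the mask-clearing path added to vfio_ap_mdev_request(), ends up in signal_guest_ap_cfg_changed(), so a wakeup on the returned descriptor tells the VMM to re-read the guest's AP configuration.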