Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_verbs.c')
-rw-r--r-- | drivers/infiniband/sw/rxe/rxe_verbs.c | 981 |
1 file changed, 685 insertions(+), 296 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index e14050a69276..dea605b7f683 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -12,31 +12,48 @@
 #include "rxe_queue.h"
 #include "rxe_hw_counters.h"
 
-static int rxe_query_device(struct ib_device *dev,
+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
+
+/* dev */
+static int rxe_query_device(struct ib_device *ibdev,
 			    struct ib_device_attr *attr,
-			    struct ib_udata *uhw)
+			    struct ib_udata *udata)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	int err;
 
-	if (uhw->inlen || uhw->outlen)
-		return -EINVAL;
+	if (udata->inlen || udata->outlen) {
+		rxe_dbg_dev(rxe, "malformed udata");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	memcpy(attr, &rxe->attr, sizeof(*attr));
 
-	*attr = rxe->attr;
 	return 0;
+
+err_out:
+	rxe_err_dev(rxe, "returned err = %d", err);
+	return err;
 }
 
-static int rxe_query_port(struct ib_device *dev,
+static int rxe_query_port(struct ib_device *ibdev,
 			  u32 port_num, struct ib_port_attr *attr)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
-	int rc;
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	int err, ret;
+
+	if (port_num != 1) {
+		err = -EINVAL;
+		rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+		goto err_out;
+	}
 
-	/* *attr being zeroed by the caller, avoid zeroing it here */
-	*attr = rxe->port.attr;
+	memcpy(attr, &rxe->port.attr, sizeof(*attr));
 
 	mutex_lock(&rxe->usdev_lock);
-	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
-			      &attr->active_width);
+	ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
+			       &attr->active_width);
 
 	if (attr->state == IB_PORT_ACTIVE)
 		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
@@ -47,27 +64,45 @@ static int rxe_query_port(struct ib_device *dev,
 
 	mutex_unlock(&rxe->usdev_lock);
 
-	return rc;
+	return ret;
+
+err_out:
+	rxe_err_dev(rxe, "returned err = %d", err);
+	return err;
 }
 
-static int rxe_query_pkey(struct ib_device *device,
+static int rxe_query_pkey(struct ib_device *ibdev,
 			  u32 port_num, u16 index, u16 *pkey)
 {
-	if (index > 0)
-		return -EINVAL;
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	int err;
+
+	if (index != 0) {
+		err = -EINVAL;
+		rxe_dbg_dev(rxe, "bad pkey index = %d", index);
+		goto err_out;
+	}
 
 	*pkey = IB_DEFAULT_PKEY_FULL;
 	return 0;
+
+err_out:
+	rxe_err_dev(rxe, "returned err = %d", err);
+	return err;
 }
 
-static int rxe_modify_device(struct ib_device *dev,
+static int rxe_modify_device(struct ib_device *ibdev,
 			     int mask, struct ib_device_modify *attr)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	int err;
 
 	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
-		     IB_DEVICE_MODIFY_NODE_DESC))
-		return -EOPNOTSUPP;
+		     IB_DEVICE_MODIFY_NODE_DESC)) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
+		goto err_out;
+	}
 
 	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
 		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
@@ -78,16 +113,33 @@ static int rxe_modify_device(struct ib_device *dev,
 	}
 
 	return 0;
+
+err_out:
+	rxe_err_dev(rxe, "returned err = %d", err);
+	return err;
 }
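
Note: the hunks above establish the error-handling convention applied throughout the rest of this patch: validate arguments up front, emit a quiet rxe_dbg_*() message at the failing check, then branch to a single err_out label that logs once at error level and returns. A minimal sketch of the shape — the verb name and its argument below are illustrative placeholders, not rxe symbols:

	static int rxe_example_verb(struct rxe_dev *rxe, int arg)
	{
		int err;

		/* cheap sanity checks come first, each logging at debug level */
		if (arg != 1) {
			err = -EINVAL;
			rxe_dbg_dev(rxe, "bad arg = %d", arg);
			goto err_out;
		}

		return 0;	/* the success path returns directly */

	err_out:
		/* exactly one error-level breadcrumb per failed call */
		rxe_err_dev(rxe, "returned err = %d", err);
		return err;
	}

The two-level scheme keeps per-check messages at debug verbosity while still leaving an unconditional error-level record of every failed verb.
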
port_num = %d", port_num); + goto err_out; + } + //TODO is shutdown useful + if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) { + err = -EOPNOTSUPP; + rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask); + goto err_out; + } + + port = &rxe->port; port->attr.port_cap_flags |= attr->set_port_cap_mask; port->attr.port_cap_flags &= ~attr->clr_port_cap_mask; @@ -95,73 +147,125 @@ static int rxe_modify_port(struct ib_device *dev, port->attr.qkey_viol_cntr = 0; return 0; -} -static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev, - u32 port_num) -{ - return IB_LINK_LAYER_ETHERNET; +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } -static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata) +static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev, + u32 port_num) { - struct rxe_dev *rxe = to_rdev(ibuc->device); - struct rxe_ucontext *uc = to_ruc(ibuc); + struct rxe_dev *rxe = to_rdev(ibdev); + int err; - return rxe_add_to_pool(&rxe->uc_pool, uc); -} + if (port_num != 1) { + err = -EINVAL; + rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + goto err_out; + } -static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc) -{ - struct rxe_ucontext *uc = to_ruc(ibuc); + return IB_LINK_LAYER_ETHERNET; - rxe_cleanup(uc); +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } -static int rxe_port_immutable(struct ib_device *dev, u32 port_num, +static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { + struct rxe_dev *rxe = to_rdev(ibdev); + struct ib_port_attr attr = {}; int err; - struct ib_port_attr attr; - immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + if (port_num != 1) { + err = -EINVAL; + rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + goto err_out; + } - err = ib_query_port(dev, port_num, &attr); + err = ib_query_port(ibdev, port_num, &attr); if (err) - return err; + goto err_out; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; immutable->max_mad_size = IB_MGMT_MAD_SIZE; return 0; + +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } +/* uc */ +static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata) +{ + struct rxe_dev *rxe = to_rdev(ibuc->device); + struct rxe_ucontext *uc = to_ruc(ibuc); + int err; + + err = rxe_add_to_pool(&rxe->uc_pool, uc); + if (err) + rxe_err_dev(rxe, "unable to create uc"); + + return err; +} + +static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc) +{ + struct rxe_ucontext *uc = to_ruc(ibuc); + int err; + + err = rxe_cleanup(uc); + if (err) + rxe_err_uc(uc, "cleanup failed, err = %d", err); +} + +/* pd */ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); + int err; - return rxe_add_to_pool(&rxe->pd_pool, pd); + err = rxe_add_to_pool(&rxe->pd_pool, pd); + if (err) { + rxe_dbg_dev(rxe, "unable to alloc pd"); + goto err_out; + } + + return 0; + +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rxe_pd *pd = to_rpd(ibpd); + int err; + + err = rxe_cleanup(pd); + if (err) + rxe_err_pd(pd, "cleanup failed, err = %d", err); - rxe_cleanup(pd); return 0; } +/* ah */ static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) - { struct rxe_dev *rxe 
 
+/* ah */
 static int rxe_create_ah(struct ib_ah *ibah,
 			 struct rdma_ah_init_attr *init_attr,
 			 struct ib_udata *udata)
-
 {
 	struct rxe_dev *rxe = to_rdev(ibah->device);
 	struct rxe_ah *ah = to_rah(ibah);
 	struct rxe_create_ah_resp __user *uresp = NULL;
-	int err;
+	int err, cleanup_err;
 
 	if (udata) {
 		/* test if new user provider */
@@ -174,16 +278,18 @@ static int rxe_create_ah(struct ib_ah *ibah,
 
 	err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
 			init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create ah");
+		goto err_out;
+	}
 
 	/* create index > 0 */
 	ah->ah_num = ah->elem.index;
 
 	err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
 	if (err) {
-		rxe_cleanup(ah);
-		return err;
+		rxe_dbg_ah(ah, "bad attr");
+		goto err_cleanup;
 	}
 
 	if (uresp) {
@@ -191,8 +297,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
 		err = copy_to_user(&uresp->ah_num, &ah->ah_num,
 					 sizeof(uresp->ah_num));
 		if (err) {
-			rxe_cleanup(ah);
-			return -EFAULT;
+			err = -EFAULT;
+			rxe_dbg_ah(ah, "unable to copy to user");
+			goto err_cleanup;
 		}
 	} else if (ah->is_user) {
 		/* only if old user provider */
@@ -203,19 +310,34 @@ static int rxe_create_ah(struct ib_ah *ibah,
 	rxe_finalize(ah);
 
 	return 0;
+
+err_cleanup:
+	cleanup_err = rxe_cleanup(ah);
+	if (cleanup_err)
+		rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err);
+err_out:
+	rxe_err_ah(ah, "returned err = %d", err);
+	return err;
 }
 
 static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
 {
-	int err;
 	struct rxe_ah *ah = to_rah(ibah);
+	int err;
 
 	err = rxe_ah_chk_attr(ah, attr);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_ah(ah, "bad attr");
+		goto err_out;
+	}
 
 	rxe_init_av(attr, &ah->av);
+
 	return 0;
+
+err_out:
+	rxe_err_ah(ah, "returned err = %d", err);
+	return err;
 }
 
 static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
@@ -225,92 +347,77 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
 	memset(attr, 0, sizeof(*attr));
 	attr->type = ibah->type;
 	rxe_av_to_attr(&ah->av, attr);
+
 	return 0;
 }
 
 static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
 {
 	struct rxe_ah *ah = to_rah(ibah);
+	int err;
 
-	rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
-
-	return 0;
-}
-
-static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
-{
-	int i;
-	u32 length;
-	struct rxe_recv_wqe *recv_wqe;
-	int num_sge = ibwr->num_sge;
-	int full;
-
-	full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
-	if (unlikely(full))
-		return -ENOMEM;
-
-	if (unlikely(num_sge > rq->max_sge))
-		return -EINVAL;
-
-	length = 0;
-	for (i = 0; i < num_sge; i++)
-		length += ibwr->sg_list[i].length;
-
-	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
-	recv_wqe->wr_id = ibwr->wr_id;
-
-	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
-	       num_sge * sizeof(struct ib_sge));
-
-	recv_wqe->dma.length = length;
-	recv_wqe->dma.resid = length;
-	recv_wqe->dma.num_sge = num_sge;
-	recv_wqe->dma.cur_sge = 0;
-	recv_wqe->dma.sge_offset = 0;
-
-	queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
+	err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
+	if (err)
+		rxe_err_ah(ah, "cleanup failed, err = %d", err);
 
 	return 0;
 }
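
Note: the rxe_create_ah() hunk above leans on the fact that copy_to_user() returns the number of bytes it could not copy — never a negative errno — so a nonzero result has to be remapped to -EFAULT by hand before unwinding. The idiom in isolation, reusing uresp and ah from the function above:

	if (copy_to_user(&uresp->ah_num, &ah->ah_num, sizeof(uresp->ah_num))) {
		err = -EFAULT;		/* copy_to_user() reports bytes-not-copied */
		goto err_cleanup;	/* undo rxe_add_to_pool_ah() */
	}
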
 
+/* srq */
 static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 			  struct ib_udata *udata)
 {
-	int err;
 	struct rxe_dev *rxe = to_rdev(ibsrq->device);
 	struct rxe_pd *pd = to_rpd(ibsrq->pd);
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 	struct rxe_create_srq_resp __user *uresp = NULL;
+	int err, cleanup_err;
 
 	if (udata) {
-		if (udata->outlen < sizeof(*uresp))
-			return -EINVAL;
+		if (udata->outlen < sizeof(*uresp)) {
+			err = -EINVAL;
+			rxe_err_dev(rxe, "malformed udata");
+			goto err_out;
+		}
 		uresp = udata->outbuf;
 	}
 
-	if (init->srq_type != IB_SRQT_BASIC)
-		return -EOPNOTSUPP;
+	if (init->srq_type != IB_SRQT_BASIC) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "srq type = %d, not supported",
+			    init->srq_type);
+		goto err_out;
+	}
 
 	err = rxe_srq_chk_init(rxe, init);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_dev(rxe, "invalid init attributes");
+		goto err_out;
+	}
 
 	err = rxe_add_to_pool(&rxe->srq_pool, srq);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create srq, err = %d", err);
+		goto err_out;
+	}
 
 	rxe_get(pd);
 	srq->pd = pd;
 
 	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
-	if (err)
+	if (err) {
+		rxe_dbg_srq(srq, "create srq failed, err = %d", err);
 		goto err_cleanup;
+	}
 
 	return 0;
 
 err_cleanup:
-	rxe_cleanup(srq);
-
+	cleanup_err = rxe_cleanup(srq);
+	if (cleanup_err)
+		rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err);
+err_out:
+	rxe_err_dev(rxe, "returned err = %d", err);
 	return err;
 }
 
@@ -318,46 +425,64 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			  enum ib_srq_attr_mask mask,
 			  struct ib_udata *udata)
 {
-	int err;
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 	struct rxe_dev *rxe = to_rdev(ibsrq->device);
-	struct rxe_modify_srq_cmd ucmd = {};
+	struct rxe_modify_srq_cmd cmd = {};
+	int err;
 
 	if (udata) {
-		if (udata->inlen < sizeof(ucmd))
-			return -EINVAL;
+		if (udata->inlen < sizeof(cmd)) {
+			err = -EINVAL;
+			rxe_dbg_srq(srq, "malformed udata");
+			goto err_out;
+		}
 
-		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
-		if (err)
-			return err;
+		err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
+		if (err) {
+			err = -EFAULT;
+			rxe_dbg_srq(srq, "unable to read udata");
+			goto err_out;
+		}
 	}
 
 	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_srq(srq, "bad init attributes");
+		goto err_out;
+	}
+
+	err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
+	if (err) {
+		rxe_dbg_srq(srq, "bad attr");
+		goto err_out;
+	}
+
+	return 0;
 
-	return rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
+err_out:
+	rxe_err_srq(srq, "returned err = %d", err);
+	return err;
 }
 
 static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 {
 	struct rxe_srq *srq = to_rsrq(ibsrq);
+	int err;
 
-	if (srq->error)
-		return -EINVAL;
+	if (srq->error) {
+		err = -EINVAL;
+		rxe_dbg_srq(srq, "srq in error state");
+		goto err_out;
+	}
 
 	attr->max_wr = srq->rq.queue->buf->index_mask;
 	attr->max_sge = srq->rq.max_sge;
 	attr->srq_limit = srq->limit;
 	return 0;
-}
 
-static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
-{
-	struct rxe_srq *srq = to_rsrq(ibsrq);
-
-	rxe_cleanup(srq);
-	return 0;
+err_out:
+	rxe_err_srq(srq, "returned err = %d", err);
+	return err;
 }
 
 static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
@@ -378,76 +503,116 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 
 	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
 
-	if (err)
+	if (err) {
 		*bad_wr = wr;
+		rxe_err_srq(srq, "returned err = %d", err);
+	}
 
 	return err;
 }
 
+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+	struct rxe_srq *srq = to_rsrq(ibsrq);
+	int err;
+
+	err = rxe_cleanup(srq);
+	if (err)
+		rxe_err_srq(srq, "cleanup failed, err = %d", err);
+
+	return 0;
+}
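
Note: the SRQ hunks tighten udata handling: both the response buffer (outlen) and the command buffer (inlen) are size-checked before any copy, and a failing ib_copy_from_udata() is normalized to -EFAULT. A minimal sketch of that check order, with a placeholder struct standing in for rxe_modify_srq_cmd:

	struct example_cmd cmd = {};		/* placeholder uapi struct */

	if (udata) {
		if (udata->inlen < sizeof(cmd))
			return -EINVAL;		/* user buffer too small */
		if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
			return -EFAULT;		/* fault while reading user memory */
	}
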
 
+/* qp */
 static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
 			 struct ib_udata *udata)
 {
-	int err;
 	struct rxe_dev *rxe = to_rdev(ibqp->device);
 	struct rxe_pd *pd = to_rpd(ibqp->pd);
 	struct rxe_qp *qp = to_rqp(ibqp);
 	struct rxe_create_qp_resp __user *uresp = NULL;
+	int err, cleanup_err;
 
 	if (udata) {
-		if (udata->outlen < sizeof(*uresp))
-			return -EINVAL;
-		uresp = udata->outbuf;
-	}
-
-	if (init->create_flags)
-		return -EOPNOTSUPP;
-
-	err = rxe_qp_chk_init(rxe, init);
-	if (err)
-		return err;
+		if (udata->inlen) {
+			err = -EINVAL;
+			rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+			goto err_out;
+		}
 
-	if (udata) {
-		if (udata->inlen)
-			return -EINVAL;
+		if (udata->outlen < sizeof(*uresp)) {
+			err = -EINVAL;
+			rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+			goto err_out;
+		}
 
 		qp->is_user = true;
+		uresp = udata->outbuf;
 	} else {
 		qp->is_user = false;
 	}
 
+	if (init->create_flags) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err);
+		goto err_out;
+	}
+
+	err = rxe_qp_chk_init(rxe, init);
+	if (err) {
+		rxe_dbg_dev(rxe, "bad init attr, err = %d", err);
+		goto err_out;
+	}
+
 	err = rxe_add_to_pool(&rxe->qp_pool, qp);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create qp, err = %d", err);
+		goto err_out;
+	}
 
 	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
-	if (err)
-		goto qp_init;
+	if (err) {
+		rxe_dbg_qp(qp, "create qp failed, err = %d", err);
+		goto err_cleanup;
+	}
 
 	rxe_finalize(qp);
 	return 0;
 
-qp_init:
-	rxe_cleanup(qp);
+err_cleanup:
+	cleanup_err = rxe_cleanup(qp);
+	if (cleanup_err)
+		rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err);
+err_out:
+	rxe_err_dev(rxe, "returned err = %d", err);
 	return err;
 }
 
 static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			 int mask, struct ib_udata *udata)
 {
-	int err;
 	struct rxe_dev *rxe = to_rdev(ibqp->device);
 	struct rxe_qp *qp = to_rqp(ibqp);
+	int err;
 
-	if (mask & ~IB_QP_ATTR_STANDARD_BITS)
-		return -EOPNOTSUPP;
+	if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d",
+			   mask, err);
+		goto err_out;
+	}
 
 	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_qp(qp, "bad mask/attr, err = %d", err);
+		goto err_out;
+	}
 
 	err = rxe_qp_from_attr(qp, attr, mask, udata);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_qp(qp, "modify qp failed, err = %d", err);
+		goto err_out;
+	}
 
 	if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
 		qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
@@ -455,6 +620,10 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 						  qp->attr.dest_qp_num);
 
 	return 0;
+
+err_out:
+	rxe_err_qp(qp, "returned err = %d", err);
+	return err;
 }
 
 static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -471,41 +640,90 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct rxe_qp *qp = to_rqp(ibqp);
-	int ret;
+	int err;
 
-	ret = rxe_qp_chk_destroy(qp);
-	if (ret)
-		return ret;
+	err = rxe_qp_chk_destroy(qp);
+	if (err) {
+		rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err);
+		goto err_out;
+	}
+
+	err = rxe_cleanup(qp);
+	if (err)
+		rxe_err_qp(qp, "cleanup failed, err = %d", err);
 
-	rxe_cleanup(qp);
 	return 0;
+
+err_out:
+	rxe_err_qp(qp, "returned err = %d", err);
+	return err;
 }
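
Note: rxe_create_qp() now front-loads every check (inlen, outlen, create_flags, init attributes) before the QP is added to the pool, so a later failure unwinds through a single rxe_cleanup() call. From user space the path is reached through the usual verbs API; a sketch, assuming pd and cq were created earlier with libibverbs:

	struct ibv_qp_init_attr init = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
			 .max_send_sge = 1, .max_recv_sge = 1 },
		.qp_type = IBV_QPT_RC,
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &init);	/* ends up in rxe_create_qp() */

	if (!qp)
		perror("ibv_create_qp");	/* errno carries the kernel's err */
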
 
+/* send wr */
+
+/* sanity check incoming send work request */
 static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
-			    unsigned int mask, unsigned int length)
+			    unsigned int *maskp, unsigned int *lengthp)
 {
 	int num_sge = ibwr->num_sge;
 	struct rxe_sq *sq = &qp->sq;
+	unsigned int mask = 0;
+	unsigned long length = 0;
+	int err = -EINVAL;
+	int i;
 
-	if (unlikely(num_sge > sq->max_sge))
-		return -EINVAL;
+	do {
+		mask = wr_opcode_mask(ibwr->opcode, qp);
+		if (!mask) {
+			rxe_err_qp(qp, "bad wr opcode for qp type");
+			break;
+		}
 
-	if (unlikely(mask & WR_ATOMIC_MASK)) {
-		if (length < 8)
-			return -EINVAL;
+		if (num_sge > sq->max_sge) {
+			rxe_err_qp(qp, "num_sge > max_sge");
+			break;
+		}
 
-		if (atomic_wr(ibwr)->remote_addr & 0x7)
-			return -EINVAL;
-	}
+		length = 0;
+		for (i = 0; i < ibwr->num_sge; i++)
+			length += ibwr->sg_list[i].length;
 
-	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
-		     (length > sq->max_inline)))
-		return -EINVAL;
+		if (length > (1UL << 31)) {
+			rxe_err_qp(qp, "message length too long");
+			break;
+		}
 
-	return 0;
+		if (mask & WR_ATOMIC_MASK) {
+			if (length != 8) {
+				rxe_err_qp(qp, "atomic length != 8");
+				break;
+			}
+			if (atomic_wr(ibwr)->remote_addr & 0x7) {
+				rxe_err_qp(qp, "misaligned atomic address");
+				break;
+			}
+		}
+
+		if (ibwr->send_flags & IB_SEND_INLINE) {
+			if (!(mask & WR_INLINE_MASK)) {
+				rxe_err_qp(qp, "opcode doesn't support inline data");
+				break;
+			}
+			if (length > sq->max_inline) {
+				rxe_err_qp(qp, "inline length too big");
+				break;
+			}
+		}
+
+		err = 0;
+	} while (0);
+
+	*maskp = mask;
+	*lengthp = (int)length;
+
+	return err;
 }
 
-static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
+static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 			 const struct ib_send_wr *ibwr)
 {
 	wr->wr_id = ibwr->wr_id;
@@ -521,8 +739,18 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 		wr->wr.ud.ah_num = to_rah(ibah)->ah_num;
 		if (qp_type(qp) == IB_QPT_GSI)
 			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
-		if (wr->opcode == IB_WR_SEND_WITH_IMM)
+
+		switch (wr->opcode) {
+		case IB_WR_SEND_WITH_IMM:
 			wr->ex.imm_data = ibwr->ex.imm_data;
+			break;
+		case IB_WR_SEND:
+			break;
+		default:
+			rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP",
+				   wr->opcode);
+			return -EINVAL;
+		}
 	} else {
 		switch (wr->opcode) {
 		case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -539,6 +767,11 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 		case IB_WR_SEND_WITH_INV:
 			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
 			break;
+		case IB_WR_RDMA_READ_WITH_INV:
+			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
+			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
+			break;
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
 			wr->wr.atomic.remote_addr =
@@ -550,16 +783,26 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 			break;
 		case IB_WR_LOCAL_INV:
 			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
-		break;
+			break;
 		case IB_WR_REG_MR:
 			wr->wr.reg.mr = reg_wr(ibwr)->mr;
 			wr->wr.reg.key = reg_wr(ibwr)->key;
 			wr->wr.reg.access = reg_wr(ibwr)->access;
-		break;
+			break;
+		case IB_WR_SEND:
+		case IB_WR_BIND_MW:
+		case IB_WR_FLUSH:
+		case IB_WR_ATOMIC_WRITE:
+			break;
 		default:
+			rxe_err_qp(qp, "unsupported wr opcode %d",
+				   wr->opcode);
+			return -EINVAL;
 			break;
 		}
 	}
+
+	return 0;
 }
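
Note: validate_send_wr() above uses the do { ... } while (0) idiom so every failed check can break to a shared tail that still writes *maskp and *lengthp back for the caller. The skeleton in isolation — check_a()/check_b() are hypothetical stand-ins for the real tests:

	int err = -EINVAL;		/* assume failure */

	do {
		if (!check_a())
			break;		/* err is still -EINVAL */
		if (!check_b())
			break;
		err = 0;		/* every check passed */
	} while (0);

	/* common tail: runs on success and failure alike, so results
	 * (*maskp, *lengthp) can still be written back before returning */
	return err;
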
 
 static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
@@ -570,24 +813,27 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
 	int i;
 
 	for (i = 0; i < ibwr->num_sge; i++, sge++) {
-		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+		memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
 		p += sge->length;
 	}
 }
 
-static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 			  unsigned int mask, unsigned int length,
 			  struct rxe_send_wqe *wqe)
 {
 	int num_sge = ibwr->num_sge;
+	int err;
 
-	init_send_wr(qp, &wqe->wr, ibwr);
+	err = init_send_wr(qp, &wqe->wr, ibwr);
+	if (err)
+		return err;
 
 	/* local operation */
 	if (unlikely(mask & WR_LOCAL_OP_MASK)) {
 		wqe->mask = mask;
 		wqe->state = wqe_state_posted;
-		return;
+		return 0;
 	}
 
 	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
@@ -606,82 +852,62 @@ static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	wqe->dma.sge_offset = 0;
 	wqe->state = wqe_state_posted;
 	wqe->ssn = atomic_add_return(1, &qp->ssn);
+
+	return 0;
 }
 
-static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
-			 unsigned int mask, u32 length)
+static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
 {
 	int err;
 	struct rxe_sq *sq = &qp->sq;
 	struct rxe_send_wqe *send_wqe;
-	unsigned long flags;
+	unsigned int mask;
+	unsigned int length;
 	int full;
 
-	err = validate_send_wr(qp, ibwr, mask, length);
+	err = validate_send_wr(qp, ibwr, &mask, &length);
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&qp->sq.sq_lock, flags);
-
 	full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
-
 	if (unlikely(full)) {
-		spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+		rxe_err_qp(qp, "send queue full");
 		return -ENOMEM;
 	}
 
 	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
-	init_send_wqe(qp, ibwr, mask, length, send_wqe);
+	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
+	if (!err)
+		queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
 
-	queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
-
-	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
-
-	return 0;
+	return err;
 }
 
-static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
+static int rxe_post_send_kernel(struct rxe_qp *qp,
				const struct ib_send_wr *ibwr,
 				const struct ib_send_wr **bad_wr)
 {
 	int err = 0;
-	unsigned int mask;
-	unsigned int length = 0;
-	int i;
-	struct ib_send_wr *next;
-
-	while (wr) {
-		mask = wr_opcode_mask(wr->opcode, qp);
-		if (unlikely(!mask)) {
-			err = -EINVAL;
-			*bad_wr = wr;
-			break;
-		}
-
-		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
-			     !(mask & WR_INLINE_MASK))) {
-			err = -EINVAL;
-			*bad_wr = wr;
-			break;
-		}
-
-		next = wr->next;
-
-		length = 0;
-		for (i = 0; i < wr->num_sge; i++)
-			length += wr->sg_list[i].length;
-
-		err = post_one_send(qp, wr, mask, length);
+	unsigned long flags;
 
+	spin_lock_irqsave(&qp->sq.sq_lock, flags);
+	while (ibwr) {
+		err = post_one_send(qp, ibwr);
 		if (err) {
-			*bad_wr = wr;
+			*bad_wr = ibwr;
 			break;
 		}
-		wr = next;
+		ibwr = ibwr->next;
 	}
+	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
 
-	rxe_sched_task(&qp->req.task);
-	if (unlikely(qp->req.state == QP_STATE_ERROR))
+	if (!err)
+		rxe_sched_task(&qp->req.task);
+
+	spin_lock_bh(&qp->state_lock);
+	if (qp_state(qp) == IB_QPS_ERR)
 		rxe_sched_task(&qp->comp.task);
+	spin_unlock_bh(&qp->state_lock);
 
 	return err;
 }
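
Note: besides the new logging, post_one_send() no longer takes the send-queue lock itself; rxe_post_send_kernel() now holds qp->sq.sq_lock across the whole WR chain, so posting stops at the first bad WR (reported through *bad_wr) and the requester task is only scheduled once the entire chain has been accepted. A sketch of the matching caller side, assuming qp, buf, len and mr come from earlier libibverbs setup:

	struct ibv_sge sge = {
		.addr = (uintptr_t)buf, .length = len, .lkey = mr->lkey,
	};
	struct ibv_send_wr wr = {
		.wr_id = 1, .sg_list = &sge, .num_sge = 1,
		.opcode = IBV_WR_SEND, .send_flags = IBV_SEND_SIGNALED,
	};
	struct ibv_send_wr *bad_wr;

	if (ibv_post_send(qp, &wr, &bad_wr))	/* lands in rxe_post_send() */
		fprintf(stderr, "posting failed at wr_id %llu\n",
			(unsigned long long)bad_wr->wr_id);
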
 
@@ -690,23 +916,88 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			 const struct ib_send_wr **bad_wr)
 {
 	struct rxe_qp *qp = to_rqp(ibqp);
+	int err;
 
-	if (unlikely(!qp->valid)) {
-		*bad_wr = wr;
+	spin_lock_bh(&qp->state_lock);
+	/* caller has already called destroy_qp */
+	if (WARN_ON_ONCE(!qp->valid)) {
+		spin_unlock_bh(&qp->state_lock);
+		rxe_err_qp(qp, "qp has been destroyed");
 		return -EINVAL;
 	}
 
-	if (unlikely(qp->req.state < QP_STATE_READY)) {
+	if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
+		spin_unlock_bh(&qp->state_lock);
 		*bad_wr = wr;
+		rxe_err_qp(qp, "qp not ready to send");
 		return -EINVAL;
 	}
+	spin_unlock_bh(&qp->state_lock);
 
 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
 		rxe_run_task(&qp->req.task);
-		return 0;
-	} else
-		return rxe_post_send_kernel(qp, wr, bad_wr);
+	} else {
+		err = rxe_post_send_kernel(qp, wr, bad_wr);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* recv wr */
+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+{
+	int i;
+	unsigned long length;
+	struct rxe_recv_wqe *recv_wqe;
+	int num_sge = ibwr->num_sge;
+	int full;
+	int err;
+
+	full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
+	if (unlikely(full)) {
+		err = -ENOMEM;
+		rxe_dbg("queue full");
+		goto err_out;
+	}
+
+	if (unlikely(num_sge > rq->max_sge)) {
+		err = -EINVAL;
+		rxe_dbg("bad num_sge > max_sge");
+		goto err_out;
+	}
+
+	length = 0;
+	for (i = 0; i < num_sge; i++)
+		length += ibwr->sg_list[i].length;
+
+	/* IBA max message size is 2^31 */
+	if (length >= (1UL << 31)) {
+		err = -EINVAL;
+		rxe_dbg("message length too long");
+		goto err_out;
+	}
+
+	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
+
+	recv_wqe->wr_id = ibwr->wr_id;
+	recv_wqe->dma.length = length;
+	recv_wqe->dma.resid = length;
+	recv_wqe->dma.num_sge = num_sge;
+	recv_wqe->dma.cur_sge = 0;
+	recv_wqe->dma.sge_offset = 0;
+	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
+	       num_sge * sizeof(struct ib_sge));
+
+	queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
+
+	return 0;
+
+err_out:
+	rxe_dbg("returned err = %d", err);
+	return err;
 }
 
 static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
@@ -717,13 +1008,26 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	struct rxe_rq *rq = &qp->rq;
 	unsigned long flags;
 
-	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
+	spin_lock_bh(&qp->state_lock);
+	/* caller has already called destroy_qp */
+	if (WARN_ON_ONCE(!qp->valid)) {
+		spin_unlock_bh(&qp->state_lock);
+		rxe_err_qp(qp, "qp has been destroyed");
+		return -EINVAL;
+	}
+
+	/* see C10-97.2.1 */
+	if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
+		spin_unlock_bh(&qp->state_lock);
 		*bad_wr = wr;
+		rxe_dbg_qp(qp, "qp not ready to post recv");
 		return -EINVAL;
 	}
+	spin_unlock_bh(&qp->state_lock);
 
 	if (unlikely(qp->srq)) {
 		*bad_wr = wr;
+		rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead");
 		return -EINVAL;
 	}
 
@@ -740,76 +1044,102 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
-	if (qp->resp.state == QP_STATE_ERROR)
+	spin_lock_bh(&qp->state_lock);
+	if (qp_state(qp) == IB_QPS_ERR)
 		rxe_sched_task(&qp->resp.task);
+	spin_unlock_bh(&qp->state_lock);
 
 	return err;
 }
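
Note: post_one_recv() builds one receive WQE per posted WR: each ib_sge in sg_list is copied into recv_wqe->dma.sge, and the summed byte count must stay below the 2^31 IBA message-size cap checked above. The caller side, again assuming qp, buf, len and mr from earlier libibverbs setup:

	struct ibv_sge sge = {
		.addr = (uintptr_t)buf, .length = len, .lkey = mr->lkey,
	};
	struct ibv_recv_wr wr = { .wr_id = 2, .sg_list = &sge, .num_sge = 1 };
	struct ibv_recv_wr *bad_wr;

	if (ibv_post_recv(qp, &wr, &bad_wr))	/* lands in rxe_post_recv() */
		fprintf(stderr, "posting failed at wr_id %llu\n",
			(unsigned long long)bad_wr->wr_id);
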
 
+/* cq */
 static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 			 struct ib_udata *udata)
 {
-	int err;
 	struct ib_device *dev = ibcq->device;
 	struct rxe_dev *rxe = to_rdev(dev);
 	struct rxe_cq *cq = to_rcq(ibcq);
 	struct rxe_create_cq_resp __user *uresp = NULL;
+	int err, cleanup_err;
 
 	if (udata) {
-		if (udata->outlen < sizeof(*uresp))
-			return -EINVAL;
+		if (udata->outlen < sizeof(*uresp)) {
+			err = -EINVAL;
+			rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+			goto err_out;
+		}
 		uresp = udata->outbuf;
 	}
 
-	if (attr->flags)
-		return -EOPNOTSUPP;
+	if (attr->flags) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err);
+		goto err_out;
+	}
 
 	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
+		goto err_out;
+	}
+
+	err = rxe_add_to_pool(&rxe->cq_pool, cq);
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create cq, err = %d", err);
+		goto err_out;
+	}
 
 	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
 			       udata, uresp);
-	if (err)
-		return err;
-
-	return rxe_add_to_pool(&rxe->cq_pool, cq);
-}
-
-static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
-{
-	struct rxe_cq *cq = to_rcq(ibcq);
-
-	/* See IBA C11-17: The CI shall return an error if this Verb is
-	 * invoked while a Work Queue is still associated with the CQ.
-	 */
-	if (atomic_read(&cq->num_wq))
-		return -EINVAL;
-
-	rxe_cq_disable(cq);
+	if (err) {
+		rxe_dbg_cq(cq, "create cq failed, err = %d", err);
+		goto err_cleanup;
+	}
 
-	rxe_cleanup(cq);
 	return 0;
+
+err_cleanup:
+	cleanup_err = rxe_cleanup(cq);
+	if (cleanup_err)
+		rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err);
+err_out:
+	rxe_err_dev(rxe, "returned err = %d", err);
+	return err;
 }
 
 static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 {
-	int err;
 	struct rxe_cq *cq = to_rcq(ibcq);
 	struct rxe_dev *rxe = to_rdev(ibcq->device);
 	struct rxe_resize_cq_resp __user *uresp = NULL;
+	int err;
 
 	if (udata) {
-		if (udata->outlen < sizeof(*uresp))
-			return -EINVAL;
+		if (udata->outlen < sizeof(*uresp)) {
+			err = -EINVAL;
+			rxe_dbg_cq(cq, "malformed udata");
+			goto err_out;
+		}
 		uresp = udata->outbuf;
 	}
 
 	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
-	if (err)
-		return err;
+	if (err) {
+		rxe_dbg_cq(cq, "bad attr, err = %d", err);
+		goto err_out;
+	}
 
-	return rxe_cq_resize_queue(cq, cqe, uresp, udata);
+	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
+	if (err) {
+		rxe_dbg_cq(cq, "resize cq failed, err = %d", err);
+		goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	rxe_err_cq(cq, "returned err = %d", err);
	return err;
 }
 
 static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
@@ -823,7 +1153,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	for (i = 0; i < num_entries; i++) {
 		cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
 		if (!cqe)
-			break;
+			break;	/* queue empty */
 
 		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
 		queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
@@ -864,6 +1194,32 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	return ret;
 }
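
Note: rxe_poll_cq() simply drains the completion queue, copying each rxe_cqe.ibwc straight into the caller's wc array until the queue is empty or num_entries is reached. A consumer loop that pairs with it, assuming a cq created earlier with libibverbs:

	struct ibv_wc wc[4];
	int n = ibv_poll_cq(cq, 4, wc);		/* 0 means the queue is empty */

	for (int i = 0; i < n; i++)
		if (wc[i].status != IBV_WC_SUCCESS)
			fprintf(stderr, "wr %llu failed: status %d\n",
				(unsigned long long)wc[i].wr_id, wc[i].status);
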
 
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+	struct rxe_cq *cq = to_rcq(ibcq);
+	int err;
+
+	/* See IBA C11-17: The CI shall return an error if this Verb is
+	 * invoked while a Work Queue is still associated with the CQ.
+	 */
+	if (atomic_read(&cq->num_wq)) {
+		err = -EINVAL;
+		rxe_dbg_cq(cq, "still in use");
+		goto err_out;
+	}
+
+	err = rxe_cleanup(cq);
+	if (err)
+		rxe_err_cq(cq, "cleanup failed, err = %d", err);
+
+	return 0;
+
+err_out:
+	rxe_err_cq(cq, "returned err = %d", err);
+	return err;
+}
+
+/* mr */
 static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
@@ -872,14 +1228,14 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 	int err;
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err_out;
-	}
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
 	err = rxe_add_to_pool(&rxe->mr_pool, mr);
-	if (err)
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create mr");
 		goto err_free;
+	}
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
@@ -891,47 +1247,49 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 
 err_free:
 	kfree(mr);
-err_out:
+	rxe_err_pd(pd, "returned err = %d", err);
 	return ERR_PTR(err);
 }
 
-static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
-				     u64 start,
-				     u64 length,
-				     u64 iova,
-				     int access, struct ib_udata *udata)
+static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
+				     u64 length, u64 iova, int access,
+				     struct ib_udata *udata)
 {
-	int err;
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
 	struct rxe_mr *mr;
+	int err, cleanup_err;
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err_out;
-	}
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
 	err = rxe_add_to_pool(&rxe->mr_pool, mr);
-	if (err)
+	if (err) {
+		rxe_dbg_pd(pd, "unable to create mr");
 		goto err_free;
+	}
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
 	mr->ibmr.device = ibpd->device;
 
 	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
-	if (err)
+	if (err) {
+		rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
 		goto err_cleanup;
+	}
 
 	rxe_finalize(mr);
 	return &mr->ibmr;
 
 err_cleanup:
-	rxe_cleanup(mr);
+	cleanup_err = rxe_cleanup(mr);
+	if (cleanup_err)
+		rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
 err_free:
 	kfree(mr);
-err_out:
+	rxe_err_pd(pd, "returned err = %d", err);
 	return ERR_PTR(err);
 }
 
@@ -941,17 +1299,19 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
 	struct rxe_mr *mr;
-	int err;
+	int err, cleanup_err;
 
-	if (mr_type != IB_MR_TYPE_MEM_REG)
-		return ERR_PTR(-EINVAL);
-
-	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-	if (!mr) {
-		err = -ENOMEM;
+	if (mr_type != IB_MR_TYPE_MEM_REG) {
+		err = -EINVAL;
+		rxe_dbg_pd(pd, "mr type %d not supported, err = %d",
+			   mr_type, err);
 		goto err_out;
 	}
 
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
 	err = rxe_add_to_pool(&rxe->mr_pool, mr);
 	if (err)
 		goto err_free;
@@ -961,20 +1321,49 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 	mr->ibmr.device = ibpd->device;
 
 	err = rxe_mr_init_fast(max_num_sg, mr);
-	if (err)
+	if (err) {
+		rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
 		goto err_cleanup;
+	}
 
 	rxe_finalize(mr);
 	return &mr->ibmr;
 
 err_cleanup:
-	rxe_cleanup(mr);
+	cleanup_err = rxe_cleanup(mr);
+	if (cleanup_err)
+		rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
 err_free:
 	kfree(mr);
 err_out:
+	rxe_err_pd(pd, "returned err = %d", err);
 	return ERR_PTR(err);
 }
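
Note: the three MR allocation paths above now share the same unwind ladder: each goto label undoes exactly the steps that succeeded before the failure, in reverse order. The skeleton in isolation — setup_a()/setup_b()/undo_a() are hypothetical stand-ins for rxe_add_to_pool(), rxe_mr_init_user()/rxe_mr_init_fast() and rxe_cleanup():

	err = setup_a();
	if (err)
		goto err_free;		/* only the kzalloc() needs undoing */

	err = setup_b();
	if (err)
		goto err_cleanup;	/* setup_a() must be undone too */

	return 0;

err_cleanup:
	undo_a();
err_free:
	kfree(mr);
	return err;
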
 
+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+	struct rxe_mr *mr = to_rmr(ibmr);
+	int err, cleanup_err;
+
+	/* See IBA 10.6.7.2.6 */
+	if (atomic_read(&mr->num_mw) > 0) {
+		err = -EINVAL;
+		rxe_dbg_mr(mr, "mr has mw's bound");
+		goto err_out;
+	}
+
+	cleanup_err = rxe_cleanup(mr);
+	if (cleanup_err)
+		rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+
+	kfree_rcu(mr);
+	return 0;
+
+err_out:
+	rxe_err_mr(mr, "returned err = %d", err);
+	return err;
+}
+
 static ssize_t parent_show(struct device *device,
 			   struct device_attribute *attr, char *buf)
 {
@@ -1095,7 +1484,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
 
 	err = ib_register_device(dev, ibdev_name, NULL);
 	if (err)
-		rxe_dbg(rxe, "failed with error %d\n", err);
+		rxe_dbg_dev(rxe, "failed with error %d\n", err);
 
 	/*
 	 * Note that rxe may be invalid at this point if another thread
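
Note: all of the rxe_dbg_*() and rxe_err_*() helpers used throughout this patch are per-object logging macros defined in rxe.h (one variant per object type: dev, uc, pd, ah, srq, qp, cq, mr, mw). Their definitions are not part of this diff; the sketch below only illustrates the intent, built on the in-tree ibdev_dbg()/ibdev_err_ratelimited() helpers, and should not be read as the real macros:

	/* illustration only -- see rxe.h for the real definitions */
	#define my_dbg_dev(rxe, fmt, ...) \
		ibdev_dbg(&(rxe)->ib_dev, "%s: " fmt, __func__, ##__VA_ARGS__)
	#define my_err_qp(qp, fmt, ...) \
		ibdev_err_ratelimited((qp)->ibqp.device, "qp#%d %s: " fmt, \
				      (qp)->elem.index, __func__, ##__VA_ARGS__)

The debug-level calls stay silent unless dynamic debug is enabled for the rdma_rxe module, which is why the error-level "returned err" breadcrumbs are emitted unconditionally on every failure path.
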