path: root/drivers/infiniband/hw/ehca/ehca_cq.c
author	Heiko J Schick <schickhj.ibm.com>	2006-09-22 15:22:22 -0700
committer	Roland Dreier <rolandd@cisco.com>	2006-09-22 15:22:22 -0700
commit	fab97220c9e409a98b1956ba677ddd2dd43b0b95 (patch)
tree	f4fd0456b758a651eb7e774c550641e177a9b333	/drivers/infiniband/hw/ehca/ehca_cq.c
parent	ded7f1a16d50527359be02f8b04f9ba56bc923e6 (diff)
download	linux-stable-fab97220c9e409a98b1956ba677ddd2dd43b0b95.tar.gz
linux-stable-fab97220c9e409a98b1956ba677ddd2dd43b0b95.tar.bz2
linux-stable-fab97220c9e409a98b1956ba677ddd2dd43b0b95.zip
IB/ehca: Add driver for IBM eHCA InfiniBand adapters
Add a driver for IBM GX bus InfiniBand adapters, which are usable with
some pSeries/System p systems.

Signed-off-by: Heiko J Schick <schickhj.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_cq.c')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_cq.c	427
1 files changed, 427 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
new file mode 100644
index 000000000000..458fe19648a1
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -0,0 +1,427 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Completion queue handling
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Khadija Souissi <souissi@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_iverbs.h"
+#include "ehca_classes.h"
+#include "ehca_irq.h"
+#include "hcp_if.h"
+
+static struct kmem_cache *cq_cache;
+
+int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
+{
+ unsigned int qp_num = qp->real_qp_num;
+ unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
+ unsigned long spl_flags;
+
+ spin_lock_irqsave(&cq->spinlock, spl_flags);
+ hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
+ spin_unlock_irqrestore(&cq->spinlock, spl_flags);
+
+ ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
+ cq->cq_number, qp_num);
+
+ return 0;
+}
+
+int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
+{
+ int ret = -EINVAL;
+ unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
+ struct hlist_node *iter;
+ struct ehca_qp *qp;
+ unsigned long spl_flags;
+
+ spin_lock_irqsave(&cq->spinlock, spl_flags);
+ hlist_for_each(iter, &cq->qp_hashtab[key]) {
+ qp = hlist_entry(iter, struct ehca_qp, list_entries);
+ if (qp->real_qp_num == real_qp_num) {
+ hlist_del(iter);
+ ehca_dbg(cq->ib_cq.device,
+ "removed qp from cq .cq_num=%x real_qp_num=%x",
+ cq->cq_number, real_qp_num);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cq->spinlock, spl_flags);
+ if (ret)
+ ehca_err(cq->ib_cq.device,
+ "qp not found cq_num=%x real_qp_num=%x",
+ cq->cq_number, real_qp_num);
+
+ return ret;
+}
+
+struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
+{
+ struct ehca_qp *ret = NULL;
+ unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
+ struct hlist_node *iter;
+ struct ehca_qp *qp;
+ hlist_for_each(iter, &cq->qp_hashtab[key]) {
+ qp = hlist_entry(iter, struct ehca_qp, list_entries);
+ if (qp->real_qp_num == real_qp_num) {
+ ret = qp;
+ break;
+ }
+ }
+ return ret;
+}
+
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ static const u32 additional_cqe = 20;
+ struct ib_cq *cq;
+ struct ehca_cq *my_cq;
+ struct ehca_shca *shca =
+ container_of(device, struct ehca_shca, ib_device);
+ struct ipz_adapter_handle adapter_handle;
+ struct ehca_alloc_cq_parms param; /* h_call's out parameters */
+ struct h_galpa gal;
+ void *vpage;
+ u32 counter;
+ u64 rpage, cqx_fec, h_ret;
+ int ipz_rc, ret, i;
+ unsigned long flags;
+
+ if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
+ return ERR_PTR(-EINVAL);
+
+ my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+ if (!my_cq) {
+ ehca_err(device, "Out of memory for ehca_cq struct device=%p",
+ device);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(my_cq, 0, sizeof(struct ehca_cq));
+ memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
+
+ spin_lock_init(&my_cq->spinlock);
+ spin_lock_init(&my_cq->cb_lock);
+ spin_lock_init(&my_cq->task_lock);
+ my_cq->ownpid = current->tgid;
+
+ cq = &my_cq->ib_cq;
+
+ adapter_handle = shca->ipz_hca_handle;
+ param.eq_handle = shca->eq.ipz_eq_handle;
+
+ do {
+ if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
+ cq = ERR_PTR(-ENOMEM);
+ ehca_err(device, "Can't reserve idr nr. device=%p",
+ device);
+ goto create_cq_exit1;
+ }
+
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+ } while (ret == -EAGAIN);
+
+ if (ret) {
+ cq = ERR_PTR(-ENOMEM);
+ ehca_err(device, "Can't allocate new idr entry. device=%p",
+ device);
+ goto create_cq_exit1;
+ }
+
+ /*
+ * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
+ * for receiving errors CQEs.
+ */
+ param.nr_cqe = cqe + additional_cqe;
+ h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
+
+ if (h_ret != H_SUCCESS) {
+ ehca_err(device, "hipz_h_alloc_resource_cq() failed "
+ "h_ret=%lx device=%p", h_ret, device);
+ cq = ERR_PTR(ehca2ib_return_code(h_ret));
+ goto create_cq_exit2;
+ }
+
+ ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages,
+ EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0);
+ if (!ipz_rc) {
+ ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p",
+ ipz_rc, device);
+ cq = ERR_PTR(-EINVAL);
+ goto create_cq_exit3;
+ }
+
+ for (counter = 0; counter < param.act_pages; counter++) {
+ vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
+ if (!vpage) {
+ ehca_err(device, "ipz_qpageit_get_inc() "
+ "returns NULL device=%p", device);
+ cq = ERR_PTR(-EAGAIN);
+ goto create_cq_exit4;
+ }
+ rpage = virt_to_abs(vpage);
+
+ h_ret = hipz_h_register_rpage_cq(adapter_handle,
+ my_cq->ipz_cq_handle,
+ &my_cq->pf,
+ 0,
+ 0,
+ rpage,
+ 1,
+ my_cq->galpas.
+ kernel);
+
+ if (h_ret < H_SUCCESS) {
+ ehca_err(device, "hipz_h_register_rpage_cq() failed "
+ "ehca_cq=%p cq_num=%x h_ret=%lx counter=%i "
+ "act_pages=%i", my_cq, my_cq->cq_number,
+ h_ret, counter, param.act_pages);
+ cq = ERR_PTR(-EINVAL);
+ goto create_cq_exit4;
+ }
+
+ if (counter == (param.act_pages - 1)) {
+ vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
+ if ((h_ret != H_SUCCESS) || vpage) {
+ ehca_err(device, "Registration of pages not "
+ "complete ehca_cq=%p cq_num=%x "
+ "h_ret=%lx", my_cq, my_cq->cq_number,
+ h_ret);
+ cq = ERR_PTR(-EAGAIN);
+ goto create_cq_exit4;
+ }
+ } else {
+ if (h_ret != H_PAGE_REGISTERED) {
+ ehca_err(device, "Registration of page failed "
+ "ehca_cq=%p cq_num=%x h_ret=%lx"
+ "counter=%i act_pages=%i",
+ my_cq, my_cq->cq_number,
+ h_ret, counter, param.act_pages);
+ cq = ERR_PTR(-ENOMEM);
+ goto create_cq_exit4;
+ }
+ }
+ }
+
+ ipz_qeit_reset(&my_cq->ipz_queue);
+
+ gal = my_cq->galpas.kernel;
+ cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
+ ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+ my_cq, my_cq->cq_number, cqx_fec);
+
+ my_cq->ib_cq.cqe = my_cq->nr_of_entries =
+ param.act_nr_of_entries - additional_cqe;
+ my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
+
+ for (i = 0; i < QP_HASHTAB_LEN; i++)
+ INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
+
+ if (context) {
+ struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
+ struct ehca_create_cq_resp resp;
+ struct vm_area_struct *vma;
+ memset(&resp, 0, sizeof(resp));
+ resp.cq_number = my_cq->cq_number;
+ resp.token = my_cq->token;
+ resp.ipz_queue.qe_size = ipz_queue->qe_size;
+ resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
+ resp.ipz_queue.queue_length = ipz_queue->queue_length;
+ resp.ipz_queue.pagesize = ipz_queue->pagesize;
+ resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
+ ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
+ ipz_queue->queue_length,
+ (void**)&resp.ipz_queue.queue,
+ &vma);
+ if (ret) {
+ ehca_err(device, "Could not mmap queue pages");
+ cq = ERR_PTR(ret);
+ goto create_cq_exit4;
+ }
+ my_cq->uspace_queue = resp.ipz_queue.queue;
+ resp.galpas = my_cq->galpas;
+ ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
+ (void**)&resp.galpas.kernel.fw_handle,
+ &vma);
+ if (ret) {
+ ehca_err(device, "Could not mmap fw_handle");
+ cq = ERR_PTR(ret);
+ goto create_cq_exit5;
+ }
+ my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ ehca_err(device, "Copy to udata failed.");
+ goto create_cq_exit6;
+ }
+ }
+
+ return cq;
+
+create_cq_exit6:
+ ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+
+create_cq_exit5:
+ ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
+
+create_cq_exit4:
+ ipz_queue_dtor(&my_cq->ipz_queue);
+
+create_cq_exit3:
+ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
+ if (h_ret != H_SUCCESS)
+ ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
+ "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
+
+create_cq_exit2:
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ idr_remove(&ehca_cq_idr, my_cq->token);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+create_cq_exit1:
+ kmem_cache_free(cq_cache, my_cq);
+
+ return cq;
+}
+
+int ehca_destroy_cq(struct ib_cq *cq)
+{
+ u64 h_ret;
+ int ret;
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ int cq_num = my_cq->cq_number;
+ struct ib_device *device = cq->device;
+ struct ehca_shca *shca = container_of(device, struct ehca_shca,
+ ib_device);
+ struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+ u32 cur_pid = current->tgid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ while (my_cq->nr_callbacks)
+ yield();
+
+ idr_remove(&ehca_cq_idr, my_cq->token);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+ if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+ ehca_err(device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_cq->ownpid);
+ return -EINVAL;
+ }
+
+ /* un-mmap if vma alloc */
+ if (my_cq->uspace_queue ) {
+ ret = ehca_munmap(my_cq->uspace_queue,
+ my_cq->ipz_queue.queue_length);
+ if (ret)
+ ehca_err(device, "Could not munmap queue ehca_cq=%p "
+ "cq_num=%x", my_cq, cq_num);
+ ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+ if (ret)
+ ehca_err(device, "Could not munmap fwh ehca_cq=%p "
+ "cq_num=%x", my_cq, cq_num);
+ }
+
+ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
+ if (h_ret == H_R_STATE) {
+ /* cq in err: read err data and destroy it forcibly */
+ ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err "
+ "state. Try to delete it forcibly.",
+ my_cq, cq_num, my_cq->ipz_cq_handle.handle);
+ ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
+ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
+ if (h_ret == H_SUCCESS)
+ ehca_dbg(device, "cq_num=%x deleted successfully.",
+ cq_num);
+ }
+ if (h_ret != H_SUCCESS) {
+ ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx "
+ "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
+ return ehca2ib_return_code(h_ret);
+ }
+ ipz_queue_dtor(&my_cq->ipz_queue);
+ kmem_cache_free(cq_cache, my_cq);
+
+ return 0;
+}
+
+int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
+{
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ u32 cur_pid = current->tgid;
+
+ if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+ ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_cq->ownpid);
+ return -EINVAL;
+ }
+
+ /* TODO: proper resize needs to be done */
+ ehca_err(cq->device, "not implemented yet");
+
+ return -EFAULT;
+}
+
+int ehca_init_cq_cache(void)
+{
+ cq_cache = kmem_cache_create("ehca_cache_cq",
+ sizeof(struct ehca_cq), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!cq_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_cq_cache(void)
+{
+ if (cq_cache)
+ kmem_cache_destroy(cq_cache);
+}
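
Note (not part of the commit): the per-CQ QP table in this file is a simple power-of-two hash. Both ehca_cq_assign_qp() and ehca_cq_get_qp() derive the bucket index as real_qp_num & (QP_HASHTAB_LEN - 1). Below is a minimal, self-contained sketch of that bucket selection only, assuming an illustrative QP_HASHTAB_LEN of 8; the driver's real value is defined in ehca_classes.h and is not shown in this diff.

/*
 * Standalone illustration (not driver code) of the bucket-selection
 * scheme used by ehca_cq_assign_qp()/ehca_cq_get_qp() above.
 * QP_HASHTAB_LEN is assumed to be 8 here for demonstration.
 */
#include <stdio.h>

#define QP_HASHTAB_LEN 8	/* must be a power of two for the mask to work */

static unsigned int qp_hash_bucket(unsigned int real_qp_num)
{
	/* same key computation as in the driver: low bits of the QP number */
	return real_qp_num & (QP_HASHTAB_LEN - 1);
}

int main(void)
{
	unsigned int qp_nums[] = { 0x17, 0x100, 0x101, 0x0 };
	unsigned int i;

	for (i = 0; i < sizeof(qp_nums) / sizeof(qp_nums[0]); i++)
		printf("real_qp_num=0x%x -> bucket %u\n",
		       qp_nums[i], qp_hash_bucket(qp_nums[i]));
	return 0;
}

The mask trick only yields a valid index because QP_HASHTAB_LEN is a power of two; any other table size would require a modulo operation instead.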