Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c                  |  12
-rw-r--r--  drivers/infiniband/core/mad.c                 |  52
-rw-r--r--  drivers/infiniband/core/mad_priv.h            |   5
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c            |  20
-rw-r--r--  drivers/infiniband/core/sysfs.c               |   2
-rw-r--r--  drivers/infiniband/core/ucm.c                 |  12
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c          |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_debug.h     |  15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_diag.c      |  15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c    |  49
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c    |   7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c  |   6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ht400.c     |  21
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c |  37
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c      |  25
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h    |  13
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_keys.c      |   6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c     |  18
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_pe800.c     |  16
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c        | 180
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c        |  15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_registers.h |  31
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c       |  17
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c     |  14
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c        |   6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c     | 160
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h     |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ips_common.h      |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c       |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c        |  41
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h       |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c       |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c        |  15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h  |  22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c        |  66
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c       |  64
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c       |   1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c     |   4
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c           | 199
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h           |   4
41 files changed, 653 insertions, 539 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 7cfedb8d9bcd..86fee43502cd 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,8 @@
*
* $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
*/
+
+#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
@@ -122,7 +124,7 @@ struct cm_id_private {
struct rb_node service_node;
struct rb_node sidr_id_node;
spinlock_t lock; /* Do not acquire inside cm.lock */
- wait_queue_head_t wait;
+ struct completion comp;
atomic_t refcount;
struct ib_mad_send_buf *msg;
@@ -159,7 +161,7 @@ static void cm_work_handler(void *data);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
if (atomic_dec_and_test(&cm_id_priv->refcount))
- wake_up(&cm_id_priv->wait);
+ complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
@@ -559,7 +561,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
goto error;
spin_lock_init(&cm_id_priv->lock);
- init_waitqueue_head(&cm_id_priv->wait);
+ init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
atomic_set(&cm_id_priv->work_count, -1);
atomic_set(&cm_id_priv->refcount, 1);
@@ -724,8 +726,8 @@ retest:
}
cm_free_id(cm_id->local_id);
- atomic_dec(&cm_id_priv->refcount);
- wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
+ cm_deref_id(cm_id_priv);
+ wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
if (cm_id_priv->private_data && cm_id_priv->private_data_len)
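The cm.c change above is one instance of a pattern repeated below in mad.c, mad_rmpp.c, and ucm.c: an atomic refcount paired with a wait queue is converted to a refcount paired with a completion. With the wait_event() form, the waiter can observe the count hit zero and free the object while the waker's wake_up() is still touching the wait_queue_head_t inside it, a use-after-free; complete()/wait_for_completion() synchronize on the completion's internal lock, so the waiter cannot proceed while the waker is still using the object. A minimal sketch of the idiom, with illustrative names rather than the driver's own:

	struct obj {
		atomic_t refcount;
		struct completion comp;
	};

	static void obj_init(struct obj *o)
	{
		atomic_set(&o->refcount, 1);
		init_completion(&o->comp);
	}

	static void obj_put(struct obj *o)
	{
		/* runs exactly once, when the last reference is dropped */
		if (atomic_dec_and_test(&o->refcount))
			complete(&o->comp);
	}

	static void obj_destroy(struct obj *o)
	{
		obj_put(o);			/* drop the owner's reference */
		wait_for_completion(&o->comp);	/* wait out all other users */
		kfree(o);
	}
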
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 3a702da83e41..5ad41a64314c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -228,10 +228,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
goto error1;
}
/* Make sure class supplied is consistent with RMPP */
- if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
- if (!rmpp_version)
- goto error1;
- } else {
+ if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
if (rmpp_version)
goto error1;
}
@@ -355,7 +352,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
INIT_WORK(&mad_agent_priv->local_work, local_completions,
mad_agent_priv);
atomic_set(&mad_agent_priv->refcount, 1);
- init_waitqueue_head(&mad_agent_priv->wait);
+ init_completion(&mad_agent_priv->comp);
return &mad_agent_priv->agent;
@@ -470,7 +467,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
mad_snoop_priv->agent.port_num = port_num;
mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
- init_waitqueue_head(&mad_snoop_priv->wait);
+ init_completion(&mad_snoop_priv->comp);
mad_snoop_priv->snoop_index = register_snoop_agent(
&port_priv->qp_info[qpn],
mad_snoop_priv);
@@ -489,6 +486,18 @@ error1:
}
EXPORT_SYMBOL(ib_register_mad_snoop);
+static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
+{
+ if (atomic_dec_and_test(&mad_agent_priv->refcount))
+ complete(&mad_agent_priv->comp);
+}
+
+static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
+{
+ if (atomic_dec_and_test(&mad_snoop_priv->refcount))
+ complete(&mad_snoop_priv->comp);
+}
+
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
struct ib_mad_port_private *port_priv;
@@ -512,9 +521,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
flush_workqueue(port_priv->wq);
ib_cancel_rmpp_recvs(mad_agent_priv);
- atomic_dec(&mad_agent_priv->refcount);
- wait_event(mad_agent_priv->wait,
- !atomic_read(&mad_agent_priv->refcount));
+ deref_mad_agent(mad_agent_priv);
+ wait_for_completion(&mad_agent_priv->comp);
kfree(mad_agent_priv->reg_req);
ib_dereg_mr(mad_agent_priv->agent.mr);
@@ -532,9 +540,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
atomic_dec(&qp_info->snoop_count);
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- atomic_dec(&mad_snoop_priv->refcount);
- wait_event(mad_snoop_priv->wait,
- !atomic_read(&mad_snoop_priv->refcount));
+ deref_snoop_agent(mad_snoop_priv);
+ wait_for_completion(&mad_snoop_priv->comp);
kfree(mad_snoop_priv);
}
@@ -603,8 +610,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info,
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
send_buf, mad_send_wc);
- if (atomic_dec_and_test(&mad_snoop_priv->refcount))
- wake_up(&mad_snoop_priv->wait);
+ deref_snoop_agent(mad_snoop_priv);
spin_lock_irqsave(&qp_info->snoop_lock, flags);
}
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -629,8 +635,7 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
mad_recv_wc);
- if (atomic_dec_and_test(&mad_snoop_priv->refcount))
- wake_up(&mad_snoop_priv->wait);
+ deref_snoop_agent(mad_snoop_priv);
spin_lock_irqsave(&qp_info->snoop_lock, flags);
}
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -971,8 +976,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
free_send_rmpp_list(mad_send_wr);
kfree(send_buf->mad);
- if (atomic_dec_and_test(&mad_agent_priv->refcount))
- wake_up(&mad_agent_priv->wait);
+ deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
@@ -1760,8 +1764,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
mad_recv_wc);
if (!mad_recv_wc) {
- if (atomic_dec_and_test(&mad_agent_priv->refcount))
- wake_up(&mad_agent_priv->wait);
+ deref_mad_agent(mad_agent_priv);
return;
}
}
@@ -1773,8 +1776,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
if (!mad_send_wr) {
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
ib_free_recv_mad(mad_recv_wc);
- if (atomic_dec_and_test(&mad_agent_priv->refcount))
- wake_up(&mad_agent_priv->wait);
+ deref_mad_agent(mad_agent_priv);
return;
}
ib_mark_mad_done(mad_send_wr);
@@ -1793,8 +1795,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
} else {
mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
mad_recv_wc);
- if (atomic_dec_and_test(&mad_agent_priv->refcount))
- wake_up(&mad_agent_priv->wait);
+ deref_mad_agent(mad_agent_priv);
}
}
@@ -2024,8 +2025,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
mad_send_wc);
/* Release reference on agent taken when sending */
- if (atomic_dec_and_test(&mad_agent_priv->refcount))
- wake_up(&mad_agent_priv->wait);
+ deref_mad_agent(mad_agent_priv);
return;
done:
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 6c9c133d71ef..b4fa28d3160f 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -37,6 +37,7 @@
#ifndef __IB_MAD_PRIV_H__
#define __IB_MAD_PRIV_H__
+#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
@@ -108,7 +109,7 @@ struct ib_mad_agent_private {
struct list_head rmpp_list;
atomic_t refcount;
- wait_queue_head_t wait;
+ struct completion comp;
};
struct ib_mad_snoop_private {
@@ -117,7 +118,7 @@ struct ib_mad_snoop_private {
int snoop_index;
int mad_snoop_flags;
atomic_t refcount;
- wait_queue_head_t wait;
+ struct completion comp;
};
struct ib_mad_send_wr_private {
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index dfd4e588ce03..d4704e054e30 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -49,7 +49,7 @@ struct mad_rmpp_recv {
struct list_head list;
struct work_struct timeout_work;
struct work_struct cleanup_work;
- wait_queue_head_t wait;
+ struct completion comp;
enum rmpp_state state;
spinlock_t lock;
atomic_t refcount;
@@ -69,10 +69,16 @@ struct mad_rmpp_recv {
u8 method;
};
+static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
+{
+ if (atomic_dec_and_test(&rmpp_recv->refcount))
+ complete(&rmpp_recv->comp);
+}
+
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
- atomic_dec(&rmpp_recv->refcount);
- wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
+ deref_rmpp_recv(rmpp_recv);
+ wait_for_completion(&rmpp_recv->comp);
ib_destroy_ah(rmpp_recv->ah);
kfree(rmpp_recv);
}
@@ -253,7 +259,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
goto error;
rmpp_recv->agent = agent;
- init_waitqueue_head(&rmpp_recv->wait);
+ init_completion(&rmpp_recv->comp);
INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
spin_lock_init(&rmpp_recv->lock);
@@ -279,12 +285,6 @@ error: kfree(rmpp_recv);
return NULL;
}
-static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
-{
- if (atomic_dec_and_test(&rmpp_recv->refcount))
- wake_up(&rmpp_recv->wait);
-}
-
static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 15121cb5a1f6..21f9282c1b25 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
switch (width) {
case 4:
ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
- (offset % 4)) & 0xf);
+ (4 - (offset % 8))) & 0xf);
break;
case 8:
ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
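The sysfs hunk above fixes which nibble is returned for 4-bit PMA counters. Bit offsets in the big-endian counter block count from the most significant bit of each byte, so a counter with offset % 8 == 0 lives in the high nibble and one with offset % 8 == 4 in the low nibble; the old shift of (offset % 4) was always zero for nibble-aligned offsets and so always read the low nibble. A hypothetical helper showing the corrected arithmetic:

	/* 'offset' is the counter's bit offset within the PMA data,
	 * always a multiple of 4 for the 4-bit counters. */
	static u8 pma_read_nibble(const u8 *data, unsigned int offset)
	{
		u8 byte = data[40 + offset / 8];

		/* offset % 8 == 0 -> high nibble; == 4 -> low nibble */
		return (byte >> (4 - (offset % 8))) & 0xf;
	}
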
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f6a05965a4e8..9164a09b6ccd 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -32,6 +32,8 @@
*
* $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
*/
+
+#include <linux/completion.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/module.h>
@@ -72,7 +74,7 @@ struct ib_ucm_file {
struct ib_ucm_context {
int id;
- wait_queue_head_t wait;
+ struct completion comp;
atomic_t ref;
int events_reported;
@@ -138,7 +140,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
{
if (atomic_dec_and_test(&ctx->ref))
- wake_up(&ctx->wait);
+ complete(&ctx->comp);
}
static inline int ib_ucm_new_cm_id(int event)
@@ -178,7 +180,7 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
return NULL;
atomic_set(&ctx->ref, 1);
- init_waitqueue_head(&ctx->wait);
+ init_completion(&ctx->comp);
ctx->file = file;
INIT_LIST_HEAD(&ctx->events);
@@ -586,8 +588,8 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- atomic_dec(&ctx->ref);
- wait_event(ctx->wait, !atomic_read(&ctx->ref));
+ ib_ucm_ctx_put(ctx);
+ wait_for_completion(&ctx->comp);
/* No new events will be generated after destroying the cm_id. */
ib_destroy_cm_id(ctx->cm_id);
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index 36a32c315668..efe147dbeb42 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
*/
work = kmalloc(sizeof *work, GFP_KERNEL);
- if (!work)
+ if (!work) {
+ mmput(mm);
return;
+ }
INIT_WORK(&work->work, ib_umem_account, work);
work->mm = mm;
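The uverbs_mem.c hunk plugs a reference leak on the allocation-failure path: the mm reference taken earlier in ib_umem_release_on_close() was not dropped when the work item could not be allocated. In outline, a sketch of the control flow, assuming the reference comes from get_task_mm() as the balancing mmput() implies:

	mm = get_task_mm(current);
	if (!mm)
		return;

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		mmput(mm);	/* balance the reference on the error path */
		return;
	}
	INIT_WORK(&work->work, ib_umem_account, work);
	work->mm = mm;
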
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 593e28969c69..46762387f5f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -60,11 +60,11 @@
#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
#define __IPATH_SMADBG 0x8000 /* sma packet debug */
-#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */
-#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */
-#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */
-#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */
-#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */
+#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */
+#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */
+#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
+#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
+#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
#else /* _IPATH_DEBUGGING */
@@ -79,11 +79,12 @@
#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
#define __IPATH_VERBDBG 0x0 /* very verbose debug */
#define __IPATH_PKTDBG 0x0 /* print packet data */
-#define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */
+#define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */
/* print mmap/nopage stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x0
#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
-#define __IPATH_SMADBG 0x0 /* print process startup (init)/exit messages */#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
+#define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */
+#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index cd533cf951c2..28ddceb260e8 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -277,13 +277,14 @@ static int ipath_diag_open(struct inode *in, struct file *fp)
bail:
spin_unlock_irqrestore(&ipath_devs_lock, flags);
- mutex_unlock(&ipath_mutex);
/* Only expose a way to reset the device if we
make it into diag mode. */
if (ret == 0)
ipath_expose_reset(&dd->pcidev->dev);
+ mutex_unlock(&ipath_mutex);
+
return ret;
}
@@ -365,15 +366,3 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
bail:
return ret;
}
-
-void ipath_diag_bringup_link(struct ipath_devdata *dd)
-{
- if (diag_set_link || (dd->ipath_flags & IPATH_LINKACTIVE))
- return;
-
- diag_set_link = 1;
- ipath_cdbg(VERBOSE, "Trying to set to set link active for "
- "diag pkt\n");
- ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
- ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 58a94efb0070..dddcdae736ac 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -116,10 +116,9 @@ static int __devinit ipath_init_one(struct pci_dev *,
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
static const struct pci_device_id ipath_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
- PCI_DEVICE_ID_INFINIPATH_HT)},
- {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
- PCI_DEVICE_ID_INFINIPATH_PE800)},
+ { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
+ { 0, }
};
MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
@@ -418,9 +417,19 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
if (ret) {
- dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
- "fails: %d\n", dd->ipath_unit, ret);
- goto bail_regions;
+ /*
+ * if the 64 bit setup fails, try 32 bit. Some systems
+ * do not setup 64 bit maps on systems with 2GB or less
+ * memory installed.
+ */
+ ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (ret) {
+ dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
+ "fails: %d\n", dd->ipath_unit, ret);
+ goto bail_regions;
+ }
+ else
+ ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
}
pci_set_master(pdev);
@@ -1729,7 +1738,7 @@ void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
}
}
-int __init infinipath_init(void)
+static int __init infinipath_init(void)
{
int ret;
@@ -1896,19 +1905,19 @@ static void __exit infinipath_cleanup(void)
} else
ipath_dbg("irq is 0, not doing free_irq "
"for unit %u\n", dd->ipath_unit);
- dd->pcidev = NULL;
- }
- /*
- * we check for NULL here, because it's outside the kregbase
- * check, and we need to call it after the free_irq. Thus
- * it's possible that the function pointers were never
- * initialized.
- */
- if (dd->ipath_f_cleanup)
- /* clean up chip-specific stuff */
- dd->ipath_f_cleanup(dd);
+ /*
+ * we check for NULL here, because it's outside
+ * the kregbase check, and we need to call it
+ * after the free_irq. Thus it's possible that
+ * the function pointers were never initialized.
+ */
+ if (dd->ipath_f_cleanup)
+ /* clean up chip-specific stuff */
+ dd->ipath_f_cleanup(dd);
+ dd->pcidev = NULL;
+ }
spin_lock_irqsave(&ipath_devs_lock, flags);
}
@@ -1949,7 +1958,7 @@ int ipath_reset_device(int unit)
}
if (dd->ipath_pd)
- for (i = 1; i < dd->ipath_portcnt; i++) {
+ for (i = 1; i < dd->ipath_cfgports; i++) {
if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
ipath_dbg("unit %u port %d is in use "
"(PID %u cmd %s), can't reset\n",
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index f11a900e8cd7..a2f1ceafcca9 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -505,11 +505,10 @@ static u8 flash_csum(struct ipath_flash *ifp, int adjust)
* ipath_get_guid - get the GUID from the i2c device
* @dd: the infinipath device
*
- * When we add the multi-chip support, we will probably have to add
- * the ability to use the number of guids field, and get the guid from
- * the first chip's flash, to use for all of them.
+ * We have the capability to use the ipath_nguid field, and get
+ * the guid from the first chip's flash, to use for all of them.
*/
-void ipath_get_guid(struct ipath_devdata *dd)
+void ipath_get_eeprom_info(struct ipath_devdata *dd)
{
void *buf;
struct ipath_flash *ifp;
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index c347191f02bf..ada267e41f6c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -139,7 +139,7 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
kinfo->spi_piosize = dd->ipath_ibmaxlen;
kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
kinfo->spi_port = pd->port_port;
- kinfo->spi_sw_version = IPATH_USER_SWVERSION;
+ kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
kinfo->spi_hw_version = dd->ipath_revision;
if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
@@ -1224,6 +1224,10 @@ static unsigned int ipath_poll(struct file *fp,
if (tail == head) {
set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
+ if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
+ (void)ipath_write_ureg(dd, ur_rcvhdrhead,
+ dd->ipath_rhdrhead_intr_off
+ | head, pd->port_port);
poll_wait(fp, &pd->port_wait, pt);
if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_ht400.c
index 4652435998f3..fac0a2b74de2 100644
--- a/drivers/infiniband/hw/ipath/ipath_ht400.c
+++ b/drivers/infiniband/hw/ipath/ipath_ht400.c
@@ -607,7 +607,12 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
case 4: /* Ponderosa is one of the bringup boards */
n = "Ponderosa";
break;
- case 5: /* HT-460 original production board */
+ case 5:
+ /*
+ * HT-460 original production board; two production levels, with
+ * different serial number ranges. See ipath_ht_early_init() for
+ * case where we enable IPATH_GPIO_INTR for later serial # range.
+ */
n = "InfiniPath_HT-460";
break;
case 6:
@@ -642,7 +647,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
if (n)
snprintf(name, namelen, "%s", n);
- if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) {
+ if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) {
/*
* This version of the driver only supports the HT-400
* Rev 3.2
@@ -1520,6 +1525,18 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
*/
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
INFINIPATH_S_ABORT);
+
+ ipath_get_eeprom_info(dd);
+ if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
+ dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
+ /*
+ * Later production HT-460 has same changes as HT-465, so
+ * can use GPIO interrupts. They have serial #'s starting
+ * with 128, rather than 112.
+ */
+ dd->ipath_flags |= IPATH_GPIO_INTR;
+ dd->ipath_flags &= ~IPATH_POLL_RX_INTR;
+ }
return 0;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 2823ff9c0c62..dc83250d26a6 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -53,13 +53,19 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
/*
* Number of buffers reserved for driver (layered drivers and SMA
- * send). Reserved at end of buffer list.
+ * send). Reserved at end of buffer list. Initialized based on
+ * number of PIO buffers if not set via module interface.
+ * The problem with this is that it's global, but we'll use different
+ * numbers for different chip types. So the default value is not
+ * very useful. I've redefined it for the 1.3 release so that it's
+ * zero unless set by the user to something else, in which case we
+ * try to respect it.
*/
-static ushort ipath_kpiobufs = 32;
+static ushort ipath_kpiobufs;
static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
-module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint,
+module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
&ipath_kpiobufs, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
@@ -531,8 +537,11 @@ static int init_housekeeping(struct ipath_devdata *dd,
* Don't clear ipath_flags as 8bit mode was set before
* entering this func. However, we do set the linkstate to
* unknown, so we can watch for a transition.
+ * PRESENT is set because we want register reads to work,
+ * and the kernel infrastructure saw it in config space;
+ * We clear it if we have failures.
*/
- dd->ipath_flags |= IPATH_LINKUNK;
+ dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
IPATH_LINKDOWN | IPATH_LINKINIT);
@@ -560,6 +569,7 @@ static int init_housekeeping(struct ipath_devdata *dd,
|| (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
ipath_dev_err(dd, "Register read failures from chip, "
"giving up initialization\n");
+ dd->ipath_flags &= ~IPATH_PRESENT;
ret = -ENODEV;
goto done;
}
@@ -682,16 +692,14 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
*/
dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
/ (sizeof(u64) * BITS_PER_BYTE / 2);
- if (!ipath_kpiobufs) /* have to have at least 1, for SMA */
- kpiobufs = ipath_kpiobufs = 1;
- else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) <
- (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) {
- dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) "
- "for %u ports to have %u each!\n",
- dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
- dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT);
- kpiobufs = 1; /* reserve just the minimum for SMA/ether */
- } else
+ if (ipath_kpiobufs == 0) {
+ /* not set by user, or set explicitly to default */
+ if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
+ kpiobufs = 32;
+ else
+ kpiobufs = 16;
+ }
+ else
kpiobufs = ipath_kpiobufs;
if (kpiobufs >
@@ -871,7 +879,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
done:
if (!ret) {
- ipath_get_guid(dd);
*dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
if (!dd->ipath_f_intrsetup(dd)) {
/* now we can enable all interrupts from the chip */
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 60f5f4108069..3e72a1fe3d73 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -172,8 +172,8 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
"was %s\n", dd->ipath_unit,
ib_linkstate(lstate),
ib_linkstate((unsigned)
- dd->ipath_lastibcstat
- & IPATH_IBSTATE_MASK));
+ dd->ipath_lastibcstat
+ & IPATH_IBSTATE_MASK));
}
else {
lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
@@ -665,14 +665,14 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
if (ret > 0)
- goto clear;
+ goto set;
ret = __ipath_verbs_piobufavail(dd);
if (ret > 0)
- goto clear;
+ goto set;
return;
-clear:
+set:
set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl);
@@ -719,11 +719,24 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat)
irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
{
struct ipath_devdata *dd = data;
- u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+ u32 istat;
ipath_err_t estat = 0;
static unsigned unexpected = 0;
irqreturn_t ret;
+ if(!(dd->ipath_flags & IPATH_PRESENT)) {
+ /* this is mostly so we don't try to touch the chip while
+ * it is being reset */
+ /*
+ * This return value is perhaps odd, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+ }
+
+ istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
if (unlikely(!istat)) {
ipath_stats.sps_nullintr++;
ret = IRQ_NONE; /* not our interrupt, or already handled */
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 159d0aed31a5..5d92d57b6f54 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -528,7 +528,6 @@ extern spinlock_t ipath_devs_lock;
extern struct ipath_devdata *ipath_lookup(int unit);
extern u16 ipath_layer_rcv_opcode;
-extern int ipath_verbs_registered;
extern int __ipath_layer_intr(struct ipath_devdata *, u32);
extern int ipath_layer_intr(struct ipath_devdata *, u32);
extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
@@ -651,7 +650,7 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
void ipath_init_pe800_funcs(struct ipath_devdata *);
/* init HT-400-specific func */
void ipath_init_ht400_funcs(struct ipath_devdata *);
-void ipath_get_guid(struct ipath_devdata *);
+void ipath_get_eeprom_info(struct ipath_devdata *);
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
/*
@@ -732,7 +731,7 @@ u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
ipath_ureg regno, int port)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0;
return readl(regno + (u64 __iomem *)
@@ -763,7 +762,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd,
static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
ipath_kreg regno)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return -1;
return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
}
@@ -771,7 +770,7 @@ static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
ipath_kreg regno)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return -1;
return readq(&dd->ipath_kregbase[regno]);
@@ -787,7 +786,7 @@ static inline void ipath_write_kreg(const struct ipath_devdata *dd,
static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
ipath_sreg regno)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0;
return readq(regno + (u64 __iomem *)
@@ -798,7 +797,7 @@ static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
ipath_sreg regno)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0;
return readl(regno + (u64 __iomem *)
(dd->ipath_cregbase +
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index aa33b0e9f2f6..5ae8761f9dd2 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -136,9 +136,7 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
ret = 1;
goto bail;
}
- spin_lock(&rkt->lock);
mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
- spin_unlock(&rkt->lock);
if (unlikely(mr == NULL || mr->lkey != sge->lkey)) {
ret = 0;
goto bail;
@@ -184,8 +182,6 @@ bail:
* @acc: access flags
*
* Return 1 if successful, otherwise 0.
- *
- * The QP r_rq.lock should be held.
*/
int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
u32 len, u64 vaddr, u32 rkey, int acc)
@@ -196,9 +192,7 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
size_t off;
int ret;
- spin_lock(&rkt->lock);
mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
- spin_unlock(&rkt->lock);
if (unlikely(mr == NULL || mr->lkey != rkey)) {
ret = 0;
goto bail;
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 2cabf6340572..9ec4ac77b87f 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -46,13 +46,15 @@
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);
+static int ipath_verbs_registered;
+
u16 ipath_layer_rcv_opcode;
+
static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);
-int ipath_verbs_registered;
static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
@@ -586,6 +588,8 @@ void ipath_verbs_unregister(void)
verbs_rcv = NULL;
verbs_timer_cb = NULL;
+ ipath_verbs_registered = 0;
+
mutex_unlock(&ipath_layer_mutex);
}
@@ -868,12 +872,13 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
update_sge(ss, len);
length -= len;
}
+ /* Update address before sending packet. */
+ update_sge(ss, length);
/* must flush early everything before trigger word */
ipath_flush_wc();
__raw_writel(last, piobuf);
/* be sure trigger word is written */
ipath_flush_wc();
- update_sge(ss, length);
}
/**
@@ -939,17 +944,18 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
!((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
u32 w;
+ u32 *addr = (u32 *) ss->sge.vaddr;
+ /* Update address before sending packet. */
+ update_sge(ss, len);
/* Need to round up for the last dword in the packet. */
w = (len + 3) >> 2;
- __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+ __iowrite32_copy(piobuf, addr, w - 1);
/* must flush early everything before trigger word */
ipath_flush_wc();
- __raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
- piobuf + w - 1);
+ __raw_writel(addr[w - 1], piobuf + w - 1);
/* be sure trigger word is written */
ipath_flush_wc();
- update_sge(ss, len);
ret = 0;
goto bail;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
index e693a7a82667..02e8c75b24f6 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -305,8 +305,8 @@ static const struct ipath_cregs ipath_pe_cregs = {
* we'll print them and continue. We reuse the same message buffer as
* ipath_handle_errors() to avoid excessive stack usage.
*/
-void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
+static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+ size_t msgl)
{
ipath_err_t hwerrs;
u32 bits, ctrl;
@@ -552,7 +552,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
* freeze mode), and enable hardware errors as errors (along with
* everything else) in errormask
*/
-void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
+static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
{
ipath_err_t val;
u64 extsval;
@@ -577,7 +577,7 @@ void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
* ipath_pe_bringup_serdes - bring up the serdes
* @dd: the infinipath device
*/
-int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
+static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
{
u64 val, tmp, config1;
int ret = 0, change = 0;
@@ -694,7 +694,7 @@ int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
* @dd: the infinipath device
* Called when driver is being unloaded
*/
-void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
+static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
{
u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
@@ -972,6 +972,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
/* Use ERROR so it shows up in logs, etc. */
ipath_dev_err(dd, "Resetting PE-800 unit %u\n",
dd->ipath_unit);
+ /* keep chip from being accessed in a few places */
+ dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
val = dd->ipath_control | INFINIPATH_C_RESET;
ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
mb();
@@ -997,6 +999,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
if ((r = pci_enable_device(dd->pcidev)))
ipath_dev_err(dd, "pci_enable_device failed after "
"reset: %d\n", r);
+ /* whether it worked or not, mark as present, again */
+ dd->ipath_flags |= IPATH_PRESENT;
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
if (val == dd->ipath_revision) {
ipath_cdbg(VERBOSE, "Got matching revision "
@@ -1176,6 +1180,8 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
*/
dd->ipath_rhdrhead_intr_off = 1ULL<<32;
+ ipath_get_eeprom_info(dd);
+
return 0;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 6058d70d7577..9f8855d970c8 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -188,8 +188,8 @@ static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
* Allocate the next available QPN and put the QP into the hash table.
* The hash table holds a reference to the QP.
*/
-int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
- enum ib_qp_type type)
+static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
+ enum ib_qp_type type)
{
unsigned long flags;
u32 qpn;
@@ -232,7 +232,7 @@ bail:
* Remove the QP from the table so it can't be found asynchronously by
* the receive interrupt routine.
*/
-void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
+static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
struct ipath_qp *q, **qpp;
unsigned long flags;
@@ -358,6 +358,65 @@ static void ipath_reset_qp(struct ipath_qp *qp)
}
/**
+ * ipath_error_qp - put a QP into an error state
+ * @qp: the QP to put into an error state
+ *
+ * Flushes both send and receive work queues.
+ * QP r_rq.lock and s_lock should be held.
+ */
+
+static void ipath_error_qp(struct ipath_qp *qp)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_wc wc;
+
+ _VERBS_INFO("QP%d/%d in error state\n",
+ qp->ibqp.qp_num, qp->remote_qpn);
+
+ spin_lock(&dev->pending_lock);
+ /* XXX What if its already removed by the timeout code? */
+ if (!list_empty(&qp->timerwait))
+ list_del_init(&qp->timerwait);
+ if (!list_empty(&qp->piowait))
+ list_del_init(&qp->piowait);
+ spin_unlock(&dev->pending_lock);
+
+ wc.status = IB_WC_WR_FLUSH_ERR;
+ wc.vendor_err = 0;
+ wc.byte_len = 0;
+ wc.imm_data = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = 0;
+ wc.wc_flags = 0;
+ wc.pkey_index = 0;
+ wc.slid = 0;
+ wc.sl = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+
+ while (qp->s_last != qp->s_head) {
+ struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+
+ wc.wr_id = wqe->wr.wr_id;
+ wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+ }
+ qp->s_cur = qp->s_tail = qp->s_head;
+ qp->s_hdrwords = 0;
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+
+ wc.opcode = IB_WC_RECV;
+ while (qp->r_rq.tail != qp->r_rq.head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
+ if (++qp->r_rq.tail >= qp->r_rq.size)
+ qp->r_rq.tail = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+}
+
+/**
* ipath_modify_qp - modify the attributes of a queue pair
* @ibqp: the queue pair who's attributes we're modifying
* @attr: the new attributes
@@ -368,6 +427,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask)
{
+ struct ipath_ibdev *dev = to_idev(ibqp->device);
struct ipath_qp *qp = to_iqp(ibqp);
enum ib_qp_state cur_state, new_state;
unsigned long flags;
@@ -384,6 +444,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr_mask))
goto inval;
+ if (attr_mask & IB_QP_AV)
+ if (attr->ah_attr.dlid == 0 ||
+ attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+ goto inval;
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+ goto inval;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+
switch (new_state) {
case IB_QPS_RESET:
ipath_reset_qp(qp);
@@ -398,13 +471,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
- if (attr_mask & IB_QP_PKEY_INDEX) {
- struct ipath_ibdev *dev = to_idev(ibqp->device);
-
- if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
- goto inval;
+ if (attr_mask & IB_QP_PKEY_INDEX)
qp->s_pkey_index = attr->pkey_index;
- }
if (attr_mask & IB_QP_DEST_QPN)
qp->remote_qpn = attr->dest_qp_num;
@@ -420,12 +488,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_ACCESS_FLAGS)
qp->qp_access_flags = attr->qp_access_flags;
- if (attr_mask & IB_QP_AV) {
- if (attr->ah_attr.dlid == 0 ||
- attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
- goto inval;
+ if (attr_mask & IB_QP_AV)
qp->remote_ah_attr = attr->ah_attr;
- }
if (attr_mask & IB_QP_PATH_MTU)
qp->path_mtu = attr->path_mtu;
@@ -440,11 +504,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->s_rnr_retry_cnt = qp->s_rnr_retry;
}
- if (attr_mask & IB_QP_MIN_RNR_TIMER) {
- if (attr->min_rnr_timer > 31)
- goto inval;
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
qp->s_min_rnr_timer = attr->min_rnr_timer;
- }
if (attr_mask & IB_QP_QKEY)
qp->qkey = attr->qkey;
@@ -651,10 +712,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
init_attr->qp_type == IB_QPT_RC ?
ipath_do_rc_send : ipath_do_uc_send,
(unsigned long)qp);
- qp->piowait.next = LIST_POISON1;
- qp->piowait.prev = LIST_POISON2;
- qp->timerwait.next = LIST_POISON1;
- qp->timerwait.prev = LIST_POISON2;
+ INIT_LIST_HEAD(&qp->piowait);
+ INIT_LIST_HEAD(&qp->timerwait);
qp->state = IB_QPS_RESET;
qp->s_wq = swq;
qp->s_size = init_attr->cap.max_send_wr + 1;
@@ -675,7 +734,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
ipath_reset_qp(qp);
/* Tell the core driver that the kernel SMA is present. */
- if (qp->ibqp.qp_type == IB_QPT_SMI)
+ if (init_attr->qp_type == IB_QPT_SMI)
ipath_layer_set_verbs_flags(dev->dd,
IPATH_VERBS_KERNEL_SMA);
break;
@@ -724,10 +783,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
/* Make sure the QP isn't on the timeout list. */
spin_lock_irqsave(&dev->pending_lock, flags);
- if (qp->timerwait.next != LIST_POISON1)
- list_del(&qp->timerwait);
- if (qp->piowait.next != LIST_POISON1)
- list_del(&qp->piowait);
+ if (!list_empty(&qp->timerwait))
+ list_del_init(&qp->timerwait);
+ if (!list_empty(&qp->piowait))
+ list_del_init(&qp->piowait);
spin_unlock_irqrestore(&dev->pending_lock, flags);
/*
@@ -796,10 +855,10 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
spin_lock(&dev->pending_lock);
/* XXX What if its already removed by the timeout code? */
- if (qp->timerwait.next != LIST_POISON1)
- list_del(&qp->timerwait);
- if (qp->piowait.next != LIST_POISON1)
- list_del(&qp->piowait);
+ if (!list_empty(&qp->timerwait))
+ list_del_init(&qp->timerwait);
+ if (!list_empty(&qp->piowait))
+ list_del_init(&qp->piowait);
spin_unlock(&dev->pending_lock);
ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
@@ -821,65 +880,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
}
/**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
- *
- * Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
- */
-
-void ipath_error_qp(struct ipath_qp *qp)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_wc wc;
-
- _VERBS_INFO("QP%d/%d in error state\n",
- qp->ibqp.qp_num, qp->remote_qpn);
-
- spin_lock(&dev->pending_lock);
- /* XXX What if its already removed by the timeout code? */
- if (qp->timerwait.next != LIST_POISON1)
- list_del(&qp->timerwait);
- if (qp->piowait.next != LIST_POISON1)
- list_del(&qp->piowait);
- spin_unlock(&dev->pending_lock);
-
- wc.status = IB_WC_WR_FLUSH_ERR;
- wc.vendor_err = 0;
- wc.byte_len = 0;
- wc.imm_data = 0;
- wc.qp_num = qp->ibqp.qp_num;
- wc.src_qp = 0;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
- wc.slid = 0;
- wc.sl = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
-
- while (qp->s_last != qp->s_head) {
- struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
- wc.wr_id = wqe->wr.wr_id;
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
- }
- qp->s_cur = qp->s_tail = qp->s_head;
- qp->s_hdrwords = 0;
- qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
- wc.opcode = IB_WC_RECV;
- while (qp->r_rq.tail != qp->r_rq.head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
- if (++qp->r_rq.tail >= qp->r_rq.size)
- qp->r_rq.tail = 0;
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
-}
-
-/**
* ipath_get_credit - flush the send work queue of a QP
* @qp: the qp who's send work queue to flush
* @aeth: the Acknowledge Extended Transport Header
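The ipath_qp.c changes above, and the matching hunks in ipath_rc.c, ipath_ruc.c, and ipath_verbs.c below, replace membership tests against LIST_POISON1 with the standard list idiom: poison values are set only by plain list_del() as a debugging aid and are not a stable interface. Initializing a node with INIT_LIST_HEAD() and removing it with list_del_init() leaves it pointing at itself, so list_empty() on the node itself reliably answers "is it currently queued?". A sketch using the QP fields from the diff:

	INIT_LIST_HEAD(&qp->timerwait);	/* node starts out off-list */

	/* enqueue only if not already queued */
	spin_lock(&dev->pending_lock);
	if (list_empty(&qp->timerwait))
		list_add_tail(&qp->timerwait,
			      &dev->pending[dev->pending_index]);
	spin_unlock(&dev->pending_lock);

	/* dequeue only if still queued; the node tests empty afterwards */
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);
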
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index a4055ca00614..493b1821a934 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -57,7 +57,7 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
qp->s_len = wqe->length - len;
dev = to_idev(qp->ibqp.device);
spin_lock(&dev->pending_lock);
- if (qp->timerwait.next == LIST_POISON1)
+ if (list_empty(&qp->timerwait))
list_add_tail(&qp->timerwait,
&dev->pending[dev->pending_index]);
spin_unlock(&dev->pending_lock);
@@ -356,7 +356,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
if ((int)(qp->s_psn - qp->s_next_psn) > 0)
qp->s_next_psn = qp->s_psn;
spin_lock(&dev->pending_lock);
- if (qp->timerwait.next == LIST_POISON1)
+ if (list_empty(&qp->timerwait))
list_add_tail(&qp->timerwait,
&dev->pending[dev->pending_index]);
spin_unlock(&dev->pending_lock);
@@ -726,8 +726,8 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
*/
dev = to_idev(qp->ibqp.device);
spin_lock(&dev->pending_lock);
- if (qp->timerwait.next != LIST_POISON1)
- list_del(&qp->timerwait);
+ if (!list_empty(&qp->timerwait))
+ list_del_init(&qp->timerwait);
spin_unlock(&dev->pending_lock);
if (wqe->wr.opcode == IB_WR_RDMA_READ)
@@ -886,8 +886,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
* just won't find anything to restart if we ACK everything.
*/
spin_lock(&dev->pending_lock);
- if (qp->timerwait.next != LIST_POISON1)
- list_del(&qp->timerwait);
+ if (!list_empty(&qp->timerwait))
+ list_del_init(&qp->timerwait);
spin_unlock(&dev->pending_lock);
/*
@@ -1194,8 +1194,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
IB_WR_RDMA_READ))
goto ack_done;
spin_lock(&dev->pending_lock);
- if (qp->s_rnr_timeout == 0 &&
- qp->timerwait.next != LIST_POISON1)
+ if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
list_move_tail(&qp->timerwait,
&dev->pending[dev->pending_index]);
spin_unlock(&dev->pending_lock);
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 1e59750c5f63..402126eb79c9 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -34,8 +34,9 @@
#define _IPATH_REGISTERS_H
/*
- * This file should only be included by kernel source, and by the diags.
- * It defines the registers, and their contents, for the InfiniPath HT-400 chip
+ * This file should only be included by kernel source, and by the diags. It
+ * defines the registers, and their contents, for the InfiniPath HT-400
+ * chip.
*/
/*
@@ -156,8 +157,10 @@
#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
-#define INFINIPATH_IBCC_LINKINITCMD_POLL 2 /* cycle through TS1/TS2 till OK */
-#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 /* wait for TS1, then go on */
+/* cycle through TS1/TS2 till OK */
+#define INFINIPATH_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
#define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */
@@ -182,7 +185,8 @@
#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4
#define INFINIPATH_IBCS_TXREADY 0x40000000
#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
-/* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
+/* link training states (shift by
+ INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
#define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00
#define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01
#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02
@@ -267,10 +271,12 @@
/* kr_serdesconfig0 bits */
#define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overal reset bits */
#define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */
-#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL /* tx idle enables (per lane) */
-#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL /* rx detect enables (per lane) */
-#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL /* L1 Power down; use with RXDETECT,
- Otherwise not used on IB side */
+/* tx idle enables (per lane) */
+#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL
+/* rx detect enables (per lane) */
+#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
+/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
+#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
/* kr_xgxsconfig bits */
#define INFINIPATH_XGXS_RESET 0x7ULL
@@ -390,12 +396,13 @@ struct ipath_kregs {
ipath_kreg kr_txintmemsize;
ipath_kreg kr_xgxsconfig;
ipath_kreg kr_ibpllcfg;
- /* use these two (and the following N ports) only with ipath_k*_kreg64_port();
- * not *kreg64() */
+ /* use these two (and the following N ports) only with
+ * ipath_k*_kreg64_port(); not *kreg64() */
ipath_kreg kr_rcvhdraddr;
ipath_kreg kr_rcvhdrtailaddr;
- /* remaining registers are not present on all types of infinipath chips */
+ /* remaining registers are not present on all types of infinipath
+ chips */
ipath_kreg kr_rcvpktledcnt;
ipath_kreg kr_pcierbuftestreg0;
ipath_kreg kr_pcierbuftestreg1;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f232e77b78ee..d38f4f3cfd1d 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -435,7 +435,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
unsigned long flags;
spin_lock_irqsave(&dev->pending_lock, flags);
- if (qp->piowait.next == LIST_POISON1)
+ if (list_empty(&qp->piowait))
list_add_tail(&qp->piowait, &dev->piowait);
spin_unlock_irqrestore(&dev->pending_lock, flags);
/*
@@ -531,19 +531,12 @@ int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
}
wqe->wr.num_sge = j;
qp->s_head = next;
- /*
- * Wake up the send tasklet if the QP is not waiting
- * for an RNR timeout.
- */
- next = qp->s_rnr_timeout;
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (next == 0) {
- if (qp->ibqp.qp_type == IB_QPT_UC)
- ipath_do_uc_send((unsigned long) qp);
- else
- ipath_do_rc_send((unsigned long) qp);
- }
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ ipath_do_uc_send((unsigned long) qp);
+ else
+ ipath_do_rc_send((unsigned long) qp);
ret = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 32acd8048b49..f323791cc495 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -711,10 +711,22 @@ static struct attribute_group dev_attr_group = {
* enters diag mode. A device reset is quite likely to crash the
* machine entirely, so we don't want to normally make it
* available.
+ *
+ * Called with ipath_mutex held.
*/
int ipath_expose_reset(struct device *dev)
{
- return device_create_file(dev, &dev_attr_reset);
+ static int exposed;
+ int ret;
+
+ if (!exposed) {
+ ret = device_create_file(dev, &dev_attr_reset);
+ exposed = 1;
+ }
+ else
+ ret = 0;
+
+ return ret;
}
int ipath_driver_create_group(struct device_driver *drv)
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 5ff3de6128b2..e606daf83210 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -46,8 +46,10 @@
* This is called from ipath_post_ud_send() to forward a WQE addressed
* to the same HCA.
*/
-void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
- u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
+static void ipath_ud_loopback(struct ipath_qp *sqp,
+ struct ipath_sge_state *ss,
+ u32 length, struct ib_send_wr *wr,
+ struct ib_wc *wc)
{
struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
struct ipath_qp *qp;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 9f27fd35cdbb..28fdbdaa789d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -41,7 +41,7 @@
/* Not static, because we don't want the compiler removing it */
const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
-unsigned int ib_ipath_qp_table_size = 251;
+static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
@@ -87,7 +87,7 @@ const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
/*
* System image GUID.
*/
-__be64 sys_image_guid;
+static __be64 sys_image_guid;
/**
* ipath_copy_sge - copy data to SGE memory
@@ -449,7 +449,6 @@ static void ipath_ib_timer(void *arg)
{
struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
struct ipath_qp *resend = NULL;
- struct ipath_qp *rnr = NULL;
struct list_head *last;
struct ipath_qp *qp;
unsigned long flags;
@@ -465,32 +464,18 @@ static void ipath_ib_timer(void *arg)
last = &dev->pending[dev->pending_index];
while (!list_empty(last)) {
qp = list_entry(last->next, struct ipath_qp, timerwait);
- if (last->next == LIST_POISON1 ||
- last->next != &qp->timerwait ||
- qp->timerwait.prev != last) {
- INIT_LIST_HEAD(last);
- } else {
- list_del(&qp->timerwait);
- qp->timerwait.prev = (struct list_head *) resend;
- resend = qp;
- atomic_inc(&qp->refcount);
- }
+ list_del_init(&qp->timerwait);
+ qp->timer_next = resend;
+ resend = qp;
+ atomic_inc(&qp->refcount);
}
last = &dev->rnrwait;
if (!list_empty(last)) {
qp = list_entry(last->next, struct ipath_qp, timerwait);
if (--qp->s_rnr_timeout == 0) {
do {
- if (last->next == LIST_POISON1 ||
- last->next != &qp->timerwait ||
- qp->timerwait.prev != last) {
- INIT_LIST_HEAD(last);
- break;
- }
- list_del(&qp->timerwait);
- qp->timerwait.prev =
- (struct list_head *) rnr;
- rnr = qp;
+ list_del_init(&qp->timerwait);
+ tasklet_hi_schedule(&qp->s_task);
if (list_empty(last))
break;
qp = list_entry(last->next, struct ipath_qp,
@@ -530,8 +515,7 @@ static void ipath_ib_timer(void *arg)
spin_unlock_irqrestore(&dev->pending_lock, flags);
/* XXX What if timer fires again while this is running? */
- for (qp = resend; qp != NULL;
- qp = (struct ipath_qp *) qp->timerwait.prev) {
+ for (qp = resend; qp != NULL; qp = qp->timer_next) {
struct ib_wc wc;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -545,9 +529,6 @@ static void ipath_ib_timer(void *arg)
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
- for (qp = rnr; qp != NULL;
- qp = (struct ipath_qp *) qp->timerwait.prev)
- tasklet_hi_schedule(&qp->s_task);
}
/**
@@ -556,9 +537,9 @@ static void ipath_ib_timer(void *arg)
*
* This is called from ipath_intr() at interrupt level when a PIO buffer is
* available after ipath_verbs_send() returned an error that no buffers were
- * available. Return 0 if we consumed all the PIO buffers and we still have
+ * available. Return 1 if we consumed all the PIO buffers and we still have
* QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
- * return one).
+ * return zero).
*/
static int ipath_ib_piobufavail(void *arg)
{
@@ -573,13 +554,13 @@ static int ipath_ib_piobufavail(void *arg)
while (!list_empty(&dev->piowait)) {
qp = list_entry(dev->piowait.next, struct ipath_qp,
piowait);
- list_del(&qp->piowait);
+ list_del_init(&qp->piowait);
tasklet_hi_schedule(&qp->s_task);
}
spin_unlock_irqrestore(&dev->pending_lock, flags);
bail:
- return 1;
+ return 0;
}
static int ipath_query_device(struct ib_device *ibdev,
@@ -970,6 +951,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
idev->dd = dd;
strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
+ dev->owner = THIS_MODULE;
dev->node_guid = ipath_layer_get_guid(dd);
dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
dev->uverbs_cmd_mask =
@@ -1110,7 +1092,7 @@ static void ipath_unregister_ib_device(void *arg)
ib_dealloc_device(ibdev);
}
-int __init ipath_verbs_init(void)
+static int __init ipath_verbs_init(void)
{
return ipath_verbs_register(ipath_register_ib_device,
ipath_unregister_ib_device,
@@ -1118,33 +1100,33 @@ int __init ipath_verbs_init(void)
ipath_ib_timer);
}
-void __exit ipath_verbs_cleanup(void)
+static void __exit ipath_verbs_cleanup(void)
{
ipath_verbs_unregister();
}
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
-        struct ipath_ibdev *dev =
-                container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
-        int vendor, boardrev, majrev, minrev;
+	struct ipath_ibdev *dev =
+		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+	int vendor, boardrev, majrev, minrev;

-        ipath_layer_query_device(dev->dd, &vendor, &boardrev,
-                                 &majrev, &minrev);
-        return sprintf(buf, "%d.%d\n", majrev, minrev);
+	ipath_layer_query_device(dev->dd, &vendor, &boardrev,
+				 &majrev, &minrev);
+	return sprintf(buf, "%d.%d\n", majrev, minrev);
}
static ssize_t show_hca(struct class_device *cdev, char *buf)
{
-        struct ipath_ibdev *dev =
-                container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
-        int ret;
+	struct ipath_ibdev *dev =
+		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+	int ret;

-        ret = ipath_layer_get_boardname(dev->dd, buf, 128);
-        if (ret < 0)
-                goto bail;
-        strcat(buf, "\n");
-        ret = strlen(buf);
+	ret = ipath_layer_get_boardname(dev->dd, buf, 128);
+	if (ret < 0)
+		goto bail;
+	strcat(buf, "\n");
+	ret = strlen(buf);
bail:
return ret;
@@ -1152,40 +1134,40 @@ bail:
static ssize_t show_stats(struct class_device *cdev, char *buf)
{
-        struct ipath_ibdev *dev =
-                container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
-        int i;
-        int len;
-
-        len = sprintf(buf,
-                      "RC resends %d\n"
-                      "RC QACKs %d\n"
-                      "RC ACKs %d\n"
-                      "RC SEQ NAKs %d\n"
-                      "RC RDMA seq %d\n"
-                      "RC RNR NAKs %d\n"
-                      "RC OTH NAKs %d\n"
-                      "RC timeouts %d\n"
-                      "RC RDMA dup %d\n"
-                      "piobuf wait %d\n"
-                      "no piobuf %d\n"
-                      "PKT drops %d\n"
-                      "WQE errs %d\n",
-                      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
-                      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
-                      dev->n_other_naks, dev->n_timeouts,
-                      dev->n_rdma_dup_busy, dev->n_piowait,
-                      dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
-        for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
+	struct ipath_ibdev *dev =
+		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+	int i;
+	int len;
+
+	len = sprintf(buf,
+		      "RC resends %d\n"
+		      "RC no QACK %d\n"
+		      "RC ACKs %d\n"
+		      "RC SEQ NAKs %d\n"
+		      "RC RDMA seq %d\n"
+		      "RC RNR NAKs %d\n"
+		      "RC OTH NAKs %d\n"
+		      "RC timeouts %d\n"
+		      "RC RDMA dup %d\n"
+		      "piobuf wait %d\n"
+		      "no piobuf %d\n"
+		      "PKT drops %d\n"
+		      "WQE errs %d\n",
+		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
+		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
+		      dev->n_other_naks, dev->n_timeouts,
+		      dev->n_rdma_dup_busy, dev->n_piowait,
+		      dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
+	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
 		const struct ipath_opcode_stats *si = &dev->opstats[i];
-                if (!si->n_packets && !si->n_bytes)
-                        continue;
-                len += sprintf(buf + len, "%02x %llu/%llu\n", i,
+		if (!si->n_packets && !si->n_bytes)
+			continue;
+		len += sprintf(buf + len, "%02x %llu/%llu\n", i,
 			       (unsigned long long) si->n_packets,
-                               (unsigned long long) si->n_bytes);
-        }
-        return len;
+			       (unsigned long long) si->n_bytes);
+	}
+	return len;
}
static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -1194,25 +1176,25 @@ static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
static struct class_device_attribute *ipath_class_attributes[] = {
-        &class_device_attr_hw_rev,
-        &class_device_attr_hca_type,
-        &class_device_attr_board_id,
-        &class_device_attr_stats
+	&class_device_attr_hw_rev,
+	&class_device_attr_hca_type,
+	&class_device_attr_board_id,
+	&class_device_attr_stats
};
static int ipath_verbs_register_sysfs(struct ib_device *dev)
{
-        int i;
+	int i;
 	int ret;

-        for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
-                if (class_device_create_file(&dev->class_dev,
-                                             ipath_class_attributes[i])) {
-                        ret = 1;
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
+		if (class_device_create_file(&dev->class_dev,
+					     ipath_class_attributes[i])) {
+			ret = 1;
 			goto bail;
 		}
-        ret = 0;
+	ret = 0;
bail:
return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index b824632b2a8c..4f8d59300e9b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -282,7 +282,8 @@ struct ipath_srq {
*/
struct ipath_qp {
struct ib_qp ibqp;
-	struct ipath_qp *next;	/* link list for QPN hash table */
+	struct ipath_qp *next;		/* link list for QPN hash table */
+	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
struct list_head piowait; /* link for wait PIO buf */
struct list_head timerwait; /* link for waiting for timeouts */
struct ib_ah_attr remote_ah_attr;
@@ -577,8 +578,6 @@ int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
-void ipath_error_qp(struct ipath_qp *qp);
-
void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
void ipath_do_rc_send(unsigned long data);
@@ -607,9 +606,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);
-void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
- u32 length, struct ib_send_wr *wr, struct ib_wc *wc);
-
int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h
index 410a764dfcef..ab7cbbbfd03a 100644
--- a/drivers/infiniband/hw/ipath/ips_common.h
+++ b/drivers/infiniband/hw/ipath/ips_common.h
@@ -95,7 +95,7 @@ struct ether_header {
__u8 seq_num;
__le32 len;
/* MUST be of word size due to PIO write requirements */
- __u32 csum;
+ __le32 csum;
__le16 csum_offset;
__le16 flags;
__u16 first_2_bytes;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1985b5dfa481..798e13e14faf 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -182,7 +182,7 @@ struct mthca_cmd_context {
u8 status;
};
-static int fw_cmd_doorbell = 1;
+static int fw_cmd_doorbell = 0;
module_param(fw_cmd_doorbell, int, 0644);
MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
"(and supported by FW)");
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 312cf90731ea..205854e9c662 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
spin_lock(&dev->cq_table.lock);
cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
if (cq)
- atomic_inc(&cq->refcount);
+ ++cq->refcount;
+
spin_unlock(&dev->cq_table.lock);
if (!cq) {
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
- if (atomic_dec_and_test(&cq->refcount))
+ spin_lock(&dev->cq_table.lock);
+ if (!--cq->refcount)
wake_up(&cq->wait);
+ spin_unlock(&dev->cq_table.lock);
}
static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe)
return !(cqe->is_send & 0x80);
}
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
struct mthca_srq *srq)
{
- struct mthca_cq *cq;
struct mthca_cqe *cqe;
u32 prod_index;
int nfreed = 0;
- spin_lock_irq(&dev->cq_table.lock);
- cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
- spin_unlock_irq(&dev->cq_table.lock);
-
- if (!cq)
- return;
-
spin_lock_irq(&cq->lock);
/*
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
if (0)
mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
- qpn, cqn, cq->cons_index, prod_index);
+ qpn, cq->cqn, cq->cons_index, prod_index);
/*
* Now sweep backwards through the CQ, removing CQ entries
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
}
spin_unlock_irq(&cq->lock);
- if (atomic_dec_and_test(&cq->refcount))
- wake_up(&cq->wait);
}
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
spin_lock_init(&cq->lock);
- atomic_set(&cq->refcount, 1);
+ cq->refcount = 1;
init_waitqueue_head(&cq->wait);
memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +886,17 @@ err_out:
return err;
}
+static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
+{
+ int c;
+
+ spin_lock_irq(&dev->cq_table.lock);
+ c = cq->refcount;
+ spin_unlock_irq(&dev->cq_table.lock);
+
+ return c;
+}
+
void mthca_free_cq(struct mthca_dev *dev,
struct mthca_cq *cq)
{
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev,
spin_lock_irq(&dev->cq_table.lock);
mthca_array_clear(&dev->cq_table.cq,
cq->cqn & (dev->limits.num_cqs - 1));
+ --cq->refcount;
spin_unlock_irq(&dev->cq_table.lock);
if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev,
else
synchronize_irq(dev->pdev->irq);
- atomic_dec(&cq->refcount);
- wait_event(cq->wait, !atomic_read(&cq->refcount));
+ wait_event(cq->wait, !get_cq_refcount(dev, cq));
if (cq->is_kernel) {
mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4c1dcb4c1822..f8160b8de090 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev,
void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
enum ib_event_type event_type);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
struct mthca_srq *srq);
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index f235c7ea42f0..4730863ece9a 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -49,7 +49,7 @@ enum {
MTHCA_VENDOR_CLASS2 = 0xa
};
-int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
+static int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
{
struct ib_port_attr *tprops = NULL;
int ret;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 25e1c1db9a40..a486dec1707e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
int __devinit mthca_init_mr_table(struct mthca_dev *dev)
{
+ unsigned long addr;
int err, i;
err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
goto err_fmr_mpt;
}
+ addr = pci_resource_start(dev->pdev, 4) +
+ ((pci_resource_len(dev->pdev, 4) - 1) &
+ dev->mr_table.mpt_base);
+
dev->mr_table.tavor_fmr.mpt_base =
- ioremap(dev->mr_table.mpt_base,
- (1 << i) * sizeof (struct mthca_mpt_entry));
+ ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));
if (!dev->mr_table.tavor_fmr.mpt_base) {
mthca_warn(dev, "MPT ioremap for FMR failed.\n");
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
goto err_fmr_mpt;
}
+ addr = pci_resource_start(dev->pdev, 4) +
+ ((pci_resource_len(dev->pdev, 4) - 1) &
+ dev->mr_table.mtt_base);
+
dev->mr_table.tavor_fmr.mtt_base =
- ioremap(dev->mr_table.mtt_base,
- (1 << i) * MTHCA_MTT_SEG_SIZE);
+ ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE);
if (!dev->mr_table.tavor_fmr.mtt_base) {
mthca_warn(dev, "MTT ioremap for FMR failed.\n");
err = -ENOMEM;
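Both FMR hunks fix the same bug: mpt_base and mtt_base are offsets relative to the device's BAR 4, not CPU physical addresses, so they must be masked to the BAR length and rebased onto pci_resource_start() before being handed to ioremap(). A standalone sketch of the address arithmetic, using made-up resource values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long bar_start = 0xd0000000;	/* pci_resource_start(pdev, 4) */
		unsigned long bar_len   = 0x00800000;	/* pci_resource_len(pdev, 4)   */
		unsigned long mpt_base  = 0x00120000;	/* device-relative offset      */

		/* Masking keeps only the offset within the BAR window. */
		unsigned long addr = bar_start + ((bar_len - 1) & mpt_base);

		printf("ioremap at %#lx\n", addr);	/* prints 0xd0120000 */
		return 0;
	}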
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 565a24b1756f..a2eae8a30167 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -306,7 +306,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
goto out;
}
- memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8);
+ memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
out:
kfree(in_mad);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 6676a786d690..179a8f610d0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -139,11 +139,12 @@ struct mthca_ah {
* a qp may be locked, with the send cq locked first. No other
* nesting should be done.
*
- * Each struct mthca_cq/qp also has an atomic_t ref count. The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
+ * Each struct mthca_cq/qp also has a ref count, protected by the
+ * corresponding table lock. The pointer from the cq/qp_table to the
+ * struct counts as one reference. This reference also is good for
+ * access through the consumer API, so modifying the CQ/QP etc doesn't
+ * need to take another reference. Access to a QP because of a
+ * completion being polled does not need a reference either.
*
* Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
* destroy function to sleep on.
@@ -159,8 +160,9 @@ struct mthca_ah {
* - decrement ref count; if zero, wake up waiters
*
* To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
+ * - lock cq/qp_table
+ * - remove pointer and decrement ref count
+ * - unlock cq/qp_table lock
* - wait_event until ref count is zero
*
 * It is the consumer's responsibility to make sure that no QP
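The updated comment describes ref counts that are plain ints guarded by the cq/qp table lock instead of atomic_t, with the destroy path sleeping until the count reaches zero. A compilable pthread model of that lifecycle, with a condition variable standing in for the kernel wait queue (obj and its helpers are illustrative, not mthca code):

	#include <pthread.h>

	struct obj {
		int refcount;			/* protected by table_lock */
		pthread_mutex_t *table_lock;
		pthread_cond_t wait;		/* stands in for wait_queue_head_t */
	};

	/* Event path: take a reference under the table lock... */
	static void obj_get(struct obj *o)
	{
		pthread_mutex_lock(o->table_lock);
		++o->refcount;
		pthread_mutex_unlock(o->table_lock);
	}

	/* ...and drop it under the same lock, waking the destroyer at zero. */
	static void obj_put(struct obj *o)
	{
		pthread_mutex_lock(o->table_lock);
		if (!--o->refcount)
			pthread_cond_broadcast(&o->wait);
		pthread_mutex_unlock(o->table_lock);
	}

	/* Destroy path: remove from the table, drop its reference, wait for zero. */
	static void obj_destroy(struct obj *o)
	{
		pthread_mutex_lock(o->table_lock);
		--o->refcount;			/* the table's own reference */
		while (o->refcount)
			pthread_cond_wait(&o->wait, o->table_lock);
		pthread_mutex_unlock(o->table_lock);
	}

	int main(void)
	{
		pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
		struct obj o = { 1, &lock, PTHREAD_COND_INITIALIZER };

		obj_get(&o);
		obj_put(&o);
		obj_destroy(&o);
		return 0;
	}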
@@ -197,7 +199,7 @@ struct mthca_cq_resize {
struct mthca_cq {
struct ib_cq ibcq;
spinlock_t lock;
- atomic_t refcount;
+ int refcount;
int cqn;
u32 cons_index;
struct mthca_cq_buf buf;
@@ -217,7 +219,7 @@ struct mthca_cq {
struct mthca_srq {
struct ib_srq ibsrq;
spinlock_t lock;
- atomic_t refcount;
+ int refcount;
int srqn;
int max;
int max_gs;
@@ -254,7 +256,7 @@ struct mthca_wq {
struct mthca_qp {
struct ib_qp ibqp;
- atomic_t refcount;
+ int refcount;
u32 qpn;
int is_direct;
u8 port; /* for SQP and memfree use only */
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f37b0e367323..07c13be07a4a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
spin_lock(&dev->qp_table.lock);
qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
if (qp)
- atomic_inc(&qp->refcount);
+ ++qp->refcount;
spin_unlock(&dev->qp_table.lock);
if (!qp) {
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
if (qp->ibqp.event_handler)
qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
- if (atomic_dec_and_test(&qp->refcount))
+ spin_lock(&dev->qp_table.lock);
+ if (!--qp->refcount)
wake_up(&qp->wait);
+ spin_unlock(&dev->qp_table.lock);
}
static int to_mthca_state(enum ib_qp_state ib_state)
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
* entries and reinitialize the QP.
*/
if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
- mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
- mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
mthca_wq_init(&qp->sq);
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
int ret;
int i;
- atomic_set(&qp->refcount, 1);
+ qp->refcount = 1;
init_waitqueue_head(&qp->wait);
qp->state = IB_QPS_RESET;
qp->atomic_rd_en = 0;
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
return err;
}
+static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+ int c;
+
+ spin_lock_irq(&dev->qp_table.lock);
+ c = qp->refcount;
+ spin_unlock_irq(&dev->qp_table.lock);
+
+ return c;
+}
+
void mthca_free_qp(struct mthca_dev *dev,
struct mthca_qp *qp)
{
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev,
spin_lock(&dev->qp_table.lock);
mthca_array_clear(&dev->qp_table.qp,
qp->qpn & (dev->limits.num_qps - 1));
+ --qp->refcount;
spin_unlock(&dev->qp_table.lock);
if (send_cq != recv_cq)
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
- atomic_dec(&qp->refcount);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
+ wait_event(qp->wait, !get_qp_refcount(dev, qp));
if (qp->state != IB_QPS_RESET)
mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev,
* unref the mem-free tables and free the QPN in our table.
*/
if (!qp->ibqp.uobject) {
- mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
- mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
mthca_free_memfree(dev, qp);
@@ -1714,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
ind = qp->rq.next_ind;
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
- nreq = 0;
-
- doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
- doorbell[1] = cpu_to_be32(qp->qpn << 8);
-
- wmb();
-
- mthca_write64(doorbell,
- dev->kar + MTHCA_RECEIVE_DOORBELL,
- MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
- qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
- size0 = 0;
- }
-
+ for (nreq = 0; wr; wr = wr->next) {
if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
mthca_err(dev, "RQ %06x full (%u head, %u tail,"
" %d max, %d nreq)\n", qp->qpn,
@@ -1784,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
++ind;
if (unlikely(ind >= qp->rq.max))
ind -= qp->rq.max;
+
+ ++nreq;
+ if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+ nreq = 0;
+
+ doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+ doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+ qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+ size0 = 0;
+ }
}
out:
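Moving the batch check to the bottom of the loop means nreq counts WQEs that have actually been built, so a doorbell is rung after every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB descriptors and the leftover count rung at loop exit can never reach the hardware's per-doorbell limit; the same reordering is applied to mthca_tavor_post_srq_recv() below. A userspace sketch of the corrected batching shape, with invented names:

	#include <stdio.h>

	#define MAX_WQES_PER_DB 256	/* per-doorbell limit, as in the driver */

	static void ring_doorbell(int first, int count)
	{
		printf("doorbell: %d WQEs starting at %d\n", count, first);
	}

	static void post_receive(int total)
	{
		int nreq = 0, head = 0, i;

		for (i = 0; i < total; i++) {
			/* ... build WQE i here ... */
			++nreq;
			if (nreq == MAX_WQES_PER_DB) {	/* checked after building */
				ring_doorbell(head, nreq);
				head += nreq;
				nreq = 0;
			}
		}
		if (nreq)			/* leftover batch, always < limit */
			ring_doorbell(head, nreq);
	}

	int main(void)
	{
		post_receive(600);	/* rings 256, 256, then 88 */
		return 0;
	}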
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index adcaf85355ae..b292fefa3b41 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
goto err_out_mailbox;
spin_lock_init(&srq->lock);
- atomic_set(&srq->refcount, 1);
+ srq->refcount = 1;
init_waitqueue_head(&srq->wait);
if (mthca_is_memfree(dev))
@@ -308,6 +308,17 @@ err_out:
return err;
}
+static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+ int c;
+
+ spin_lock_irq(&dev->srq_table.lock);
+ c = srq->refcount;
+ spin_unlock_irq(&dev->srq_table.lock);
+
+ return c;
+}
+
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
struct mthca_mailbox *mailbox;
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
spin_lock_irq(&dev->srq_table.lock);
mthca_array_clear(&dev->srq_table.srq,
srq->srqn & (dev->limits.num_srqs - 1));
+ --srq->refcount;
spin_unlock_irq(&dev->srq_table.lock);
- atomic_dec(&srq->refcount);
- wait_event(srq->wait, !atomic_read(&srq->refcount));
+ wait_event(srq->wait, !get_srq_refcount(dev, srq));
if (!srq->ibsrq.uobject) {
mthca_free_srq_buf(dev, srq);
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
spin_lock(&dev->srq_table.lock);
srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
if (srq)
- atomic_inc(&srq->refcount);
+ ++srq->refcount;
spin_unlock(&dev->srq_table.lock);
if (!srq) {
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
out:
- if (atomic_dec_and_test(&srq->refcount))
+ spin_lock(&dev->srq_table.lock);
+ if (!--srq->refcount)
wake_up(&srq->wait);
+ spin_unlock(&dev->srq_table.lock);
}
/*
@@ -477,26 +490,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
first_ind = srq->first_free;
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
- nreq = 0;
-
- doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
- doorbell[1] = cpu_to_be32(srq->srqn << 8);
-
- /*
- * Make sure that descriptors are written
- * before doorbell is rung.
- */
- wmb();
-
- mthca_write64(doorbell,
- dev->kar + MTHCA_RECEIVE_DOORBELL,
- MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
- first_ind = srq->first_free;
- }
-
+ for (nreq = 0; wr; wr = wr->next) {
ind = srq->first_free;
if (ind < 0) {
@@ -556,6 +550,26 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
srq->wrid[ind] = wr->wr_id;
srq->first_free = next_ind;
+
+ ++nreq;
+ if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+ nreq = 0;
+
+ doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
+ doorbell[1] = cpu_to_be32(srq->srqn << 8);
+
+ /*
+ * Make sure that descriptors are written
+ * before doorbell is rung.
+ */
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+ first_ind = srq->first_free;
+ }
}
if (likely(nreq)) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index a54da42849ae..8406839b91cf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -275,6 +275,7 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
spin_lock_irqsave(&priv->tx_lock, flags);
++priv->tx_tail;
if (netif_queue_stopped(dev) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
netif_wake_queue(dev);
spin_unlock_irqrestore(&priv->tx_lock, flags);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4ca175553f9f..f887780e8093 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
if (priv->pkey == pkey) {
unregister_netdev(priv->dev);
ipoib_dev_cleanup(priv->dev);
-
list_del(&priv->list);
-
- kfree(priv);
+ free_netdev(priv->dev);
ret = 0;
break;
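This fix matters because the ipoib private data is allocated as part of the net_device itself (alloc_netdev() reserves the priv area behind the device struct), so the old kfree(priv) freed an interior pointer; free_netdev(priv->dev) releases the whole containing allocation. A toy model of that layout, assuming invented names (fake_netdev is not the real struct net_device):

	#include <stdlib.h>

	/* alloc_netdev()-style: the private area lives inside the same allocation. */
	struct fake_netdev {
		char name[16];
		char priv[];		/* what netdev_priv() would point at */
	};

	static struct fake_netdev *fake_alloc_netdev(size_t priv_size)
	{
		return calloc(1, sizeof(struct fake_netdev) + priv_size);
	}

	int main(void)
	{
		struct fake_netdev *dev = fake_alloc_netdev(64);
		void *priv = dev->priv;

		(void) priv;		/* free(priv) here would be the old bug */
		free(dev);		/* free the container, like free_netdev() */
		return 0;
	}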
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5f2b3f6e4c47..9cbdffa08dc2 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
/* XXX should send SRP_I_LOGOUT request */
init_completion(&target->done);
- ib_send_cm_dreq(target->cm_id, NULL, 0);
+ if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+ printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
+ return;
+ }
wait_for_completion(&target->done);
}
@@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr)
spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {
spin_unlock_irq(target->scsi_host->host_lock);
- scsi_host_put(target->scsi_host);
return;
}
target->state = SRP_TARGET_REMOVED;
@@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr)
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
scsi_host_put(target->scsi_host);
- /* And another put to really free the target port... */
- scsi_host_put(target->scsi_host);
}
static int srp_connect_target(struct srp_target_port *target)
@@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target)
}
}
+static void srp_unmap_data(struct scsi_cmnd *scmnd,
+ struct srp_target_port *target,
+ struct srp_request *req)
+{
+ struct scatterlist *scat;
+ int nents;
+
+ if (!scmnd->request_buffer ||
+ (scmnd->sc_data_direction != DMA_TO_DEVICE &&
+ scmnd->sc_data_direction != DMA_FROM_DEVICE))
+ return;
+
+ /*
+ * This handling of non-SG commands can be killed when the
+ * SCSI midlayer no longer generates non-SG commands.
+ */
+ if (likely(scmnd->use_sg)) {
+ nents = scmnd->use_sg;
+ scat = scmnd->request_buffer;
+ } else {
+ nents = 1;
+ scat = &req->fake_sg;
+ }
+
+ dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+ scmnd->sc_data_direction);
+}
+
static int srp_reconnect_target(struct srp_target_port *target)
{
struct ib_cm_id *new_cm_id;
@@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
list_for_each_entry(req, &target->req_queue, list) {
req->scmnd->result = DID_RESET << 16;
req->scmnd->scsi_done(req->scmnd);
+ srp_unmap_data(req->scmnd, target, req);
}
target->rx_head = 0;
target->tx_head = 0;
target->tx_tail = 0;
- target->req_head = 0;
- for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
- target->req_ring[i].next = i + 1;
- target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+ INIT_LIST_HEAD(&target->free_reqs);
INIT_LIST_HEAD(&target->req_queue);
+ for (i = 0; i < SRP_SQ_SIZE; ++i)
+ list_add_tail(&target->req_ring[i].list, &target->free_reqs);
ret = srp_connect_target(target);
if (ret)
@@ -589,32 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return len;
}
-static void srp_unmap_data(struct scsi_cmnd *scmnd,
- struct srp_target_port *target,
- struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
- struct scatterlist *scat;
- int nents;
-
- if (!scmnd->request_buffer ||
- (scmnd->sc_data_direction != DMA_TO_DEVICE &&
- scmnd->sc_data_direction != DMA_FROM_DEVICE))
- return;
-
- /*
- * This handling of non-SG commands can be killed when the
- * SCSI midlayer no longer generates non-SG commands.
- */
- if (likely(scmnd->use_sg)) {
- nents = scmnd->use_sg;
- scat = scmnd->request_buffer;
- } else {
- nents = 1;
- scat = &req->fake_sg;
- }
-
- dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
- scmnd->sc_data_direction);
+ srp_unmap_data(req->scmnd, target, req);
+ list_move_tail(&req->list, &target->free_reqs);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
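srp_remove_req() replaces the old hand-rolled req_head/next index chain: a finished request is unmapped and recycled onto free_reqs in one list_move_tail() call. A self-contained sketch of list_move_tail() semantics on a minimal doubly-linked list (these helpers are re-implemented here for illustration; the kernel's live in <linux/list.h>):

	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	#define LIST_INIT(n) { &(n), &(n) }

	static void list_del(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
	}

	static void list_add_tail(struct list_head *e, struct list_head *head)
	{
		e->prev = head->prev;
		e->next = head;
		head->prev->next = e;
		head->prev = e;
	}

	/* Unlink the entry from wherever it is, append it to the new list. */
	static void list_move_tail(struct list_head *e, struct list_head *head)
	{
		list_del(e);
		list_add_tail(e, head);
	}

	int main(void)
	{
		struct list_head req_queue = LIST_INIT(req_queue);
		struct list_head free_reqs = LIST_INIT(free_reqs);
		struct list_head req = { NULL, NULL };

		list_add_tail(&req, &req_queue);	/* request in flight */
		list_move_tail(&req, &free_reqs);	/* done: recycle in one step */
		printf("recycled: %d\n", free_reqs.next == &req);
		return 0;
	}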
@@ -639,7 +645,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
req->tsk_status = rsp->data[3];
complete(&req->done);
} else {
-                scmnd = req->scmnd;
+		scmnd = req->scmnd;
if (!scmnd)
printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
(unsigned long long) rsp->tag);
@@ -657,16 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
- srp_unmap_data(scmnd, target, req);
-
if (!req->tsk_mgmt) {
- req->scmnd = NULL;
scmnd->host_scribble = (void *) -1L;
scmnd->scsi_done(scmnd);
- list_del(&req->list);
- req->next = target->req_head;
- target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT;
+ srp_remove_req(target, req);
} else
req->cmd_done = 1;
}
@@ -853,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
- long req_index;
int len;
if (target->state == SRP_TARGET_CONNECTING)
@@ -873,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
SRP_MAX_IU_LEN, DMA_TO_DEVICE);
- req_index = target->req_head;
+ req = list_entry(target->free_reqs.next, struct srp_request, list);
scmnd->scsi_done = done;
scmnd->result = 0;
- scmnd->host_scribble = (void *) req_index;
+ scmnd->host_scribble = (void *) (long) req->index;
cmd = iu->buf;
memset(cmd, 0, sizeof *cmd);
cmd->opcode = SRP_CMD;
cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
- cmd->tag = req_index;
+ cmd->tag = req->index;
memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
- req = &target->req_ring[req_index];
-
req->scmnd = scmnd;
req->cmd = iu;
req->cmd_done = 0;
@@ -913,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
goto err_unmap;
}
- target->req_head = req->next;
- list_add_tail(&req->list, &target->req_queue);
+ list_move_tail(&req->list, &target->req_queue);
return 0;
@@ -1137,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
-static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
+static int srp_send_tsk_mgmt(struct srp_target_port *target,
+ struct srp_request *req, u8 func)
{
- struct srp_target_port *target = host_to_target(scmnd->device->host);
- struct srp_request *req;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
- int req_index;
- int ret = FAILED;
spin_lock_irq(target->scsi_host->host_lock);
if (target->state == SRP_TARGET_DEAD ||
target->state == SRP_TARGET_REMOVED) {
- scmnd->result = DID_BAD_TARGET << 16;
+ req->scmnd->result = DID_BAD_TARGET << 16;
goto out;
}
- if (scmnd->host_scribble == (void *) -1L)
- goto out;
-
- req_index = (long) scmnd->host_scribble;
- printk(KERN_ERR "Abort for req_index %d\n", req_index);
-
- req = &target->req_ring[req_index];
init_completion(&req->done);
iu = __srp_get_tx_iu(target);
@@ -1171,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
- tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT;
+ tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
+ tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
tsk_mgmt->tsk_mgmt_func = func;
- tsk_mgmt->task_tag = req_index;
+ tsk_mgmt->task_tag = req->index;
if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
goto out;
@@ -1182,39 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
req->tsk_mgmt = iu;
spin_unlock_irq(target->scsi_host->host_lock);
+
if (!wait_for_completion_timeout(&req->done,
msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
- return FAILED;
- spin_lock_irq(target->scsi_host->host_lock);
+ return -1;
- if (req->cmd_done) {
- list_del(&req->list);
- req->next = target->req_head;
- target->req_head = req_index;
-
- scmnd->scsi_done(scmnd);
- } else if (!req->tsk_status) {
- scmnd->result = DID_ABORT << 16;
- ret = SUCCESS;
- }
+ return 0;
out:
spin_unlock_irq(target->scsi_host->host_lock);
- return ret;
+ return -1;
+}
+
+static int srp_find_req(struct srp_target_port *target,
+ struct scsi_cmnd *scmnd,
+ struct srp_request **req)
+{
+ if (scmnd->host_scribble == (void *) -1L)
+ return -1;
+
+ *req = &target->req_ring[(long) scmnd->host_scribble];
+
+ return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req;
+ int ret = SUCCESS;
+
printk(KERN_ERR "SRP abort called\n");
- return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
+ if (srp_find_req(target, scmnd, &req))
+ return FAILED;
+ if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+ return FAILED;
+
+ spin_lock_irq(target->scsi_host->host_lock);
+
+ if (req->cmd_done) {
+ srp_remove_req(target, req);
+ scmnd->scsi_done(scmnd);
+ } else if (!req->tsk_status) {
+ srp_remove_req(target, req);
+ scmnd->result = DID_ABORT << 16;
+ } else
+ ret = FAILED;
+
+ spin_unlock_irq(target->scsi_host->host_lock);
+
+ return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req, *tmp;
+
printk(KERN_ERR "SRP reset_device called\n");
- return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
+ if (srp_find_req(target, scmnd, &req))
+ return FAILED;
+ if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+ return FAILED;
+ if (req->tsk_status)
+ return FAILED;
+
+ spin_lock_irq(target->scsi_host->host_lock);
+
+ list_for_each_entry_safe(req, tmp, &target->req_queue, list)
+ if (req->scmnd->device == scmnd->device) {
+ req->scmnd->result = DID_RESET << 16;
+ req->scmnd->scsi_done(req->scmnd);
+ srp_remove_req(target, req);
+ }
+
+ spin_unlock_irq(target->scsi_host->host_lock);
+
+ return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
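The rewritten srp_reset_device() sweeps target->req_queue and completes every request on the device being reset; because entries are unlinked mid-walk, it uses list_for_each_entry_safe(), which captures the next pointer before the current entry can be removed. A userspace sketch of that remove-while-iterating pattern on a singly-linked list (struct req and flush_lun() are invented for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	struct req {
		int lun;
		struct req *next;
	};

	/* Complete and unlink every request for one LUN; 'tmp' is captured
	 * before 'r' is freed, so removal cannot derail the walk. */
	static void flush_lun(struct req **head, int lun)
	{
		struct req *r = *head, *tmp, **prev = head;

		for (; r; r = tmp) {
			tmp = r->next;
			if (r->lun == lun) {
				*prev = tmp;
				printf("completing req on lun %d\n", r->lun);
				free(r);
			} else {
				prev = &r->next;
			}
		}
	}

	int main(void)
	{
		struct req *head = NULL;
		int i;

		for (i = 0; i < 4; i++) {
			struct req *r = malloc(sizeof *r);

			r->lun = i % 2;
			r->next = head;
			head = r;
		}
		flush_lun(&head, 1);
		flush_lun(&head, 0);
		return 0;
	}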
@@ -1514,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev,
INIT_WORK(&target->work, srp_reconnect_work, target);
- for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
- target->req_ring[i].next = i + 1;
- target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+ INIT_LIST_HEAD(&target->free_reqs);
INIT_LIST_HEAD(&target->req_queue);
+ for (i = 0; i < SRP_SQ_SIZE; ++i) {
+ target->req_ring[i].index = i;
+ list_add_tail(&target->req_ring[i].list, &target->free_reqs);
+ }
ret = srp_parse_options(buf, target);
if (ret)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index bd7f7c3115de..c5cd43aae860 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -101,7 +101,7 @@ struct srp_request {
*/
struct scatterlist fake_sg;
struct completion done;
- short next;
+ short index;
u8 cmd_done;
u8 tsk_status;
};
@@ -133,7 +133,7 @@ struct srp_target_port {
unsigned tx_tail;
struct srp_iu *tx_ring[SRP_SQ_SIZE + 1];
- int req_head;
+ struct list_head free_reqs;
struct list_head req_queue;
struct srp_request req_ring[SRP_SQ_SIZE];