author     Sudarsana Kalluru <Sudarsana.Kalluru@qlogic.com>    2015-12-07 06:25:59 -0500
committer  David S. Miller <davem@davemloft.net>               2015-12-07 14:14:04 -0500
commit     8f16bc97fa2a47e2e46d36f2f682e1215ee172f5
tree       6f66536e6aea5474156bada139a9c1e728e871e3    /drivers/net/ethernet/qlogic/qed/qed_int.c
parent     c78df14ee0f6bc5e8741b4324b600b7277abb13e
qed: Correct slowpath interrupt scheme
When using INTa, the ISR might be called before the device has been configured
for INTa (e.g., because another device asserts the shared interrupt line). In
that case the ISR would read the SISR registers, which must not be read until
the HW is configured for INTa, and interrupts might break later on. A similar
ordering issue exists for MSI-X, although there it is mostly theoretical.
This patch changes the initialization order: request_irq() for the slowpath
interrupt is now called only after the chip has been configured to work in the
preferred interrupt mode.
Signed-off-by: Sudarsana Kalluru <Sudarsana.Kalluru@qlogic.com>
Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
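
The sketch below is a minimal, self-contained model (plain C, not qed driver
code) of why the request_irq() ordering matters on a shared INTa line: the ISR
can be invoked for another device's interrupt at any time, so it must not touch
the INTa status (SISR-like) registers until the hardware is configured and the
driver has actually requested the IRQ. All names in the model are invented for
illustration; only the "configure before requesting the IRQ" idea mirrors the
patch.

/*
 * Minimal model (plain C, not qed driver code) of the shared-INTa problem.
 * All names here are invented; only the ordering idea mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_hwfn {
        bool configured_for_inta;       /* HW programmed for INTa */
        bool irq_requested;             /* request_irq() already done */
};

/*
 * Shared-line ISR: with INTa, another device on the same line can trigger
 * this at any time, even before our device is set up.
 */
static int model_isr(const struct model_hwfn *hwfn)
{
        if (!hwfn->irq_requested || !hwfn->configured_for_inta)
                return 0;       /* "IRQ_NONE": reading SISR here would be unsafe */

        /* ... safe to read the INTa status (SISR-like) registers here ... */
        return 1;               /* "IRQ_HANDLED" */
}

int main(void)
{
        struct model_hwfn hwfn = { false, false };

        /*
         * Old order: request the IRQ first, configure later -> a shared-line
         * interrupt arriving in between hits an unconfigured device.
         * New order (this patch): configure the chip for the chosen mode,
         * then request the IRQ, so that window no longer exists.
         */
        hwfn.configured_for_inta = true;
        hwfn.irq_requested = true;

        printf("ISR returned %d\n", model_isr(&hwfn));
        return 0;
}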
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_int.c')
-rw-r--r--   drivers/net/ethernet/qlogic/qed/qed_int.c   33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index de50e84902af..9cc9d62c1fec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
 }
 
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt,
-			enum qed_int_mode int_mode)
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       enum qed_int_mode int_mode)
 {
-	int i;
-
-	p_hwfn->b_int_enabled = 1;
+	int rc, i;
 
 	/* Mask non-link attentions */
 	for (i = 0; i < 9; i++)
 		qed_wr(p_hwfn, p_ptt,
 		       MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
 
-	/* Enable interrupt Generation */
-	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
-
 	/* Configure AEU signal change to produce attentions for link */
 	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
 	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
 	/* Unmask AEU signals toward IGU */
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
 
+	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+		rc = qed_slowpath_irq_req(p_hwfn);
+		if (rc != 0) {
+			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+			return -EINVAL;
+		}
+		p_hwfn->b_int_requested = true;
+	}
+	/* Enable interrupt Generation */
+	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+	p_hwfn->b_int_enabled = 1;
+
+	return rc;
 }
 
 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
 
 	return info->igu_sb_cnt;
 }
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i)
+		cdev->hwfns[i].b_int_requested = false;
+}
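
A note on the condition guarding the new qed_slowpath_irq_req() call: with a
shared INTa line only the lead hwfn requests the single IRQ, whereas with
MSI/MSI-X every hwfn gets its own vector. The new
qed_int_disable_post_isr_release() helper is the matching teardown step: once
the IRQs have been released, b_int_requested is cleared on every hwfn so a
later enable requests them again. The snippet below is a standalone
restatement of the request condition under invented names, not driver code.

/*
 * Standalone check (not driver code) of the IRQ-request condition added by
 * this patch: (int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn).
 * The enum and helper below are stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

enum model_int_mode { MODEL_INT_MODE_INTA, MODEL_INT_MODE_MSIX };

static bool should_request_slowpath_irq(enum model_int_mode mode, bool is_lead_hwfn)
{
        /*
         * Shared INTa line: only the lead hwfn owns the IRQ.
         * MSI/MSI-X: every hwfn requests its own vector.
         */
        return mode != MODEL_INT_MODE_INTA || is_lead_hwfn;
}

int main(void)
{
        printf("INTa,  lead hwfn:  %d\n", should_request_slowpath_irq(MODEL_INT_MODE_INTA, true));
        printf("INTa,  other hwfn: %d\n", should_request_slowpath_irq(MODEL_INT_MODE_INTA, false));
        printf("MSI-X, lead hwfn:  %d\n", should_request_slowpath_irq(MODEL_INT_MODE_MSIX, true));
        printf("MSI-X, other hwfn: %d\n", should_request_slowpath_irq(MODEL_INT_MODE_MSIX, false));
        return 0;
}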