Diffstat (limited to 'drivers/net/ethernet/brocade')
30 files changed, 20952 insertions, 0 deletions
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig new file mode 100644 index 000000000000..264155778857 --- /dev/null +++ b/drivers/net/ethernet/brocade/Kconfig @@ -0,0 +1,23 @@ +# +# Brocade device configuration +# + +config NET_VENDOR_BROCADE + bool "Brocade devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Brocade cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_BROCADE + +source "drivers/net/ethernet/brocade/bna/Kconfig" + +endif # NET_VENDOR_BROCADE diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile new file mode 100644 index 000000000000..b58238d2df6a --- /dev/null +++ b/drivers/net/ethernet/brocade/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Brocade device drivers. +# + +obj-$(CONFIG_BNA) += bna/ diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig new file mode 100644 index 000000000000..dc2eb526fbf7 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/Kconfig @@ -0,0 +1,17 @@ +# +# Brocade network device configuration +# + +config BNA + tristate "Brocade 1010/1020 10Gb Ethernet Driver support" + depends on PCI + ---help--- + This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet + cards. + To compile this driver as a module, choose M here: the module + will be called bna. + + For general information and support, go to the Brocade support + website at: + + <http://support.brocade.com> diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile new file mode 100644 index 000000000000..74d3abca1960 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/Makefile @@ -0,0 +1,12 @@ +# +# Copyright (c) 2005-2010 Brocade Communications Systems, Inc. +# All rights reserved. +# + +obj-$(CONFIG_BNA) += bna.o + +bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o +bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o +bna-objs += cna_fwimg.o + +EXTRA_CFLAGS := -Idrivers/net/bna diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c new file mode 100644 index 000000000000..8e627186507c --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -0,0 +1,299 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ + +#include "bfa_cee.h" +#include "bfi_cna.h" +#include "bfa_ioc.h" + +static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg); +static void bfa_cee_format_cee_cfg(void *buffer); + +static void +bfa_cee_format_cee_cfg(void *buffer) +{ + struct bfa_cee_attr *cee_cfg = buffer; + bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote); +} + +static void +bfa_cee_stats_swap(struct bfa_cee_stats *stats) +{ + u32 *buffer = (u32 *)stats; + int i; + + for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32)); + i++) { + buffer[i] = ntohl(buffer[i]); + } +} + +static void +bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg) +{ + lldp_cfg->time_to_live = + ntohs(lldp_cfg->time_to_live); + lldp_cfg->enabled_system_cap = + ntohs(lldp_cfg->enabled_system_cap); +} + +/** + * bfa_cee_attr_meminfo() + * + * @brief Returns the size of the DMA memory needed by CEE attributes + * + * @param[in] void + * + * @return Size of DMA region + */ +static u32 +bfa_cee_attr_meminfo(void) +{ + return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ); +} +/** + * bfa_cee_stats_meminfo() + * + * @brief Returns the size of the DMA memory needed by CEE stats + * + * @param[in] void + * + * @return Size of DMA region + */ +static u32 +bfa_cee_stats_meminfo(void) +{ + return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ); +} + +/** + * bfa_cee_get_attr_isr() + * + * @brief CEE ISR for get-attributes responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->get_attr_status = status; + if (status == BFA_STATUS_OK) { + memcpy(cee->attr, cee->attr_dma.kva, + sizeof(struct bfa_cee_attr)); + bfa_cee_format_cee_cfg(cee->attr); + } + cee->get_attr_pending = false; + if (cee->cbfn.get_attr_cbfn) + cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status); +} + +/** + * bfa_cee_get_attr_isr() + * + * @brief CEE ISR for get-stats responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->get_stats_status = status; + if (status == BFA_STATUS_OK) { + memcpy(cee->stats, cee->stats_dma.kva, + sizeof(struct bfa_cee_stats)); + bfa_cee_stats_swap(cee->stats); + } + cee->get_stats_pending = false; + if (cee->cbfn.get_stats_cbfn) + cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status); +} + +/** + * bfa_cee_get_attr_isr() + * + * @brief CEE ISR for reset-stats responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->reset_stats_status = status; + cee->reset_stats_pending = false; + if (cee->cbfn.reset_stats_cbfn) + cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); +} +/** + * bfa_nw_cee_meminfo() + * + * @brief Returns the size of the DMA memory needed by CEE module + * + * @param[in] void + * + * @return Size of DMA region + */ +u32 +bfa_nw_cee_meminfo(void) +{ + return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo(); +} + +/** + * bfa_nw_cee_mem_claim() + * + * @brief Initialized CEE DMA Memory + * + * @param[in] cee CEE module pointer + * dma_kva Kernel Virtual Address of CEE DMA Memory + * dma_pa Physical Address of CEE DMA Memory + * + * 
@return void + */ +void +bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) +{ + cee->attr_dma.kva = dma_kva; + cee->attr_dma.pa = dma_pa; + cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo(); + cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo(); + cee->attr = (struct bfa_cee_attr *) dma_kva; + cee->stats = (struct bfa_cee_stats *) + (dma_kva + bfa_cee_attr_meminfo()); +} + +/** + * bfa_cee_isrs() + * + * @brief Handles Mail-box interrupts for CEE module. + * + * @param[in] Pointer to the CEE module data structure. + * + * @return void + */ + +static void +bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m) +{ + union bfi_cee_i2h_msg_u *msg; + struct bfi_cee_get_rsp *get_rsp; + struct bfa_cee *cee = (struct bfa_cee *) cbarg; + msg = (union bfi_cee_i2h_msg_u *) m; + get_rsp = (struct bfi_cee_get_rsp *) m; + switch (msg->mh.msg_id) { + case BFI_CEE_I2H_GET_CFG_RSP: + bfa_cee_get_attr_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_GET_STATS_RSP: + bfa_cee_get_stats_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_RESET_STATS_RSP: + bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status); + break; + default: + BUG_ON(1); + } +} + +/** + * bfa_cee_notify() + * + * @brief CEE module heart-beat failure handler. + * @brief CEE module IOC event handler. + * + * @param[in] IOC event type + * + * @return void + */ + +static void +bfa_cee_notify(void *arg, enum bfa_ioc_event event) +{ + struct bfa_cee *cee; + cee = (struct bfa_cee *) arg; + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (cee->get_attr_pending == true) { + cee->get_attr_status = BFA_STATUS_FAILED; + cee->get_attr_pending = false; + if (cee->cbfn.get_attr_cbfn) { + cee->cbfn.get_attr_cbfn( + cee->cbfn.get_attr_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->get_stats_pending == true) { + cee->get_stats_status = BFA_STATUS_FAILED; + cee->get_stats_pending = false; + if (cee->cbfn.get_stats_cbfn) { + cee->cbfn.get_stats_cbfn( + cee->cbfn.get_stats_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->reset_stats_pending == true) { + cee->reset_stats_status = BFA_STATUS_FAILED; + cee->reset_stats_pending = false; + if (cee->cbfn.reset_stats_cbfn) { + cee->cbfn.reset_stats_cbfn( + cee->cbfn.reset_stats_cbarg, + BFA_STATUS_FAILED); + } + } + break; + + default: + break; + } +} + +/** + * bfa_nw_cee_attach() + * + * @brief CEE module-attach API + * + * @param[in] cee - Pointer to the CEE module data structure + * ioc - Pointer to the ioc module data structure + * dev - Pointer to the device driver module data structure + * The device driver specific mbox ISR functions have + * this pointer as one of the parameters. + * + * @return void + */ +void +bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, + void *dev) +{ + BUG_ON(!(cee != NULL)); + cee->dev = dev; + cee->ioc = ioc; + + bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); + bfa_q_qe_init(&cee->ioc_notify); + bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee); + bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify); +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h new file mode 100644 index 000000000000..58d54e98d595 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h @@ -0,0 +1,63 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +#ifndef __BFA_CEE_H__ +#define __BFA_CEE_H__ + +#include "bfa_defs_cna.h" +#include "bfa_ioc.h" + +typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status); +typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status); +typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status); + +struct bfa_cee_cbfn { + bfa_cee_get_attr_cbfn_t get_attr_cbfn; + void *get_attr_cbarg; + bfa_cee_get_stats_cbfn_t get_stats_cbfn; + void *get_stats_cbarg; + bfa_cee_reset_stats_cbfn_t reset_stats_cbfn; + void *reset_stats_cbarg; +}; + +struct bfa_cee { + void *dev; + bool get_attr_pending; + bool get_stats_pending; + bool reset_stats_pending; + enum bfa_status get_attr_status; + enum bfa_status get_stats_status; + enum bfa_status reset_stats_status; + struct bfa_cee_cbfn cbfn; + struct bfa_ioc_notify ioc_notify; + struct bfa_cee_attr *attr; + struct bfa_cee_stats *stats; + struct bfa_dma attr_dma; + struct bfa_dma stats_dma; + struct bfa_ioc *ioc; + struct bfa_mbox_cmd get_cfg_mb; + struct bfa_mbox_cmd get_stats_mb; + struct bfa_mbox_cmd reset_stats_mb; +}; + +u32 bfa_nw_cee_meminfo(void); +void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, + u64 dma_pa); +void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev); + +#endif /* __BFA_CEE_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h new file mode 100644 index 000000000000..3da1a946ccdd --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -0,0 +1,140 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +/** + * @file bfa_cs.h BFA common services + */ + +#ifndef __BFA_CS_H__ +#define __BFA_CS_H__ + +#include "cna.h" + +/** + * @ BFA state machine interfaces + */ + +typedef void (*bfa_sm_t)(void *sm, int event); + +/** + * oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc + * etype - object type, eg. 
enum ioc_event + */ +#define bfa_sm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event) + +#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) +#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) +#define bfa_sm_get_state(_sm) ((_sm)->sm) +#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) + +/** + * For converting from state machine function to state encoding. + */ +struct bfa_sm_table { + bfa_sm_t sm; /*!< state machine function */ + int state; /*!< state machine encoding */ + char *name; /*!< state name for display */ +}; +#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) + +/** + * State machine with entry actions. + */ +typedef void (*bfa_fsm_t)(void *fsm, int event); + +/** + * oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc + * etype - object type, eg. enum ioc_event + */ +#define bfa_fsm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event); \ + static void oc ## _sm_ ## st ## _entry(otype * fsm) + +#define bfa_fsm_set_state(_fsm, _state) do { \ + (_fsm)->fsm = (bfa_fsm_t)(_state); \ + _state ## _entry(_fsm); \ +} while (0) + +#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event))) +#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm) +#define bfa_fsm_cmp_state(_fsm, _state) \ + ((_fsm)->fsm == (bfa_fsm_t)(_state)) + +static inline int +bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} + +/** + * @ Generic wait counter. + */ + +typedef void (*bfa_wc_resume_t) (void *cbarg); + +struct bfa_wc { + bfa_wc_resume_t wc_resume; + void *wc_cbarg; + int wc_count; +}; + +static inline void +bfa_wc_up(struct bfa_wc *wc) +{ + wc->wc_count++; +} + +static inline void +bfa_wc_down(struct bfa_wc *wc) +{ + wc->wc_count--; + if (wc->wc_count == 0) + wc->wc_resume(wc->wc_cbarg); +} + +/** + * Initialize a waiting counter. + */ +static inline void +bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) +{ + wc->wc_resume = wc_resume; + wc->wc_cbarg = wc_cbarg; + wc->wc_count = 0; + bfa_wc_up(wc); +} + +/** + * Wait for counter to reach zero + */ +static inline void +bfa_wc_wait(struct bfa_wc *wc) +{ + bfa_wc_down(wc); +} + +#endif /* __BFA_CS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h new file mode 100644 index 000000000000..2f12d68021d5 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -0,0 +1,296 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ + +#ifndef __BFA_DEFS_H__ +#define __BFA_DEFS_H__ + +#include "cna.h" +#include "bfa_defs_status.h" +#include "bfa_defs_mfg_comm.h" + +#define BFA_STRING_32 32 +#define BFA_VERSION_LEN 64 + +/** + * ---------------------- adapter definitions ------------ + */ + +/** + * BFA adapter level attributes. + */ +enum { + BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), + /* + *!< adapter serial num length + */ + BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */ + BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */ + BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */ + BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */ + BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */ +}; + +struct bfa_adapter_attr { + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + u32 card_type; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; + u64 pwwn; + char node_symname[FC_SYMNAME_MAX]; + char hw_ver[BFA_VERSION_LEN]; + char fw_ver[BFA_VERSION_LEN]; + char optrom_ver[BFA_VERSION_LEN]; + char os_type[BFA_ADAPTER_OS_TYPE_LEN]; + struct bfa_mfg_vpd vpd; + struct mac mac; + + u8 nports; + u8 max_speed; + u8 prototype; + char asic_rev; + + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 cna_capable; + + u8 is_mezz; + u8 trunk_capable; +}; + +/** + * ---------------------- IOC definitions ------------ + */ + +enum { + BFA_IOC_DRIVER_LEN = 16, + BFA_IOC_CHIP_REV_LEN = 8, +}; + +/** + * Driver and firmware versions. + */ +struct bfa_ioc_driver_attr { + char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */ + char driver_ver[BFA_VERSION_LEN]; /*!< driver version */ + char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */ + char bios_ver[BFA_VERSION_LEN]; /*!< bios version */ + char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */ + char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */ +}; + +/** + * IOC PCI device attributes + */ +struct bfa_ioc_pci_attr { + u16 vendor_id; /*!< PCI vendor ID */ + u16 device_id; /*!< PCI device ID */ + u16 ssid; /*!< subsystem ID */ + u16 ssvid; /*!< subsystem vendor ID */ + u32 pcifn; /*!< PCI device function */ + u32 rsvd; /* padding */ + char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */ +}; + +/** + * IOC states + */ +enum bfa_ioc_state { + BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ + BFA_IOC_RESET = 2, /*!< IOC is in reset state */ + BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */ + BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */ + BFA_IOC_GETATTR = 5, /*!< IOC is being configured */ + BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */ + BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */ + BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */ + BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */ + BFA_IOC_DISABLED = 10, /*!< IOC is disabled */ + BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */ + BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */ + BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */ +}; + +/** + * IOC firmware stats + */ +struct bfa_fw_ioc_stats { + u32 enable_reqs; + u32 disable_reqs; + u32 get_attr_reqs; + u32 dbg_sync; + u32 dbg_dump; + u32 unknown_reqs; +}; + +/** + * IOC driver stats + */ +struct bfa_ioc_drv_stats { + u32 ioc_isrs; + u32 ioc_enables; + u32 ioc_disables; + u32 ioc_hbfails; + u32 ioc_boots; + u32 stats_tmos; + u32 hb_count; + u32 disable_reqs; + u32 enable_reqs; + u32 disable_replies; + u32 
enable_replies; + u32 rsvd; +}; + +/** + * IOC statistics + */ +struct bfa_ioc_stats { + struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */ + struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */ +}; + +enum bfa_ioc_type { + BFA_IOC_TYPE_FC = 1, + BFA_IOC_TYPE_FCoE = 2, + BFA_IOC_TYPE_LL = 3, +}; + +/** + * IOC attributes returned in queries + */ +struct bfa_ioc_attr { + enum bfa_ioc_type ioc_type; + enum bfa_ioc_state state; /*!< IOC state */ + struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */ + struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */ + struct bfa_ioc_pci_attr pci_attr; + u8 port_id; /*!< port number */ + u8 port_mode; /*!< enum bfa_mode */ + u8 cap_bm; /*!< capability */ + u8 port_mode_cfg; /*!< enum bfa_mode */ + u8 rsvd[4]; /*!< 64bit align */ +}; + +/** + * Adapter capability mask definition + */ +enum { + BFA_CM_HBA = 0x01, + BFA_CM_CNA = 0x02, + BFA_CM_NIC = 0x04, +}; + +/** + * ---------------------- mfg definitions ------------ + */ + +/** + * Checksum size + */ +#define BFA_MFG_CHKSUM_SIZE 16 + +#define BFA_MFG_PARTNUM_SIZE 14 +#define BFA_MFG_SUPPLIER_ID_SIZE 10 +#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 + +#pragma pack(1) + +/** + * @brief BFA adapter manufacturing block definition. + * + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_block { + u8 version; /*!< manufacturing block version */ + u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */ + u16 mfgsize; /*!< mfg block size */ + u16 u16_chksum; /*!< old u16 checksum */ + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; + u8 mfg_day; /*!< manufacturing day */ + u8 mfg_month; /*!< manufacturing month */ + u16 mfg_year; /*!< manufacturing year */ + u64 mfg_wwn; /*!< wwn base for this adapter */ + u8 num_wwn; /*!< number of wwns assigned */ + u8 mfg_speeds; /*!< speeds allowed for this adapter */ + u8 rsv[2]; + char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; + char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; + char + supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; + char + supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; + mac_t mfg_mac; /*!< mac address */ + u8 num_mac; /*!< number of mac addresses */ + u8 rsv2; + u32 card_type; /*!< card type */ + char cap_nic; /*!< capability nic */ + char cap_cna; /*!< capability cna */ + char cap_hba; /*!< capability hba */ + char cap_fc16g; /*!< capability fc 16g */ + char cap_sriov; /*!< capability sriov */ + char cap_mezz; /*!< capability mezz */ + u8 rsv3; + u8 mfg_nports; /*!< number of ports */ + char media[8]; /*!< xfi/xaui */ + char initial_mode[8];/*!< initial mode: hba/cna/nic */ + u8 rsv4[84]; + u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */ +}; + +#pragma pack() + +/** + * ---------------------- pci definitions ------------ + */ + +/* + * PCI device ID information + */ +enum { + BFA_PCI_DEVICE_ID_CT2 = 0x22, +}; + +#define bfa_asic_id_ct(device) \ + ((device) == PCI_DEVICE_ID_BROCADE_CT || \ + (device) == PCI_DEVICE_ID_BROCADE_CT_FC) +#define bfa_asic_id_ct2(device) \ + ((device) == BFA_PCI_DEVICE_ID_CT2) +#define bfa_asic_id_ctc(device) \ + (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device)) + +/** + * PCI sub-system device and vendor ID information + */ +enum { + BFA_PCI_FCOE_SSDEVICE_ID = 0x14, + BFA_PCI_CT2_SSID_FCoE = 0x22, + BFA_PCI_CT2_SSID_ETH = 0x23, + BFA_PCI_CT2_SSID_FC = 0x24, +}; + +enum bfa_mode { + BFA_MODE_HBA = 1, 
+ BFA_MODE_CNA = 2, + BFA_MODE_NIC = 3 +}; + +#endif /* __BFA_DEFS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h new file mode 100644 index 000000000000..8ab33ee2c2bc --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h @@ -0,0 +1,229 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ +#ifndef __BFA_DEFS_CNA_H__ +#define __BFA_DEFS_CNA_H__ + +#include "bfa_defs.h" + +/** + * @brief + * FC physical port statistics. + */ +struct bfa_port_fc_stats { + u64 secs_reset; /*!< Seconds since stats is reset */ + u64 tx_frames; /*!< Tx frames */ + u64 tx_words; /*!< Tx words */ + u64 tx_lip; /*!< Tx LIP */ + u64 tx_nos; /*!< Tx NOS */ + u64 tx_ols; /*!< Tx OLS */ + u64 tx_lr; /*!< Tx LR */ + u64 tx_lrr; /*!< Tx LRR */ + u64 rx_frames; /*!< Rx frames */ + u64 rx_words; /*!< Rx words */ + u64 lip_count; /*!< Rx LIP */ + u64 nos_count; /*!< Rx NOS */ + u64 ols_count; /*!< Rx OLS */ + u64 lr_count; /*!< Rx LR */ + u64 lrr_count; /*!< Rx LRR */ + u64 invalid_crcs; /*!< Rx CRC err frames */ + u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */ + u64 undersized_frm; /*!< Rx undersized frames */ + u64 oversized_frm; /*!< Rx oversized frames */ + u64 bad_eof_frm; /*!< Rx frames with bad EOF */ + u64 error_frames; /*!< Errored frames */ + u64 dropped_frames; /*!< Dropped frames */ + u64 link_failures; /*!< Link Failure (LF) count */ + u64 loss_of_syncs; /*!< Loss of sync count */ + u64 loss_of_signals; /*!< Loss of signal count */ + u64 primseq_errs; /*!< Primitive sequence protocol err. */ + u64 bad_os_count; /*!< Invalid ordered sets */ + u64 err_enc_out; /*!< Encoding err nonframe_8b10b */ + u64 err_enc; /*!< Encoding err frame_8b10b */ + u64 bbsc_frames_lost; /*!< Credit Recovery-Frames Lost */ + u64 bbsc_credits_lost; /*!< Credit Recovery-Credits Lost */ + u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */ +}; + +/** + * @brief + * Eth Physical Port statistics. 
+ */ +struct bfa_port_eth_stats { + u64 secs_reset; /*!< Seconds since stats is reset */ + u64 frame_64; /*!< Frames 64 bytes */ + u64 frame_65_127; /*!< Frames 65-127 bytes */ + u64 frame_128_255; /*!< Frames 128-255 bytes */ + u64 frame_256_511; /*!< Frames 256-511 bytes */ + u64 frame_512_1023; /*!< Frames 512-1023 bytes */ + u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */ + u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */ + u64 tx_bytes; /*!< Tx bytes */ + u64 tx_packets; /*!< Tx packets */ + u64 tx_mcast_packets; /*!< Tx multicast packets */ + u64 tx_bcast_packets; /*!< Tx broadcast packets */ + u64 tx_control_frame; /*!< Tx control frame */ + u64 tx_drop; /*!< Tx drops */ + u64 tx_jabber; /*!< Tx jabber */ + u64 tx_fcs_error; /*!< Tx FCS errors */ + u64 tx_fragments; /*!< Tx fragments */ + u64 rx_bytes; /*!< Rx bytes */ + u64 rx_packets; /*!< Rx packets */ + u64 rx_mcast_packets; /*!< Rx multicast packets */ + u64 rx_bcast_packets; /*!< Rx broadcast packets */ + u64 rx_control_frames; /*!< Rx control frames */ + u64 rx_unknown_opcode; /*!< Rx unknown opcode */ + u64 rx_drop; /*!< Rx drops */ + u64 rx_jabber; /*!< Rx jabber */ + u64 rx_fcs_error; /*!< Rx FCS errors */ + u64 rx_alignment_error; /*!< Rx alignment errors */ + u64 rx_frame_length_error; /*!< Rx frame len errors */ + u64 rx_code_error; /*!< Rx code errors */ + u64 rx_fragments; /*!< Rx fragments */ + u64 rx_pause; /*!< Rx pause */ + u64 rx_zero_pause; /*!< Rx zero pause */ + u64 tx_pause; /*!< Tx pause */ + u64 tx_zero_pause; /*!< Tx zero pause */ + u64 rx_fcoe_pause; /*!< Rx FCoE pause */ + u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */ + u64 tx_fcoe_pause; /*!< Tx FCoE pause */ + u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */ + u64 rx_iscsi_pause; /*!< Rx iSCSI pause */ + u64 rx_iscsi_zero_pause; /*!< Rx iSCSI zero pause */ + u64 tx_iscsi_pause; /*!< Tx iSCSI pause */ + u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */ +}; + +/** + * @brief + * Port statistics. 
+ */ +union bfa_port_stats_u { + struct bfa_port_fc_stats fc; + struct bfa_port_eth_stats eth; +}; + +#pragma pack(1) + +#define BFA_CEE_LLDP_MAX_STRING_LEN (128) +#define BFA_CEE_DCBX_MAX_PRIORITY (8) +#define BFA_CEE_DCBX_MAX_PGID (8) + +#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001 +#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002 +#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004 +#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008 +#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010 +#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020 +#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040 +#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080 +#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100 +#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200 +#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400 + +/* LLDP string type */ +struct bfa_cee_lldp_str { + u8 sub_type; + u8 len; + u8 rsvd[2]; + u8 value[BFA_CEE_LLDP_MAX_STRING_LEN]; +}; + +/* LLDP paramters */ +struct bfa_cee_lldp_cfg { + struct bfa_cee_lldp_str chassis_id; + struct bfa_cee_lldp_str port_id; + struct bfa_cee_lldp_str port_desc; + struct bfa_cee_lldp_str sys_name; + struct bfa_cee_lldp_str sys_desc; + struct bfa_cee_lldp_str mgmt_addr; + u16 time_to_live; + u16 enabled_system_cap; +}; + +enum bfa_cee_dcbx_version { + DCBX_PROTOCOL_PRECEE = 1, + DCBX_PROTOCOL_CEE = 2, +}; + +enum bfa_cee_lls { + /* LLS is down because the TLV not sent by the peer */ + CEE_LLS_DOWN_NO_TLV = 0, + /* LLS is down as advertised by the peer */ + CEE_LLS_DOWN = 1, + CEE_LLS_UP = 2, +}; + +/* CEE/DCBX parameters */ +struct bfa_cee_dcbx_cfg { + u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY]; + u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID]; + u8 pfc_primap; /* bitmap of priorties with PFC enabled */ + u8 fcoe_primap; /* bitmap of priorities used for FcoE traffic */ + u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */ + u8 dcbx_version; /* operating version:CEE or preCEE */ + u8 lls_fcoe; /* FCoE Logical Link Status */ + u8 lls_lan; /* LAN Logical Link Status */ + u8 rsvd[2]; +}; + +/* CEE status */ +/* Making this to tri-state for the benefit of port list command */ +enum bfa_cee_status { + CEE_UP = 0, + CEE_PHY_UP = 1, + CEE_LOOPBACK = 2, + CEE_PHY_DOWN = 3, +}; + +/* CEE Query */ +struct bfa_cee_attr { + u8 cee_status; + u8 error_reason; + struct bfa_cee_lldp_cfg lldp_remote; + struct bfa_cee_dcbx_cfg dcbx_remote; + mac_t src_mac; + u8 link_speed; + u8 nw_priority; + u8 filler[2]; +}; + +/* LLDP/DCBX/CEE Statistics */ +struct bfa_cee_stats { + u32 lldp_tx_frames; /*!< LLDP Tx Frames */ + u32 lldp_rx_frames; /*!< LLDP Rx Frames */ + u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */ + u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */ + u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */ + u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */ + u32 lldp_info_aged_out; /*!< LLDP remote info aged out */ + u32 dcbx_phylink_ups; /*!< DCBX phy link ups */ + u32 dcbx_phylink_downs; /*!< DCBX phy link downs */ + u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */ + u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */ + u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */ + u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */ + u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */ + u32 cee_status_down; /*!< CEE status down */ + u32 cee_status_up; /*!< CEE status up */ + u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */ + u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */ +}; + +#pragma pack() + +#endif /* __BFA_DEFS_CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h 
b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h new file mode 100644 index 000000000000..6681fe87c1e1 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -0,0 +1,171 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ +#ifndef __BFA_DEFS_MFG_COMM_H__ +#define __BFA_DEFS_MFG_COMM_H__ + +#include "bfa_defs.h" + +/** + * Manufacturing block version + */ +#define BFA_MFG_VERSION 3 +#define BFA_MFG_VERSION_UNINIT 0xFF + +/** + * Manufacturing block encrypted version + */ +#define BFA_MFG_ENC_VER 2 + +/** + * Manufacturing block version 1 length + */ +#define BFA_MFG_VER1_LEN 128 + +/** + * Manufacturing block header length + */ +#define BFA_MFG_HDR_LEN 4 + +#define BFA_MFG_SERIALNUM_SIZE 11 +#define STRSZ(_n) (((_n) + 4) & ~3) + +/** + * Manufacturing card type + */ +enum { + BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */ + BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */ + BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */ + BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */ + BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */ + BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */ + BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */ + BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */ + BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */ + BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */ + BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */ + BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */ + BFA_MFG_TYPE_PROWLER_F = 1560, /*!< Prowler FC only cards */ + BFA_MFG_TYPE_PROWLER_N = 1410, /*!< Prowler NIC only cards */ + BFA_MFG_TYPE_PROWLER_C = 1710, /*!< Prowler CNA only cards */ + BFA_MFG_TYPE_PROWLER_D = 1860, /*!< Prowler Dual cards */ + BFA_MFG_TYPE_CHINOOK = 1867, /*!< Chinook cards */ + BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */ +}; + +#pragma pack(1) + +/** + * Check if Mezz card + */ +#define bfa_mfg_is_mezz(type) (( \ + (type) == BFA_MFG_TYPE_JAYHAWK || \ + (type) == BFA_MFG_TYPE_WANCHESE || \ + (type) == BFA_MFG_TYPE_ASTRA || \ + (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ + (type) == BFA_MFG_TYPE_LIGHTNING || \ + (type) == BFA_MFG_TYPE_CHINOOK)) + +enum { + CB_GPIO_TTV = (1), /*!< TTV debug capable cards */ + CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */ + CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */ + CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */ + CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */ + CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */ + CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */ +}; + +#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \ +do { \ + if ((gpio) & CB_GPIO_PROTO) { \ + (prop) |= BFI_ADAPTER_PROTO; \ + (gpio) &= ~CB_GPIO_PROTO; \ + } \ + switch ((gpio)) { \ + case CB_GPIO_TTV: \ + (prop) |= BFI_ADAPTER_TTV; \ + case CB_GPIO_DFLY: \ + case CB_GPIO_FC8P2: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \ + (card_type) = 
BFA_MFG_TYPE_FC8P2; \ + break; \ + case CB_GPIO_FC8P1: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \ + (card_type) = BFA_MFG_TYPE_FC8P1; \ + break; \ + case CB_GPIO_FC4P2: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \ + (card_type) = BFA_MFG_TYPE_FC4P2; \ + break; \ + case CB_GPIO_FC4P1: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \ + (card_type) = BFA_MFG_TYPE_FC4P1; \ + break; \ + default: \ + (prop) |= BFI_ADAPTER_UNSUPP; \ + (card_type) = BFA_MFG_TYPE_INVALID; \ + } \ +} while (0) + +/** + * VPD data length + */ +#define BFA_MFG_VPD_LEN 512 +#define BFA_MFG_VPD_LEN_INVALID 0 + +#define BFA_MFG_VPD_PCI_HDR_OFF 137 +#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */ +#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */ + +/** + * VPD vendor tag + */ +enum { + BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */ + BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */ + BFA_MFG_VPD_HP = 2, /*!< vendor HP */ + BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */ + BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */ + BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */ + BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */ + BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */ +}; + +/** + * @brief BFA adapter flash vpd data definition. + * + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_vpd { + u8 version; /*!< vpd data version */ + u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */ + u8 chksum; /*!< u8 checksum */ + u8 vendor; /*!< vendor */ + u8 len; /*!< vpd data length excluding header */ + u8 rsv; + u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */ +}; + +#pragma pack() + +#endif /* __BFA_DEFS_MFG_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h new file mode 100644 index 000000000000..7c5fe6c2e80e --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h @@ -0,0 +1,216 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ +#ifndef __BFA_DEFS_STATUS_H__ +#define __BFA_DEFS_STATUS_H__ + +/** + * API status return values + * + * NOTE: The error msgs are auto generated from the comments. 
Only singe line + * comments are supported + */ +enum bfa_status { + BFA_STATUS_OK = 0, + BFA_STATUS_FAILED = 1, + BFA_STATUS_EINVAL = 2, + BFA_STATUS_ENOMEM = 3, + BFA_STATUS_ENOSYS = 4, + BFA_STATUS_ETIMER = 5, + BFA_STATUS_EPROTOCOL = 6, + BFA_STATUS_ENOFCPORTS = 7, + BFA_STATUS_NOFLASH = 8, + BFA_STATUS_BADFLASH = 9, + BFA_STATUS_SFP_UNSUPP = 10, + BFA_STATUS_UNKNOWN_VFID = 11, + BFA_STATUS_DATACORRUPTED = 12, + BFA_STATUS_DEVBUSY = 13, + BFA_STATUS_ABORTED = 14, + BFA_STATUS_NODEV = 15, + BFA_STATUS_HDMA_FAILED = 16, + BFA_STATUS_FLASH_BAD_LEN = 17, + BFA_STATUS_UNKNOWN_LWWN = 18, + BFA_STATUS_UNKNOWN_RWWN = 19, + BFA_STATUS_FCPT_LS_RJT = 20, + BFA_STATUS_VPORT_EXISTS = 21, + BFA_STATUS_VPORT_MAX = 22, + BFA_STATUS_UNSUPP_SPEED = 23, + BFA_STATUS_INVLD_DFSZ = 24, + BFA_STATUS_CNFG_FAILED = 25, + BFA_STATUS_CMD_NOTSUPP = 26, + BFA_STATUS_NO_ADAPTER = 27, + BFA_STATUS_LINKDOWN = 28, + BFA_STATUS_FABRIC_RJT = 29, + BFA_STATUS_UNKNOWN_VWWN = 30, + BFA_STATUS_NSLOGIN_FAILED = 31, + BFA_STATUS_NO_RPORTS = 32, + BFA_STATUS_NSQUERY_FAILED = 33, + BFA_STATUS_PORT_OFFLINE = 34, + BFA_STATUS_RPORT_OFFLINE = 35, + BFA_STATUS_TGTOPEN_FAILED = 36, + BFA_STATUS_BAD_LUNS = 37, + BFA_STATUS_IO_FAILURE = 38, + BFA_STATUS_NO_FABRIC = 39, + BFA_STATUS_EBADF = 40, + BFA_STATUS_EINTR = 41, + BFA_STATUS_EIO = 42, + BFA_STATUS_ENOTTY = 43, + BFA_STATUS_ENXIO = 44, + BFA_STATUS_EFOPEN = 45, + BFA_STATUS_VPORT_WWN_BP = 46, + BFA_STATUS_PORT_NOT_DISABLED = 47, + BFA_STATUS_BADFRMHDR = 48, + BFA_STATUS_BADFRMSZ = 49, + BFA_STATUS_MISSINGFRM = 50, + BFA_STATUS_LINKTIMEOUT = 51, + BFA_STATUS_NO_FCPIM_NEXUS = 52, + BFA_STATUS_CHECKSUM_FAIL = 53, + BFA_STATUS_GZME_FAILED = 54, + BFA_STATUS_SCSISTART_REQD = 55, + BFA_STATUS_IOC_FAILURE = 56, + BFA_STATUS_INVALID_WWN = 57, + BFA_STATUS_MISMATCH = 58, + BFA_STATUS_IOC_ENABLED = 59, + BFA_STATUS_ADAPTER_ENABLED = 60, + BFA_STATUS_IOC_NON_OP = 61, + BFA_STATUS_ADDR_MAP_FAILURE = 62, + BFA_STATUS_SAME_NAME = 63, + BFA_STATUS_PENDING = 64, + BFA_STATUS_8G_SPD = 65, + BFA_STATUS_4G_SPD = 66, + BFA_STATUS_AD_IS_ENABLE = 67, + BFA_STATUS_EINVAL_TOV = 68, + BFA_STATUS_EINVAL_QDEPTH = 69, + BFA_STATUS_VERSION_FAIL = 70, + BFA_STATUS_DIAG_BUSY = 71, + BFA_STATUS_BEACON_ON = 72, + BFA_STATUS_BEACON_OFF = 73, + BFA_STATUS_LBEACON_ON = 74, + BFA_STATUS_LBEACON_OFF = 75, + BFA_STATUS_PORT_NOT_INITED = 76, + BFA_STATUS_RPSC_ENABLED = 77, + BFA_STATUS_ENOFSAVE = 78, + BFA_STATUS_BAD_FILE = 79, + BFA_STATUS_RLIM_EN = 80, + BFA_STATUS_RLIM_DIS = 81, + BFA_STATUS_IOC_DISABLED = 82, + BFA_STATUS_ADAPTER_DISABLED = 83, + BFA_STATUS_BIOS_DISABLED = 84, + BFA_STATUS_AUTH_ENABLED = 85, + BFA_STATUS_AUTH_DISABLED = 86, + BFA_STATUS_ERROR_TRL_ENABLED = 87, + BFA_STATUS_ERROR_QOS_ENABLED = 88, + BFA_STATUS_NO_SFP_DEV = 89, + BFA_STATUS_MEMTEST_FAILED = 90, + BFA_STATUS_INVALID_DEVID = 91, + BFA_STATUS_QOS_ENABLED = 92, + BFA_STATUS_QOS_DISABLED = 93, + BFA_STATUS_INCORRECT_DRV_CONFIG = 94, + BFA_STATUS_REG_FAIL = 95, + BFA_STATUS_IM_INV_CODE = 96, + BFA_STATUS_IM_INV_VLAN = 97, + BFA_STATUS_IM_INV_ADAPT_NAME = 98, + BFA_STATUS_IM_LOW_RESOURCES = 99, + BFA_STATUS_IM_VLANID_IS_PVID = 100, + BFA_STATUS_IM_VLANID_EXISTS = 101, + BFA_STATUS_IM_FW_UPDATE_FAIL = 102, + BFA_STATUS_PORTLOG_ENABLED = 103, + BFA_STATUS_PORTLOG_DISABLED = 104, + BFA_STATUS_FILE_NOT_FOUND = 105, + BFA_STATUS_QOS_FC_ONLY = 106, + BFA_STATUS_RLIM_FC_ONLY = 107, + BFA_STATUS_CT_SPD = 108, + BFA_STATUS_LEDTEST_OP = 109, + BFA_STATUS_CEE_NOT_DN = 110, + BFA_STATUS_10G_SPD = 111, + BFA_STATUS_IM_INV_TEAM_NAME = 112, + 
BFA_STATUS_IM_DUP_TEAM_NAME = 113, + BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, + BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, + BFA_STATUS_IM_PVID_MISMATCH = 116, + BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, + BFA_STATUS_IM_MTU_MISMATCH = 118, + BFA_STATUS_IM_RSS_MISMATCH = 119, + BFA_STATUS_IM_HDS_MISMATCH = 120, + BFA_STATUS_IM_OFFLOAD_MISMATCH = 121, + BFA_STATUS_IM_PORT_PARAMS = 122, + BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123, + BFA_STATUS_IM_CANNOT_REM_PRI = 124, + BFA_STATUS_IM_MAX_PORTS_REACHED = 125, + BFA_STATUS_IM_LAST_PORT_DELETE = 126, + BFA_STATUS_IM_NO_DRIVER = 127, + BFA_STATUS_IM_MAX_VLANS_REACHED = 128, + BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129, + BFA_STATUS_NO_MINPORT_DRIVER = 130, + BFA_STATUS_CARD_TYPE_MISMATCH = 131, + BFA_STATUS_BAD_ASICBLK = 132, + BFA_STATUS_NO_DRIVER = 133, + BFA_STATUS_INVALID_MAC = 134, + BFA_STATUS_IM_NO_VLAN = 135, + BFA_STATUS_IM_ETH_LB_FAILED = 136, + BFA_STATUS_IM_PVID_REMOVE = 137, + BFA_STATUS_IM_PVID_EDIT = 138, + BFA_STATUS_CNA_NO_BOOT = 139, + BFA_STATUS_IM_PVID_NON_ZERO = 140, + BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, + BFA_STATUS_IM_GET_INETCFG_FAILED = 142, + BFA_STATUS_IM_NOT_BOUND = 143, + BFA_STATUS_INSUFFICIENT_PERMS = 144, + BFA_STATUS_IM_INV_VLAN_NAME = 145, + BFA_STATUS_CMD_NOTSUPP_CNA = 146, + BFA_STATUS_IM_PASSTHRU_EDIT = 147, + BFA_STATUS_IM_BIND_FAILED = 148, + BFA_STATUS_IM_UNBIND_FAILED = 149, + BFA_STATUS_IM_PORT_IN_TEAM = 150, + BFA_STATUS_IM_VLAN_NOT_FOUND = 151, + BFA_STATUS_IM_TEAM_NOT_FOUND = 152, + BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, + BFA_STATUS_PBC = 154, + BFA_STATUS_DEVID_MISSING = 155, + BFA_STATUS_BAD_FWCFG = 156, + BFA_STATUS_CREATE_FILE = 157, + BFA_STATUS_INVALID_VENDOR = 158, + BFA_STATUS_SFP_NOT_READY = 159, + BFA_STATUS_FLASH_UNINIT = 160, + BFA_STATUS_FLASH_EMPTY = 161, + BFA_STATUS_FLASH_CKFAIL = 162, + BFA_STATUS_TRUNK_UNSUPP = 163, + BFA_STATUS_TRUNK_ENABLED = 164, + BFA_STATUS_TRUNK_DISABLED = 165, + BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166, + BFA_STATUS_BOOT_CODE_UPDATED = 167, + BFA_STATUS_BOOT_VERSION = 168, + BFA_STATUS_CARDTYPE_MISSING = 169, + BFA_STATUS_INVALID_CARDTYPE = 170, + BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171, + BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172, + BFA_STATUS_ETHBOOT_ENABLED = 173, + BFA_STATUS_ETHBOOT_DISABLED = 174, + BFA_STATUS_IOPROFILE_OFF = 175, + BFA_STATUS_NO_PORT_INSTANCE = 176, + BFA_STATUS_BOOT_CODE_TIMEDOUT = 177, + BFA_STATUS_NO_VPORT_LOCK = 178, + BFA_STATUS_VPORT_NO_CNFG = 179, + BFA_STATUS_MAX_VAL +}; + +enum bfa_eproto_status { + BFA_EPROTO_BAD_ACCEPT = 0, + BFA_EPROTO_UNKNOWN_RSP = 1 +}; + +#endif /* __BFA_DEFS_STATUS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c new file mode 100644 index 000000000000..b0307a00a109 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -0,0 +1,2473 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ + +#include "bfa_ioc.h" +#include "bfi_reg.h" +#include "bfa_defs.h" + +/** + * IOC local definitions + */ + +/** + * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. + */ + +#define bfa_ioc_firmware_lock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) +#define bfa_ioc_firmware_unlock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) +#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) +#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) +#define bfa_ioc_notify_fail(__ioc) \ + ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) +#define bfa_ioc_sync_start(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_start(__ioc)) +#define bfa_ioc_sync_join(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) +#define bfa_ioc_sync_leave(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) +#define bfa_ioc_sync_ack(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) +#define bfa_ioc_sync_complete(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) + +#define bfa_ioc_mbox_cmd_pending(__ioc) \ + (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ + readl((__ioc)->ioc_regs.hfn_mbox_cmd)) + +static bool bfa_nw_auto_recover = true; + +/* + * forward declarations + */ +static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc); +static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc); +static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc); +static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force); +static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc); +static void bfa_ioc_send_enable(struct bfa_ioc *ioc); +static void bfa_ioc_send_disable(struct bfa_ioc *ioc); +static void bfa_ioc_send_getattr(struct bfa_ioc *ioc); +static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc); +static void bfa_ioc_hb_stop(struct bfa_ioc *ioc); +static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force); +static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); +static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc); +static void bfa_ioc_recover(struct bfa_ioc *ioc); +static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); +static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); +static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); +static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); +static void bfa_ioc_fail_notify(struct bfa_ioc *ioc); +static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); +static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); +static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); +static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); +static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); +static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, + u32 boot_param); +static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); +static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, + char *serial_num); +static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, + char *fw_ver); +static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, + char *chip_rev); +static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, + char *optrom_ver); +static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, + char *manufacturer); +static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); +static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); + +/** + * IOC state machine definitions/declarations + */ +enum ioc_event { + IOC_E_RESET = 1, /*!< IOC reset request */ + IOC_E_ENABLE = 2, /*!< IOC enable request */ + IOC_E_DISABLE = 3, /*!< IOC disable request */ + IOC_E_DETACH = 4, 
/*!< driver detach cleanup */ + IOC_E_ENABLED = 5, /*!< f/w enabled */ + IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */ + IOC_E_DISABLED = 7, /*!< f/w disabled */ + IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */ + IOC_E_HBFAIL = 9, /*!< heartbeat failure */ + IOC_E_HWERROR = 10, /*!< hardware error interrupt */ + IOC_E_TIMEOUT = 11, /*!< timeout */ + IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */ +}; + +bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event); + +static struct bfa_sm_table ioc_sm_table[] = { + {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, + {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, + {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, + {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, + {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, + {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, + {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, + {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, + {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, + {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, +}; + +/* + * Forward declareations for iocpf state machine + */ +static void bfa_iocpf_enable(struct bfa_ioc *ioc); +static void bfa_iocpf_disable(struct bfa_ioc *ioc); +static void bfa_iocpf_fail(struct bfa_ioc *ioc); +static void bfa_iocpf_initfail(struct bfa_ioc *ioc); +static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); +static void bfa_iocpf_stop(struct bfa_ioc *ioc); + +/** + * IOCPF state machine events + */ +enum iocpf_event { + IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ + IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ + IOCPF_E_STOP = 3, /*!< stop on driver detach */ + IOCPF_E_FWREADY = 4, /*!< f/w initialization done */ + IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */ + IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */ + IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */ + IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */ + IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */ + IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ + IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */ + IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */ +}; + +/** + * IOCPF states + */ +enum bfa_iocpf_state { + BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ + BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ + BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */ + BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */ + BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */ + BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */ + BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */ + BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */ + BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */ +}; + +bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, mismatch, struct 
bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf, + enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf, + enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event); + +static struct bfa_sm_table iocpf_sm_table[] = { + {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, + {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, + {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, + {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, +}; + +/** + * IOC State Machine + */ + +/** + * Beginning state. IOC uninit state. + */ +static void +bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) +{ +} + +/** + * IOC is in uninit state. + */ +static void +bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_RESET: + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * Reset entry actions -- initialize state machine + */ +static void +bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) +{ + bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); +} + +/** + * IOC is in reset state. + */ +static void +bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + break; + + case IOC_E_DISABLE: + bfa_ioc_disable_comp(ioc); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) +{ + bfa_iocpf_enable(ioc); +} + +/** + * Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. + */ +static void +bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_PFFAILED: + /* !!! fall through !!! 
*/ + case IOC_E_HWERROR: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_initfail(ioc); + break; + + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * Semaphore should be acquired for version check. + */ +static void +bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) +{ + mod_timer(&ioc->ioc_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + bfa_ioc_send_getattr(ioc); +} + +/** + * IOC configuration in progress. Timer is active. + */ +static void +bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_FWRSP_GETATTR: + del_timer(&ioc->ioc_timer); + bfa_ioc_check_attr_wwns(ioc); + bfa_ioc_hb_monitor(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_op); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + del_timer(&ioc->ioc_timer); + /* fall through */ + case IOC_E_TIMEOUT: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_getattrfail(ioc); + break; + + case IOC_E_DISABLE: + del_timer(&ioc->ioc_timer); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) +{ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); + bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); +} + +static void +bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_ioc_hb_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + bfa_ioc_hb_stop(ioc); + /* !!! fall through !!! */ + case IOC_E_HBFAIL: + if (ioc->iocpf.auto_recover) + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); + else + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + + bfa_ioc_fail_notify(ioc); + + if (event != IOC_E_PFFAILED) + bfa_iocpf_fail(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) +{ + bfa_iocpf_disable(ioc); +} + +/** + * IOC is being disabled + */ +static void +bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_DISABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + break; + + case IOC_E_HWERROR: + /* + * No state change. Will move to disabled state + * after iocpf sm completes failure processing and + * moves to disabled state. + */ + bfa_iocpf_fail(ioc); + break; + + case IOC_E_HWFAILED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + bfa_ioc_disable_comp(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * IOC disable completion entry. 
+ */ +static void +bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) +{ + bfa_ioc_disable_comp(ioc); +} + +static void +bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) +{ +} + +/** + * Hardware initialization retry. + */ +static void +bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + /** + * Initialization retry failed. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_initfail(ioc); + break; + + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) +{ +} + +/** + * IOC failure. + */ +static void +bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + case IOC_E_HWERROR: + /* HB failure notification, ignore. */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc) +{ +} + +/** + * IOC failure. + */ +static void +bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * IOCPF State Machine + */ + +/** + * Reset entry actions -- initialize state machine + */ +static void +bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) +{ + iocpf->fw_mismatch_notified = false; + iocpf->auto_recover = bfa_nw_auto_recover; +} + +/** + * Beginning state. IOC is in reset state. + */ +static void +bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + break; + + case IOCPF_E_STOP: + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * Semaphore should be acquired for version check. + */ +static void +bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_init(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/** + * Awaiting h/w semaphore to continue with version check. 
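+ * On IOCPF_E_SEMLOCKED the firmware lock is attempted; if it fails,
+ * the semaphore is released and the mismatch state is entered.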
+ */ +static void +bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_firmware_lock(ioc)) { + if (bfa_ioc_sync_start(ioc)) { + bfa_ioc_sync_join(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + bfa_ioc_firmware_unlock(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); + } + } else { + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_ioc_pf_disabled(ioc); + break; + + case IOCPF_E_STOP: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * Notify enable completion callback + */ +static void +bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) +{ + /* Call only the first time sm enters fwmismatch state. */ + if (iocpf->fw_mismatch_notified == false) + bfa_ioc_pf_fwmismatch(iocpf->ioc); + + iocpf->fw_mismatch_notified = true; + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); +} + +/** + * Awaiting firmware version match. + */ +static void +bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_TIMEOUT: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_ioc_pf_disabled(ioc); + break; + + case IOCPF_E_STOP: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * Request for semaphore. + */ +static void +bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/** + * Awaiting semaphore for h/w initialzation. + */ +static void +bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_sync_complete(ioc)) { + bfa_ioc_sync_join(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + bfa_nw_ioc_hw_sem_release(ioc); + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) +{ + iocpf->poll_time = 0; + bfa_ioc_reset(iocpf->ioc, false); +} + +/** + * Hardware is being initialized. Interrupts are enabled. + * Holding hardware semaphore lock. 
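+ * The semaphore is released on timeout or disable; IOCPF_E_FWREADY
+ * moves on to the enabling state.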
+ */ +static void +bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWREADY: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); + break; + + case IOCPF_E_TIMEOUT: + bfa_nw_ioc_hw_sem_release(ioc); + bfa_ioc_pf_failed(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_ioc_sync_leave(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) +{ + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + /** + * Enable Interrupts before sending fw IOC ENABLE cmd. + */ + iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); + bfa_ioc_send_enable(iocpf->ioc); +} + +/** + * Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. + */ +static void +bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWRSP_ENABLE: + del_timer(&ioc->iocpf_timer); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); + break; + + case IOCPF_E_INITFAIL: + del_timer(&ioc->iocpf_timer); + /* + * !!! fall through !!! + */ + case IOCPF_E_TIMEOUT: + bfa_nw_ioc_hw_sem_release(ioc); + if (event == IOCPF_E_TIMEOUT) + bfa_ioc_pf_failed(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_pf_enabled(iocpf->ioc); +} + +static void +bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + case IOCPF_E_GETATTRFAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_FAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) +{ + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + bfa_ioc_send_disable(iocpf->ioc); +} + +/** + * IOC is being disabled + */ +static void +bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWRSP_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FAIL: + del_timer(&ioc->iocpf_timer); + /* + * !!! fall through !!! + */ + + case IOCPF_E_TIMEOUT: + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FWRSP_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/** + * IOC hb ack request is being removed. 
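+ * The h/w semaphore is re-acquired so that sync state can be left
+ * cleanly before entering the disabled state.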
+ */ +static void +bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_sync_leave(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * IOC disable completion entry. + */ +static void +bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_mbox_flush(iocpf->ioc); + bfa_ioc_pf_disabled(iocpf->ioc); +} + +static void +bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + break; + + case IOCPF_E_STOP: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/** + * Hardware initialization failed. + */ +static void +bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_notify_fail(ioc); + bfa_ioc_sync_leave(ioc); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_STOP: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) +{ +} + +/** + * Hardware initialization failed. + */ +static void +bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_STOP: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) +{ + /** + * Mark IOC as failed in hardware and stop firmware. + */ + bfa_ioc_lpu_stop(iocpf->ioc); + + /** + * Flush any queued up mailbox requests. + */ + bfa_ioc_mbox_flush(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/** + * IOC is in failed state. 
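+ * Awaiting the h/w semaphore to complete failure synchronization;
+ * with auto recovery the IOC is re-initialized, otherwise the fail
+ * state is entered.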
+ */ +static void +bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_sync_ack(ioc); + bfa_ioc_notify_fail(ioc); + if (!iocpf->auto_recover) { + bfa_ioc_sync_leave(ioc); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + } else { + if (bfa_ioc_sync_complete(ioc)) + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + else { + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + } + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) +{ +} + +/** + * @brief + * IOC is in failed state. + */ +static void +bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + default: + bfa_sm_fault(event); + } +} + +/** + * BFA IOC private functions + */ + +/** + * Notify common modules registered for notification. + */ +static void +bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) +{ + struct bfa_ioc_notify *notify; + struct list_head *qe; + + list_for_each(qe, &ioc->notify_q) { + notify = (struct bfa_ioc_notify *)qe; + notify->cbfn(notify->cbarg, event); + } +} + +static void +bfa_ioc_disable_comp(struct bfa_ioc *ioc) +{ + ioc->cbfn->disable_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED); +} + +bool +bfa_nw_ioc_sem_get(void __iomem *sem_reg) +{ + u32 r32; + int cnt = 0; +#define BFA_SEM_SPINCNT 3000 + + r32 = readl(sem_reg); + + while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) { + cnt++; + udelay(2); + r32 = readl(sem_reg); + } + + if (!(r32 & 1)) + return true; + + return false; +} + +void +bfa_nw_ioc_sem_release(void __iomem *sem_reg) +{ + readl(sem_reg); + writel(1, sem_reg); +} + +static void +bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) +{ + struct bfi_ioc_image_hdr fwhdr; + u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + if (fwstate == BFI_IOC_UNINIT) + return; + + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + + if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) + return; + + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); + + /* + * Try to lock and then unlock the semaphore. + */ + readl(ioc->ioc_regs.ioc_sem_reg); + writel(1, ioc->ioc_regs.ioc_sem_reg); +} + +static void +bfa_ioc_hw_sem_get(struct bfa_ioc *ioc) +{ + u32 r32; + + /** + * First read to the semaphore register will return 0, subsequent reads + * will return 1. 
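+ * A read value of all ones (~0) indicates a register error and raises
+ * IOCPF_E_SEM_ERROR.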
Semaphore is released by writing 1 to the register + */ + r32 = readl(ioc->ioc_regs.ioc_sem_reg); + if (r32 == ~0) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); + return; + } + if (!(r32 & 1)) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); + return; + } + + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); +} + +void +bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc) +{ + writel(1, ioc->ioc_regs.ioc_sem_reg); +} + +static void +bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc) +{ + del_timer(&ioc->sem_timer); +} + +/** + * @brief + * Initialize LPU local memory (aka secondary memory / SRAM) + */ +static void +bfa_ioc_lmem_init(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + int i; +#define PSS_LMEM_INIT_TIME 10000 + + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl &= ~__PSS_LMEM_RESET; + pss_ctl |= __PSS_LMEM_INIT_EN; + + /* + * i2c workaround 12.5khz clock + */ + pss_ctl |= __PSS_I2C_CLK_DIV(3UL); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); + + /** + * wait for memory initialization to be complete + */ + i = 0; + do { + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + i++; + } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); + + /** + * If memory initialization is not successful, IOC timeout will catch + * such failures. + */ + BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE)); + + pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +static void +bfa_ioc_lpu_start(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + + /** + * Take processor out of reset. + */ + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl &= ~__PSS_LPU0_RESET; + + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +static void +bfa_ioc_lpu_stop(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + + /** + * Put processors in reset. + */ + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); + + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +/** + * Get driver and firmware versions. + */ +void +bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) +{ + u32 pgnum; + u32 loff = 0; + int i; + u32 *fwsig = (u32 *) fwhdr; + + pgnum = bfa_ioc_smem_pgnum(ioc, loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); + i++) { + fwsig[i] = + swab32(readl((loff) + (ioc->ioc_regs.smem_page_start))); + loff += sizeof(u32); + } +} + +/** + * Returns TRUE if same. + */ +bool +bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) +{ + struct bfi_ioc_image_hdr *drv_fwhdr; + int i; + + drv_fwhdr = (struct bfi_ioc_image_hdr *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + + for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { + if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) + return false; + } + + return true; +} + +/** + * Return true if current running version is valid. Firmware signature and + * execution context (driver/bios) must match. + */ +static bool +bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) +{ + struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; + + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + drv_fwhdr = (struct bfi_ioc_image_hdr *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + + if (fwhdr.signature != drv_fwhdr->signature) + return false; + + if (swab32(fwhdr.bootenv) != boot_env) + return false; + + return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); +} + +/** + * Conditionally flush any pending message from firmware at start. 
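+ * A non-zero mailbox command status indicates a stale message;
+ * writing 1 clears it.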
+ */ +static void +bfa_ioc_msgflush(struct bfa_ioc *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if (r32) + writel(1, ioc->ioc_regs.lpu_mbox_cmd); +} + +/** + * @img ioc_init_logic.jpg + */ +static void +bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) +{ + enum bfi_ioc_state ioc_fwstate; + bool fwvalid; + u32 boot_env; + + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + if (force) + ioc_fwstate = BFI_IOC_UNINIT; + + boot_env = BFI_FWBOOT_ENV_OS; + + /** + * check if firmware is valid + */ + fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? + false : bfa_ioc_fwver_valid(ioc, boot_env); + + if (!fwvalid) { + bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env); + bfa_ioc_poll_fwinit(ioc); + return; + } + + /** + * If hardware initialization is in progress (initialized by other IOC), + * just wait for an initialization completion interrupt. + */ + if (ioc_fwstate == BFI_IOC_INITING) { + bfa_ioc_poll_fwinit(ioc); + return; + } + + /** + * If IOC function is disabled and firmware version is same, + * just re-enable IOC. + */ + if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { + /** + * When using MSI-X any pending firmware ready event should + * be flushed. Otherwise MSI-X interrupts are not delivered. + */ + bfa_ioc_msgflush(ioc); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + /** + * Initialize the h/w for any other states. + */ + bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env); + bfa_ioc_poll_fwinit(ioc); +} + +void +bfa_nw_ioc_timeout(void *ioc_arg) +{ + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; + + bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); +} + +static void +bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len) +{ + u32 *msgp = (u32 *) ioc_msg; + u32 i; + + BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX)); + + /* + * first write msg to mailbox registers + */ + for (i = 0; i < len / sizeof(u32); i++) + writel(cpu_to_le32(msgp[i]), + ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); + + for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) + writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); + + /* + * write 1 to mailbox CMD to trigger LPU event + */ + writel(1, ioc->ioc_regs.hfn_mbox_cmd); + (void) readl(ioc->ioc_regs.hfn_mbox_cmd); +} + +static void +bfa_ioc_send_enable(struct bfa_ioc *ioc) +{ + struct bfi_ioc_ctrl_req enable_req; + struct timeval tv; + + bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, + bfa_ioc_portid(ioc)); + enable_req.clscode = htons(ioc->clscode); + do_gettimeofday(&tv); + enable_req.tv_sec = ntohl(tv.tv_sec); + bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); +} + +static void +bfa_ioc_send_disable(struct bfa_ioc *ioc) +{ + struct bfi_ioc_ctrl_req disable_req; + + bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, + bfa_ioc_portid(ioc)); + bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); +} + +static void +bfa_ioc_send_getattr(struct bfa_ioc *ioc) +{ + struct bfi_ioc_getattr_req attr_req; + + bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, + bfa_ioc_portid(ioc)); + bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); + bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req)); +} + +void +bfa_nw_ioc_hb_check(void *cbarg) +{ + struct bfa_ioc *ioc = cbarg; + u32 hb_count; + + hb_count = readl(ioc->ioc_regs.heartbeat); + if (ioc->hb_count == hb_count) { + bfa_ioc_recover(ioc); + return; + } else { + ioc->hb_count = hb_count; + } + + bfa_ioc_mbox_poll(ioc); + mod_timer(&ioc->hb_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HB_TOV)); 
+} + +static void +bfa_ioc_hb_monitor(struct bfa_ioc *ioc) +{ + ioc->hb_count = readl(ioc->ioc_regs.heartbeat); + mod_timer(&ioc->hb_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HB_TOV)); +} + +static void +bfa_ioc_hb_stop(struct bfa_ioc *ioc) +{ + del_timer(&ioc->hb_timer); +} + +/** + * @brief + * Initiate a full firmware download. + */ +static void +bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, + u32 boot_env) +{ + u32 *fwimg; + u32 pgnum; + u32 loff = 0; + u32 chunkno = 0; + u32 i; + u32 asicmode; + + /** + * Initialize LMEM first before code download + */ + bfa_ioc_lmem_init(ioc); + + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); + + pgnum = bfa_ioc_smem_pgnum(ioc, loff); + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) { + if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { + chunkno = BFA_IOC_FLASH_CHUNK_NO(i); + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } + + /** + * write smem + */ + writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])), + ((ioc->ioc_regs.smem_page_start) + (loff))); + + loff += sizeof(u32); + + /** + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, + ioc->ioc_regs.host_page_num_fn); + } + } + + writel(bfa_ioc_smem_pgnum(ioc, 0), + ioc->ioc_regs.host_page_num_fn); + + /* + * Set boot type, env and device mode at the end. + */ + asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, + ioc->port0_mode, ioc->port1_mode); + writel(asicmode, ((ioc->ioc_regs.smem_page_start) + + BFI_FWBOOT_DEVMODE_OFF)); + writel(boot_type, ((ioc->ioc_regs.smem_page_start) + + (BFI_FWBOOT_TYPE_OFF))); + writel(boot_env, ((ioc->ioc_regs.smem_page_start) + + (BFI_FWBOOT_ENV_OFF))); +} + +static void +bfa_ioc_reset(struct bfa_ioc *ioc, bool force) +{ + bfa_ioc_hwinit(ioc, force); +} + +/** + * BFA ioc enable reply by firmware + */ +static void +bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, + u8 cap_bm) +{ + struct bfa_iocpf *iocpf = &ioc->iocpf; + + ioc->port_mode = ioc->port_mode_cfg = port_mode; + ioc->ad_cap_bm = cap_bm; + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); +} + +/** + * @brief + * Update BFA configuration from firmware configuration. + */ +static void +bfa_ioc_getattr_reply(struct bfa_ioc *ioc) +{ + struct bfi_ioc_attr *attr = ioc->attr; + + attr->adapter_prop = ntohl(attr->adapter_prop); + attr->card_type = ntohl(attr->card_type); + attr->maxfrsize = ntohs(attr->maxfrsize); + + bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); +} + +/** + * Attach time initialization of mbox logic. + */ +static void +bfa_ioc_mbox_attach(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + int mc; + + INIT_LIST_HEAD(&mod->cmd_q); + for (mc = 0; mc < BFI_MC_MAX; mc++) { + mod->mbhdlr[mc].cbfn = NULL; + mod->mbhdlr[mc].cbarg = ioc->bfa; + } +} + +/** + * Mbox poll timer -- restarts any pending mailbox requests. + */ +static void +bfa_ioc_mbox_poll(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd *cmd; + bfa_mbox_cmd_cbfn_t cbfn; + void *cbarg; + u32 stat; + + /** + * If no command pending, do nothing + */ + if (list_empty(&mod->cmd_q)) + return; + + /** + * If previous command is not yet fetched by firmware, do nothing + */ + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) + return; + + /** + * Enqueue command to firmware. 
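+ * The oldest pending command is dequeued, written to the mailbox and
+ * its send callback, if any, is invoked.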
+ */ + bfa_q_deq(&mod->cmd_q, &cmd); + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); + + /** + * Give a callback to the client, indicating that the command is sent + */ + if (cmd->cbfn) { + cbfn = cmd->cbfn; + cbarg = cmd->cbarg; + cmd->cbfn = NULL; + cbfn(cbarg); + } +} + +/** + * Cleanup any pending requests. + */ +static void +bfa_ioc_mbox_flush(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd *cmd; + + while (!list_empty(&mod->cmd_q)) + bfa_q_deq(&mod->cmd_q, &cmd); +} + +static void +bfa_ioc_fail_notify(struct bfa_ioc *ioc) +{ + /** + * Notify driver and common modules registered for notification. + */ + ioc->cbfn->hbfail_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); +} + +/** + * IOCPF to IOC interface + */ +static void +bfa_ioc_pf_enabled(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_ENABLED); +} + +static void +bfa_ioc_pf_disabled(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_DISABLED); +} + +static void +bfa_ioc_pf_failed(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_PFFAILED); +} + +static void +bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); +} + +static void +bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) +{ + /** + * Provide enable completion callback and AEN notification. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); +} + +/** + * IOC public + */ +static enum bfa_status +bfa_ioc_pll_init(struct bfa_ioc *ioc) +{ + /* + * Hold semaphore so that nobody can access the chip during init. + */ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); + + bfa_ioc_pll_init_asic(ioc); + + ioc->pllinit = true; + /* + * release semaphore. + */ + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + + return BFA_STATUS_OK; +} + +/** + * Interface used by diag module to do firmware boot with memory test + * as the entry vector. + */ +static void +bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, + u32 boot_env) +{ + bfa_ioc_stats(ioc, ioc_boots); + + if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) + return; + + /** + * Initialize IOC state of all functions on a chip reset. + */ + if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { + writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate); + } else { + writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate); + } + + bfa_ioc_msgflush(ioc); + bfa_ioc_download_fw(ioc, boot_type, boot_env); + bfa_ioc_lpu_start(ioc); +} + +/** + * Enable/disable IOC failure auto recovery. 
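+ * When enabled, a failed IOC attempts re-initialization instead of
+ * remaining in the failed state.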
+ */ +void +bfa_nw_ioc_auto_recover(bool auto_recover) +{ + bfa_nw_auto_recover = auto_recover; +} + +static bool +bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) +{ + u32 *msgp = mbmsg; + u32 r32; + int i; + + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if ((r32 & 1) == 0) + return false; + + /** + * read the MBOX msg + */ + for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); + i++) { + r32 = readl(ioc->ioc_regs.lpu_mbox + + i * sizeof(u32)); + msgp[i] = htonl(r32); + } + + /** + * turn off mailbox interrupt by clearing mailbox status + */ + writel(1, ioc->ioc_regs.lpu_mbox_cmd); + readl(ioc->ioc_regs.lpu_mbox_cmd); + + return true; +} + +static void +bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) +{ + union bfi_ioc_i2h_msg_u *msg; + struct bfa_iocpf *iocpf = &ioc->iocpf; + + msg = (union bfi_ioc_i2h_msg_u *) m; + + bfa_ioc_stats(ioc, ioc_isrs); + + switch (msg->mh.msg_id) { + case BFI_IOC_I2H_HBEAT: + break; + + case BFI_IOC_I2H_ENABLE_REPLY: + bfa_ioc_enable_reply(ioc, + (enum bfa_mode)msg->fw_event.port_mode, + msg->fw_event.cap_bm); + break; + + case BFI_IOC_I2H_DISABLE_REPLY: + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); + break; + + case BFI_IOC_I2H_GETATTR_REPLY: + bfa_ioc_getattr_reply(ioc); + break; + + default: + BUG_ON(1); + } +} + +/** + * IOC attach time initialization and setup. + * + * @param[in] ioc memory for IOC + * @param[in] bfa driver instance structure + */ +void +bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) +{ + ioc->bfa = bfa; + ioc->cbfn = cbfn; + ioc->fcmode = false; + ioc->pllinit = false; + ioc->dbg_fwsave_once = true; + ioc->iocpf.ioc = ioc; + + bfa_ioc_mbox_attach(ioc); + INIT_LIST_HEAD(&ioc->notify_q); + + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(ioc, IOC_E_RESET); +} + +/** + * Driver detach time IOC cleanup. + */ +void +bfa_nw_ioc_detach(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_DETACH); + + /* Done with detach, empty the notify_q. */ + INIT_LIST_HEAD(&ioc->notify_q); +} + +/** + * Setup IOC PCI properties. + * + * @param[in] pcidev PCI device information for this IOC + */ +void +bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, + enum bfi_pcifn_class clscode) +{ + ioc->clscode = clscode; + ioc->pcidev = *pcidev; + + /** + * Initialize IOC and device personality + */ + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; + ioc->asic_mode = BFI_ASIC_MODE_FC; + + switch (pcidev->device_id) { + case PCI_DEVICE_ID_BROCADE_CT: + ioc->asic_gen = BFI_ASIC_GEN_CT; + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + break; + + case BFA_PCI_DEVICE_ID_CT2: + ioc->asic_gen = BFI_ASIC_GEN_CT2; + if (clscode == BFI_PCIFN_CLASS_FC && + pcidev->ssid == BFA_PCI_CT2_SSID_FC) { + ioc->asic_mode = BFI_ASIC_MODE_FC16; + ioc->fcmode = true; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + } else { + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + } else { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_NIC; + ioc->ad_cap_bm = BFA_CM_NIC; + } + } + break; + + default: + BUG_ON(1); + } + + /** + * Set asic specific interfaces. 
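+ * CT uses the ct hwif; CT2 uses the ct2 hwif and additionally
+ * requires power-on initialization.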
+ */ + if (ioc->asic_gen == BFI_ASIC_GEN_CT) + bfa_nw_ioc_set_ct_hwif(ioc); + else { + WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); + bfa_nw_ioc_set_ct2_hwif(ioc); + bfa_nw_ioc_ct2_poweron(ioc); + } + + bfa_ioc_map_port(ioc); + bfa_ioc_reg_init(ioc); +} + +/** + * Initialize IOC dma memory + * + * @param[in] dm_kva kernel virtual address of IOC dma memory + * @param[in] dm_pa physical address of IOC dma memory + */ +void +bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) +{ + /** + * dma memory for firmware attribute + */ + ioc->attr_dma.kva = dm_kva; + ioc->attr_dma.pa = dm_pa; + ioc->attr = (struct bfi_ioc_attr *) dm_kva; +} + +/** + * Return size of dma memory required. + */ +u32 +bfa_nw_ioc_meminfo(void) +{ + return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); +} + +void +bfa_nw_ioc_enable(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_enables); + ioc->dbg_fwsave_once = true; + + bfa_fsm_send_event(ioc, IOC_E_ENABLE); +} + +void +bfa_nw_ioc_disable(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_disables); + bfa_fsm_send_event(ioc, IOC_E_DISABLE); +} + +static u32 +bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) +{ + return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); +} + +/** + * Register mailbox message handler function, to be called by common modules + */ +void +bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, + bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + + mod->mbhdlr[mc].cbfn = cbfn; + mod->mbhdlr[mc].cbarg = cbarg; +} + +/** + * Queue a mailbox command request to firmware. Waits if mailbox is busy. + * Responsibility of caller to serialize + * + * @param[in] ioc IOC instance + * @param[i] cmd Mailbox command + */ +bool +bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, + bfa_mbox_cmd_cbfn_t cbfn, void *cbarg) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + u32 stat; + + cmd->cbfn = cbfn; + cmd->cbarg = cbarg; + + /** + * If a previous command is pending, queue new command + */ + if (!list_empty(&mod->cmd_q)) { + list_add_tail(&cmd->qe, &mod->cmd_q); + return true; + } + + /** + * If mailbox is busy, queue command for poll timer + */ + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) { + list_add_tail(&cmd->qe, &mod->cmd_q); + return true; + } + + /** + * mailbox is free -- queue command to firmware + */ + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); + + return false; +} + +/** + * Handle mailbox interrupts + */ +void +bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfi_mbmsg m; + int mc; + + if (bfa_ioc_msgget(ioc, &m)) { + /** + * Treat IOC message class as special. + */ + mc = m.mh.msg_class; + if (mc == BFI_MC_IOC) { + bfa_ioc_isr(ioc, &m); + return; + } + + if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) + return; + + mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); + } + + bfa_ioc_lpu_read_stat(ioc); + + /** + * Try to send pending mailbox commands + */ + bfa_ioc_mbox_poll(ioc); +} + +void +bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_hbfails); + bfa_ioc_stats_hb_count(ioc, ioc->hb_count); + bfa_fsm_send_event(ioc, IOC_E_HWERROR); +} + +/** + * return true if IOC is disabled + */ +bool +bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || + bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); +} + +/** + * Add to IOC heartbeat failure notification queue. 
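+ * Registered callbacks are invoked on IOC enable, disable and
+ * failure events.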
To be used by common + * modules such as cee, port, diag. + */ +void +bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, + struct bfa_ioc_notify *notify) +{ + list_add_tail(¬ify->qe, &ioc->notify_q); +} + +#define BFA_MFG_NAME "Brocade" +static void +bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, + struct bfa_adapter_attr *ad_attr) +{ + struct bfi_ioc_attr *ioc_attr; + + ioc_attr = ioc->attr; + + bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); + bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); + bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); + bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); + memcpy(&ad_attr->vpd, &ioc_attr->vpd, + sizeof(struct bfa_mfg_vpd)); + + ad_attr->nports = bfa_ioc_get_nports(ioc); + ad_attr->max_speed = bfa_ioc_speed_sup(ioc); + + bfa_ioc_get_adapter_model(ioc, ad_attr->model); + /* For now, model descr uses same model string */ + bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); + + ad_attr->card_type = ioc_attr->card_type; + ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); + + if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) + ad_attr->prototype = 1; + else + ad_attr->prototype = 0; + + ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); + ad_attr->mac = bfa_nw_ioc_get_mac(ioc); + + ad_attr->pcie_gen = ioc_attr->pcie_gen; + ad_attr->pcie_lanes = ioc_attr->pcie_lanes; + ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; + ad_attr->asic_rev = ioc_attr->asic_rev; + + bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); +} + +static enum bfa_ioc_type +bfa_ioc_get_type(struct bfa_ioc *ioc) +{ + if (ioc->clscode == BFI_PCIFN_CLASS_ETH) + return BFA_IOC_TYPE_LL; + + BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC)); + + return (ioc->attr->port_mode == BFI_PORT_MODE_FC) + ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE; +} + +static void +bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) +{ + memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); + memcpy(serial_num, + (void *)ioc->attr->brcd_serialnum, + BFA_ADAPTER_SERIAL_NUM_LEN); +} + +static void +bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver) +{ + memset(fw_ver, 0, BFA_VERSION_LEN); + memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); +} + +static void +bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev) +{ + BUG_ON(!(chip_rev)); + + memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN); + + chip_rev[0] = 'R'; + chip_rev[1] = 'e'; + chip_rev[2] = 'v'; + chip_rev[3] = '-'; + chip_rev[4] = ioc->attr->asic_rev; + chip_rev[5] = '\0'; +} + +static void +bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) +{ + memset(optrom_ver, 0, BFA_VERSION_LEN); + memcpy(optrom_ver, ioc->attr->optrom_version, + BFA_VERSION_LEN); +} + +static void +bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) +{ + memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); + memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); +} + +static void +bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model) +{ + struct bfi_ioc_attr *ioc_attr; + + BUG_ON(!(model)); + memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN); + + ioc_attr = ioc->attr; + + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", + BFA_MFG_NAME, ioc_attr->card_type); +} + +static enum bfa_ioc_state +bfa_ioc_get_state(struct bfa_ioc *ioc) +{ + enum bfa_iocpf_state iocpf_st; + enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); + + if (ioc_st == BFA_IOC_ENABLING || + ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { + + iocpf_st = bfa_sm_to_state(iocpf_sm_table, 
ioc->iocpf.fsm); + + switch (iocpf_st) { + case BFA_IOCPF_SEMWAIT: + ioc_st = BFA_IOC_SEMWAIT; + break; + + case BFA_IOCPF_HWINIT: + ioc_st = BFA_IOC_HWINIT; + break; + + case BFA_IOCPF_FWMISMATCH: + ioc_st = BFA_IOC_FWMISMATCH; + break; + + case BFA_IOCPF_FAIL: + ioc_st = BFA_IOC_FAIL; + break; + + case BFA_IOCPF_INITFAIL: + ioc_st = BFA_IOC_INITFAIL; + break; + + default: + break; + } + } + return ioc_st; +} + +void +bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) +{ + memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); + + ioc_attr->state = bfa_ioc_get_state(ioc); + ioc_attr->port_id = ioc->port_id; + ioc_attr->port_mode = ioc->port_mode; + + ioc_attr->port_mode_cfg = ioc->port_mode_cfg; + ioc_attr->cap_bm = ioc->ad_cap_bm; + + ioc_attr->ioc_type = bfa_ioc_get_type(ioc); + + bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); + + ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; + ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; + bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); +} + +/** + * WWN public + */ +static u64 +bfa_ioc_get_pwwn(struct bfa_ioc *ioc) +{ + return ioc->attr->pwwn; +} + +mac_t +bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) +{ + return ioc->attr->mac; +} + +/** + * Firmware failure detected. Start recovery actions. + */ +static void +bfa_ioc_recover(struct bfa_ioc *ioc) +{ + pr_crit("Heart Beat of IOC has failed\n"); + bfa_ioc_stats(ioc, ioc_hbfails); + bfa_ioc_stats_hb_count(ioc, ioc->hb_count); + bfa_fsm_send_event(ioc, IOC_E_HBFAIL); +} + +static void +bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc) +{ + if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) + return; +} + +/** + * @dg hal_iocpf_pvt BFA IOC PF private functions + * @{ + */ + +static void +bfa_iocpf_enable(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); +} + +static void +bfa_iocpf_disable(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); +} + +static void +bfa_iocpf_fail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); +} + +static void +bfa_iocpf_initfail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); +} + +static void +bfa_iocpf_getattrfail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); +} + +static void +bfa_iocpf_stop(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); +} + +void +bfa_nw_iocpf_timeout(void *ioc_arg) +{ + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; + enum bfa_iocpf_state iocpf_st; + + iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + + if (iocpf_st == BFA_IOCPF_HWINIT) + bfa_ioc_poll_fwinit(ioc); + else + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); +} + +void +bfa_nw_iocpf_sem_timeout(void *ioc_arg) +{ + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; + + bfa_ioc_hw_sem_get(ioc); +} + +static void +bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) +{ + u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + if (fwstate == BFI_IOC_DISABLED) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { + bfa_nw_iocpf_timeout(ioc); + } else { + ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; + mod_timer(&ioc->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_POLL_TOV)); + } +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h new file mode 100644 index 000000000000..ca158d1eaef3 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -0,0 +1,325 @@ +/* + * Linux network 
driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +#ifndef __BFA_IOC_H__ +#define __BFA_IOC_H__ + +#include "bfa_cs.h" +#include "bfi.h" +#include "cna.h" + +#define BFA_IOC_TOV 3000 /* msecs */ +#define BFA_IOC_HWSEM_TOV 500 /* msecs */ +#define BFA_IOC_HB_TOV 500 /* msecs */ +#define BFA_IOC_POLL_TOV 200 /* msecs */ + +/** + * PCI device information required by IOC + */ +struct bfa_pcidev { + int pci_slot; + u8 pci_func; + u16 device_id; + u16 ssid; + void __iomem *pci_bar_kva; +}; + +/** + * Structure used to remember the DMA-able memory block's KVA and Physical + * Address + */ +struct bfa_dma { + void *kva; /* ! Kernel virtual address */ + u64 pa; /* ! Physical address */ +}; + +#define BFA_DMA_ALIGN_SZ 256 + +/** + * smem size for Crossbow and Catapult + */ +#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ +#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ + +/** + * @brief BFA dma address assignment macro. (big endian format) + */ +#define bfa_dma_be_addr_set(dma_addr, pa) \ + __bfa_dma_be_addr_set(&dma_addr, (u64)pa) +static inline void +__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) +{ + dma_addr->a32.addr_lo = (u32) htonl(pa); + dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa)); +} + +struct bfa_ioc_regs { + void __iomem *hfn_mbox_cmd; + void __iomem *hfn_mbox; + void __iomem *lpu_mbox_cmd; + void __iomem *lpu_mbox; + void __iomem *lpu_read_stat; + void __iomem *pss_ctl_reg; + void __iomem *pss_err_status_reg; + void __iomem *app_pll_fast_ctl_reg; + void __iomem *app_pll_slow_ctl_reg; + void __iomem *ioc_sem_reg; + void __iomem *ioc_usage_sem_reg; + void __iomem *ioc_init_sem_reg; + void __iomem *ioc_usage_reg; + void __iomem *host_page_num_fn; + void __iomem *heartbeat; + void __iomem *ioc_fwstate; + void __iomem *alt_ioc_fwstate; + void __iomem *ll_halt; + void __iomem *alt_ll_halt; + void __iomem *err_set; + void __iomem *ioc_fail_sync; + void __iomem *shirq_isr_next; + void __iomem *shirq_msk_next; + void __iomem *smem_page_start; + u32 smem_pg0; +}; + +/** + * IOC Mailbox structures + */ +typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg); +struct bfa_mbox_cmd { + struct list_head qe; + bfa_mbox_cmd_cbfn_t cbfn; + void *cbarg; + u32 msg[BFI_IOC_MSGSZ]; +}; + +/** + * IOC mailbox module + */ +typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m); +struct bfa_ioc_mbox_mod { + struct list_head cmd_q; /*!< pending mbox queue */ + int nmclass; /*!< number of handlers */ + struct { + bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */ + void *cbarg; + } mbhdlr[BFI_MC_MAX]; +}; + +/** + * IOC callback function interfaces + */ +typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); +typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa); +typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa); +typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa); +struct bfa_ioc_cbfn { + bfa_ioc_enable_cbfn_t enable_cbfn; + bfa_ioc_disable_cbfn_t disable_cbfn; + 
bfa_ioc_hbfail_cbfn_t hbfail_cbfn; + bfa_ioc_reset_cbfn_t reset_cbfn; +}; + +/** + * IOC event notification mechanism. + */ +enum bfa_ioc_event { + BFA_IOC_E_ENABLED = 1, + BFA_IOC_E_DISABLED = 2, + BFA_IOC_E_FAILED = 3, +}; + +typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event); + +struct bfa_ioc_notify { + struct list_head qe; + bfa_ioc_notify_cbfn_t cbfn; + void *cbarg; +}; + +/** + * Initialize a IOC event notification structure + */ +#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ + (__notify)->cbfn = (__cbfn); \ + (__notify)->cbarg = (__cbarg); \ +} while (0) + +struct bfa_iocpf { + bfa_fsm_t fsm; + struct bfa_ioc *ioc; + bool fw_mismatch_notified; + bool auto_recover; + u32 poll_time; +}; + +struct bfa_ioc { + bfa_fsm_t fsm; + struct bfa *bfa; + struct bfa_pcidev pcidev; + struct timer_list ioc_timer; + struct timer_list iocpf_timer; + struct timer_list sem_timer; + struct timer_list hb_timer; + u32 hb_count; + struct list_head notify_q; + void *dbg_fwsave; + int dbg_fwsave_len; + bool dbg_fwsave_once; + enum bfi_pcifn_class clscode; + struct bfa_ioc_regs ioc_regs; + struct bfa_ioc_drv_stats stats; + bool fcmode; + bool pllinit; + bool stats_busy; /*!< outstanding stats */ + u8 port_id; + + struct bfa_dma attr_dma; + struct bfi_ioc_attr *attr; + struct bfa_ioc_cbfn *cbfn; + struct bfa_ioc_mbox_mod mbox_mod; + const struct bfa_ioc_hwif *ioc_hwif; + struct bfa_iocpf iocpf; + enum bfi_asic_gen asic_gen; + enum bfi_asic_mode asic_mode; + enum bfi_port_mode port0_mode; + enum bfi_port_mode port1_mode; + enum bfa_mode port_mode; + u8 ad_cap_bm; /*!< adapter cap bit mask */ + u8 port_mode_cfg; /*!< config port mode */ +}; + +struct bfa_ioc_hwif { + enum bfa_status (*ioc_pll_init) (void __iomem *rb, + enum bfi_asic_mode m); + bool (*ioc_firmware_lock) (struct bfa_ioc *ioc); + void (*ioc_firmware_unlock) (struct bfa_ioc *ioc); + void (*ioc_reg_init) (struct bfa_ioc *ioc); + void (*ioc_map_port) (struct bfa_ioc *ioc); + void (*ioc_isr_mode_set) (struct bfa_ioc *ioc, + bool msix); + void (*ioc_notify_fail) (struct bfa_ioc *ioc); + void (*ioc_ownership_reset) (struct bfa_ioc *ioc); + bool (*ioc_sync_start) (struct bfa_ioc *ioc); + void (*ioc_sync_join) (struct bfa_ioc *ioc); + void (*ioc_sync_leave) (struct bfa_ioc *ioc); + void (*ioc_sync_ack) (struct bfa_ioc *ioc); + bool (*ioc_sync_complete) (struct bfa_ioc *ioc); + bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc); +}; + +#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) +#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id) +#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) +#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) +#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) +#define bfa_ioc_fetch_stats(__ioc, __stats) \ + (((__stats)->drv_stats) = (__ioc)->stats) +#define bfa_ioc_clr_stats(__ioc) \ + memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) +#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) +#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) +#define bfa_ioc_speed_sup(__ioc) \ + BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) +#define bfa_ioc_get_nports(__ioc) \ + BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) + +#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) +#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \ + ((_ioc)->stats.hb_count = (_hb_count)) +#define BFA_IOC_FWIMG_MINSZ (16 * 1024) +#define BFA_IOC_FW_SMEM_SIZE(__ioc) \ + ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \ + ? 
BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE) +#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) + +/** + * IOC mailbox interface + */ +bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, + struct bfa_mbox_cmd *cmd, + bfa_mbox_cmd_cbfn_t cbfn, void *cbarg); +void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc); +void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, + bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); + +/** + * IOC interfaces + */ + +#define bfa_ioc_pll_init_asic(__ioc) \ + ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ + (__ioc)->asic_mode)) + +#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \ + if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \ + ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \ +} while (0) +#define bfa_ioc_ownership_reset(__ioc) \ + ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) + +#define bfa_ioc_lpu_read_stat(__ioc) do { \ + if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \ + ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \ +} while (0) + +void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc); +void bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc); +void bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc); + +void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, + struct bfa_ioc_cbfn *cbfn); +void bfa_nw_ioc_auto_recover(bool auto_recover); +void bfa_nw_ioc_detach(struct bfa_ioc *ioc); +void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, + enum bfi_pcifn_class clscode); +u32 bfa_nw_ioc_meminfo(void); +void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa); +void bfa_nw_ioc_enable(struct bfa_ioc *ioc); +void bfa_nw_ioc_disable(struct bfa_ioc *ioc); + +void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); +bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc); +void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); +void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, + struct bfa_ioc_notify *notify); +bool bfa_nw_ioc_sem_get(void __iomem *sem_reg); +void bfa_nw_ioc_sem_release(void __iomem *sem_reg); +void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc); +void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *fwhdr); +bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *fwhdr); +mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc); + +/* + * Timeout APIs + */ +void bfa_nw_ioc_timeout(void *ioc); +void bfa_nw_ioc_hb_check(void *ioc); +void bfa_nw_iocpf_timeout(void *ioc); +void bfa_nw_iocpf_sem_timeout(void *ioc); + +/* + * F/W Image Size & Chunk + */ +u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off); +u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen); + +#endif /* __BFA_IOC_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c new file mode 100644 index 000000000000..348479bbfa3a --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -0,0 +1,878 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +#include "bfa_ioc.h" +#include "cna.h" +#include "bfi.h" +#include "bfi_reg.h" +#include "bfa_defs.h" + +#define bfa_ioc_ct_sync_pos(__ioc) \ + ((u32) (1 << bfa_ioc_pcifn(__ioc))) +#define BFA_IOC_SYNC_REQD_SH 16 +#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) +#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) +#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) +#define bfa_ioc_ct_sync_reqd_pos(__ioc) \ + (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) + +/* + * forward declarations + */ +static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc); +static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); +static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); +static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc); +static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); +static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc); +static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); +static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); +static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); +static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); +static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); +static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, + enum bfi_asic_mode asic_mode); +static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, + enum bfi_asic_mode asic_mode); +static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc); + +static const struct bfa_ioc_hwif nw_hwif_ct = { + .ioc_pll_init = bfa_ioc_ct_pll_init, + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, + .ioc_reg_init = bfa_ioc_ct_reg_init, + .ioc_map_port = bfa_ioc_ct_map_port, + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set, + .ioc_notify_fail = bfa_ioc_ct_notify_fail, + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, + .ioc_sync_start = bfa_ioc_ct_sync_start, + .ioc_sync_join = bfa_ioc_ct_sync_join, + .ioc_sync_leave = bfa_ioc_ct_sync_leave, + .ioc_sync_ack = bfa_ioc_ct_sync_ack, + .ioc_sync_complete = bfa_ioc_ct_sync_complete, +}; + +static const struct bfa_ioc_hwif nw_hwif_ct2 = { + .ioc_pll_init = bfa_ioc_ct2_pll_init, + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, + .ioc_reg_init = bfa_ioc_ct2_reg_init, + .ioc_map_port = bfa_ioc_ct2_map_port, + .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat, + .ioc_isr_mode_set = NULL, + .ioc_notify_fail = bfa_ioc_ct_notify_fail, + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, + .ioc_sync_start = bfa_ioc_ct_sync_start, + .ioc_sync_join = bfa_ioc_ct_sync_join, + .ioc_sync_leave = bfa_ioc_ct_sync_leave, + .ioc_sync_ack = bfa_ioc_ct_sync_ack, + .ioc_sync_complete = bfa_ioc_ct_sync_complete, +}; + +/** + * Called from bfa_ioc_attach() to map asic specific calls. + */ +void +bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) +{ + ioc->ioc_hwif = &nw_hwif_ct; +} + +void +bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) +{ + ioc->ioc_hwif = &nw_hwif_ct2; +} + +/** + * Return true if firmware of current driver matches the running firmware. 
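+ * The shared firmware use count is updated under the usage
+ * semaphore; a version mismatch leaves the count unchanged and
+ * fails the lock.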
+ */ +static bool +bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) +{ + enum bfi_ioc_state ioc_fwstate; + u32 usecnt; + struct bfi_ioc_image_hdr fwhdr; + + /** + * If bios boot (flash based) -- do not increment usage count + */ + if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < + BFA_IOC_FWIMG_MINSZ) + return true; + + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); + + /** + * If usage count is 0, always return TRUE. + */ + if (usecnt == 0) { + writel(1, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_fail_sync); + return true; + } + + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + /** + * Use count cannot be non-zero and chip in uninitialized state. + */ + BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT)); + + /** + * Check if another driver with a different firmware is active + */ + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) { + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + return false; + } + + /** + * Same firmware version. Increment the reference count. + */ + usecnt++; + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + return true; +} + +static void +bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) +{ + u32 usecnt; + + /** + * If bios boot (flash based) -- do not decrement usage count + */ + if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < + BFA_IOC_FWIMG_MINSZ) + return; + + /** + * decrement usage count + */ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); + BUG_ON(!(usecnt > 0)); + + usecnt--; + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); +} + +/** + * Notify other functions on HB failure. 
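+ * Halt is asserted on both the local and alternate LL halt registers
+ * and read back so the writes take effect.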
+ */ +static void +bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) +{ + writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); + writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); + /* Wait for halt to take effect */ + readl(ioc->ioc_regs.ll_halt); + readl(ioc->ioc_regs.alt_ll_halt); +} + +/** + * Host to LPU mailbox message addresses + */ +static const struct { + u32 hfn_mbox; + u32 lpu_mbox; + u32 hfn_pgn; +} ct_fnreg[] = { + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, + { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, + { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } +}; + +/** + * Host <-> LPU mailbox command/status registers - port 0 + */ +static const struct { + u32 hfn; + u32 lpu; +} ct_p0reg[] = { + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } +}; + +/** + * Host <-> LPU mailbox command/status registers - port 1 + */ +static const struct { + u32 hfn; + u32 lpu; +} ct_p1reg[] = { + { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } +}; + +static const struct { + u32 hfn_mbox; + u32 lpu_mbox; + u32 hfn_pgn; + u32 hfn; + u32 lpu; + u32 lpu_read; +} ct2_reg[] = { + { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU0_READ_STAT}, + { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU1_READ_STAT}, +}; + +static void +bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) +{ + void __iomem *rb; + int pcifn = bfa_ioc_pcifn(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; + + if (ioc->port_id == 0) { + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; + ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; + ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; + ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; + ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; + ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG; + ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; + ioc->ioc_regs.ioc_fail_sync = rb + 
BFA_IOC_FAIL_SYNC; + + /** + * sram memory access + */ + ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); +} + +static void +bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) +{ + void __iomem *rb; + int port = bfa_ioc_portid(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; + ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; + + if (port == 0) { + ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; + ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; + ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; + ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; + ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; + ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; + ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; + ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; + + /** + * sram memory access + */ + ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = rb + ERR_SET_REG; +} + +/** + * Initialize IOC to port mapping. 
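+ *
+ * The port id is taken from the per-function personality register:
+ * FNC_PERS_REG on CT and CT2_HOSTFN_PERSONALITY0 on CT2 ASICs.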
+ */ + +#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) +static void +bfa_ioc_ct_map_port(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + /** + * For catapult, base port id on personality register and IOC type + */ + r32 = readl(rb + FNC_PERS_REG); + r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); + ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; + +} + +static void +bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); + ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); +} + +/** + * Set interrupt mode for a function: INTX or MSIX + */ +static void +bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32, mode; + + r32 = readl(rb + FNC_PERS_REG); + + mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & + __F0_INTX_STATUS; + + /** + * If already in desired mode, do not change anything + */ + if ((!msix && mode) || (msix && !mode)) + return; + + if (msix) + mode = __F0_INTX_STATUS_MSIX; + else + mode = __F0_INTX_STATUS_INTA; + + r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + + writel(r32, rb + FNC_PERS_REG); +} + +static bool +bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_read_stat); + if (r32) { + writel(1, ioc->ioc_regs.lpu_read_stat); + return true; + } + + return false; +} + +/** + * MSI-X resource allocation for 1860 with no asic block + */ +#define HOSTFN_MSIX_DEFAULT 64 +#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 +#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c +#define __MSIX_VT_NUMVT__MK 0x003ff800 +#define __MSIX_VT_NUMVT__SH 11 +#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) +#define __MSIX_VT_OFST_ 0x000007ff +void +bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); + if (r32 & __MSIX_VT_NUMVT__MK) { + writel(r32 & __MSIX_VT_OFST_, + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); + return; + } + + writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | + HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_OFST_NUMVT); + writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); +} + +/** + * Cleanup hw semaphore and usecnt registers + */ +static void +bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) +{ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + + /* + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. + */ + readl(ioc->ioc_regs.ioc_sem_reg); + bfa_nw_ioc_hw_sem_release(ioc); +} + +/** + * Synchronized IOC failure processing routines + */ +static bool +bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); + + /* + * Driver load time. If the sync required bit for this PCI fn + * is set, it is due to an unclean exit by the driver for this + * PCI fn in the previous incarnation. Whoever comes here first + * should clean it up, no matter which PCI fn. 
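+ * Cleaning up means clearing the fail-sync register, taking over the
+ * usage count and forcing both IOC firmware states back to
+ * BFI_IOC_UNINIT.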
+ */ + + if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { + writel(0, ioc->ioc_regs.ioc_fail_sync); + writel(1, ioc->ioc_regs.ioc_usage_reg); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); + return true; + } + + return bfa_ioc_ct_sync_complete(ioc); +} +/** + * Synchronized IOC failure processing routines + */ +static void +bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); + + writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); +} + +static void +bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | + bfa_ioc_ct_sync_pos(ioc); + + writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); +} + +static void +bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + + writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync); +} + +static bool +bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); + u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); + u32 tmp_ackd; + + if (sync_ackd == 0) + return true; + + /** + * The check below is to see whether any other PCI fn + * has reinitialized the ASIC (reset sync_ackd bits) + * and failed again while this IOC was waiting for hw + * semaphore (in bfa_iocpf_sm_semwait()). + */ + tmp_ackd = sync_ackd; + if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && + !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) + sync_ackd |= bfa_ioc_ct_sync_pos(ioc); + + if (sync_reqd == sync_ackd) { + writel(bfa_ioc_ct_clear_sync_ackd(r32), + ioc->ioc_regs.ioc_fail_sync); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); + return true; + } + + /** + * If another PCI fn reinitialized and failed again while + * this IOC was waiting for hw sem, the sync_ackd bit for + * this IOC need to be set again to allow reinitialization. 
+ */ + if (tmp_ackd != sync_ackd) + writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); + + return false; +} + +static enum bfa_status +bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) +{ + u32 pll_sclk, pll_fclk, r32; + bool fcmode = (asic_mode == BFI_ASIC_MODE_FC); + + pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST | + __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) | + __APP_PLL_SCLK_JITLMT0_1(3U) | + __APP_PLL_SCLK_CNTLMT0_1(1U); + pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST | + __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | + __APP_PLL_LCLK_JITLMT0_1(3U) | + __APP_PLL_LCLK_CNTLMT0_1(1U); + + if (fcmode) { + writel(0, (rb + OP_MODE)); + writel(__APP_EMS_CMLCKSEL | + __APP_EMS_REFCKBUFEN2 | + __APP_EMS_CHANNEL_SEL, + (rb + ETH_MAC_SER_REG)); + } else { + writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); + writel(__APP_EMS_REFCKBUFEN1, + (rb + ETH_MAC_SER_REG)); + } + writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(pll_sclk | + __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); + writel(pll_sclk | + __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE, + rb + APP_PLL_LCLK_CTL_REG); + readl(rb + HOSTFN0_INT_MSK); + udelay(2000); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(pll_sclk | + __APP_PLL_SCLK_ENABLE, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_ENABLE, + rb + APP_PLL_LCLK_CTL_REG); + + if (!fcmode) { + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); + } + r32 = readl((rb + PSS_CTL_REG)); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); + if (!fcmode) { + writel(0, (rb + PMM_1T_RESET_REG_P0)); + writel(0, (rb + PMM_1T_RESET_REG_P1)); + } + + writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); + udelay(1000); + r32 = readl((rb + MBIST_STAT_REG)); + writel(0, (rb + MBIST_CTL_REG)); + return BFA_STATUS_OK; +} + +static void +bfa_ioc_ct2_sclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put s_clk PLL and PLL FSM in reset + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN); + r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS | + __APP_PLL_SCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * Ignore mode and program for the max clock (which is FC16) + * Firmware/NFC will do the PLL init appropiately + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * while doing PLL init dont clock gate ethernet subsystem + */ + r32 = readl((rb + CT2_CHIP_MISC_PRG)); + writel((r32 | __ETH_CLK_ENABLE_PORT0), + (rb + CT2_CHIP_MISC_PRG)); + + r32 = readl((rb + CT2_PCIE_MISC_REG)); + writel((r32 | __ETH_CLK_ENABLE_PORT1), + (rb + CT2_PCIE_MISC_REG)); + + /* + * set sclk value + */ + r32 = readl((rb + 
CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL | + __APP_PLL_SCLK_CLK_DIV2); + writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); + + /* + * Dont do clock gating for ethernet subsystem, firmware/NFC will + * do this appropriately + */ +} + +static void +bfa_ioc_ct2_lclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put l_clk PLL and PLL FSM in reset + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN); + r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS | + __APP_PLL_LCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * set LPU speed (set for FC16 which will work for other modes) + */ + r32 = readl((rb + CT2_CHIP_MISC_PRG)); + writel(r32, (rb + CT2_CHIP_MISC_PRG)); + + /* + * set LPU half speed (set for FC16 which will work for other modes) + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * set lclk for mode (set for FC16) + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED); + r32 |= 0x20c1731b; + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); +} + +static void +bfa_ioc_ct2_mem_init(void __iomem *rb) +{ + u32 r32; + + r32 = readl((rb + PSS_CTL_REG)); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); + + writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG)); + udelay(1000); + writel(0, (rb + CT2_MBIST_CTL_REG)); +} + +static void +bfa_ioc_ct2_mac_reset(void __iomem *rb) +{ + volatile u32 r32; + + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET), + (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET), + (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* put port0, port1 MAC & AHB in reset */ + writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), + (rb + CT2_CSI_MAC_CONTROL_REG(0))); + writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), + (rb + CT2_CSI_MAC_CONTROL_REG(1))); +} + +#define CT2_NFC_MAX_DELAY 1000 +static enum bfa_status +bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) +{ + volatile u32 wgn, r32; + int i; + + /* + * Initialize PLL if not already done by NFC + */ + wgn = readl(rb + CT2_WGN_STATUS); + if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { + writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + break; + udelay(1000); + } + } + + /* + * Mask the interrupts and clear any + * pending interrupts left by BIOS/EFI + */ + + writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); + writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); + + r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + } + r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + } + + bfa_ioc_ct2_mac_reset(rb); + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = 
readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET), + (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * Announce flash device presence, if flash was corrupted. + */ + if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { + r32 = readl((rb + PSS_GPIO_OUT_REG)); + writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG)); + r32 = readl((rb + PSS_GPIO_OE_REG)); + writel((r32 | 1), (rb + PSS_GPIO_OE_REG)); + } + + bfa_ioc_ct2_mem_init(rb); + + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); + return BFA_STATUS_OK; +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c new file mode 100644 index 000000000000..dd36427f4752 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c @@ -0,0 +1,669 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +/** + * @file bfa_msgq.c MSGQ module source file. + */ + +#include "bfi.h" +#include "bfa_msgq.h" +#include "bfa_ioc.h" + +#define call_cmdq_ent_cbfn(_cmdq_ent, _status) \ +{ \ + bfa_msgq_cmdcbfn_t cbfn; \ + void *cbarg; \ + cbfn = (_cmdq_ent)->cbfn; \ + cbarg = (_cmdq_ent)->cbarg; \ + (_cmdq_ent)->cbfn = NULL; \ + (_cmdq_ent)->cbarg = NULL; \ + if (cbfn) { \ + cbfn(cbarg, (_status)); \ + } \ +} + +static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq); +static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq); + +enum cmdq_event { + CMDQ_E_START = 1, + CMDQ_E_STOP = 2, + CMDQ_E_FAIL = 3, + CMDQ_E_POST = 4, + CMDQ_E_INIT_RESP = 5, + CMDQ_E_DB_READY = 6, +}; + +bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq, + enum cmdq_event); + +static void +cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq) +{ + struct bfa_msgq_cmd_entry *cmdq_ent; + + cmdq->producer_index = 0; + cmdq->consumer_index = 0; + cmdq->flags = 0; + cmdq->token = 0; + cmdq->offset = 0; + cmdq->bytes_to_copy = 0; + while (!list_empty(&cmdq->pending_q)) { + bfa_q_deq(&cmdq->pending_q, &cmdq_ent); + bfa_q_qe_init(&cmdq_ent->qe); + call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED); + } +} + +static void +cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_START: + bfa_fsm_set_state(cmdq, cmdq_sm_init_wait); + break; + + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + /* No-op */ + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq) +{ + bfa_wc_down(&cmdq->msgq->init_wc); +} 
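
The cmdq code in this file follows the driver's bfa_fsm convention: the current state is a function pointer, each event is dispatched to that handler, and the BFA_MSGQ_CMDQ_F_DB_UPDATE flag coalesces posts that arrive while a doorbell is outstanding. The stand-alone sketch below models the four cmdq states under that convention; it is a simplified illustration, not the driver's bfa_cs.h macros, and the names struct cmdq_model, set_state() and main() are assumptions introduced only for this example.

/*
 * Simplified, user-space model of the cmdq state machine: the current
 * state is a function pointer and CMDQ_E_*-style events drive the
 * transitions.  The real driver also runs each state's entry function
 * (e.g. ringing the doorbell on entry to dbell_wait).
 */
#include <stdbool.h>
#include <stdio.h>

enum cq_event { E_START, E_STOP, E_FAIL, E_POST, E_INIT_RESP, E_DB_READY };

struct cmdq_model;
typedef void (*cq_state_t)(struct cmdq_model *q, enum cq_event e);

struct cmdq_model {
    cq_state_t fsm;     /* current state handler */
    bool db_update;     /* models BFA_MSGQ_CMDQ_F_DB_UPDATE */
};

static void sm_stopped(struct cmdq_model *q, enum cq_event e);
static void sm_init_wait(struct cmdq_model *q, enum cq_event e);
static void sm_ready(struct cmdq_model *q, enum cq_event e);
static void sm_dbell_wait(struct cmdq_model *q, enum cq_event e);

static void set_state(struct cmdq_model *q, cq_state_t next, const char *name)
{
    q->fsm = next;
    printf("-> %s\n", name);
}

static void sm_stopped(struct cmdq_model *q, enum cq_event e)
{
    if (e == E_START)
        set_state(q, sm_init_wait, "init_wait");
    else if (e == E_POST)
        q->db_update = true;    /* remember the post, ring later */
}

static void sm_init_wait(struct cmdq_model *q, enum cq_event e)
{
    if (e == E_STOP || e == E_FAIL) {
        set_state(q, sm_stopped, "stopped");
    } else if (e == E_POST) {
        q->db_update = true;
    } else if (e == E_INIT_RESP) {
        if (q->db_update) {
            q->db_update = false;
            set_state(q, sm_dbell_wait, "dbell_wait");
        } else {
            set_state(q, sm_ready, "ready");
        }
    }
}

static void sm_ready(struct cmdq_model *q, enum cq_event e)
{
    if (e == E_STOP || e == E_FAIL)
        set_state(q, sm_stopped, "stopped");
    else if (e == E_POST)
        set_state(q, sm_dbell_wait, "dbell_wait");
}

static void sm_dbell_wait(struct cmdq_model *q, enum cq_event e)
{
    if (e == E_STOP || e == E_FAIL) {
        set_state(q, sm_stopped, "stopped");
    } else if (e == E_POST) {
        q->db_update = true;    /* coalesce posts while doorbell is busy */
    } else if (e == E_DB_READY) {
        if (q->db_update) {
            q->db_update = false;
            set_state(q, sm_dbell_wait, "dbell_wait"); /* ring again */
        } else {
            set_state(q, sm_ready, "ready");
        }
    }
}

int main(void)
{
    struct cmdq_model q = { .fsm = sm_stopped, .db_update = false };

    q.fsm(&q, E_START);     /* stopped -> init_wait */
    q.fsm(&q, E_POST);      /* post arrives before the firmware INIT_RSP */
    q.fsm(&q, E_INIT_RESP); /* init_wait -> dbell_wait (post was pending) */
    q.fsm(&q, E_DB_READY);  /* dbell_wait -> ready */
    return 0;
}

Built with any C99 compiler, the model prints the transition trace init_wait, dbell_wait, ready: a command posted while initialization is still pending is flushed with a single doorbell once the INIT response arrives.
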
+ +static void +cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + case CMDQ_E_INIT_RESP: + if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) { + cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE; + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + } else + bfa_fsm_set_state(cmdq, cmdq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq) +{ +} + +static void +cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq) +{ + bfa_msgq_cmdq_dbell(cmdq); +} + +static void +cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + case CMDQ_E_DB_READY: + if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) { + cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE; + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + } else + bfa_fsm_set_state(cmdq, cmdq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_msgq_cmdq_dbell_ready(void *arg) +{ + struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg; + bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY); +} + +static void +bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq) +{ + struct bfi_msgq_h2i_db *dbell = + (struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]); + + memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db)); + bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0); + dbell->mh.mtag.i2htok = 0; + dbell->idx.cmdq_pi = htons(cmdq->producer_index); + + if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb, + bfa_msgq_cmdq_dbell_ready, cmdq)) { + bfa_msgq_cmdq_dbell_ready(cmdq); + } +} + +static void +__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd) +{ + size_t len = cmd->msg_size; + int num_entries = 0; + size_t to_copy; + u8 *src, *dst; + + src = (u8 *)cmd->msg_hdr; + dst = (u8 *)cmdq->addr.kva; + dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE); + + while (len) { + to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ? 
+ len : BFI_MSGQ_CMD_ENTRY_SIZE; + memcpy(dst, src, to_copy); + len -= to_copy; + src += BFI_MSGQ_CMD_ENTRY_SIZE; + BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth); + dst = (u8 *)cmdq->addr.kva; + dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE); + num_entries++; + } + +} + +static void +bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb; + struct bfa_msgq_cmd_entry *cmd; + int posted = 0; + + cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci); + + /* Walk through pending list to see if the command can be posted */ + while (!list_empty(&cmdq->pending_q)) { + cmd = + (struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q); + if (ntohs(cmd->msg_hdr->num_entries) <= + BFA_MSGQ_FREE_CNT(cmdq)) { + list_del(&cmd->qe); + __cmd_copy(cmdq, cmd); + posted = 1; + call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK); + } else { + break; + } + } + + if (posted) + bfa_fsm_send_event(cmdq, CMDQ_E_POST); +} + +static void +bfa_msgq_cmdq_copy_next(void *arg) +{ + struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg; + + if (cmdq->bytes_to_copy) + bfa_msgq_cmdq_copy_rsp(cmdq); +} + +static void +bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_cmdq_copy_req *req = + (struct bfi_msgq_i2h_cmdq_copy_req *)mb; + + cmdq->token = 0; + cmdq->offset = ntohs(req->offset); + cmdq->bytes_to_copy = ntohs(req->len); + bfa_msgq_cmdq_copy_rsp(cmdq); +} + +static void +bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq) +{ + struct bfi_msgq_h2i_cmdq_copy_rsp *rsp = + (struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0]; + int copied; + u8 *addr = (u8 *)cmdq->addr.kva; + + memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp)); + bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0); + rsp->mh.mtag.i2htok = htons(cmdq->token); + copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? 
BFI_CMD_COPY_SZ : + cmdq->bytes_to_copy; + addr += cmdq->offset; + memcpy(rsp->data, addr, copied); + + cmdq->token++; + cmdq->offset += copied; + cmdq->bytes_to_copy -= copied; + + if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb, + bfa_msgq_cmdq_copy_next, cmdq)) { + bfa_msgq_cmdq_copy_next(cmdq); + } +} + +static void +bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq) +{ + cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY; + INIT_LIST_HEAD(&cmdq->pending_q); + cmdq->msgq = msgq; + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); +} + +static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq); + +enum rspq_event { + RSPQ_E_START = 1, + RSPQ_E_STOP = 2, + RSPQ_E_FAIL = 3, + RSPQ_E_RESP = 4, + RSPQ_E_INIT_RESP = 5, + RSPQ_E_DB_READY = 6, +}; + +bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event); +bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq, + enum rspq_event); +bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event); +bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq, + enum rspq_event); + +static void +rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq) +{ + rspq->producer_index = 0; + rspq->consumer_index = 0; + rspq->flags = 0; +} + +static void +rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_START: + bfa_fsm_set_state(rspq, rspq_sm_init_wait); + break; + + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq) +{ + bfa_wc_down(&rspq->msgq->init_wc); +} + +static void +rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_FAIL: + case RSPQ_E_STOP: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_INIT_RESP: + bfa_fsm_set_state(rspq, rspq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq) +{ +} + +static void +rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_RESP: + bfa_fsm_set_state(rspq, rspq_sm_dbell_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq) +{ + if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc)) + bfa_msgq_rspq_dbell(rspq); +} + +static void +rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_RESP: + rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE; + break; + + case RSPQ_E_DB_READY: + if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) { + rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE; + bfa_fsm_set_state(rspq, rspq_sm_dbell_wait); + } else + bfa_fsm_set_state(rspq, rspq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_msgq_rspq_dbell_ready(void *arg) +{ + struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg; + bfa_fsm_send_event(rspq, RSPQ_E_DB_READY); +} + +static void +bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq) +{ + struct bfi_msgq_h2i_db *dbell = + (struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]); + + memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db)); + bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0); + dbell->mh.mtag.i2htok = 0; + dbell->idx.rspq_ci = htons(rspq->consumer_index); + + 
if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb, + bfa_msgq_rspq_dbell_ready, rspq)) { + bfa_msgq_rspq_dbell_ready(rspq); + } +} + +static void +bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb; + struct bfi_msgq_mhdr *msghdr; + int num_entries; + int mc; + u8 *rspq_qe; + + rspq->producer_index = ntohs(dbell->idx.rspq_pi); + + while (rspq->consumer_index != rspq->producer_index) { + rspq_qe = (u8 *)rspq->addr.kva; + rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE); + msghdr = (struct bfi_msgq_mhdr *)rspq_qe; + + mc = msghdr->msg_class; + num_entries = ntohs(msghdr->num_entries); + + if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL)) + break; + + (rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr); + + BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries, + rspq->depth); + } + + bfa_fsm_send_event(rspq, RSPQ_E_RESP); +} + +static void +bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq) +{ + rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY; + rspq->msgq = msgq; + bfa_fsm_set_state(rspq, rspq_sm_stopped); +} + +static void +bfa_msgq_init_rsp(struct bfa_msgq *msgq, + struct bfi_mbmsg *mb) +{ + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP); +} + +static void +bfa_msgq_init(void *arg) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)arg; + struct bfi_msgq_cfg_req *msgq_cfg = + (struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0]; + + memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req)); + bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0); + msgq_cfg->mh.mtag.i2htok = 0; + + bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa); + msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth); + bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa); + msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth); + + bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL); +} + +static void +bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg; + + switch (msg->mh.msg_id) { + case BFI_MSGQ_I2H_INIT_RSP: + bfa_msgq_init_rsp(msgq, msg); + break; + + case BFI_MSGQ_I2H_DOORBELL_PI: + bfa_msgq_rspq_pi_update(&msgq->rspq, msg); + break; + + case BFI_MSGQ_I2H_DOORBELL_CI: + bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg); + break; + + case BFI_MSGQ_I2H_CMDQ_COPY_REQ: + bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg); + break; + + default: + BUG_ON(1); + } +} + +static void +bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg; + + switch (event) { + case BFA_IOC_E_ENABLED: + bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq); + bfa_wc_up(&msgq->init_wc); + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START); + bfa_wc_up(&msgq->init_wc); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START); + bfa_wc_wait(&msgq->init_wc); + break; + + case BFA_IOC_E_DISABLED: + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP); + break; + + case BFA_IOC_E_FAILED: + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL); + break; + + default: + break; + } +} + +u32 +bfa_msgq_meminfo(void) +{ + return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) + + roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ); +} + +void +bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa) +{ + msgq->cmdq.addr.kva = kva; + msgq->cmdq.addr.pa = pa; + + kva += roundup(BFA_MSGQ_CMDQ_SIZE, 
BFA_DMA_ALIGN_SZ); + pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ); + + msgq->rspq.addr.kva = kva; + msgq->rspq.addr.pa = pa; +} + +void +bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc) +{ + msgq->ioc = ioc; + + bfa_msgq_cmdq_attach(&msgq->cmdq, msgq); + bfa_msgq_rspq_attach(&msgq->rspq, msgq); + + bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq); + bfa_q_qe_init(&msgq->ioc_notify); + bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq); + bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify); +} + +void +bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc, + bfa_msgq_mcfunc_t cbfn, void *cbarg) +{ + msgq->rspq.rsphdlr[mc].cbfn = cbfn; + msgq->rspq.rsphdlr[mc].cbarg = cbarg; +} + +void +bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd) +{ + if (ntohs(cmd->msg_hdr->num_entries) <= + BFA_MSGQ_FREE_CNT(&msgq->cmdq)) { + __cmd_copy(&msgq->cmdq, cmd); + call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK); + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST); + } else { + list_add_tail(&cmd->qe, &msgq->cmdq.pending_q); + } +} + +void +bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len) +{ + struct bfa_msgq_rspq *rspq = &msgq->rspq; + size_t len = buf_len; + size_t to_copy; + int ci; + u8 *src, *dst; + + ci = rspq->consumer_index; + src = (u8 *)rspq->addr.kva; + src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE); + dst = buf; + + while (len) { + to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ? + len : BFI_MSGQ_RSP_ENTRY_SIZE; + memcpy(dst, src, to_copy); + len -= to_copy; + dst += BFI_MSGQ_RSP_ENTRY_SIZE; + BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth); + src = (u8 *)rspq->addr.kva; + src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE); + } +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h new file mode 100644 index 000000000000..a6a565a366dc --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h @@ -0,0 +1,130 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ + +#ifndef __BFA_MSGQ_H__ +#define __BFA_MSGQ_H__ + +#include "bfa_defs.h" +#include "bfi.h" +#include "bfa_ioc.h" +#include "bfa_cs.h" + +#define BFA_MSGQ_FREE_CNT(_q) \ + (((_q)->consumer_index - (_q)->producer_index - 1) & ((_q)->depth - 1)) + +#define BFA_MSGQ_INDX_ADD(_q_indx, _qe_num, _q_depth) \ + ((_q_indx) = (((_q_indx) + (_qe_num)) & ((_q_depth) - 1))) + +#define BFA_MSGQ_CMDQ_NUM_ENTRY 128 +#define BFA_MSGQ_CMDQ_SIZE \ + (BFI_MSGQ_CMD_ENTRY_SIZE * BFA_MSGQ_CMDQ_NUM_ENTRY) + +#define BFA_MSGQ_RSPQ_NUM_ENTRY 128 +#define BFA_MSGQ_RSPQ_SIZE \ + (BFI_MSGQ_RSP_ENTRY_SIZE * BFA_MSGQ_RSPQ_NUM_ENTRY) + +#define bfa_msgq_cmd_set(_cmd, _cbfn, _cbarg, _msg_size, _msg_hdr) \ +do { \ + (_cmd)->cbfn = (_cbfn); \ + (_cmd)->cbarg = (_cbarg); \ + (_cmd)->msg_size = (_msg_size); \ + (_cmd)->msg_hdr = (_msg_hdr); \ +} while (0) + +struct bfa_msgq; + +typedef void (*bfa_msgq_cmdcbfn_t)(void *cbarg, enum bfa_status status); + +struct bfa_msgq_cmd_entry { + struct list_head qe; + bfa_msgq_cmdcbfn_t cbfn; + void *cbarg; + size_t msg_size; + struct bfi_msgq_mhdr *msg_hdr; +}; + +enum bfa_msgq_cmdq_flags { + BFA_MSGQ_CMDQ_F_DB_UPDATE = 1, +}; + +struct bfa_msgq_cmdq { + bfa_fsm_t fsm; + enum bfa_msgq_cmdq_flags flags; + + u16 producer_index; + u16 consumer_index; + u16 depth; /* FW Q depth is 16 bits */ + struct bfa_dma addr; + struct bfa_mbox_cmd dbell_mb; + + u16 token; + int offset; + int bytes_to_copy; + struct bfa_mbox_cmd copy_mb; + + struct list_head pending_q; /* pending command queue */ + + struct bfa_msgq *msgq; +}; + +enum bfa_msgq_rspq_flags { + BFA_MSGQ_RSPQ_F_DB_UPDATE = 1, +}; + +typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr); + +struct bfa_msgq_rspq { + bfa_fsm_t fsm; + enum bfa_msgq_rspq_flags flags; + + u16 producer_index; + u16 consumer_index; + u16 depth; /* FW Q depth is 16 bits */ + struct bfa_dma addr; + struct bfa_mbox_cmd dbell_mb; + + int nmclass; + struct { + bfa_msgq_mcfunc_t cbfn; + void *cbarg; + } rsphdlr[BFI_MC_MAX]; + + struct bfa_msgq *msgq; +}; + +struct bfa_msgq { + struct bfa_msgq_cmdq cmdq; + struct bfa_msgq_rspq rspq; + + struct bfa_wc init_wc; + struct bfa_mbox_cmd init_mb; + + struct bfa_ioc_notify ioc_notify; + struct bfa_ioc *ioc; +}; + +u32 bfa_msgq_meminfo(void); +void bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa); +void bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc); +void bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc, + bfa_msgq_mcfunc_t cbfn, void *cbarg); +void bfa_msgq_cmd_post(struct bfa_msgq *msgq, + struct bfa_msgq_cmd_entry *cmd); +void bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len); + +#endif diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h new file mode 100644 index 000000000000..7a1393aabd43 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -0,0 +1,481 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ +#ifndef __BFI_H__ +#define __BFI_H__ + +#include "bfa_defs.h" + +#pragma pack(1) + +/** + * BFI FW image type + */ +#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ +#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) + +/** + * Msg header common to all msgs + */ +struct bfi_mhdr { + u8 msg_class; /*!< @ref enum bfi_mclass */ + u8 msg_id; /*!< msg opcode with in the class */ + union { + struct { + u8 qid; + u8 fn_lpu; /*!< msg destination */ + } h2i; + u16 i2htok; /*!< token in msgs to host */ + } mtag; +}; + +#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu)) +#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1) +#define bfi_mhdr_2_qid(_mh) ((_mh)->mtag.h2i.qid) + +#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \ +} while (0) + +#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.i2htok = (_i2htok); \ +} while (0) + +/* + * Message opcodes: 0-127 to firmware, 128-255 to host + */ +#define BFI_I2H_OPCODE_BASE 128 +#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) + +/** + **************************************************************************** + * + * Scatter Gather Element and Page definition + * + **************************************************************************** + */ + +/** + * DMA addresses + */ +union bfi_addr_u { + struct { + u32 addr_lo; + u32 addr_hi; + } a32; +}; + +/* + * Large Message structure - 128 Bytes size Msgs + */ +#define BFI_LMSG_SZ 128 +#define BFI_LMSG_PL_WSZ \ + ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4) + +/** + * Mailbox message structure + */ +#define BFI_MBMSG_SZ 7 +struct bfi_mbmsg { + struct bfi_mhdr mh; + u32 pl[BFI_MBMSG_SZ]; +}; + +/** + * Supported PCI function class codes (personality) + */ +enum bfi_pcifn_class { + BFI_PCIFN_CLASS_FC = 0x0c04, + BFI_PCIFN_CLASS_ETH = 0x0200, +}; + +/** + * Message Classes + */ +enum bfi_mclass { + BFI_MC_IOC = 1, /*!< IO Controller (IOC) */ + BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */ + BFI_MC_FLASH = 3, /*!< Flash message class */ + BFI_MC_CEE = 4, /*!< CEE */ + BFI_MC_FCPORT = 5, /*!< FC port */ + BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */ + BFI_MC_LL = 7, /*!< Link Layer */ + BFI_MC_UF = 8, /*!< Unsolicited frame receive */ + BFI_MC_FCXP = 9, /*!< FC Transport */ + BFI_MC_LPS = 10, /*!< lport fc login services */ + BFI_MC_RPORT = 11, /*!< Remote port */ + BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */ + BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */ + BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */ + BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */ + BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */ + BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */ + BFI_MC_TSKIM = 18, /*!< Initiator Task management */ + BFI_MC_SBOOT = 19, /*!< SAN boot services */ + BFI_MC_IPFC = 20, /*!< IP over FC Msgs */ + BFI_MC_PORT = 21, /*!< Physical port */ + BFI_MC_SFP = 22, /*!< SFP module */ + BFI_MC_MSGQ = 23, /*!< MSGQ */ + BFI_MC_ENET = 24, /*!< ENET commands/responses */ + BFI_MC_PHY = 25, /*!< External PHY message class */ + BFI_MC_NBOOT = 26, /*!< Network Boot */ + BFI_MC_TIO_READ = 27, /*!< read IO (Target mode) */ + BFI_MC_TIO_WRITE = 28, /*!< write IO (Target mode) */ + BFI_MC_TIO_DATA_XFERED = 29, /*!< ds transferred (target mode) */ + BFI_MC_TIO_IO = 30, /*!< IO (Target mode) */ + BFI_MC_TIO = 31, /*!< IO (target mode) */ + BFI_MC_MFG = 32, 
/*!< MFG/ASIC block commands */ + BFI_MC_EDMA = 33, /*!< EDMA copy commands */ + BFI_MC_MAX = 34 +}; + +#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ + +#define BFI_FWBOOT_ENV_OS 0 + +/** + *---------------------------------------------------------------------- + * IOC + *---------------------------------------------------------------------- + */ + +/** + * Different asic generations + */ +enum bfi_asic_gen { + BFI_ASIC_GEN_CB = 1, + BFI_ASIC_GEN_CT = 2, + BFI_ASIC_GEN_CT2 = 3, +}; + +enum bfi_asic_mode { + BFI_ASIC_MODE_FC = 1, /* FC upto 8G speed */ + BFI_ASIC_MODE_FC16 = 2, /* FC upto 16G speed */ + BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */ + BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */ +}; + +enum bfi_ioc_h2i_msgs { + BFI_IOC_H2I_ENABLE_REQ = 1, + BFI_IOC_H2I_DISABLE_REQ = 2, + BFI_IOC_H2I_GETATTR_REQ = 3, + BFI_IOC_H2I_DBG_SYNC = 4, + BFI_IOC_H2I_DBG_DUMP = 5, +}; + +enum bfi_ioc_i2h_msgs { + BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1), + BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2), + BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3), + BFI_IOC_I2H_HBEAT = BFA_I2HM(4), +}; + +/** + * BFI_IOC_H2I_GETATTR_REQ message + */ +struct bfi_ioc_getattr_req { + struct bfi_mhdr mh; + union bfi_addr_u attr_addr; +}; + +struct bfi_ioc_attr { + u64 mfg_pwwn; /*!< Mfg port wwn */ + u64 mfg_nwwn; /*!< Mfg node wwn */ + mac_t mfg_mac; /*!< Mfg mac */ + u8 port_mode; /* enum bfi_port_mode */ + u8 rsvd_a; + u64 pwwn; + u64 nwwn; + mac_t mac; /*!< PBC or Mfg mac */ + u16 rsvd_b; + mac_t fcoe_mac; + u16 rsvd_c; + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 rx_bbcredit; /*!< receive buffer credits */ + u32 adapter_prop; /*!< adapter properties */ + u16 maxfrsize; /*!< max receive frame size */ + char asic_rev; + u8 rsvd_d; + char fw_version[BFA_VERSION_LEN]; + char optrom_version[BFA_VERSION_LEN]; + struct bfa_mfg_vpd vpd; + u32 card_type; /*!< card type */ +}; + +/** + * BFI_IOC_I2H_GETATTR_REPLY message + */ +struct bfi_ioc_getattr_reply { + struct bfi_mhdr mh; /*!< Common msg header */ + u8 status; /*!< cfg reply status */ + u8 rsvd[3]; +}; + +/** + * Firmware memory page offsets + */ +#define BFI_IOC_SMEM_PG0_CB (0x40) +#define BFI_IOC_SMEM_PG0_CT (0x180) + +/** + * Firmware statistic offset + */ +#define BFI_IOC_FWSTATS_OFF (0x6B40) +#define BFI_IOC_FWSTATS_SZ (4096) + +/** + * Firmware trace offset + */ +#define BFI_IOC_TRC_OFF (0x4b00) +#define BFI_IOC_TRC_ENTS 256 + +#define BFI_IOC_FW_SIGNATURE (0xbfadbfad) +#define BFI_IOC_MD5SUM_SZ 4 +struct bfi_ioc_image_hdr { + u32 signature; /*!< constant signature */ + u8 asic_gen; /*!< asic generation */ + u8 asic_mode; + u8 port0_mode; /*!< device mode for port 0 */ + u8 port1_mode; /*!< device mode for port 1 */ + u32 exec; /*!< exec vector */ + u32 bootenv; /*!< firmware boot env */ + u32 rsvd_b[4]; + u32 md5sum[BFI_IOC_MD5SUM_SZ]; +}; + +#define BFI_FWBOOT_DEVMODE_OFF 4 +#define BFI_FWBOOT_TYPE_OFF 8 +#define BFI_FWBOOT_ENV_OFF 12 +#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \ + (((u32)(__asic_gen)) << 24 | \ + ((u32)(__asic_mode)) << 16 | \ + ((u32)(__p0_mode)) << 8 | \ + ((u32)(__p1_mode))) + +enum bfi_fwboot_type { + BFI_FWBOOT_TYPE_NORMAL = 0, + BFI_FWBOOT_TYPE_FLASH = 1, + BFI_FWBOOT_TYPE_MEMTEST = 2, +}; + +enum bfi_port_mode { + BFI_PORT_MODE_FC = 1, + BFI_PORT_MODE_ETH = 2, +}; + +struct bfi_ioc_hbeat { + struct bfi_mhdr mh; /*!< common msg header */ + u32 hb_count; /*!< current heart beat count */ +}; + +/** + * IOC hardware/firmware state + */ +enum 
bfi_ioc_state { + BFI_IOC_UNINIT = 0, /*!< not initialized */ + BFI_IOC_INITING = 1, /*!< h/w is being initialized */ + BFI_IOC_HWINIT = 2, /*!< h/w is initialized */ + BFI_IOC_CFG = 3, /*!< IOC configuration in progress */ + BFI_IOC_OP = 4, /*!< IOC is operational */ + BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */ + BFI_IOC_DISABLED = 6, /*!< IOC is disabled */ + BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled;transient */ + BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */ + BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */ +}; + +#define BFI_IOC_ENDIAN_SIG 0x12345678 + +enum { + BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */ + BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */ + BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */ + BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */ + BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */ + BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */ + BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */ + BFI_ADAPTER_PROTO = 0x100000, /*!< prototype adapaters */ + BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */ + BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */ +}; + +#define BFI_ADAPTER_GETP(__prop, __adap_prop) \ + (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ + BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_SETP(__prop, __val) \ + ((__val) << BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_IS_PROTO(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_PROTO) +#define BFI_ADAPTER_IS_TTV(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_TTV) +#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_UNSUPP) +#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \ + ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ + BFI_ADAPTER_UNSUPP)) + +/** + * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages + */ +struct bfi_ioc_ctrl_req { + struct bfi_mhdr mh; + u16 clscode; + u16 rsvd; + u32 tv_sec; +}; + +/** + * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages + */ +struct bfi_ioc_ctrl_reply { + struct bfi_mhdr mh; /*!< Common msg header */ + u8 status; /*!< enable/disable status */ + u8 port_mode; /*!< enum bfa_mode */ + u8 cap_bm; /*!< capability bit mask */ + u8 rsvd; +}; + +#define BFI_IOC_MSGSZ 8 +/** + * H2I Messages + */ +union bfi_ioc_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_ioc_ctrl_req enable_req; + struct bfi_ioc_ctrl_req disable_req; + struct bfi_ioc_getattr_req getattr_req; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + +/** + * I2H Messages + */ +union bfi_ioc_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_ioc_ctrl_reply fw_event; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + +/** + *---------------------------------------------------------------------- + * MSGQ + *---------------------------------------------------------------------- + */ + +enum bfi_msgq_h2i_msgs { + BFI_MSGQ_H2I_INIT_REQ = 1, + BFI_MSGQ_H2I_DOORBELL_PI = 2, + BFI_MSGQ_H2I_DOORBELL_CI = 3, + BFI_MSGQ_H2I_CMDQ_COPY_RSP = 4, +}; + +enum bfi_msgq_i2h_msgs { + BFI_MSGQ_I2H_INIT_RSP = BFA_I2HM(BFI_MSGQ_H2I_INIT_REQ), + BFI_MSGQ_I2H_DOORBELL_PI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_PI), + BFI_MSGQ_I2H_DOORBELL_CI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_CI), + BFI_MSGQ_I2H_CMDQ_COPY_REQ = BFA_I2HM(BFI_MSGQ_H2I_CMDQ_COPY_RSP), +}; + +/* Messages(commands/responsed/AENS will have the following header */ +struct bfi_msgq_mhdr { + u8 msg_class; + u8 msg_id; + u16 msg_token; + u16 num_entries; + u8 enet_id; + u8 rsvd[1]; +}; + +#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \ + (_mh).msg_class = 
(_mc); \ + (_mh).msg_id = (_mid); \ + (_mh).msg_token = (_tok); \ + (_mh).enet_id = (_enet_id); \ +} while (0) + +/* + * Mailbox for messaging interface + */ +#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */ +#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */ + +#define bfi_msgq_num_cmd_entries(_size) \ + (((_size) + BFI_MSGQ_CMD_ENTRY_SIZE - 1) / BFI_MSGQ_CMD_ENTRY_SIZE) + +struct bfi_msgq { + union bfi_addr_u addr; + u16 q_depth; /* Total num of entries in the queue */ + u8 rsvd[2]; +}; + +/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */ +struct bfi_msgq_cfg_req { + struct bfi_mhdr mh; + struct bfi_msgq cmdq; + struct bfi_msgq rspq; +}; + +/* BFI_ENET_MSGQ_CFG_RSP */ +struct bfi_msgq_cfg_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* BFI_MSGQ_H2I_DOORBELL */ +struct bfi_msgq_h2i_db { + struct bfi_mhdr mh; + union { + u16 cmdq_pi; + u16 rspq_ci; + } idx; +}; + +/* BFI_MSGQ_I2H_DOORBELL */ +struct bfi_msgq_i2h_db { + struct bfi_mhdr mh; + union { + u16 rspq_pi; + u16 cmdq_ci; + } idx; +}; + +#define BFI_CMD_COPY_SZ 28 + +/* BFI_MSGQ_H2I_CMD_COPY_RSP */ +struct bfi_msgq_h2i_cmdq_copy_rsp { + struct bfi_mhdr mh; + u8 data[BFI_CMD_COPY_SZ]; +}; + +/* BFI_MSGQ_I2H_CMD_COPY_REQ */ +struct bfi_msgq_i2h_cmdq_copy_req { + struct bfi_mhdr mh; + u16 offset; + u16 len; +}; + +#pragma pack() + +#endif /* __BFI_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h new file mode 100644 index 000000000000..4eecabea397b --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h @@ -0,0 +1,199 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ +#ifndef __BFI_CNA_H__ +#define __BFI_CNA_H__ + +#include "bfi.h" +#include "bfa_defs_cna.h" + +#pragma pack(1) + +enum bfi_port_h2i { + BFI_PORT_H2I_ENABLE_REQ = (1), + BFI_PORT_H2I_DISABLE_REQ = (2), + BFI_PORT_H2I_GET_STATS_REQ = (3), + BFI_PORT_H2I_CLEAR_STATS_REQ = (4), +}; + +enum bfi_port_i2h { + BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1), + BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2), + BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3), + BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), +}; + +/** + * Generic REQ type + */ +struct bfi_port_generic_req { + struct bfi_mhdr mh; /*!< msg header */ + u32 msgtag; /*!< msgtag for reply */ + u32 rsvd; +}; + +/** + * Generic RSP type + */ +struct bfi_port_generic_rsp { + struct bfi_mhdr mh; /*!< common msg header */ + u8 status; /*!< port enable status */ + u8 rsvd[3]; + u32 msgtag; /*!< msgtag for reply */ +}; + +/** + * @todo + * BFI_PORT_H2I_ENABLE_REQ + */ + +/** + * @todo + * BFI_PORT_I2H_ENABLE_RSP + */ + +/** + * BFI_PORT_H2I_DISABLE_REQ + */ + +/** + * BFI_PORT_I2H_DISABLE_RSP + */ + +/** + * BFI_PORT_H2I_GET_STATS_REQ + */ +struct bfi_port_get_stats_req { + struct bfi_mhdr mh; /*!< common msg header */ + union bfi_addr_u dma_addr; +}; + +/** + * BFI_PORT_I2H_GET_STATS_RSP + */ + +/** + * BFI_PORT_H2I_CLEAR_STATS_REQ + */ + +/** + * BFI_PORT_I2H_CLEAR_STATS_RSP + */ + +union bfi_port_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_port_generic_req enable_req; + struct bfi_port_generic_req disable_req; + struct bfi_port_get_stats_req getstats_req; + struct bfi_port_generic_req clearstats_req; +}; + +union bfi_port_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_port_generic_rsp enable_rsp; + struct bfi_port_generic_rsp disable_rsp; + struct bfi_port_generic_rsp getstats_rsp; + struct bfi_port_generic_rsp clearstats_rsp; +}; + +/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */ +enum bfi_cee_h2i_msgs { + BFI_CEE_H2I_GET_CFG_REQ = 1, + BFI_CEE_H2I_RESET_STATS = 2, + BFI_CEE_H2I_GET_STATS_REQ = 3, +}; + +/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */ +enum bfi_cee_i2h_msgs { + BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1), + BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2), + BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3), +}; + +/* Data structures */ + +/* + * @brief H2I command structure for resetting the stats. + * BFI_CEE_H2I_RESET_STATS + */ +struct bfi_lldp_reset_stats { + struct bfi_mhdr mh; +}; + +/* + * @brief H2I command structure for resetting the stats. 
+ * BFI_CEE_H2I_RESET_STATS + */ +struct bfi_cee_reset_stats { + struct bfi_mhdr mh; +}; + +/* + * @brief get configuration command from host + * BFI_CEE_H2I_GET_CFG_REQ + */ +struct bfi_cee_get_req { + struct bfi_mhdr mh; + union bfi_addr_u dma_addr; +}; + +/* + * @brief reply message from firmware + * BFI_CEE_I2H_GET_CFG_RSP + */ +struct bfi_cee_get_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* + * @brief get configuration command from host + * BFI_CEE_H2I_GET_STATS_REQ + */ +struct bfi_cee_stats_req { + struct bfi_mhdr mh; + union bfi_addr_u dma_addr; +}; + +/* + * @brief reply message from firmware + * BFI_CEE_I2H_GET_STATS_RSP + */ +struct bfi_cee_stats_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* @brief mailbox command structures from host to firmware */ +union bfi_cee_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_cee_get_req get_req; + struct bfi_cee_stats_req stats_req; +}; + +/* @brief mailbox message structures from firmware to host */ +union bfi_cee_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_cee_get_rsp get_rsp; + struct bfi_cee_stats_rsp stats_rsp; +}; + +#pragma pack() + +#endif /* __BFI_CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h new file mode 100644 index 000000000000..a90f1cf46b41 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h @@ -0,0 +1,901 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +/** + * @file bfi_enet.h BNA Hardware and Firmware Interface + */ + +/** + * Skipping statistics collection to avoid clutter. + * Command is no longer needed: + * MTU + * TxQ Stop + * RxQ Stop + * RxF Enable/Disable + * + * HDS-off request is dynamic + * keep structures as multiple of 32-bit fields for alignment. + * All values must be written in big-endian. + */ +#ifndef __BFI_ENET_H__ +#define __BFI_ENET_H__ + +#include "bfa_defs.h" +#include "bfi.h" + +#pragma pack(1) + +#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */ + +#define BFI_ENET_TXQ_PRIO_MAX 8 +#define BFI_ENET_RX_QSET_MAX 16 +#define BFI_ENET_TXQ_WI_VECT_MAX 4 + +#define BFI_ENET_VLAN_ID_MAX 4096 +#define BFI_ENET_VLAN_BLOCK_SIZE 512 /* in bits */ +#define BFI_ENET_VLAN_BLOCKS_MAX \ + (BFI_ENET_VLAN_ID_MAX / BFI_ENET_VLAN_BLOCK_SIZE) +#define BFI_ENET_VLAN_WORD_SIZE 32 /* in bits */ +#define BFI_ENET_VLAN_WORDS_MAX \ + (BFI_ENET_VLAN_BLOCK_SIZE / BFI_ENET_VLAN_WORD_SIZE) + +#define BFI_ENET_RSS_RIT_MAX 64 /* entries */ +#define BFI_ENET_RSS_KEY_LEN 10 /* 32-bit words */ + +union bfi_addr_be_u { + struct { + u32 addr_hi; /* Most Significant 32-bits */ + u32 addr_lo; /* Least Significant 32-Bits */ + } a32; +}; + +/** + * T X Q U E U E D E F I N E S + */ +/* TxQ Vector (a.k.a. 
Tx-Buffer Descriptor) */ +/* TxQ Entry Opcodes */ +#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ +#define BFI_ENET_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */ +#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */ + +/* TxQ Entry Control Flags */ +#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8) +#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5) +#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4) +#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3) +#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2) +#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1) +#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0) + +struct bfi_enet_txq_wi_base { + u8 reserved; + u8 num_vectors; /* number of vectors present */ + u16 opcode; + /* BFI_ENET_TXQ_WI_SEND or BFI_ENET_TXQ_WI_SEND_LSO */ + u16 flags; /* OR of all the flags */ + u16 l4_hdr_size_n_offset; + u16 vlan_tag; + u16 lso_mss; /* Only 14 LSB are valid */ + u32 frame_length; /* Only 24 LSB are valid */ +}; + +struct bfi_enet_txq_wi_ext { + u16 reserved; + u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */ + u32 reserved2[3]; +}; + +struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */ + u16 reserved; + u16 length; /* Only 14 LSB are valid */ + union bfi_addr_be_u addr; +}; + +/** + * TxQ Entry Structure + * + */ +struct bfi_enet_txq_entry { + union { + struct bfi_enet_txq_wi_base base; + struct bfi_enet_txq_wi_ext ext; + } wi; + struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX]; +}; + +#define wi_hdr wi.base +#define wi_ext_hdr wi.ext + +#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ + (((_hdr_size) << 10) | ((_offset) & 0x3FF)) + +/** + * R X Q U E U E D E F I N E S + */ +struct bfi_enet_rxq_entry { + union bfi_addr_be_u rx_buffer; +}; + +/** + * R X C O M P L E T I O N Q U E U E D E F I N E S + */ +/* CQ Entry Flags */ +#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0) +#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1) +#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2) +#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3) + +#define BFI_ENET_CQ_EF_RSVD1 (1 << 4) +#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5) +#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6) +#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7) + +#define BFI_ENET_CQ_EF_UDP (1 << 8) +#define BFI_ENET_CQ_EF_TCP (1 << 9) +#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10) +#define BFI_ENET_CQ_EF_IPV6 (1 << 11) + +#define BFI_ENET_CQ_EF_IPV4 (1 << 12) +#define BFI_ENET_CQ_EF_VLAN (1 << 13) +#define BFI_ENET_CQ_EF_RSS (1 << 14) +#define BFI_ENET_CQ_EF_RSVD2 (1 << 15) + +#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16) +#define BFI_ENET_CQ_EF_MCAST (1 << 17) +#define BFI_ENET_CQ_EF_BCAST (1 << 18) +#define BFI_ENET_CQ_EF_REMOTE (1 << 19) + +#define BFI_ENET_CQ_EF_LOCAL (1 << 20) + +/* CQ Entry Structure */ +struct bfi_enet_cq_entry { + u32 flags; + u16 vlan_tag; + u16 length; + u32 rss_hash; + u8 valid; + u8 reserved1; + u8 reserved2; + u8 rxq_id; +}; + +/** + * E N E T C O N T R O L P A T H C O M M A N D S + */ +struct bfi_enet_q { + union bfi_addr_u pg_tbl; + union bfi_addr_u first_entry; + u16 pages; /* # of pages */ + u16 page_sz; +}; + +struct bfi_enet_txq { + struct bfi_enet_q q; + u8 priority; + u8 rsvd[3]; +}; + +struct bfi_enet_rxq { + struct bfi_enet_q q; + u16 rx_buffer_size; + u16 rsvd; +}; + +struct bfi_enet_cq { + struct bfi_enet_q q; +}; + +struct bfi_enet_ib_cfg { + u8 int_pkt_dma; + u8 int_enabled; + u8 int_pkt_enabled; + u8 continuous_coalescing; + u8 msix; + u8 rsvd[3]; + u32 coalescing_timeout; + u32 inter_pkt_timeout; + u8 inter_pkt_count; + u8 rsvd1[3]; +}; + +struct bfi_enet_ib { + union bfi_addr_u 
index_addr; + union { + u16 msix_index; + u16 intx_bitmask; + } intr; + u16 rsvd; +}; + +/** + * ENET command messages + */ +enum bfi_enet_h2i_msgs { + /* Rx Commands */ + BFI_ENET_H2I_RX_CFG_SET_REQ = 1, + BFI_ENET_H2I_RX_CFG_CLR_REQ = 2, + + BFI_ENET_H2I_RIT_CFG_REQ = 3, + BFI_ENET_H2I_RSS_CFG_REQ = 4, + BFI_ENET_H2I_RSS_ENABLE_REQ = 5, + BFI_ENET_H2I_RX_PROMISCUOUS_REQ = 6, + BFI_ENET_H2I_RX_DEFAULT_REQ = 7, + + BFI_ENET_H2I_MAC_UCAST_SET_REQ = 8, + BFI_ENET_H2I_MAC_UCAST_CLR_REQ = 9, + BFI_ENET_H2I_MAC_UCAST_ADD_REQ = 10, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ = 11, + + BFI_ENET_H2I_MAC_MCAST_ADD_REQ = 12, + BFI_ENET_H2I_MAC_MCAST_DEL_REQ = 13, + BFI_ENET_H2I_MAC_MCAST_FILTER_REQ = 14, + + BFI_ENET_H2I_RX_VLAN_SET_REQ = 15, + BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ = 16, + + /* Tx Commands */ + BFI_ENET_H2I_TX_CFG_SET_REQ = 17, + BFI_ENET_H2I_TX_CFG_CLR_REQ = 18, + + /* Port Commands */ + BFI_ENET_H2I_PORT_ADMIN_UP_REQ = 19, + BFI_ENET_H2I_SET_PAUSE_REQ = 20, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ = 21, + + /* Get Attributes Command */ + BFI_ENET_H2I_GET_ATTR_REQ = 22, + + /* Statistics Commands */ + BFI_ENET_H2I_STATS_GET_REQ = 23, + BFI_ENET_H2I_STATS_CLR_REQ = 24, + + BFI_ENET_H2I_WOL_MAGIC_REQ = 25, + BFI_ENET_H2I_WOL_FRAME_REQ = 26, + + BFI_ENET_H2I_MAX = 27, +}; + +enum bfi_enet_i2h_msgs { + /* Rx Responses */ + BFI_ENET_I2H_RX_CFG_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_CFG_SET_REQ), + BFI_ENET_I2H_RX_CFG_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_CFG_CLR_REQ), + + BFI_ENET_I2H_RIT_CFG_RSP = + BFA_I2HM(BFI_ENET_H2I_RIT_CFG_REQ), + BFI_ENET_I2H_RSS_CFG_RSP = + BFA_I2HM(BFI_ENET_H2I_RSS_CFG_REQ), + BFI_ENET_I2H_RSS_ENABLE_RSP = + BFA_I2HM(BFI_ENET_H2I_RSS_ENABLE_REQ), + BFI_ENET_I2H_RX_PROMISCUOUS_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_PROMISCUOUS_REQ), + BFI_ENET_I2H_RX_DEFAULT_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_DEFAULT_REQ), + + BFI_ENET_I2H_MAC_UCAST_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_SET_REQ), + BFI_ENET_I2H_MAC_UCAST_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_CLR_REQ), + BFI_ENET_I2H_MAC_UCAST_ADD_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_ADD_REQ), + BFI_ENET_I2H_MAC_UCAST_DEL_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_DEL_REQ), + + BFI_ENET_I2H_MAC_MCAST_ADD_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_ADD_REQ), + BFI_ENET_I2H_MAC_MCAST_DEL_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_DEL_REQ), + BFI_ENET_I2H_MAC_MCAST_FILTER_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_FILTER_REQ), + + BFI_ENET_I2H_RX_VLAN_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_VLAN_SET_REQ), + + BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ), + + /* Tx Responses */ + BFI_ENET_I2H_TX_CFG_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_TX_CFG_SET_REQ), + BFI_ENET_I2H_TX_CFG_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_TX_CFG_CLR_REQ), + + /* Port Responses */ + BFI_ENET_I2H_PORT_ADMIN_RSP = + BFA_I2HM(BFI_ENET_H2I_PORT_ADMIN_UP_REQ), + + BFI_ENET_I2H_SET_PAUSE_RSP = + BFA_I2HM(BFI_ENET_H2I_SET_PAUSE_REQ), + BFI_ENET_I2H_DIAG_LOOPBACK_RSP = + BFA_I2HM(BFI_ENET_H2I_DIAG_LOOPBACK_REQ), + + /* Attributes Response */ + BFI_ENET_I2H_GET_ATTR_RSP = + BFA_I2HM(BFI_ENET_H2I_GET_ATTR_REQ), + + /* Statistics Responses */ + BFI_ENET_I2H_STATS_GET_RSP = + BFA_I2HM(BFI_ENET_H2I_STATS_GET_REQ), + BFI_ENET_I2H_STATS_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_STATS_CLR_REQ), + + BFI_ENET_I2H_WOL_MAGIC_RSP = + BFA_I2HM(BFI_ENET_H2I_WOL_MAGIC_REQ), + BFI_ENET_I2H_WOL_FRAME_RSP = + BFA_I2HM(BFI_ENET_H2I_WOL_FRAME_REQ), + + /* AENs */ + BFI_ENET_I2H_LINK_DOWN_AEN = BFA_I2HM(BFI_ENET_H2I_MAX), + BFI_ENET_I2H_LINK_UP_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 1), + + 
BFI_ENET_I2H_PORT_ENABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 2), + BFI_ENET_I2H_PORT_DISABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 3), + + BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4), +}; + +/** + * The following error codes can be returned by the enet commands + */ +enum bfi_enet_err { + BFI_ENET_CMD_OK = 0, + BFI_ENET_CMD_FAIL = 1, + BFI_ENET_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */ + BFI_ENET_CMD_CAM_FULL = 3, /* !< CAM is full */ + BFI_ENET_CMD_NOT_OWNER = 4, /* !< Not permitted, b'cos not owner */ + BFI_ENET_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */ + BFI_ENET_CMD_WAITING = 6, /* !< Waiting for completion */ + BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */ +}; + +/** + * Generic Request + * + * bfi_enet_req is used by: + * BFI_ENET_H2I_RX_CFG_CLR_REQ + * BFI_ENET_H2I_TX_CFG_CLR_REQ + */ +struct bfi_enet_req { + struct bfi_msgq_mhdr mh; +}; + +/** + * Enable/Disable Request + * + * bfi_enet_enable_req is used by: + * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_PROMISCUOUS_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_DEFAULT_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_MAC_MCAST_FILTER_REQ + * BFI_ENET_H2I_PORT_ADMIN_UP_REQ (enet_id must be zero) + */ +struct bfi_enet_enable_req { + struct bfi_msgq_mhdr mh; + u8 enable; /* 1 = enable; 0 = disable */ + u8 rsvd[3]; +}; + +/** + * Generic Response + */ +struct bfi_enet_rsp { + struct bfi_msgq_mhdr mh; + u8 error; /*!< if error see cmd_offset */ + u8 rsvd; + u16 cmd_offset; /*!< offset to invalid parameter */ +}; + +/** + * GLOBAL CONFIGURATION + */ + +/** + * bfi_enet_attr_req is used by: + * BFI_ENET_H2I_GET_ATTR_REQ + */ +struct bfi_enet_attr_req { + struct bfi_msgq_mhdr mh; +}; + +/** + * bfi_enet_attr_rsp is used by: + * BFI_ENET_I2H_GET_ATTR_RSP + */ +struct bfi_enet_attr_rsp { + struct bfi_msgq_mhdr mh; + u8 error; /*!< if error see cmd_offset */ + u8 rsvd; + u16 cmd_offset; /*!< offset to invalid parameter */ + u32 max_cfg; + u32 max_ucmac; + u32 rit_size; +}; + +/** + * Tx Configuration + * + * bfi_enet_tx_cfg is used by: + * BFI_ENET_H2I_TX_CFG_SET_REQ + */ +enum bfi_enet_tx_vlan_mode { + BFI_ENET_TX_VLAN_NOP = 0, + BFI_ENET_TX_VLAN_INS = 1, + BFI_ENET_TX_VLAN_WI = 2, +}; + +struct bfi_enet_tx_cfg { + u8 vlan_mode; /*!< processing mode */ + u8 rsvd; + u16 vlan_id; + u8 admit_tagged_frame; + u8 apply_vlan_filter; + u8 add_to_vswitch; + u8 rsvd1[1]; +}; + +struct bfi_enet_tx_cfg_req { + struct bfi_msgq_mhdr mh; + u8 num_queues; /* # of Tx Queues */ + u8 rsvd[3]; + + struct { + struct bfi_enet_txq q; + struct bfi_enet_ib ib; + } q_cfg[BFI_ENET_TXQ_PRIO_MAX]; + + struct bfi_enet_ib_cfg ib_cfg; + + struct bfi_enet_tx_cfg tx_cfg; +}; + +struct bfi_enet_tx_cfg_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 hw_id; /* For debugging */ + u8 rsvd[2]; + struct { + u32 q_dbell; /* PCI base address offset */ + u32 i_dbell; /* PCI base address offset */ + u8 hw_qid; /* For debugging */ + u8 rsvd[3]; + } q_handles[BFI_ENET_TXQ_PRIO_MAX]; +}; + +/** + * Rx Configuration + * + * bfi_enet_rx_cfg is used by: + * BFI_ENET_H2I_RX_CFG_SET_REQ + */ +enum bfi_enet_rxq_type { + BFI_ENET_RXQ_SINGLE = 1, + BFI_ENET_RXQ_LARGE_SMALL = 2, + BFI_ENET_RXQ_HDS = 3, + BFI_ENET_RXQ_HDS_OPT_BASED = 4, +}; + +enum bfi_enet_hds_type { + BFI_ENET_HDS_FORCED = 0x01, + BFI_ENET_HDS_IPV6_UDP = 0x02, + BFI_ENET_HDS_IPV6_TCP = 0x04, + BFI_ENET_HDS_IPV4_TCP = 0x08, + BFI_ENET_HDS_IPV4_UDP = 0x10, +}; + +struct bfi_enet_rx_cfg { + u8 rxq_type; + u8 rsvd[3]; + + struct { + u8 max_header_size; + 
u8 force_offset; + u8 type; + u8 rsvd1; + } hds; + + u8 multi_buffer; + u8 strip_vlan; + u8 drop_untagged; + u8 rsvd2; +}; + +/* + * Multicast frames are received on the ql of q-set index zero. + * On the completion queue. RxQ ID = even is for large/data buffer queues + * and RxQ ID = odd is for small/header buffer queues. + */ +struct bfi_enet_rx_cfg_req { + struct bfi_msgq_mhdr mh; + u8 num_queue_sets; /* # of Rx Queue Sets */ + u8 rsvd[3]; + + struct { + struct bfi_enet_rxq ql; /* large/data/single buffers */ + struct bfi_enet_rxq qs; /* small/header buffers */ + struct bfi_enet_cq cq; + struct bfi_enet_ib ib; + } q_cfg[BFI_ENET_RX_QSET_MAX]; + + struct bfi_enet_ib_cfg ib_cfg; + + struct bfi_enet_rx_cfg rx_cfg; +}; + +struct bfi_enet_rx_cfg_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 hw_id; /* For debugging */ + u8 rsvd[2]; + struct { + u32 ql_dbell; /* PCI base address offset */ + u32 qs_dbell; /* PCI base address offset */ + u32 i_dbell; /* PCI base address offset */ + u8 hw_lqid; /* For debugging */ + u8 hw_sqid; /* For debugging */ + u8 hw_cqid; /* For debugging */ + u8 rsvd; + } q_handles[BFI_ENET_RX_QSET_MAX]; +}; + +/** + * RIT + * + * bfi_enet_rit_req is used by: + * BFI_ENET_H2I_RIT_CFG_REQ + */ +struct bfi_enet_rit_req { + struct bfi_msgq_mhdr mh; + u16 size; /* number of table-entries used */ + u8 rsvd[2]; + u8 table[BFI_ENET_RSS_RIT_MAX]; +}; + +/** + * RSS + * + * bfi_enet_rss_cfg_req is used by: + * BFI_ENET_H2I_RSS_CFG_REQ + */ +enum bfi_enet_rss_type { + BFI_ENET_RSS_IPV6 = 0x01, + BFI_ENET_RSS_IPV6_TCP = 0x02, + BFI_ENET_RSS_IPV4 = 0x04, + BFI_ENET_RSS_IPV4_TCP = 0x08 +}; + +struct bfi_enet_rss_cfg { + u8 type; + u8 mask; + u8 rsvd[2]; + u32 key[BFI_ENET_RSS_KEY_LEN]; +}; + +struct bfi_enet_rss_cfg_req { + struct bfi_msgq_mhdr mh; + struct bfi_enet_rss_cfg cfg; +}; + +/** + * MAC Unicast + * + * bfi_enet_rx_vlan_req is used by: + * BFI_ENET_H2I_MAC_UCAST_SET_REQ + * BFI_ENET_H2I_MAC_UCAST_CLR_REQ + * BFI_ENET_H2I_MAC_UCAST_ADD_REQ + * BFI_ENET_H2I_MAC_UCAST_DEL_REQ + */ +struct bfi_enet_ucast_req { + struct bfi_msgq_mhdr mh; + mac_t mac_addr; + u8 rsvd[2]; +}; + +/** + * MAC Unicast + VLAN + */ +struct bfi_enet_mac_n_vlan_req { + struct bfi_msgq_mhdr mh; + u16 vlan_id; + mac_t mac_addr; +}; + +/** + * MAC Multicast + * + * bfi_enet_mac_mfilter_add_req is used by: + * BFI_ENET_H2I_MAC_MCAST_ADD_REQ + */ +struct bfi_enet_mcast_add_req { + struct bfi_msgq_mhdr mh; + mac_t mac_addr; + u8 rsvd[2]; +}; + +/** + * bfi_enet_mac_mfilter_add_rsp is used by: + * BFI_ENET_I2H_MAC_MCAST_ADD_RSP + */ +struct bfi_enet_mcast_add_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 rsvd; + u16 cmd_offset; + u16 handle; + u8 rsvd1[2]; +}; + +/** + * bfi_enet_mac_mfilter_del_req is used by: + * BFI_ENET_H2I_MAC_MCAST_DEL_REQ + */ +struct bfi_enet_mcast_del_req { + struct bfi_msgq_mhdr mh; + u16 handle; + u8 rsvd[2]; +}; + +/** + * VLAN + * + * bfi_enet_rx_vlan_req is used by: + * BFI_ENET_H2I_RX_VLAN_SET_REQ + */ +struct bfi_enet_rx_vlan_req { + struct bfi_msgq_mhdr mh; + u8 block_idx; + u8 rsvd[3]; + u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX]; +}; + +/** + * PAUSE + * + * bfi_enet_set_pause_req is used by: + * BFI_ENET_H2I_SET_PAUSE_REQ + */ +struct bfi_enet_set_pause_req { + struct bfi_msgq_mhdr mh; + u8 rsvd[2]; + u8 tx_pause; /* 1 = enable; 0 = disable */ + u8 rx_pause; /* 1 = enable; 0 = disable */ +}; + +/** + * DIAGNOSTICS + * + * bfi_enet_diag_lb_req is used by: + * BFI_ENET_H2I_DIAG_LOOPBACK + */ +struct bfi_enet_diag_lb_req { + struct bfi_msgq_mhdr mh; + u8 rsvd[2]; + u8 mode; /* 
cable or Serdes */ + u8 enable; /* 1 = enable; 0 = disable */ +}; + +/** + * enum for Loopback opmodes + */ +enum { + BFI_ENET_DIAG_LB_OPMODE_EXT = 0, + BFI_ENET_DIAG_LB_OPMODE_CBL = 1, +}; + +/** + * STATISTICS + * + * bfi_enet_stats_req is used by: + * BFI_ENET_H2I_STATS_GET_REQ + * BFI_ENET_I2H_STATS_CLR_REQ + */ +struct bfi_enet_stats_req { + struct bfi_msgq_mhdr mh; + u16 stats_mask; + u8 rsvd[2]; + u32 rx_enet_mask; + u32 tx_enet_mask; + union bfi_addr_u host_buffer; +}; + +/** + * defines for "stats_mask" above. + */ +#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */ +#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */ +#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */ +#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */ +#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */ + +#define BFI_ENET_STATS_ALL 0x1f + +/* TxF Frame Statistics */ +struct bfi_enet_stats_txf { + u64 ucast_octets; + u64 ucast; + u64 ucast_vlan; + + u64 mcast_octets; + u64 mcast; + u64 mcast_vlan; + + u64 bcast_octets; + u64 bcast; + u64 bcast_vlan; + + u64 errors; + u64 filter_vlan; /* frames filtered due to VLAN */ + u64 filter_mac_sa; /* frames filtered due to SA check */ +}; + +/* RxF Frame Statistics */ +struct bfi_enet_stats_rxf { + u64 ucast_octets; + u64 ucast; + u64 ucast_vlan; + + u64 mcast_octets; + u64 mcast; + u64 mcast_vlan; + + u64 bcast_octets; + u64 bcast; + u64 bcast_vlan; + u64 frame_drops; +}; + +/* FC Tx Frame Statistics */ +struct bfi_enet_stats_fc_tx { + u64 txf_ucast_octets; + u64 txf_ucast; + u64 txf_ucast_vlan; + + u64 txf_mcast_octets; + u64 txf_mcast; + u64 txf_mcast_vlan; + + u64 txf_bcast_octets; + u64 txf_bcast; + u64 txf_bcast_vlan; + + u64 txf_parity_errors; + u64 txf_timeout; + u64 txf_fid_parity_errors; +}; + +/* FC Rx Frame Statistics */ +struct bfi_enet_stats_fc_rx { + u64 rxf_ucast_octets; + u64 rxf_ucast; + u64 rxf_ucast_vlan; + + u64 rxf_mcast_octets; + u64 rxf_mcast; + u64 rxf_mcast_vlan; + + u64 rxf_bcast_octets; + u64 rxf_bcast; + u64 rxf_bcast_vlan; +}; + +/* RAD Frame Statistics */ +struct bfi_enet_stats_rad { + u64 rx_frames; + u64 rx_octets; + u64 rx_vlan_frames; + + u64 rx_ucast; + u64 rx_ucast_octets; + u64 rx_ucast_vlan; + + u64 rx_mcast; + u64 rx_mcast_octets; + u64 rx_mcast_vlan; + + u64 rx_bcast; + u64 rx_bcast_octets; + u64 rx_bcast_vlan; + + u64 rx_drops; +}; + +/* BPC Tx Registers */ +struct bfi_enet_stats_bpc { + /* transmit stats */ + u64 tx_pause[8]; + u64 tx_zero_pause[8]; /*!< Pause cancellation */ + /*!<Pause initiation rather than retention */ + u64 tx_first_pause[8]; + + /* receive stats */ + u64 rx_pause[8]; + u64 rx_zero_pause[8]; /*!< Pause cancellation */ + /*!<Pause initiation rather than retention */ + u64 rx_first_pause[8]; +}; + +/* MAC Rx Statistics */ +struct bfi_enet_stats_mac { + u64 frame_64; /* both rx and tx counter */ + u64 frame_65_127; /* both rx and tx counter */ + u64 frame_128_255; /* both rx and tx counter */ + u64 frame_256_511; /* both rx and tx counter */ + u64 frame_512_1023; /* both rx and tx counter */ + u64 frame_1024_1518; /* both rx and tx counter */ + u64 frame_1519_1522; /* both rx and tx counter */ + + /* receive stats */ + u64 rx_bytes; + u64 rx_packets; + u64 rx_fcs_error; + u64 rx_multicast; + u64 rx_broadcast; + u64 rx_control_frames; + u64 rx_pause; + u64 rx_unknown_opcode; + u64 rx_alignment_error; + u64 rx_frame_length_error; + u64 rx_code_error; + u64 rx_carrier_sense_error; + u64 rx_undersize; + u64 rx_oversize; + u64 rx_fragments; + 
u64 rx_jabber; + u64 rx_drop; + + /* transmit stats */ + u64 tx_bytes; + u64 tx_packets; + u64 tx_multicast; + u64 tx_broadcast; + u64 tx_pause; + u64 tx_deferral; + u64 tx_excessive_deferral; + u64 tx_single_collision; + u64 tx_muliple_collision; + u64 tx_late_collision; + u64 tx_excessive_collision; + u64 tx_total_collision; + u64 tx_pause_honored; + u64 tx_drop; + u64 tx_jabber; + u64 tx_fcs_error; + u64 tx_control_frame; + u64 tx_oversize; + u64 tx_undersize; + u64 tx_fragments; +}; + +/** + * Complete statistics, DMAed from fw to host followed by + * BFI_ENET_I2H_STATS_GET_RSP + */ +struct bfi_enet_stats { + struct bfi_enet_stats_mac mac_stats; + struct bfi_enet_stats_bpc bpc_stats; + struct bfi_enet_stats_rad rad_stats; + struct bfi_enet_stats_rad rlb_stats; + struct bfi_enet_stats_fc_rx fc_rx_stats; + struct bfi_enet_stats_fc_tx fc_tx_stats; + struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX]; + struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX]; +}; + +#pragma pack() + +#endif /* __BFI_ENET_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h new file mode 100644 index 000000000000..efacff3ab51d --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h @@ -0,0 +1,452 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ + +/* + * bfi_reg.h ASIC register defines for all Brocade adapter ASICs + */ + +#ifndef __BFI_REG_H__ +#define __BFI_REG_H__ + +#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */ +#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */ +#define HOSTFN2_INT_STATUS 0x00014300 /* ct */ +#define HOSTFN3_INT_STATUS 0x00014400 /* ct */ +#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */ +#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */ +#define HOSTFN2_INT_MSK 0x00014304 /* ct */ +#define HOSTFN3_INT_MSK 0x00014404 /* ct */ + +#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */ +#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */ +#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */ +#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */ + +#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */ +#define __P_LCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000 +#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_LCLK_RESET_TIMER_SH 17 +#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH) +#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_LCLK_CNTLMT0_1_SH 14 +#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH) +#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_LCLK_JITLMT0_1_SH 12 +#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH) +#define __APP_PLL_LCLK_HREF 0x00000800 +#define __APP_PLL_LCLK_HDIV 0x00000400 +#define __APP_PLL_LCLK_P0_1_MK 0x00000300 +#define __APP_PLL_LCLK_P0_1_SH 8 +#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH) +#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0 +#define __APP_PLL_LCLK_Z0_2_SH 5 +#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH) +#define __APP_PLL_LCLK_RSEL200500 0x00000010 +#define __APP_PLL_LCLK_ENARST 0x00000008 +#define __APP_PLL_LCLK_BYPASS 0x00000004 +#define __APP_PLL_LCLK_LRESETN 0x00000002 +#define __APP_PLL_LCLK_ENABLE 0x00000001 +#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */ +#define __P_SCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_SCLK_RESET_TIMER_SH 17 +#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH) +#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_SCLK_CNTLMT0_1_SH 14 +#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH) +#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_SCLK_JITLMT0_1_SH 12 +#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH) +#define __APP_PLL_SCLK_HREF 0x00000800 +#define __APP_PLL_SCLK_HDIV 0x00000400 +#define __APP_PLL_SCLK_P0_1_MK 0x00000300 +#define __APP_PLL_SCLK_P0_1_SH 8 +#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH) +#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0 +#define __APP_PLL_SCLK_Z0_2_SH 5 +#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH) +#define __APP_PLL_SCLK_RSEL200500 0x00000010 +#define __APP_PLL_SCLK_ENARST 0x00000008 +#define __APP_PLL_SCLK_BYPASS 0x00000004 +#define __APP_PLL_SCLK_LRESETN 0x00000002 +#define __APP_PLL_SCLK_ENABLE 0x00000001 +#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */ +#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */ +#define __ENABLE_MAC_1 0x00200000 /* ct */ +#define __ENABLE_MAC_0 0x00100000 /* ct */ + +#define HOST_SEM0_REG 0x00014230 /* cb/ct */ +#define HOST_SEM1_REG 0x00014234 /* cb/ct */ +#define 
HOST_SEM2_REG 0x00014238 /* cb/ct */ +#define HOST_SEM3_REG 0x0001423c /* cb/ct */ +#define HOST_SEM4_REG 0x00014610 /* cb/ct */ +#define HOST_SEM5_REG 0x00014614 /* cb/ct */ +#define HOST_SEM6_REG 0x00014618 /* cb/ct */ +#define HOST_SEM7_REG 0x0001461c /* cb/ct */ +#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */ +#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */ +#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */ +#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */ +#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */ +#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */ +#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */ +#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */ + +#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */ +#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */ +#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */ +#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */ +#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */ +#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */ +#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */ +#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */ +#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */ +#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */ +#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */ +#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */ +#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */ +#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */ +#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */ +#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */ + +#define PSS_CTL_REG 0x00018800 /* cb/ct */ +#define __PSS_I2C_CLK_DIV_MK 0x007f0000 +#define __PSS_I2C_CLK_DIV_SH 16 +#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) +#define __PSS_LMEM_INIT_DONE 0x00001000 +#define __PSS_LMEM_RESET 0x00000200 +#define __PSS_LMEM_INIT_EN 0x00000100 +#define __PSS_LPU1_RESET 0x00000002 +#define __PSS_LPU0_RESET 0x00000001 +#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */ +#define ERR_SET_REG 0x00018818 /* cb/ct */ +#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */ +#define __PSS_GPIO_OUT_REG 0x00000fff +#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */ +#define __PSS_GPIO_OE_REG 0x000000ff + +#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */ +#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */ +#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */ +#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */ +#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */ +#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */ +#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */ +#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */ + +#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */ + +#define MBIST_CTL_REG 0x00014220 /* ct */ +#define __EDRAM_BISTR_START 0x00000004 +#define MBIST_STAT_REG 0x00014224 /* ct */ +#define ETH_MAC_SER_REG 0x00014288 /* ct */ +#define __APP_EMS_CKBUFAMPIN 0x00000020 +#define __APP_EMS_REFCLKSEL 0x00000010 +#define __APP_EMS_CMLCKSEL 0x00000008 +#define __APP_EMS_REFCKBUFEN2 0x00000004 +#define __APP_EMS_REFCKBUFEN1 0x00000002 +#define __APP_EMS_CHANNEL_SEL 0x00000001 +#define FNC_PERS_REG 0x00014604 /* ct */ +#define __F3_FUNCTION_ACTIVE 0x80000000 +#define __F3_FUNCTION_MODE 0x40000000 +#define __F3_PORT_MAP_MK 0x30000000 +#define __F3_PORT_MAP_SH 28 +#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH) +#define __F3_VM_MODE 0x08000000 +#define __F3_INTX_STATUS_MK 0x07000000 +#define __F3_INTX_STATUS_SH 24 +#define 
__F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH) +#define __F2_FUNCTION_ACTIVE 0x00800000 +#define __F2_FUNCTION_MODE 0x00400000 +#define __F2_PORT_MAP_MK 0x00300000 +#define __F2_PORT_MAP_SH 20 +#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH) +#define __F2_VM_MODE 0x00080000 +#define __F2_INTX_STATUS_MK 0x00070000 +#define __F2_INTX_STATUS_SH 16 +#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH) +#define __F1_FUNCTION_ACTIVE 0x00008000 +#define __F1_FUNCTION_MODE 0x00004000 +#define __F1_PORT_MAP_MK 0x00003000 +#define __F1_PORT_MAP_SH 12 +#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH) +#define __F1_VM_MODE 0x00000800 +#define __F1_INTX_STATUS_MK 0x00000700 +#define __F1_INTX_STATUS_SH 8 +#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH) +#define __F0_FUNCTION_ACTIVE 0x00000080 +#define __F0_FUNCTION_MODE 0x00000040 +#define __F0_PORT_MAP_MK 0x00000030 +#define __F0_PORT_MAP_SH 4 +#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH) +#define __F0_VM_MODE 0x00000008 +#define __F0_INTX_STATUS 0x00000007 +enum { + __F0_INTX_STATUS_MSIX = 0x0, + __F0_INTX_STATUS_INTA = 0x1, + __F0_INTX_STATUS_INTB = 0x2, + __F0_INTX_STATUS_INTC = 0x3, + __F0_INTX_STATUS_INTD = 0x4, +}; + +#define OP_MODE 0x0001460c +#define __APP_ETH_CLK_LOWSPEED 0x00000004 +#define __GLOBAL_CORECLK_HALFSPEED 0x00000002 +#define __GLOBAL_FCOE_MODE 0x00000001 +#define FW_INIT_HALT_P0 0x000191ac +#define __FW_INIT_HALT_P 0x00000001 +#define FW_INIT_HALT_P1 0x000191bc +#define PMM_1T_RESET_REG_P0 0x0002381c +#define __PMM_1T_RESET_P 0x00000001 +#define PMM_1T_RESET_REG_P1 0x00023c1c + +/** + * Brocade 1860 Adapter specific defines + */ +#define CT2_PCI_CPQ_BASE 0x00030000 +#define CT2_PCI_APP_BASE 0x00030100 +#define CT2_PCI_ETH_BASE 0x00030400 + +/* + * APP block registers + */ +#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00) +#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04) +#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08) +#define __PME_STATUS_ 0x00200000 +#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000 +#define __PF_VF_BAR_SIZE_MODE__SH 19 +#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH) +#define __FC_LL_PORT_MAP__MK 0x00060000 +#define __FC_LL_PORT_MAP__SH 17 +#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH) +#define __PF_VF_ACTIVE_ 0x00010000 +#define __PF_VF_CFG_RDY_ 0x00008000 +#define __PF_VF_ENABLE_ 0x00004000 +#define __PF_DRIVER_ACTIVE_ 0x00002000 +#define __PF_PME_SEND_ENABLE_ 0x00001000 +#define __PF_EXROM_OFFSET__MK 0x00000ff0 +#define __PF_EXROM_OFFSET__SH 4 +#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH) +#define __FC_LL_MODE_ 0x00000008 +#define __PF_INTX_PIN_ 0x00000007 +#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C) +#define __PF_NUM_QUEUES1__MK 0xff000000 +#define __PF_NUM_QUEUES1__SH 24 +#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH) +#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000 +#define __PF_VF_QUE_OFFSET1__SH 16 +#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH) +#define __PF_VF_NUM_QUEUES__MK 0x0000ff00 +#define __PF_VF_NUM_QUEUES__SH 8 +#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH) +#define __PF_VF_QUE_OFFSET_ 0x000000ff +#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18) +#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) + +/* + * Brocade 1860 adapter CPQ block registers + */ +#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) +#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) +#define CT2_LPU0_HOSTFN_MBOX0 
(CT2_PCI_CPQ_BASE + 0x40) +#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60) +#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80) +#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84) +#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88) +#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c) +#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90) +#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94) +#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98) +#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C) +#define CT2_HOST_SEM0_REG 0x000148f0 +#define CT2_HOST_SEM1_REG 0x000148f4 +#define CT2_HOST_SEM2_REG 0x000148f8 +#define CT2_HOST_SEM3_REG 0x000148fc +#define CT2_HOST_SEM4_REG 0x00014900 +#define CT2_HOST_SEM5_REG 0x00014904 +#define CT2_HOST_SEM6_REG 0x00014908 +#define CT2_HOST_SEM7_REG 0x0001490c +#define CT2_HOST_SEM0_INFO_REG 0x000148b0 +#define CT2_HOST_SEM1_INFO_REG 0x000148b4 +#define CT2_HOST_SEM2_INFO_REG 0x000148b8 +#define CT2_HOST_SEM3_INFO_REG 0x000148bc +#define CT2_HOST_SEM4_INFO_REG 0x000148c0 +#define CT2_HOST_SEM5_INFO_REG 0x000148c4 +#define CT2_HOST_SEM6_INFO_REG 0x000148c8 +#define CT2_HOST_SEM7_INFO_REG 0x000148cc + +#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808 +#define __APP_LPUCLK_HALFSPEED 0x40000000 +#define __APP_PLL_LCLK_LOAD 0x20000000 +#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000 +#define __APP_PLL_LCLK_FBCNT_SH 21 +#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH) +enum { + __APP_PLL_LCLK_FBCNT_425_MHZ = 6, + __APP_PLL_LCLK_FBCNT_468_MHZ = 4, +}; +#define __APP_PLL_LCLK_EXTFB 0x00000800 +#define __APP_PLL_LCLK_ENOUTS 0x00000400 +#define __APP_PLL_LCLK_RATE 0x00000010 +#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c +#define __P_SCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000 +#define __APP_PLL_SCLK_CLK_DIV2 0x20000000 +#define __APP_PLL_SCLK_LOAD 0x10000000 +#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000 +#define __APP_PLL_SCLK_FBCNT_SH 20 +#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH) +enum { + __APP_PLL_SCLK_FBCNT_NORM = 6, + __APP_PLL_SCLK_FBCNT_10G_FC = 10, +}; +#define __APP_PLL_SCLK_EXTFB 0x00000800 +#define __APP_PLL_SCLK_ENOUTS 0x00000400 +#define __APP_PLL_SCLK_RATE 0x00000010 +#define CT2_PCIE_MISC_REG 0x00014804 +#define __ETH_CLK_ENABLE_PORT1 0x00000010 +#define CT2_CHIP_MISC_PRG 0x000148a4 +#define __ETH_CLK_ENABLE_PORT0 0x00004000 +#define __APP_LPU_SPEED 0x00000002 +#define CT2_MBIST_STAT_REG 0x00014818 +#define CT2_MBIST_CTL_REG 0x0001481c +#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c +#define __PMM_1T_PNDB_P 0x00000002 +#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c +#define CT2_WGN_STATUS 0x00014990 +#define __A2T_AHB_LOAD 0x00000800 +#define __WGN_READY 0x00000400 +#define __GLBL_PF_VF_CFG_RDY 0x00000200 +#define CT2_NFC_CSR_SET_REG 0x00027424 +#define __HALT_NFC_CONTROLLER 0x00000002 +#define __NFC_CONTROLLER_HALTED 0x00001000 + +#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 +#define __CSI_MAC_RESET 0x00000010 +#define __CSI_MAC_AHB_RESET 0x00000008 +#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4 +#define CT2_CSI_MAC_CONTROL_REG(__n) \ + (CT2_CSI_MAC0_CONTROL_REG + \ + (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) + +/* + * Name semaphore registers based on usage + */ +#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG +#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG +#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG +#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG +#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG +#define 
BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG + +/* + * CT2 semaphore register locations changed + */ +#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG +#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG +#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG +#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG +#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG +#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG + +#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) +#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) + +/* + * And corresponding host interrupt status bit field defines + */ +#define __HFN_INT_CPE_Q0 0x00000001U +#define __HFN_INT_CPE_Q1 0x00000002U +#define __HFN_INT_CPE_Q2 0x00000004U +#define __HFN_INT_CPE_Q3 0x00000008U +#define __HFN_INT_CPE_Q4 0x00000010U +#define __HFN_INT_CPE_Q5 0x00000020U +#define __HFN_INT_CPE_Q6 0x00000040U +#define __HFN_INT_CPE_Q7 0x00000080U +#define __HFN_INT_RME_Q0 0x00000100U +#define __HFN_INT_RME_Q1 0x00000200U +#define __HFN_INT_RME_Q2 0x00000400U +#define __HFN_INT_RME_Q3 0x00000800U +#define __HFN_INT_RME_Q4 0x00001000U +#define __HFN_INT_RME_Q5 0x00002000U +#define __HFN_INT_RME_Q6 0x00004000U +#define __HFN_INT_RME_Q7 0x00008000U +#define __HFN_INT_ERR_EMC 0x00010000U +#define __HFN_INT_ERR_LPU0 0x00020000U +#define __HFN_INT_ERR_LPU1 0x00040000U +#define __HFN_INT_ERR_PSS 0x00080000U +#define __HFN_INT_MBOX_LPU0 0x00100000U +#define __HFN_INT_MBOX_LPU1 0x00200000U +#define __HFN_INT_MBOX1_LPU0 0x00400000U +#define __HFN_INT_MBOX1_LPU1 0x00800000U +#define __HFN_INT_LL_HALT 0x01000000U +#define __HFN_INT_CPE_MASK 0x000000ffU +#define __HFN_INT_RME_MASK 0x0000ff00U +#define __HFN_INT_ERR_MASK \ + (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \ + __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT) +#define __HFN_INT_FN0_MASK \ + (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \ + __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \ + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0) +#define __HFN_INT_FN1_MASK \ + (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \ + __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \ + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1) + +/* + * Host interrupt status defines for 1860 + */ +#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U +#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U +#define __HFN_INT_ERR_PSS_CT2 0x00040000U +#define __HFN_INT_ERR_LPU0_CT2 0x00080000U +#define __HFN_INT_ERR_LPU1_CT2 0x00100000U +#define __HFN_INT_CPQ_HALT_CT2 0x00200000U +#define __HFN_INT_ERR_WGN_CT2 0x00400000U +#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U +#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U +#define __HFN_INT_ERR_MASK_CT2 \ + (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \ + __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \ + __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \ + __HFN_INT_ERR_LEHTX_CT2) +#define __HFN_INT_FN0_MASK_CT2 \ + (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \ + __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \ + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2) +#define __HFN_INT_FN1_MASK_CT2 \ + (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \ + __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \ + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2) + +/* + * asic memory map. 
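+ *
+ * The macros below treat shared memory as 32 KB (0x8000-byte) pages:
+ * PSS_SMEM_PGNUM() turns a base page (_pg0) plus an absolute offset
+ * (_ma) into a page number, and PSS_SMEM_PGOFF() yields the offset
+ * within that page.  Purely as an illustration, with a _pg0 of
+ * PSS_SMEM_PAGE_START and an offset of 0x12345:
+ *
+ *	PSS_SMEM_PGNUM(PSS_SMEM_PAGE_START, 0x12345)
+ *		= 0x8000 + (0x12345 >> 15) = 0x8002
+ *	PSS_SMEM_PGOFF(0x12345)
+ *		= 0x12345 & 0x7fff = 0x2345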
+ */ +#define PSS_SMEM_PAGE_START 0x8000 +#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) +#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) + +#endif /* __BFI_REG_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h new file mode 100644 index 000000000000..4d7a5de08e12 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -0,0 +1,575 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ +#ifndef __BNA_H__ +#define __BNA_H__ + +#include "bfa_defs.h" +#include "bfa_ioc.h" +#include "bfi_enet.h" +#include "bna_types.h" + +extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; + +/** + * + * Macros and constants + * + */ + +#define BNA_IOC_TIMER_FREQ 200 + +/* Log string size */ +#define BNA_MESSAGE_SIZE 256 + +#define bna_is_small_rxq(_id) ((_id) & 0x1) + +#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \ + (!memcmp((_mac1), (_mac2), sizeof(mac_t))) + +#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0) + +#define BNA_TO_POWER_OF_2(x) \ +do { \ + int _shift = 0; \ + while ((x) && (x) != 1) { \ + (x) >>= 1; \ + _shift++; \ + } \ + (x) <<= _shift; \ +} while (0) + +#define BNA_TO_POWER_OF_2_HIGH(x) \ +do { \ + int n = 1; \ + while (n < (x)) \ + n <<= 1; \ + (x) = n; \ +} while (0) + +/* + * input : _addr-> os dma addr in host endian format, + * output : _bna_dma_addr-> pointer to hw dma addr + */ +#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \ +do { \ + u64 tmp_addr = \ + cpu_to_be64((u64)(_addr)); \ + (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \ + (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \ +} while (0) + +/* + * input : _bna_dma_addr-> pointer to hw dma addr + * output : _addr-> os dma addr in host endian format + */ +#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \ +do { \ + (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \ + | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \ +} while (0) + +#define containing_rec(addr, type, field) \ + ((type *)((unsigned char *)(addr) - \ + (unsigned char *)(&((type *)0)->field))) + +#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2) + +/* TxQ element is 64 bytes */ +#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6) +#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6) + +#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \ +{ \ + unsigned int page_index; /* index within a page */ \ + void *page_addr; \ + page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \ + (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \ + page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\ + (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \ +} + +/* RxQ element is 8 bytes */ +#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3) +#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3) + +#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \ +{ \ + unsigned int page_index; /* index within a page */ \ + void 
*page_addr; \ + page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \ + (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \ + page_addr = (_qpt_ptr)[((_qe_idx) >> \ + BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \ + (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \ +} + +/* CQ element is 16 bytes */ +#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4) +#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4) + +#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \ +{ \ + unsigned int page_index; /* index within a page */ \ + void *page_addr; \ + \ + page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \ + (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \ + page_addr = (_qpt_ptr)[((_qe_idx) >> \ + BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \ + (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\ +} + +#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \ + (&((_cast *)(_q_base))[(_qe_idx)]) + +#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx)) + +#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \ + ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1)) + +#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \ + (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1)) + +#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \ + (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \ + ((_q_depth) - 1)) + +#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \ + ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \ + (_q_depth - 1)) + +#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index) + +#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index) + +#define BNA_Q_PI_ADD(_q_ptr, _num) \ + (_q_ptr)->q.producer_index = \ + (((_q_ptr)->q.producer_index + (_num)) & \ + ((_q_ptr)->q.q_depth - 1)) + +#define BNA_Q_CI_ADD(_q_ptr, _num) \ + (_q_ptr)->q.consumer_index = \ + (((_q_ptr)->q.consumer_index + (_num)) \ + & ((_q_ptr)->q.q_depth - 1)) + +#define BNA_Q_FREE_COUNT(_q_ptr) \ + (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth)) + +#define BNA_Q_IN_USE_COUNT(_q_ptr) \ + (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth)) + +#define BNA_LARGE_PKT_SIZE 1000 + +#define BNA_UPDATE_PKT_CNT(_pkt, _len) \ +do { \ + if ((_len) > BNA_LARGE_PKT_SIZE) { \ + (_pkt)->large_pkt_cnt++; \ + } else { \ + (_pkt)->small_pkt_cnt++; \ + } \ +} while (0) + +#define call_rxf_stop_cbfn(rxf) \ +do { \ + if ((rxf)->stop_cbfn) { \ + void (*cbfn)(struct bna_rx *); \ + struct bna_rx *cbarg; \ + cbfn = (rxf)->stop_cbfn; \ + cbarg = (rxf)->stop_cbarg; \ + (rxf)->stop_cbfn = NULL; \ + (rxf)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_rxf_start_cbfn(rxf) \ +do { \ + if ((rxf)->start_cbfn) { \ + void (*cbfn)(struct bna_rx *); \ + struct bna_rx *cbarg; \ + cbfn = (rxf)->start_cbfn; \ + cbarg = (rxf)->start_cbarg; \ + (rxf)->start_cbfn = NULL; \ + (rxf)->start_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_rxf_cam_fltr_cbfn(rxf) \ +do { \ + if ((rxf)->cam_fltr_cbfn) { \ + void (*cbfn)(struct bnad *, struct bna_rx *); \ + struct bnad *cbarg; \ + cbfn = (rxf)->cam_fltr_cbfn; \ + cbarg = (rxf)->cam_fltr_cbarg; \ + (rxf)->cam_fltr_cbfn = NULL; \ + (rxf)->cam_fltr_cbarg = NULL; \ + cbfn(cbarg, rxf->rx); \ + } \ +} while (0) + +#define call_rxf_pause_cbfn(rxf) \ +do { \ + if ((rxf)->oper_state_cbfn) { \ + void (*cbfn)(struct bnad *, struct bna_rx *); \ + struct bnad *cbarg; \ + cbfn = (rxf)->oper_state_cbfn; \ + cbarg = (rxf)->oper_state_cbarg; \ + (rxf)->oper_state_cbfn = NULL; \ + (rxf)->oper_state_cbarg = NULL; \ + 
cbfn(cbarg, rxf->rx); \ + } \ +} while (0) + +#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf) + +#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx)) + +#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx)) + +#define xxx_enable(mode, bitmask, xxx) \ +do { \ + bitmask |= xxx; \ + mode |= xxx; \ +} while (0) + +#define xxx_disable(mode, bitmask, xxx) \ +do { \ + bitmask |= xxx; \ + mode &= ~xxx; \ +} while (0) + +#define xxx_inactive(mode, bitmask, xxx) \ +do { \ + bitmask &= ~xxx; \ + mode &= ~xxx; \ +} while (0) + +#define is_promisc_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define is_promisc_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define promisc_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define promisc_disable(mode, bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define promisc_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC) + +#define is_default_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define is_default_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_disable(mode, bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define is_allmulti_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define is_allmulti_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_disable(mode, bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define GET_RXQS(rxp, q0, q1) do { \ + switch ((rxp)->type) { \ + case BNA_RXP_SINGLE: \ + (q0) = rxp->rxq.single.only; \ + (q1) = NULL; \ + break; \ + case BNA_RXP_SLR: \ + (q0) = rxp->rxq.slr.large; \ + (q1) = rxp->rxq.slr.small; \ + break; \ + case BNA_RXP_HDS: \ + (q0) = rxp->rxq.hds.data; \ + (q1) = rxp->rxq.hds.hdr; \ + break; \ + } \ +} while (0) + +#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask) + +#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask) + +#define bna_tx_from_rid(_bna, _rid, _tx) \ +do { \ + struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \ + struct bna_tx *__tx; \ + struct list_head *qe; \ + _tx = NULL; \ + list_for_each(qe, &__tx_mod->tx_active_q) { \ + __tx = (struct bna_tx *)qe; \ + if (__tx->rid == (_rid)) { \ + (_tx) = __tx; \ + break; \ + } \ + } \ +} while (0) + +#define bna_rx_from_rid(_bna, _rid, _rx) \ +do { \ + struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \ + struct bna_rx *__rx; \ + struct list_head *qe; \ + _rx = NULL; \ + list_for_each(qe, &__rx_mod->rx_active_q) { \ + __rx = (struct bna_rx *)qe; \ + if (__rx->rid == (_rid)) { \ + (_rx) = __rx; \ + break; \ + } \ + } \ +} while (0) + +/** + * + * Inline functions + * + */ + +static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) +{ + struct bna_mac *mac = NULL; + struct list_head *qe; + list_for_each(qe, q) { + if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) { + mac = (struct bna_mac *)qe; + break; + } + } + return mac; +} + +#define bna_attr(_bna) (&(_bna)->ioceth.attr) + +/** + * + * 
Function prototypes + * + */ + +/** + * BNA + */ + +/* FW response handlers */ +void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr); + +/* APIs for BNAD */ +void bna_res_req(struct bna_res_info *res_info); +void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info); +void bna_init(struct bna *bna, struct bnad *bnad, + struct bfa_pcidev *pcidev, + struct bna_res_info *res_info); +void bna_mod_init(struct bna *bna, struct bna_res_info *res_info); +void bna_uninit(struct bna *bna); +int bna_num_txq_set(struct bna *bna, int num_txq); +int bna_num_rxp_set(struct bna *bna, int num_rxp); +void bna_hw_stats_get(struct bna *bna); + +/* APIs for RxF */ +struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod); +void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, + struct bna_mac *mac); +struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod); +void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, + struct bna_mac *mac); +struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod); +void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, + struct bna_mcam_handle *handle); + +/** + * MBOX + */ + +/* API for BNAD */ +void bna_mbox_handler(struct bna *bna, u32 intr_status); + +/** + * ETHPORT + */ + +/* Callbacks for RX */ +void bna_ethport_cb_rx_started(struct bna_ethport *ethport); +void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport); + +/** + * TX MODULE AND TX + */ +/* FW response handelrs */ +void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod); + +/* APIs for BNA */ +void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, + struct bna_res_info *res_info); +void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod); + +/* APIs for ENET */ +void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type); +void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type); +void bna_tx_mod_fail(struct bna_tx_mod *tx_mod); + +/* APIs for BNAD */ +void bna_tx_res_req(int num_txq, int txq_depth, + struct bna_res_info *res_info); +struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad, + struct bna_tx_config *tx_cfg, + const struct bna_tx_event_cbfn *tx_cbfn, + struct bna_res_info *res_info, void *priv); +void bna_tx_destroy(struct bna_tx *tx); +void bna_tx_enable(struct bna_tx *tx); +void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_tx *)); +void bna_tx_cleanup_complete(struct bna_tx *tx); +void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); + +/** + * RX MODULE, RX, RXF + */ + +/* FW response handlers */ +void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr); + +/* APIs for BNA */ +void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, + struct bna_res_info *res_info); +void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod); + +/* APIs for ENET */ +void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type); +void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type); +void bna_rx_mod_fail(struct bna_rx_mod *rx_mod); + +/* APIs for BNAD */ +void 
bna_rx_res_req(struct bna_rx_config *rx_config, + struct bna_res_info *res_info); +struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad, + struct bna_rx_config *rx_cfg, + const struct bna_rx_event_cbfn *rx_cbfn, + struct bna_res_info *res_info, void *priv); +void bna_rx_destroy(struct bna_rx *rx); +void bna_rx_enable(struct bna_rx *rx); +void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_rx *)); +void bna_rx_cleanup_complete(struct bna_rx *rx); +void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo); +void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]); +void bna_rx_dim_update(struct bna_ccb *ccb); +enum bna_cb_status +bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, + enum bna_rxmode bitmask, + void (*cbfn)(struct bnad *, struct bna_rx *)); +void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); +void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); +void bna_rx_vlanfilter_enable(struct bna_rx *rx); +/** + * ENET + */ + +/* API for RX */ +int bna_enet_mtu_get(struct bna_enet *enet); + +/* Callbacks for TX, RX */ +void bna_enet_cb_tx_stopped(struct bna_enet *enet); +void bna_enet_cb_rx_stopped(struct bna_enet *enet); + +/* API for BNAD */ +void bna_enet_enable(struct bna_enet *enet); +void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type, + void (*cbfn)(void *)); +void bna_enet_pause_config(struct bna_enet *enet, + struct bna_pause_config *pause_config, + void (*cbfn)(struct bnad *)); +void bna_enet_mtu_set(struct bna_enet *enet, int mtu, + void (*cbfn)(struct bnad *)); +void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac); + +/** + * IOCETH + */ + +/* APIs for BNAD */ +void bna_ioceth_enable(struct bna_ioceth *ioceth); +void bna_ioceth_disable(struct bna_ioceth *ioceth, + enum bna_cleanup_type type); + +/** + * BNAD + */ + +/* Callbacks for ENET */ +void bnad_cb_ethport_link_status(struct bnad *bnad, + enum bna_link_status status); + +/* Callbacks for IOCETH */ +void bnad_cb_ioceth_ready(struct bnad *bnad); +void bnad_cb_ioceth_failed(struct bnad *bnad); +void bnad_cb_ioceth_disabled(struct bnad *bnad); +void bnad_cb_mbox_intr_enable(struct bnad *bnad); +void bnad_cb_mbox_intr_disable(struct bnad *bnad); + +/* Callbacks for BNA */ +void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, + struct bna_stats *stats); + +#endif /* __BNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c new file mode 100644 index 000000000000..26f5c5abfd1f --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -0,0 +1,2144 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ +#include "bna.h" + +static inline int +ethport_can_be_up(struct bna_ethport *ethport) +{ + int ready = 0; + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) && + (ethport->flags & BNA_ETHPORT_F_RX_STARTED) && + (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED)); + else + ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) && + (ethport->flags & BNA_ETHPORT_F_RX_STARTED) && + !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED)); + return ready; +} + +#define ethport_is_up ethport_can_be_up + +enum bna_ethport_event { + ETHPORT_E_START = 1, + ETHPORT_E_STOP = 2, + ETHPORT_E_FAIL = 3, + ETHPORT_E_UP = 4, + ETHPORT_E_DOWN = 5, + ETHPORT_E_FWRESP_UP_OK = 6, + ETHPORT_E_FWRESP_DOWN = 7, + ETHPORT_E_FWRESP_UP_FAIL = 8, +}; + +enum bna_enet_event { + ENET_E_START = 1, + ENET_E_STOP = 2, + ENET_E_FAIL = 3, + ENET_E_PAUSE_CFG = 4, + ENET_E_MTU_CFG = 5, + ENET_E_FWRESP_PAUSE = 6, + ENET_E_CHLD_STOPPED = 7, +}; + +enum bna_ioceth_event { + IOCETH_E_ENABLE = 1, + IOCETH_E_DISABLE = 2, + IOCETH_E_IOC_RESET = 3, + IOCETH_E_IOC_FAILED = 4, + IOCETH_E_IOC_READY = 5, + IOCETH_E_ENET_ATTR_RESP = 6, + IOCETH_E_ENET_STOPPED = 7, + IOCETH_E_IOC_DISABLED = 8, +}; + +#define bna_stats_copy(_name, _type) \ +do { \ + count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \ + stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \ + stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \ + for (i = 0; i < count; i++) \ + stats_dst[i] = be64_to_cpu(stats_src[i]); \ +} while (0) \ + +/* + * FW response handlers + */ + +static void +bna_bfi_ethport_enable_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport_can_be_up(ethport)) + bfa_fsm_send_event(ethport, ETHPORT_E_UP); +} + +static void +bna_bfi_ethport_disable_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + int ethport_up = ethport_is_up(ethport); + + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport_up) + bfa_fsm_send_event(ethport, ETHPORT_E_DOWN); +} + +static void +bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_enable_req *admin_req = + ðport->bfi_enet_cmd.admin_req; + struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr; + + switch (admin_req->enable) { + case BNA_STATUS_T_ENABLED: + if (rsp->error == BFI_ENET_CMD_OK) + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK); + else { + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL); + } + break; + + case BNA_STATUS_T_DISABLED: + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN); + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); + break; + } +} + +static void +bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_diag_lb_req *diag_lb_req = + 
ðport->bfi_enet_cmd.lpbk_req; + struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr; + + switch (diag_lb_req->enable) { + case BNA_STATUS_T_ENABLED: + if (rsp->error == BFI_ENET_CMD_OK) + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK); + else { + ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP; + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL); + } + break; + + case BNA_STATUS_T_DISABLED: + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN); + break; + } +} + +static void +bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE); +} + +static void +bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr; + + /** + * Store only if not set earlier, since BNAD can override the HW + * attributes + */ + if (!ioceth->attr.fw_query_complete) { + ioceth->attr.num_txq = ntohl(rsp->max_cfg); + ioceth->attr.num_rxp = ntohl(rsp->max_cfg); + ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac); + ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; + ioceth->attr.max_rit_size = ntohl(rsp->rit_size); + ioceth->attr.fw_query_complete = true; + } + + bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP); +} + +static void +bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get; + u64 *stats_src; + u64 *stats_dst; + u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask); + u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask); + int count; + int i; + + bna_stats_copy(mac, mac); + bna_stats_copy(bpc, bpc); + bna_stats_copy(rad, rad); + bna_stats_copy(rlb, rad); + bna_stats_copy(fc_rx, fc_rx); + bna_stats_copy(fc_tx, fc_tx); + + stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]); + + /* Copy Rxf stats to SW area, scatter them while copying */ + for (i = 0; i < BFI_ENET_CFG_MAX; i++) { + stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]); + memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf)); + if (rx_enet_mask & ((u32)(1 << i))) { + int k; + count = sizeof(struct bfi_enet_stats_rxf) / + sizeof(u64); + for (k = 0; k < count; k++) { + stats_dst[k] = be64_to_cpu(*stats_src); + stats_src++; + } + } + } + + /* Copy Txf stats to SW area, scatter them while copying */ + for (i = 0; i < BFI_ENET_CFG_MAX; i++) { + stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]); + memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf)); + if (tx_enet_mask & ((u32)(1 << i))) { + int k; + count = sizeof(struct bfi_enet_stats_txf) / + sizeof(u64); + for (k = 0; k < count; k++) { + stats_dst[k] = be64_to_cpu(*stats_src); + stats_src++; + } + } + } + + bna->stats_mod.stats_get_busy = false; + bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats); +} + +static void +bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->link_status = BNA_LINK_UP; + + /* Dispatch events */ + ethport->link_cbfn(ethport->bna->bnad, ethport->link_status); +} + +static void +bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->link_status = BNA_LINK_DOWN; + + /* Dispatch events */ + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); +} + +static void +bna_err_handler(struct bna *bna, u32 intr_status) +{ + if (BNA_IS_HALT_INTR(bna, intr_status)) + bna_halt_clear(bna); + + bfa_nw_ioc_error_isr(&bna->ioceth.ioc); +} + +void +bna_mbox_handler(struct bna *bna, u32 intr_status) +{ + if (BNA_IS_ERR_INTR(bna, 
intr_status)) { + bna_err_handler(bna, intr_status); + return; + } + if (BNA_IS_MBOX_INTR(bna, intr_status)) + bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc); +} + +static void +bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) +{ + struct bna *bna = (struct bna *)arg; + struct bna_tx *tx; + struct bna_rx *rx; + + switch (msghdr->msg_id) { + case BFI_ENET_I2H_RX_CFG_SET_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rx_enet_start_rsp(rx, msghdr); + break; + + case BFI_ENET_I2H_RX_CFG_CLR_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rx_enet_stop_rsp(rx, msghdr); + break; + + case BFI_ENET_I2H_RIT_CFG_RSP: + case BFI_ENET_I2H_RSS_CFG_RSP: + case BFI_ENET_I2H_RSS_ENABLE_RSP: + case BFI_ENET_I2H_RX_PROMISCUOUS_RSP: + case BFI_ENET_I2H_RX_DEFAULT_RSP: + case BFI_ENET_I2H_MAC_UCAST_SET_RSP: + case BFI_ENET_I2H_MAC_UCAST_CLR_RSP: + case BFI_ENET_I2H_MAC_UCAST_ADD_RSP: + case BFI_ENET_I2H_MAC_UCAST_DEL_RSP: + case BFI_ENET_I2H_MAC_MCAST_DEL_RSP: + case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP: + case BFI_ENET_I2H_RX_VLAN_SET_RSP: + case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr); + break; + + case BFI_ENET_I2H_MAC_MCAST_ADD_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr); + break; + + case BFI_ENET_I2H_TX_CFG_SET_RSP: + bna_tx_from_rid(bna, msghdr->enet_id, tx); + if (tx) + bna_bfi_tx_enet_start_rsp(tx, msghdr); + break; + + case BFI_ENET_I2H_TX_CFG_CLR_RSP: + bna_tx_from_rid(bna, msghdr->enet_id, tx); + if (tx) + bna_bfi_tx_enet_stop_rsp(tx, msghdr); + break; + + case BFI_ENET_I2H_PORT_ADMIN_RSP: + bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_DIAG_LOOPBACK_RSP: + bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_SET_PAUSE_RSP: + bna_bfi_pause_set_rsp(&bna->enet, msghdr); + break; + + case BFI_ENET_I2H_GET_ATTR_RSP: + bna_bfi_attr_get_rsp(&bna->ioceth, msghdr); + break; + + case BFI_ENET_I2H_STATS_GET_RSP: + bna_bfi_stats_get_rsp(bna, msghdr); + break; + + case BFI_ENET_I2H_STATS_CLR_RSP: + /* No-op */ + break; + + case BFI_ENET_I2H_LINK_UP_AEN: + bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_LINK_DOWN_AEN: + bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_PORT_ENABLE_AEN: + bna_bfi_ethport_enable_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_PORT_DISABLE_AEN: + bna_bfi_ethport_disable_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_BW_UPDATE_AEN: + bna_bfi_bw_update_aen(&bna->tx_mod); + break; + + default: + break; + } +} + +/** + * ETHPORT + */ +#define call_ethport_stop_cbfn(_ethport) \ +do { \ + if ((_ethport)->stop_cbfn) { \ + void (*cbfn)(struct bna_enet *); \ + cbfn = (_ethport)->stop_cbfn; \ + (_ethport)->stop_cbfn = NULL; \ + cbfn(&(_ethport)->bna->enet); \ + } \ +} while (0) + +#define call_ethport_adminup_cbfn(ethport, status) \ +do { \ + if ((ethport)->adminup_cbfn) { \ + void (*cbfn)(struct bnad *, enum bna_cb_status); \ + cbfn = (ethport)->adminup_cbfn; \ + (ethport)->adminup_cbfn = NULL; \ + cbfn((ethport)->bna->bnad, status); \ + } \ +} while (0) + +static void +bna_bfi_ethport_admin_up(struct bna_ethport *ethport) +{ + struct bfi_enet_enable_req *admin_up_req = + ðport->bfi_enet_cmd.admin_req; + + bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0); + admin_up_req->mh.num_entries = htons( + 
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + admin_up_req->enable = BNA_STATUS_T_ENABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &admin_up_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_admin_down(struct bna_ethport *ethport) +{ + struct bfi_enet_enable_req *admin_down_req = + ðport->bfi_enet_cmd.admin_req; + + bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0); + admin_down_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + admin_down_req->enable = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &admin_down_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport) +{ + struct bfi_enet_diag_lb_req *lpbk_up_req = + ðport->bfi_enet_cmd.lpbk_req; + + bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0); + lpbk_up_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req))); + lpbk_up_req->mode = (ethport->bna->enet.type == + BNA_ENET_T_LOOPBACK_INTERNAL) ? + BFI_ENET_DIAG_LB_OPMODE_EXT : + BFI_ENET_DIAG_LB_OPMODE_CBL; + lpbk_up_req->enable = BNA_STATUS_T_ENABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport) +{ + struct bfi_enet_diag_lb_req *lpbk_down_req = + ðport->bfi_enet_cmd.lpbk_req; + + bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0); + lpbk_down_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req))); + lpbk_down_req->enable = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_up(struct bna_ethport *ethport) +{ + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + bna_bfi_ethport_admin_up(ethport); + else + bna_bfi_ethport_lpbk_up(ethport); +} + +static void +bna_bfi_ethport_down(struct bna_ethport *ethport) +{ + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + bna_bfi_ethport_admin_down(ethport); + else + bna_bfi_ethport_lpbk_down(ethport); +} + +bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport, + enum bna_ethport_event); + +static void +bna_ethport_sm_stopped_entry(struct bna_ethport *ethport) +{ + call_ethport_stop_cbfn(ethport); +} + +static void +bna_ethport_sm_stopped(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_START: + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + case ETHPORT_E_STOP: + call_ethport_stop_cbfn(ethport); + break; + + case ETHPORT_E_FAIL: + /* No-op */ + break; + + case 
ETHPORT_E_DOWN: + /* This event is received due to Rx objects failing */ + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_down_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_down(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_UP: + bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait); + bna_bfi_ethport_up(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + break; + + case ETHPORT_E_FAIL: + call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL); + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT); + bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait); + break; + + case ETHPORT_E_FWRESP_UP_OK: + call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS); + bfa_fsm_set_state(ethport, bna_ethport_sm_up); + break; + + case ETHPORT_E_FWRESP_UP_FAIL: + call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL); + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + case ETHPORT_E_FWRESP_DOWN: + /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */ + bna_bfi_ethport_up(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport) +{ + /** + * NOTE: Do not call bna_bfi_ethport_down() here. 
That will over step + * mbox due to up_resp_wait -> down_resp_wait transition on event + * ETHPORT_E_DOWN + */ +} + +static void +bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_UP: + bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait); + break; + + case ETHPORT_E_FWRESP_UP_OK: + /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */ + bna_bfi_ethport_down(ethport); + break; + + case ETHPORT_E_FWRESP_UP_FAIL: + case ETHPORT_E_FWRESP_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_up_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_up(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + bna_bfi_ethport_down(ethport); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait); + bna_bfi_ethport_down(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + /** + * This event is received due to Rx objects stopping in + * parallel to ethport + */ + /* No-op */ + break; + + case ETHPORT_E_FWRESP_UP_OK: + /* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */ + bna_bfi_ethport_down(ethport); + break; + + case ETHPORT_E_FWRESP_UP_FAIL: + case ETHPORT_E_FWRESP_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_init(struct bna_ethport *ethport, struct bna *bna) +{ + ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED); + ethport->bna = bna; + + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn = bnad_cb_ethport_link_status; + + ethport->rx_started_count = 0; + + ethport->stop_cbfn = NULL; + ethport->adminup_cbfn = NULL; + + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); +} + +static void +bna_ethport_uninit(struct bna_ethport *ethport) +{ + ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP; + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + + ethport->bna = NULL; +} + +static void +bna_ethport_start(struct bna_ethport *ethport) +{ + bfa_fsm_send_event(ethport, ETHPORT_E_START); +} + +static void +bna_enet_cb_ethport_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +static void +bna_ethport_stop(struct bna_ethport *ethport) +{ + ethport->stop_cbfn = bna_enet_cb_ethport_stopped; + bfa_fsm_send_event(ethport, ETHPORT_E_STOP); +} + +static void +bna_ethport_fail(struct bna_ethport *ethport) +{ + /* Reset the physical port status to enabled */ + ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport->link_status != BNA_LINK_DOWN) { + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); + } + bfa_fsm_send_event(ethport, ETHPORT_E_FAIL); +} + +/* Should be called 
only when ethport is disabled */ +void +bna_ethport_cb_rx_started(struct bna_ethport *ethport) +{ + ethport->rx_started_count++; + + if (ethport->rx_started_count == 1) { + ethport->flags |= BNA_ETHPORT_F_RX_STARTED; + + if (ethport_can_be_up(ethport)) + bfa_fsm_send_event(ethport, ETHPORT_E_UP); + } +} + +void +bna_ethport_cb_rx_stopped(struct bna_ethport *ethport) +{ + int ethport_up = ethport_is_up(ethport); + + ethport->rx_started_count--; + + if (ethport->rx_started_count == 0) { + ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED; + + if (ethport_up) + bfa_fsm_send_event(ethport, ETHPORT_E_DOWN); + } +} + +/** + * ENET + */ +#define bna_enet_chld_start(enet) \ +do { \ + enum bna_tx_type tx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bna_ethport_start(&(enet)->bna->ethport); \ + bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \ + bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \ +} while (0) + +#define bna_enet_chld_stop(enet) \ +do { \ + enum bna_tx_type tx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_ethport_stop(&(enet)->bna->ethport); \ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \ + bfa_wc_wait(&(enet)->chld_stop_wc); \ +} while (0) + +#define bna_enet_chld_fail(enet) \ +do { \ + bna_ethport_fail(&(enet)->bna->ethport); \ + bna_tx_mod_fail(&(enet)->bna->tx_mod); \ + bna_rx_mod_fail(&(enet)->bna->rx_mod); \ +} while (0) + +#define bna_enet_rx_start(enet) \ +do { \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \ +} while (0) + +#define bna_enet_rx_stop(enet) \ +do { \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? 
\ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \ + bfa_wc_wait(&(enet)->chld_stop_wc); \ +} while (0) + +#define call_enet_stop_cbfn(enet) \ +do { \ + if ((enet)->stop_cbfn) { \ + void (*cbfn)(void *); \ + void *cbarg; \ + cbfn = (enet)->stop_cbfn; \ + cbarg = (enet)->stop_cbarg; \ + (enet)->stop_cbfn = NULL; \ + (enet)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_enet_pause_cbfn(enet) \ +do { \ + if ((enet)->pause_cbfn) { \ + void (*cbfn)(struct bnad *); \ + cbfn = (enet)->pause_cbfn; \ + (enet)->pause_cbfn = NULL; \ + cbfn((enet)->bna->bnad); \ + } \ +} while (0) + +#define call_enet_mtu_cbfn(enet) \ +do { \ + if ((enet)->mtu_cbfn) { \ + void (*cbfn)(struct bnad *); \ + cbfn = (enet)->mtu_cbfn; \ + (enet)->mtu_cbfn = NULL; \ + cbfn((enet)->bna->bnad); \ + } \ +} while (0) + +static void bna_enet_cb_chld_stopped(void *arg); +static void bna_bfi_pause_set(struct bna_enet *enet); + +bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, started, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet, + enum bna_enet_event); + +static void +bna_enet_sm_stopped_entry(struct bna_enet *enet) +{ + call_enet_pause_cbfn(enet); + call_enet_mtu_cbfn(enet); + call_enet_stop_cbfn(enet); +} + +static void +bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event) +{ + switch (event) { + case ENET_E_START: + bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait); + break; + + case ENET_E_STOP: + call_enet_stop_cbfn(enet); + break; + + case ENET_E_FAIL: + /* No-op */ + break; + + case ENET_E_PAUSE_CFG: + call_enet_pause_cbfn(enet); + break; + + case ENET_E_MTU_CFG: + call_enet_mtu_cbfn(enet); + break; + + case ENET_E_CHLD_STOPPED: + /** + * This event is received due to Ethport, Tx and Rx objects + * failing + */ + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet) +{ + bna_bfi_pause_set(enet); +} + +static void +bna_enet_sm_pause_init_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait); + break; + + case ENET_E_FAIL: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + case ENET_E_PAUSE_CFG: + enet->flags |= BNA_ENET_F_PAUSE_CHANGED; + break; + + case ENET_E_MTU_CFG: + /* No-op */ + break; + + case ENET_E_FWRESP_PAUSE: + if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) { + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bna_bfi_pause_set(enet); + } else { + bfa_fsm_set_state(enet, bna_enet_sm_started); + bna_enet_chld_start(enet); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; +} + +static void +bna_enet_sm_last_resp_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + 
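+	/* A stop arrived while the pause-set request was still outstanding;
+	 * wait for that final firmware response (or an IOC failure) before
+	 * moving to stopped.
+	 */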
switch (event) { + case ENET_E_FAIL: + case ENET_E_FWRESP_PAUSE: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_started_entry(struct bna_enet *enet) +{ + /** + * NOTE: Do not call bna_enet_chld_start() here, since it will be + * inadvertently called during cfg_wait->started transition as well + */ + call_enet_pause_cbfn(enet); + call_enet_mtu_cbfn(enet); +} + +static void +bna_enet_sm_started(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait); + break; + + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_PAUSE_CFG: + bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait); + bna_bfi_pause_set(enet); + break; + + case ENET_E_MTU_CFG: + bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait); + bna_enet_rx_stop(enet); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_cfg_wait_entry(struct bna_enet *enet) +{ +} + +static void +bna_enet_sm_cfg_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait); + break; + + case ENET_E_FAIL: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_PAUSE_CFG: + enet->flags |= BNA_ENET_F_PAUSE_CHANGED; + break; + + case ENET_E_MTU_CFG: + enet->flags |= BNA_ENET_F_MTU_CHANGED; + break; + + case ENET_E_CHLD_STOPPED: + bna_enet_rx_start(enet); + /* Fall through */ + case ENET_E_FWRESP_PAUSE: + if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) { + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bna_bfi_pause_set(enet); + } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) { + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bna_enet_rx_stop(enet); + } else { + bfa_fsm_set_state(enet, bna_enet_sm_started); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; +} + +static void +bna_enet_sm_cfg_stop_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_FWRESP_PAUSE: + case ENET_E_CHLD_STOPPED: + bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet) +{ + bna_enet_chld_stop(enet); +} + +static void +bna_enet_sm_chld_stop_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_CHLD_STOPPED: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_pause_set(struct bna_enet *enet) +{ + struct bfi_enet_set_pause_req *pause_req = &enet->pause_req; + + bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0); + pause_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req))); + pause_req->tx_pause = enet->pause_config.tx_pause; + 
pause_req->rx_pause = enet->pause_config.rx_pause; + + bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_set_pause_req), &pause_req->mh); + bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd); +} + +static void +bna_enet_cb_chld_stopped(void *arg) +{ + struct bna_enet *enet = (struct bna_enet *)arg; + + bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED); +} + +static void +bna_enet_init(struct bna_enet *enet, struct bna *bna) +{ + enet->bna = bna; + enet->flags = 0; + enet->mtu = 0; + enet->type = BNA_ENET_T_REGULAR; + + enet->stop_cbfn = NULL; + enet->stop_cbarg = NULL; + + enet->pause_cbfn = NULL; + + enet->mtu_cbfn = NULL; + + bfa_fsm_set_state(enet, bna_enet_sm_stopped); +} + +static void +bna_enet_uninit(struct bna_enet *enet) +{ + enet->flags = 0; + + enet->bna = NULL; +} + +static void +bna_enet_start(struct bna_enet *enet) +{ + enet->flags |= BNA_ENET_F_IOCETH_READY; + if (enet->flags & BNA_ENET_F_ENABLED) + bfa_fsm_send_event(enet, ENET_E_START); +} + +static void +bna_ioceth_cb_enet_stopped(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED); +} + +static void +bna_enet_stop(struct bna_enet *enet) +{ + enet->stop_cbfn = bna_ioceth_cb_enet_stopped; + enet->stop_cbarg = &enet->bna->ioceth; + + enet->flags &= ~BNA_ENET_F_IOCETH_READY; + bfa_fsm_send_event(enet, ENET_E_STOP); +} + +static void +bna_enet_fail(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_IOCETH_READY; + bfa_fsm_send_event(enet, ENET_E_FAIL); +} + +void +bna_enet_cb_tx_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +void +bna_enet_cb_rx_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +int +bna_enet_mtu_get(struct bna_enet *enet) +{ + return enet->mtu; +} + +void +bna_enet_enable(struct bna_enet *enet) +{ + if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped) + return; + + enet->flags |= BNA_ENET_F_ENABLED; + + if (enet->flags & BNA_ENET_F_IOCETH_READY) + bfa_fsm_send_event(enet, ENET_E_START); +} + +void +bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type, + void (*cbfn)(void *)) +{ + if (type == BNA_SOFT_CLEANUP) { + (*cbfn)(enet->bna->bnad); + return; + } + + enet->stop_cbfn = cbfn; + enet->stop_cbarg = enet->bna->bnad; + + enet->flags &= ~BNA_ENET_F_ENABLED; + + bfa_fsm_send_event(enet, ENET_E_STOP); +} + +void +bna_enet_pause_config(struct bna_enet *enet, + struct bna_pause_config *pause_config, + void (*cbfn)(struct bnad *)) +{ + enet->pause_config = *pause_config; + + enet->pause_cbfn = cbfn; + + bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG); +} + +void +bna_enet_mtu_set(struct bna_enet *enet, int mtu, + void (*cbfn)(struct bnad *)) +{ + enet->mtu = mtu; + + enet->mtu_cbfn = cbfn; + + bfa_fsm_send_event(enet, ENET_E_MTU_CFG); +} + +void +bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac) +{ + *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc); +} + +/** + * IOCETH + */ +#define enable_mbox_intr(_ioceth) \ +do { \ + u32 intr_status; \ + bna_intr_status_get((_ioceth)->bna, intr_status); \ + bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \ + bna_mbox_intr_enable((_ioceth)->bna); \ +} while (0) + +#define disable_mbox_intr(_ioceth) \ +do { \ + bna_mbox_intr_disable((_ioceth)->bna); \ + bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \ +} while (0) + +#define call_ioceth_stop_cbfn(_ioceth) \ +do { \ + if ((_ioceth)->stop_cbfn) { \ + void (*cbfn)(struct bnad *); \ + struct bnad *cbarg; \ + cbfn = (_ioceth)->stop_cbfn; \ + cbarg = (_ioceth)->stop_cbarg; \ 
+ (_ioceth)->stop_cbfn = NULL; \ + (_ioceth)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define bna_stats_mod_uninit(_stats_mod) \ +do { \ +} while (0) + +#define bna_stats_mod_start(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = true; \ +} while (0) + +#define bna_stats_mod_stop(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = false; \ +} while (0) + +#define bna_stats_mod_fail(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = false; \ + (_stats_mod)->stats_get_busy = false; \ + (_stats_mod)->stats_clr_busy = false; \ +} while (0) + +static void bna_bfi_attr_get(struct bna_ioceth *ioceth); + +bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth, + enum bna_ioceth_event); + +static void +bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth) +{ + call_ioceth_stop_cbfn(ioceth); +} + +static void +bna_ioceth_sm_stopped(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_ENABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait); + bfa_nw_ioc_enable(&ioceth->ioc); + break; + + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth) +{ + /** + * Do not call bfa_nw_ioc_enable() here. It must be called in the + * previous state due to failed -> ioc_ready_wait transition. 
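+	 * (ioc_ready_wait is also entered from the failed state on
+	 * IOCETH_E_IOC_RESET, where the IOC is already being re-initialized,
+	 * so no second enable is issued there.)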
+ */ +} + +static void +bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + case IOCETH_E_IOC_READY: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth) +{ + bna_bfi_attr_get(ioceth); +} + +static void +bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + case IOCETH_E_ENET_ATTR_RESP: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth) +{ + bna_enet_start(&ioceth->bna->enet); + bna_stats_mod_start(&ioceth->bna->stats_mod); + bnad_cb_ioceth_ready(ioceth->bna->bnad); +} + +static void +bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bna_enet_fail(&ioceth->bna->enet); + bna_stats_mod_fail(&ioceth->bna->stats_mod); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth) +{ +} + +static void +bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_FAILED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + disable_mbox_intr(ioceth); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_ENET_ATTR_RESP: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth) +{ + bna_stats_mod_stop(&ioceth->bna->stats_mod); + bna_enet_stop(&ioceth->bna->enet); +} + +static void +bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_FAILED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + disable_mbox_intr(ioceth); + bna_enet_fail(&ioceth->bna->enet); + bna_stats_mod_fail(&ioceth->bna->stats_mod); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_ENET_STOPPED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth) +{ +} + +static void +bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_DISABLED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); + break; + + case IOCETH_E_ENET_STOPPED: + /* This event is received due to enet failing */ + /* No-op */ + break; + + 
default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth) +{ + bnad_cb_ioceth_failed(ioceth->bna->bnad); +} + +static void +bna_ioceth_sm_failed(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait); + break; + + case IOCETH_E_IOC_FAILED: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_attr_get(struct bna_ioceth *ioceth) +{ + struct bfi_enet_attr_req *attr_req = &ioceth->attr_req; + + bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_GET_ATTR_REQ, 0, 0); + attr_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req))); + bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_attr_req), &attr_req->mh); + bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd); +} + +/* IOC callback functions */ + +static void +bna_cb_ioceth_enable(void *arg, enum bfa_status error) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + if (error) + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED); + else + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY); +} + +static void +bna_cb_ioceth_disable(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED); +} + +static void +bna_cb_ioceth_hbfail(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED); +} + +static void +bna_cb_ioceth_reset(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET); +} + +static struct bfa_ioc_cbfn bna_ioceth_cbfn = { + bna_cb_ioceth_enable, + bna_cb_ioceth_disable, + bna_cb_ioceth_hbfail, + bna_cb_ioceth_reset +}; + +static void bna_attr_init(struct bna_ioceth *ioceth) +{ + ioceth->attr.num_txq = BFI_ENET_DEF_TXQ; + ioceth->attr.num_rxp = BFI_ENET_DEF_RXP; + ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM; + ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; + ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ; + ioceth->attr.fw_query_complete = false; +} + +static void +bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, + struct bna_res_info *res_info) +{ + u64 dma; + u8 *kva; + + ioceth->bna = bna; + + /** + * Attach IOC and claim: + * 1. DMA memory for IOC attributes + * 2. Kernel memory for FW trace + */ + bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn); + bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH); + + BNA_GET_DMA_ADDR( + &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma); + kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva; + bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma); + + kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; + + /** + * Attach common modules (Diag, SFP, CEE, Port) and claim respective + * DMA memory. 
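+	 * Of these, only CEE and the message queue claim DMA memory here;
+	 * the kva/dma cursors are advanced past each claim below.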
+ */ + BNA_GET_DMA_ADDR( + &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma); + kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva; + bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna); + bfa_nw_cee_mem_claim(&bna->cee, kva, dma); + kva += bfa_nw_cee_meminfo(); + dma += bfa_nw_cee_meminfo(); + + bfa_msgq_attach(&bna->msgq, &ioceth->ioc); + bfa_msgq_memclaim(&bna->msgq, kva, dma); + bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna); + kva += bfa_msgq_meminfo(); + dma += bfa_msgq_meminfo(); + + ioceth->stop_cbfn = NULL; + ioceth->stop_cbarg = NULL; + + bna_attr_init(ioceth); + + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); +} + +static void +bna_ioceth_uninit(struct bna_ioceth *ioceth) +{ + bfa_nw_ioc_detach(&ioceth->ioc); + + ioceth->bna = NULL; +} + +void +bna_ioceth_enable(struct bna_ioceth *ioceth) +{ + if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) { + bnad_cb_ioceth_ready(ioceth->bna->bnad); + return; + } + + if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped) + bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE); +} + +void +bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type) +{ + if (type == BNA_SOFT_CLEANUP) { + bnad_cb_ioceth_disabled(ioceth->bna->bnad); + return; + } + + ioceth->stop_cbfn = bnad_cb_ioceth_disabled; + ioceth->stop_cbarg = ioceth->bna->bnad; + + bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE); +} + +static void +bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + ucam_mod->ucmac = (struct bna_mac *) + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&ucam_mod->free_q); + for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) { + bfa_q_qe_init(&ucam_mod->ucmac[i].qe); + list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); + } + + ucam_mod->bna = bna; +} + +static void +bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod) +{ + struct list_head *qe; + int i = 0; + + list_for_each(qe, &ucam_mod->free_q) + i++; + + ucam_mod->bna = NULL; +} + +static void +bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + mcam_mod->mcmac = (struct bna_mac *) + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&mcam_mod->free_q); + for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) { + bfa_q_qe_init(&mcam_mod->mcmac[i].qe); + list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q); + } + + mcam_mod->mchandle = (struct bna_mcam_handle *) + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&mcam_mod->free_handle_q); + for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) { + bfa_q_qe_init(&mcam_mod->mchandle[i].qe); + list_add_tail(&mcam_mod->mchandle[i].qe, + &mcam_mod->free_handle_q); + } + + mcam_mod->bna = bna; +} + +static void +bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod) +{ + struct list_head *qe; + int i; + + i = 0; + list_for_each(qe, &mcam_mod->free_q) i++; + + i = 0; + list_for_each(qe, &mcam_mod->free_handle_q) i++; + + mcam_mod->bna = NULL; +} + +static void +bna_bfi_stats_get(struct bna *bna) +{ + struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get; + + bna->stats_mod.stats_get_busy = true; + + bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_STATS_GET_REQ, 0, 0); + stats_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req))); + stats_req->stats_mask = htons(BFI_ENET_STATS_ALL); + stats_req->tx_enet_mask = 
htonl(bna->tx_mod.rid_mask); + stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask); + stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb; + stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb; + + bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL, + sizeof(struct bfi_enet_stats_req), &stats_req->mh); + bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd); +} + +void +bna_res_req(struct bna_res_info *res_info) +{ + /* DMA memory for COMMON_MODULE */ + res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN( + (bfa_nw_cee_meminfo() + + bfa_msgq_meminfo()), PAGE_SIZE); + + /* DMA memory for retrieving IOC attributes */ + res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len = + ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE); + + /* Virtual memory for retreiving fw_trc */ + res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0; + + /* DMA memory for retreiving stats */ + res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len = + ALIGN(sizeof(struct bfi_enet_stats), + PAGE_SIZE); +} + +void +bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info) +{ + struct bna_attr *attr = &bna->ioceth.attr; + + /* Virtual memory for Tx objects - stored by Tx module */ + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len = + attr->num_txq * sizeof(struct bna_tx); + + /* Virtual memory for TxQ - stored by Tx module */ + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len = + attr->num_txq * sizeof(struct bna_txq); + + /* Virtual memory for Rx objects - stored by Rx module */ + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len = + attr->num_rxp * sizeof(struct bna_rx); + + /* Virtual memory for RxPath - stored by Rx module */ + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len = + attr->num_rxp * sizeof(struct bna_rxp); + + /* Virtual memory for RxQ - stored by Rx module */ + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type = 
+ BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len = + (attr->num_rxp * 2) * sizeof(struct bna_rxq); + + /* Virtual memory for Unicast MAC address - stored by ucam module */ + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len = + attr->num_ucmac * sizeof(struct bna_mac); + + /* Virtual memory for Multicast MAC address - stored by mcam module */ + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len = + attr->num_mcmac * sizeof(struct bna_mac); + + /* Virtual memory for Multicast handle - stored by mcam module */ + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len = + attr->num_mcmac * sizeof(struct bna_mcam_handle); +} + +void +bna_init(struct bna *bna, struct bnad *bnad, + struct bfa_pcidev *pcidev, struct bna_res_info *res_info) +{ + bna->bnad = bnad; + bna->pcidev = *pcidev; + + bna->stats.hw_stats_kva = (struct bfi_enet_stats *) + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva; + bna->stats.hw_stats_dma.msb = + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb; + bna->stats.hw_stats_dma.lsb = + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb; + + bna_reg_addr_init(bna, &bna->pcidev); + + /* Also initializes diag, cee, sfp, phy_port, msgq */ + bna_ioceth_init(&bna->ioceth, bna, res_info); + + bna_enet_init(&bna->enet, bna); + bna_ethport_init(&bna->ethport, bna); +} + +void +bna_mod_init(struct bna *bna, struct bna_res_info *res_info) +{ + bna_tx_mod_init(&bna->tx_mod, bna, res_info); + + bna_rx_mod_init(&bna->rx_mod, bna, res_info); + + bna_ucam_mod_init(&bna->ucam_mod, bna, res_info); + + bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); + + bna->default_mode_rid = BFI_INVALID_RID; + bna->promisc_rid = BFI_INVALID_RID; + + bna->mod_flags |= BNA_MOD_F_INIT_DONE; +} + +void +bna_uninit(struct bna *bna) +{ + if (bna->mod_flags & BNA_MOD_F_INIT_DONE) { + bna_mcam_mod_uninit(&bna->mcam_mod); + bna_ucam_mod_uninit(&bna->ucam_mod); + bna_rx_mod_uninit(&bna->rx_mod); + bna_tx_mod_uninit(&bna->tx_mod); + bna->mod_flags &= ~BNA_MOD_F_INIT_DONE; + } + + bna_stats_mod_uninit(&bna->stats_mod); + bna_ethport_uninit(&bna->ethport); + bna_enet_uninit(&bna->enet); + + bna_ioceth_uninit(&bna->ioceth); + + bna->bnad = NULL; +} + +int +bna_num_txq_set(struct bna *bna, int num_txq) +{ + if (bna->ioceth.attr.fw_query_complete && + (num_txq <= bna->ioceth.attr.num_txq)) { + bna->ioceth.attr.num_txq = num_txq; + return BNA_CB_SUCCESS; + } + + return BNA_CB_FAIL; +} + +int +bna_num_rxp_set(struct bna *bna, int num_rxp) +{ + if (bna->ioceth.attr.fw_query_complete && + (num_rxp <= bna->ioceth.attr.num_rxp)) { + bna->ioceth.attr.num_rxp = num_rxp; + return BNA_CB_SUCCESS; + } + + return BNA_CB_FAIL; +} + +struct bna_mac * +bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod) +{ + struct list_head *qe; + + if 
(list_empty(&ucam_mod->free_q)) + return NULL; + + bfa_q_deq(&ucam_mod->free_q, &qe); + + return (struct bna_mac *)qe; +} + +void +bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac) +{ + list_add_tail(&mac->qe, &ucam_mod->free_q); +} + +struct bna_mac * +bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod) +{ + struct list_head *qe; + + if (list_empty(&mcam_mod->free_q)) + return NULL; + + bfa_q_deq(&mcam_mod->free_q, &qe); + + return (struct bna_mac *)qe; +} + +void +bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac) +{ + list_add_tail(&mac->qe, &mcam_mod->free_q); +} + +struct bna_mcam_handle * +bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod) +{ + struct list_head *qe; + + if (list_empty(&mcam_mod->free_handle_q)) + return NULL; + + bfa_q_deq(&mcam_mod->free_handle_q, &qe); + + return (struct bna_mcam_handle *)qe; +} + +void +bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, + struct bna_mcam_handle *handle) +{ + list_add_tail(&handle->qe, &mcam_mod->free_handle_q); +} + +void +bna_hw_stats_get(struct bna *bna) +{ + if (!bna->stats_mod.ioc_ready) { + bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats); + return; + } + if (bna->stats_mod.stats_get_busy) { + bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats); + return; + } + + bna_bfi_stats_get(bna); +} diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h new file mode 100644 index 000000000000..4c6aab2a9534 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -0,0 +1,422 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ + +/** + * File for interrupt macros and functions + */ + +#ifndef __BNA_HW_DEFS_H__ +#define __BNA_HW_DEFS_H__ + +#include "bfi_reg.h" + +/** + * + * SW imposed limits + * + */ +#define BFI_ENET_DEF_TXQ 1 +#define BFI_ENET_DEF_RXP 1 +#define BFI_ENET_DEF_UCAM 1 +#define BFI_ENET_DEF_RITSZ 1 + +#define BFI_ENET_MAX_MCAM 256 + +#define BFI_INVALID_RID -1 + +#define BFI_IBIDX_SIZE 4 + +#define BFI_VLAN_WORD_SHIFT 5 /* 32 bits */ +#define BFI_VLAN_WORD_MASK 0x1F +#define BFI_VLAN_BLOCK_SHIFT 9 /* 512 bits */ +#define BFI_VLAN_BMASK_ALL 0xFF + +#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */ +#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */ +#define BFI_MAX_INTERPKT_COUNT 0xFF +#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */ +#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */ +#define BFI_TX_INTERPKT_COUNT 32 +#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */ +#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */ +#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */ + +#define BFI_TXQ_WI_SIZE 64 /* bytes */ +#define BFI_RXQ_WI_SIZE 8 /* bytes */ +#define BFI_CQ_WI_SIZE 16 /* bytes */ +#define BFI_TX_MAX_WRR_QUOTA 0xFFF + +#define BFI_TX_MAX_VECTORS_PER_WI 4 +#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF +#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF +#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF + +/* Small Q buffer size */ +#define BFI_SMALL_RXBUF_SIZE 128 + +#define BFI_TX_MAX_PRIO 8 +#define BFI_TX_PRIO_MAP_ALL 0xFF + +/* + * + * Register definitions and macros + * + */ + +#define BNA_PCI_REG_CT_ADDRSZ (0x40000) + +#define ct_reg_addr_init(_bna, _pcidev) \ +{ \ + struct bna_reg_offset reg_offset[] = \ + {{HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK}, \ + {HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, \ + {HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, \ + {HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK} }; \ + \ + (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \ + reg_offset[(_pcidev)->pci_func].fn_int_status;\ + (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \ + reg_offset[(_pcidev)->pci_func].fn_int_mask;\ +} + +#define ct_bit_defn_init(_bna, _pcidev) \ +{ \ + (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0 | \ + __HFN_INT_MBOX_LPU1); \ + (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0 | \ + __HFN_INT_MBOX_LPU1); \ + (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK); \ + (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK); \ + (_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT; \ + (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT; \ +} + +#define ct2_reg_addr_init(_bna, _pcidev) \ +{ \ + (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \ + CT2_HOSTFN_INT_STATUS; \ + (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \ + CT2_HOSTFN_INTR_MASK; \ +} + +#define ct2_bit_defn_init(_bna, _pcidev) \ +{ \ + (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0_CT2 | \ + __HFN_INT_MBOX_LPU1_CT2); \ + (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0_CT2 | \ + __HFN_INT_MBOX_LPU1_CT2); \ + (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK_CT2); \ + (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK_CT2); \ + (_bna)->bits.halt_status_bits = __HFN_INT_CPQ_HALT_CT2; \ + (_bna)->bits.halt_mask_bits = __HFN_INT_CPQ_HALT_CT2; \ +} + +#define bna_reg_addr_init(_bna, _pcidev) \ +{ \ + switch ((_pcidev)->device_id) { \ + case PCI_DEVICE_ID_BROCADE_CT: \ + ct_reg_addr_init((_bna), (_pcidev)); \ + ct_bit_defn_init((_bna), (_pcidev)); \ + break; \ + case BFA_PCI_DEVICE_ID_CT2: \ + ct2_reg_addr_init((_bna), (_pcidev)); \ + ct2_bit_defn_init((_bna), (_pcidev)); \ 
+ break; \ + } \ +} + +#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id) +/** + * + * Interrupt related bits, flags and macros + * + */ + +#define IB_STATUS_BITS 0x0000ffff + +#define BNA_IS_MBOX_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.mbox_status_bits) + +#define BNA_IS_HALT_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.halt_status_bits) + +#define BNA_IS_ERR_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.error_status_bits) + +#define BNA_IS_MBOX_ERR_INTR(_bna, _intr_status) \ + (BNA_IS_MBOX_INTR(_bna, _intr_status) | \ + BNA_IS_ERR_INTR(_bna, _intr_status)) + +#define BNA_IS_INTX_DATA_INTR(_intr_status) \ + ((_intr_status) & IB_STATUS_BITS) + +#define bna_halt_clear(_bna) \ +do { \ + u32 init_halt; \ + init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \ + init_halt &= ~__FW_INIT_HALT_P; \ + writel(init_halt, (_bna)->ioceth.ioc.ioc_regs.ll_halt); \ + init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \ +} while (0) + +#define bna_intx_disable(_bna, _cur_mask) \ +{ \ + (_cur_mask) = readl((_bna)->regs.fn_int_mask); \ + writel(0xffffffff, (_bna)->regs.fn_int_mask); \ +} + +#define bna_intx_enable(bna, new_mask) \ + writel((new_mask), (bna)->regs.fn_int_mask) +#define bna_mbox_intr_disable(bna) \ +do { \ + u32 mask; \ + mask = readl((bna)->regs.fn_int_mask); \ + writel((mask | (bna)->bits.mbox_mask_bits | \ + (bna)->bits.error_mask_bits), (bna)->regs.fn_int_mask); \ + mask = readl((bna)->regs.fn_int_mask); \ +} while (0) + +#define bna_mbox_intr_enable(bna) \ +do { \ + u32 mask; \ + mask = readl((bna)->regs.fn_int_mask); \ + writel((mask & ~((bna)->bits.mbox_mask_bits | \ + (bna)->bits.error_mask_bits)), (bna)->regs.fn_int_mask);\ + mask = readl((bna)->regs.fn_int_mask); \ +} while (0) + +#define bna_intr_status_get(_bna, _status) \ +{ \ + (_status) = readl((_bna)->regs.fn_int_status); \ + if (_status) { \ + writel(((_status) & ~(_bna)->bits.mbox_status_bits), \ + (_bna)->regs.fn_int_status); \ + } \ +} + +/* + * MAX ACK EVENTS : No. of acks that can be accumulated in driver, + * before acking to h/w. The no. of bits is 16 in the doorbell register, + * however we keep this limited to 15 bits. + * This is because around the edge of 64K boundary (16 bits), one + * single poll can make the accumulated ACK counter cross the 64K boundary, + * causing problems, when we try to ack with a value greater than 64K. + * 15 bits (32K) should be large enough to accumulate, anyways, and the max. + * acked events to h/w can be (32K + max poll weight) (currently 64). 
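+ * For example, with the accumulated count just under 64K, a single poll
+ * could push the ack value past the 16-bit doorbell field; capping the
+ * accumulation at 32K leaves room for the maximum poll weight.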
+ */ +#define BNA_IB_MAX_ACK_EVENTS (1 << 15) + +/* These macros build the data portion of the TxQ/RxQ doorbell */ +#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi)) +#define BNA_DOORBELL_Q_STOP (0x40000000) + +/* These macros build the data portion of the IB doorbell */ +#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \ + (0x80000000 | ((_timeout) << 16) | (_events)) +#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000) + +/* Set the coalescing timer for the given ib */ +#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \ + ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0)); + +/* Acks 'events' # of events for a given ib while disabling interrupts */ +#define bna_ib_ack_disable_irq(_i_dbell, _events) \ + (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \ + (_i_dbell)->doorbell_addr)); + +/* Acks 'events' # of events for a given ib */ +#define bna_ib_ack(_i_dbell, _events) \ + (writel(((_i_dbell)->doorbell_ack | (_events)), \ + (_i_dbell)->doorbell_addr)); + +#define bna_ib_start(_bna, _ib, _is_regular) \ +{ \ + u32 intx_mask; \ + struct bna_ib *ib = _ib; \ + if ((ib->intr_type == BNA_INTR_T_INTX)) { \ + bna_intx_disable((_bna), intx_mask); \ + intx_mask &= ~(ib->intr_vector); \ + bna_intx_enable((_bna), intx_mask); \ + } \ + bna_ib_coalescing_timer_set(&ib->door_bell, \ + ib->coalescing_timeo); \ + if (_is_regular) \ + bna_ib_ack(&ib->door_bell, 0); \ +} + +#define bna_ib_stop(_bna, _ib) \ +{ \ + u32 intx_mask; \ + struct bna_ib *ib = _ib; \ + writel(BNA_DOORBELL_IB_INT_DISABLE, \ + ib->door_bell.doorbell_addr); \ + if (ib->intr_type == BNA_INTR_T_INTX) { \ + bna_intx_disable((_bna), intx_mask); \ + intx_mask |= ib->intr_vector; \ + bna_intx_enable((_bna), intx_mask); \ + } \ +} + +#define bna_txq_prod_indx_doorbell(_tcb) \ + (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \ + (_tcb)->q_dbell)); + +#define bna_rxq_prod_indx_doorbell(_rcb) \ + (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ + (_rcb)->q_dbell)); + +/** + * + * TxQ, RxQ, CQ related bits, offsets, macros + * + */ + +/* TxQ Entry Opcodes */ +#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ +#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */ +#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */ + +/* TxQ Entry Control Flags */ +#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8) +#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5) +#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4) +#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3) +#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2) +#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1) +#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0) + +#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ + (((_hdr_size) << 10) | ((_offset) & 0x3FF)) + +/* + * Completion Q defines + */ +/* CQ Entry Flags */ +#define BNA_CQ_EF_MAC_ERROR (1 << 0) +#define BNA_CQ_EF_FCS_ERROR (1 << 1) +#define BNA_CQ_EF_TOO_LONG (1 << 2) +#define BNA_CQ_EF_FC_CRC_OK (1 << 3) + +#define BNA_CQ_EF_RSVD1 (1 << 4) +#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5) +#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6) +#define BNA_CQ_EF_HDS_HEADER (1 << 7) + +#define BNA_CQ_EF_UDP (1 << 8) +#define BNA_CQ_EF_TCP (1 << 9) +#define BNA_CQ_EF_IP_OPTIONS (1 << 10) +#define BNA_CQ_EF_IPV6 (1 << 11) + +#define BNA_CQ_EF_IPV4 (1 << 12) +#define BNA_CQ_EF_VLAN (1 << 13) +#define BNA_CQ_EF_RSS (1 << 14) +#define BNA_CQ_EF_RSVD2 (1 << 15) + +#define BNA_CQ_EF_MCAST_MATCH (1 << 16) +#define BNA_CQ_EF_MCAST (1 << 17) +#define BNA_CQ_EF_BCAST (1 << 18) +#define BNA_CQ_EF_REMOTE (1 << 19) + +#define BNA_CQ_EF_LOCAL (1 << 20) + +/** + * + * Data 
structures + * + */ + +struct bna_reg_offset { + u32 fn_int_status; + u32 fn_int_mask; +}; + +struct bna_bit_defn { + u32 mbox_status_bits; + u32 mbox_mask_bits; + u32 error_status_bits; + u32 error_mask_bits; + u32 halt_status_bits; + u32 halt_mask_bits; +}; + +struct bna_reg { + void __iomem *fn_int_status; + void __iomem *fn_int_mask; +}; + +/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */ +struct bna_dma_addr { + u32 msb; + u32 lsb; +}; + +struct bna_txq_wi_vector { + u16 reserved; + u16 length; /* Only 14 LSB are valid */ + struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */ +}; + +/** + * TxQ Entry Structure + * + * BEWARE: Load values into this structure with correct endianess. + */ +struct bna_txq_entry { + union { + struct { + u8 reserved; + u8 num_vectors; /* number of vectors present */ + u16 opcode; /* Either */ + /* BNA_TXQ_WI_SEND or */ + /* BNA_TXQ_WI_SEND_LSO */ + u16 flags; /* OR of all the flags */ + u16 l4_hdr_size_n_offset; + u16 vlan_tag; + u16 lso_mss; /* Only 14 LSB are valid */ + u32 frame_length; /* Only 24 LSB are valid */ + } wi; + + struct { + u16 reserved; + u16 opcode; /* Must be */ + /* BNA_TXQ_WI_EXTENSION */ + u32 reserved2[3]; /* Place holder for */ + /* removed vector (12 bytes) */ + } wi_ext; + } hdr; + struct bna_txq_wi_vector vector[4]; +}; + +/* RxQ Entry Structure */ +struct bna_rxq_entry { /* Rx-Buffer */ + struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */ +}; + +/* CQ Entry Structure */ +struct bna_cq_entry { + u32 flags; + u16 vlan_tag; + u16 length; + u32 rss_hash; + u8 valid; + u8 reserved1; + u8 reserved2; + u8 rxq_id; +}; + +#endif /* __BNA_HW_DEFS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c new file mode 100644 index 000000000000..276fcb589f4b --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -0,0 +1,3798 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ +#include "bna.h" +#include "bfi.h" + +/** + * IB + */ +static void +bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) +{ + ib->coalescing_timeo = coalescing_timeo; + ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK( + (u32)ib->coalescing_timeo, 0); +} + +/** + * RXF + */ + +#define bna_rxf_vlan_cfg_soft_reset(rxf) \ +do { \ + (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \ + (rxf)->vlan_strip_pending = true; \ +} while (0) + +#define bna_rxf_rss_cfg_soft_reset(rxf) \ +do { \ + if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \ + (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \ + BNA_RSS_F_CFG_PENDING | \ + BNA_RSS_F_STATUS_PENDING); \ +} while (0) + +static int bna_rxf_cfg_apply(struct bna_rxf *rxf); +static void bna_rxf_cfg_reset(struct bna_rxf *rxf); +static int bna_rxf_fltr_clear(struct bna_rxf *rxf); +static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); +static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); +static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); + +bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf, + enum bna_rxf_event); + +static void +bna_rxf_sm_stopped_entry(struct bna_rxf *rxf) +{ + call_rxf_stop_cbfn(rxf); +} + +static void +bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_START: + if (rxf->flags & BNA_RXF_F_PAUSED) { + bfa_fsm_set_state(rxf, bna_rxf_sm_paused); + call_rxf_start_cbfn(rxf); + } else + bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); + break; + + case RXF_E_STOP: + call_rxf_stop_cbfn(rxf); + break; + + case RXF_E_FAIL: + /* No-op */ + break; + + case RXF_E_CONFIG: + call_rxf_cam_fltr_cbfn(rxf); + break; + + case RXF_E_PAUSE: + rxf->flags |= BNA_RXF_F_PAUSED; + call_rxf_pause_cbfn(rxf); + break; + + case RXF_E_RESUME: + rxf->flags &= ~BNA_RXF_F_PAUSED; + call_rxf_resume_cbfn(rxf); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_paused_entry(struct bna_rxf *rxf) +{ + call_rxf_pause_cbfn(rxf); +} + +static void +bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_STOP: + case RXF_E_FAIL: + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_CONFIG: + call_rxf_cam_fltr_cbfn(rxf); + break; + + case RXF_E_RESUME: + rxf->flags &= ~BNA_RXF_F_PAUSED; + bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf) +{ + if (!bna_rxf_cfg_apply(rxf)) { + /* No more pending config updates */ + bfa_fsm_set_state(rxf, bna_rxf_sm_started); + } +} + +static void +bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_STOP: + bfa_fsm_set_state(rxf, 
bna_rxf_sm_last_resp_wait); + break; + + case RXF_E_FAIL: + bna_rxf_cfg_reset(rxf); + call_rxf_start_cbfn(rxf); + call_rxf_cam_fltr_cbfn(rxf); + call_rxf_resume_cbfn(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_CONFIG: + /* No-op */ + break; + + case RXF_E_PAUSE: + rxf->flags |= BNA_RXF_F_PAUSED; + call_rxf_start_cbfn(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait); + break; + + case RXF_E_FW_RESP: + if (!bna_rxf_cfg_apply(rxf)) { + /* No more pending config updates */ + bfa_fsm_set_state(rxf, bna_rxf_sm_started); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_started_entry(struct bna_rxf *rxf) +{ + call_rxf_start_cbfn(rxf); + call_rxf_cam_fltr_cbfn(rxf); + call_rxf_resume_cbfn(rxf); +} + +static void +bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_STOP: + case RXF_E_FAIL: + bna_rxf_cfg_reset(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_CONFIG: + bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); + break; + + case RXF_E_PAUSE: + rxf->flags |= BNA_RXF_F_PAUSED; + if (!bna_rxf_fltr_clear(rxf)) + bfa_fsm_set_state(rxf, bna_rxf_sm_paused); + else + bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf) +{ +} + +static void +bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_FAIL: + bna_rxf_cfg_reset(rxf); + call_rxf_pause_cbfn(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_FW_RESP: + if (!bna_rxf_fltr_clear(rxf)) { + /* No more pending CAM entries to clear */ + bfa_fsm_set_state(rxf, bna_rxf_sm_paused); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf) +{ +} + +static void +bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_FAIL: + case RXF_E_FW_RESP: + bna_rxf_cfg_reset(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac, + enum bfi_enet_h2i_msgs req_type) +{ + struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req))); + memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t)); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_ucast_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac) +{ + struct bfi_enet_mcast_add_req *req = + &rxf->bfi_enet_cmd.mcast_add_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ, + 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req))); + memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t)); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_mcast_add_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle) +{ + struct bfi_enet_mcast_del_req *req = + &rxf->bfi_enet_cmd.mcast_del_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ, + 0, rxf->rx->rid); + 
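/*
 * Editorial aside -- illustrative sketch, not part of this patch: the RXF
 * state machine above paces configuration so that at most one firmware
 * request is outstanding.  cfg_wait re-runs the apply step on every
 * RXF_E_FW_RESP and only returns to the started state once nothing is
 * pending.  The standalone userspace model below uses simplified stand-in
 * types and names (rxf_model, cfg_apply, send_event); it is not driver API.
 */
#include <stdbool.h>
#include <stdio.h>

enum rxf_state { RXF_STOPPED, RXF_CFG_WAIT, RXF_STARTED };
enum rxf_event { RXF_E_CONFIG, RXF_E_FW_RESP };

struct rxf_model {
        enum rxf_state state;
        int pending;    /* stand-in for pending ucast/mcast/vlan/rss work */
};

/* Post at most one "firmware request" per call; return true if one was sent */
static bool cfg_apply(struct rxf_model *rxf)
{
        if (rxf->pending > 0) {
                rxf->pending--;
                printf("posted one config request, %d left\n", rxf->pending);
                return true;
        }
        return false;
}

static void send_event(struct rxf_model *rxf, enum rxf_event event)
{
        switch (rxf->state) {
        case RXF_STARTED:
                if (event == RXF_E_CONFIG && cfg_apply(rxf))
                        rxf->state = RXF_CFG_WAIT;
                break;
        case RXF_CFG_WAIT:
                /* each firmware response kicks off the next pending request */
                if (event == RXF_E_FW_RESP && !cfg_apply(rxf))
                        rxf->state = RXF_STARTED;
                break;
        default:
                break;
        }
}

int main(void)
{
        struct rxf_model rxf = { .state = RXF_STARTED, .pending = 3 };

        send_event(&rxf, RXF_E_CONFIG);         /* schedules the first request */
        while (rxf.state == RXF_CFG_WAIT)
                send_event(&rxf, RXF_E_FW_RESP);        /* firmware replies */
        printf("all configuration applied, back to started\n");
        return 0;
}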
req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req))); + req->handle = htons(handle); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_mcast_del_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx) +{ + struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req; + int i; + int j; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req))); + req->block_idx = block_idx; + for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) { + j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) + req->bit_mask[i] = + htonl(rxf->vlan_filter_table[j]); + else + req->bit_mask[i] = 0xFFFFFFFF; + } + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rx_vlan_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_vlan_strip_enable(struct bna_rxf *rxf) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = rxf->vlan_strip_status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rit_cfg(struct bna_rxf *rxf) +{ + struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req))); + req->size = htons(rxf->rit_size); + memcpy(&req->table[0], rxf->rit, rxf->rit_size); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rit_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rss_cfg(struct bna_rxf *rxf) +{ + struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req; + int i; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req))); + req->cfg.type = 
rxf->rss_cfg.hash_type; + req->cfg.mask = rxf->rss_cfg.hash_mask; + for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++) + req->cfg.key[i] = + htonl(rxf->rss_cfg.toeplitz_hash_key[i]); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rss_cfg_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rss_enable(struct bna_rxf *rxf) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = rxf->rss_status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +/* This function gets the multicast MAC that has already been added to CAM */ +static struct bna_mac * +bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr) +{ + struct bna_mac *mac; + struct list_head *qe; + + list_for_each(qe, &rxf->mcast_active_q) { + mac = (struct bna_mac *)qe; + if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr)) + return mac; + } + + list_for_each(qe, &rxf->mcast_pending_del_q) { + mac = (struct bna_mac *)qe; + if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr)) + return mac; + } + + return NULL; +} + +static struct bna_mcam_handle * +bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle) +{ + struct bna_mcam_handle *mchandle; + struct list_head *qe; + + list_for_each(qe, &rxf->mcast_handle_q) { + mchandle = (struct bna_mcam_handle *)qe; + if (mchandle->handle == handle) + return mchandle; + } + + return NULL; +} + +static void +bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle) +{ + struct bna_mac *mcmac; + struct bna_mcam_handle *mchandle; + + mcmac = bna_rxf_mcmac_get(rxf, mac_addr); + mchandle = bna_rxf_mchandle_get(rxf, handle); + if (mchandle == NULL) { + mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod); + mchandle->handle = handle; + mchandle->refcnt = 0; + list_add_tail(&mchandle->qe, &rxf->mcast_handle_q); + } + mchandle->refcnt++; + mcmac->handle = mchandle; +} + +static int +bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac, + enum bna_cleanup_type cleanup) +{ + struct bna_mcam_handle *mchandle; + int ret = 0; + + mchandle = mac->handle; + if (mchandle == NULL) + return ret; + + mchandle->refcnt--; + if (mchandle->refcnt == 0) { + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_del_req(rxf, mchandle->handle); + ret = 1; + } + list_del(&mchandle->qe); + bfa_q_qe_init(&mchandle->qe); + bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle); + } + mac->handle = NULL; + + return ret; +} + +static int +bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf) +{ + struct bna_mac *mac = NULL; + struct list_head *qe; + int ret; + + /* Delete multicast entries previousely added */ + while (!list_empty(&rxf->mcast_pending_del_q)) { + bfa_q_deq(&rxf->mcast_pending_del_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP); + bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + if (ret) + return ret; + } + + /* Add multicast entries */ + if (!list_empty(&rxf->mcast_pending_add_q)) { + bfa_q_deq(&rxf->mcast_pending_add_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + list_add_tail(&mac->qe, &rxf->mcast_active_q); + bna_bfi_mcast_add_req(rxf, mac); + return 1; + } + + return 0; +} + +static int +bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf) +{ + u8 
vlan_pending_bitmask; + int block_idx = 0; + + if (rxf->vlan_pending_bitmask) { + vlan_pending_bitmask = rxf->vlan_pending_bitmask; + while (!(vlan_pending_bitmask & 0x1)) { + block_idx++; + vlan_pending_bitmask >>= 1; + } + rxf->vlan_pending_bitmask &= ~(1 << block_idx); + bna_bfi_rx_vlan_filter_set(rxf, block_idx); + return 1; + } + + return 0; +} + +static int +bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct list_head *qe; + struct bna_mac *mac; + int ret; + + /* Throw away delete pending mcast entries */ + while (!list_empty(&rxf->mcast_pending_del_q)) { + bfa_q_deq(&rxf->mcast_pending_del_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + ret = bna_rxf_mcast_del(rxf, mac, cleanup); + bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + if (ret) + return ret; + } + + /* Move active mcast entries to pending_add_q */ + while (!list_empty(&rxf->mcast_active_q)) { + bfa_q_deq(&rxf->mcast_active_q, &qe); + bfa_q_qe_init(qe); + list_add_tail(qe, &rxf->mcast_pending_add_q); + mac = (struct bna_mac *)qe; + if (bna_rxf_mcast_del(rxf, mac, cleanup)) + return 1; + } + + return 0; +} + +static int +bna_rxf_rss_cfg_apply(struct bna_rxf *rxf) +{ + if (rxf->rss_pending) { + if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING; + bna_bfi_rit_cfg(rxf); + return 1; + } + + if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING; + bna_bfi_rss_cfg(rxf); + return 1; + } + + if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING; + bna_bfi_rss_enable(rxf); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_cfg_apply(struct bna_rxf *rxf) +{ + if (bna_rxf_ucast_cfg_apply(rxf)) + return 1; + + if (bna_rxf_mcast_cfg_apply(rxf)) + return 1; + + if (bna_rxf_promisc_cfg_apply(rxf)) + return 1; + + if (bna_rxf_allmulti_cfg_apply(rxf)) + return 1; + + if (bna_rxf_vlan_cfg_apply(rxf)) + return 1; + + if (bna_rxf_vlan_strip_cfg_apply(rxf)) + return 1; + + if (bna_rxf_rss_cfg_apply(rxf)) + return 1; + + return 0; +} + +/* Only software reset */ +static int +bna_rxf_fltr_clear(struct bna_rxf *rxf) +{ + if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP)) + return 1; + + if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP)) + return 1; + + if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP)) + return 1; + + if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP)) + return 1; + + return 0; +} + +static void +bna_rxf_cfg_reset(struct bna_rxf *rxf) +{ + bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_vlan_cfg_soft_reset(rxf); + bna_rxf_rss_cfg_soft_reset(rxf); +} + +static void +bna_rit_init(struct bna_rxf *rxf, int rit_size) +{ + struct bna_rx *rx = rxf->rx; + struct bna_rxp *rxp; + struct list_head *qe; + int offset = 0; + + rxf->rit_size = rit_size; + list_for_each(qe, &rx->rxp_q) { + rxp = (struct bna_rxp *)qe; + rxf->rit[offset] = rxp->cq.ccb->id; + offset++; + } + +} + +void +bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +void +bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_mcast_add_req *req = + &rxf->bfi_enet_cmd.mcast_add_req; + struct bfi_enet_mcast_add_rsp *rsp = + (struct bfi_enet_mcast_add_rsp *)msghdr; + + bna_rxf_mchandle_attach(rxf, (u8 
*)&req->mac_addr, + ntohs(rsp->handle)); + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +static void +bna_rxf_init(struct bna_rxf *rxf, + struct bna_rx *rx, + struct bna_rx_config *q_config, + struct bna_res_info *res_info) +{ + rxf->rx = rx; + + INIT_LIST_HEAD(&rxf->ucast_pending_add_q); + INIT_LIST_HEAD(&rxf->ucast_pending_del_q); + rxf->ucast_pending_set = 0; + rxf->ucast_active_set = 0; + INIT_LIST_HEAD(&rxf->ucast_active_q); + rxf->ucast_pending_mac = NULL; + + INIT_LIST_HEAD(&rxf->mcast_pending_add_q); + INIT_LIST_HEAD(&rxf->mcast_pending_del_q); + INIT_LIST_HEAD(&rxf->mcast_active_q); + INIT_LIST_HEAD(&rxf->mcast_handle_q); + + if (q_config->paused) + rxf->flags |= BNA_RXF_F_PAUSED; + + rxf->rit = (u8 *) + res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva; + bna_rit_init(rxf, q_config->num_paths); + + rxf->rss_status = q_config->rss_status; + if (rxf->rss_status == BNA_STATUS_T_ENABLED) { + rxf->rss_cfg = q_config->rss_config; + rxf->rss_pending |= BNA_RSS_F_CFG_PENDING; + rxf->rss_pending |= BNA_RSS_F_RIT_PENDING; + rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING; + } + + rxf->vlan_filter_status = BNA_STATUS_T_DISABLED; + memset(rxf->vlan_filter_table, 0, + (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32))); + rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */ + rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; + + rxf->vlan_strip_status = q_config->vlan_strip_status; + + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); +} + +static void +bna_rxf_uninit(struct bna_rxf *rxf) +{ + struct bna_mac *mac; + + rxf->ucast_pending_set = 0; + rxf->ucast_active_set = 0; + + while (!list_empty(&rxf->ucast_pending_add_q)) { + bfa_q_deq(&rxf->ucast_pending_add_q, &mac); + bfa_q_qe_init(&mac->qe); + bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); + } + + if (rxf->ucast_pending_mac) { + bfa_q_qe_init(&rxf->ucast_pending_mac->qe); + bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, + rxf->ucast_pending_mac); + rxf->ucast_pending_mac = NULL; + } + + while (!list_empty(&rxf->mcast_pending_add_q)) { + bfa_q_deq(&rxf->mcast_pending_add_q, &mac); + bfa_q_qe_init(&mac->qe); + bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + } + + rxf->rxmode_pending = 0; + rxf->rxmode_pending_bitmask = 0; + if (rxf->rx->bna->promisc_rid == rxf->rx->rid) + rxf->rx->bna->promisc_rid = BFI_INVALID_RID; + if (rxf->rx->bna->default_mode_rid == rxf->rx->rid) + rxf->rx->bna->default_mode_rid = BFI_INVALID_RID; + + rxf->rss_pending = 0; + rxf->vlan_strip_pending = false; + + rxf->flags = 0; + + rxf->rx = NULL; +} + +static void +bna_rx_cb_rxf_started(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_RXF_STARTED); +} + +static void +bna_rxf_start(struct bna_rxf *rxf) +{ + rxf->start_cbfn = bna_rx_cb_rxf_started; + rxf->start_cbarg = rxf->rx; + bfa_fsm_send_event(rxf, RXF_E_START); +} + +static void +bna_rx_cb_rxf_stopped(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_RXF_STOPPED); +} + +static void +bna_rxf_stop(struct bna_rxf *rxf) +{ + rxf->stop_cbfn = bna_rx_cb_rxf_stopped; + rxf->stop_cbarg = rxf->rx; + bfa_fsm_send_event(rxf, RXF_E_STOP); +} + +static void +bna_rxf_fail(struct bna_rxf *rxf) +{ + bfa_fsm_send_event(rxf, RXF_E_FAIL); +} + +enum bna_cb_status +bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->ucast_pending_mac == NULL) { + rxf->ucast_pending_mac = + bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod); + if (rxf->ucast_pending_mac == NULL) + return BNA_CB_UCAST_CAM_FULL; + 
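/*
 * Editorial aside -- illustrative sketch, not part of this patch: the
 * multicast CAM handling above (bna_rxf_mchandle_attach()/bna_rxf_mcast_del())
 * reference-counts the firmware handle, since more than one multicast entry
 * may end up attached to the same handle.  A firmware delete request is only
 * issued once the last reference is dropped (hard-cleanup case).  The types
 * and helper names below are stand-ins, not driver API.
 */
#include <stdbool.h>
#include <stdio.h>

struct mc_handle {
        int fw_handle;  /* handle returned by firmware for the group */
        int refcnt;     /* entries currently sharing this handle     */
};

/* Attach one more user to the shared handle (what mchandle_attach does). */
static void mc_handle_get(struct mc_handle *h)
{
        h->refcnt++;
}

/*
 * Drop one user; return true when a firmware delete request would have to be
 * issued, i.e. when the last reference is gone.
 */
static bool mc_handle_put(struct mc_handle *h)
{
        if (--h->refcnt == 0) {
                printf("would post MCAST_DEL for fw handle %d\n", h->fw_handle);
                return true;
        }
        return false;
}

int main(void)
{
        struct mc_handle h = { .fw_handle = 7, .refcnt = 0 };

        mc_handle_get(&h);      /* first address mapped onto this handle  */
        mc_handle_get(&h);      /* second user of the same handle         */

        if (!mc_handle_put(&h))
                printf("handle still referenced (refcnt=%d)\n", h.refcnt);
        mc_handle_put(&h);      /* last reference: delete goes to firmware */
        return 0;
}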
bfa_q_qe_init(&rxf->ucast_pending_mac->qe); + } + + memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN); + rxf->ucast_pending_set = 1; + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; +} + +enum bna_cb_status +bna_rx_mcast_add(struct bna_rx *rx, u8 *addr, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + struct bna_mac *mac; + + /* Check if already added or pending addition */ + if (bna_mac_find(&rxf->mcast_active_q, addr) || + bna_mac_find(&rxf->mcast_pending_add_q, addr)) { + if (cbfn) + cbfn(rx->bna->bnad, rx); + return BNA_CB_SUCCESS; + } + + mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod); + if (mac == NULL) + return BNA_CB_MCAST_LIST_FULL; + bfa_q_qe_init(&mac->qe); + memcpy(mac->addr, addr, ETH_ALEN); + list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); + + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; +} + +enum bna_cb_status +bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + struct list_head list_head; + struct list_head *qe; + u8 *mcaddr; + struct bna_mac *mac; + int i; + + /* Allocate nodes */ + INIT_LIST_HEAD(&list_head); + for (i = 0, mcaddr = mclist; i < count; i++) { + mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod); + if (mac == NULL) + goto err_return; + bfa_q_qe_init(&mac->qe); + memcpy(mac->addr, mcaddr, ETH_ALEN); + list_add_tail(&mac->qe, &list_head); + + mcaddr += ETH_ALEN; + } + + /* Purge the pending_add_q */ + while (!list_empty(&rxf->mcast_pending_add_q)) { + bfa_q_deq(&rxf->mcast_pending_add_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + } + + /* Schedule active_q entries for deletion */ + while (!list_empty(&rxf->mcast_active_q)) { + bfa_q_deq(&rxf->mcast_active_q, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + list_add_tail(&mac->qe, &rxf->mcast_pending_del_q); + } + + /* Add the new entries */ + while (!list_empty(&list_head)) { + bfa_q_deq(&list_head, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); + } + + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; + +err_return: + while (!list_empty(&list_head)) { + bfa_q_deq(&list_head, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + } + + return BNA_CB_MCAST_LIST_FULL; +} + +void +bna_rx_vlan_add(struct bna_rx *rx, int vlan_id) +{ + struct bna_rxf *rxf = &rx->rxf; + int index = (vlan_id >> BFI_VLAN_WORD_SHIFT); + int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK)); + int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT); + + rxf->vlan_filter_table[index] |= bit; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { + rxf->vlan_pending_bitmask |= (1 << group_id); + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_vlan_del(struct bna_rx *rx, int vlan_id) +{ + struct bna_rxf *rxf = &rx->rxf; + int index = (vlan_id >> BFI_VLAN_WORD_SHIFT); + int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK)); + int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT); + + rxf->vlan_filter_table[index] &= ~bit; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { + rxf->vlan_pending_bitmask |= (1 << group_id); + 
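/*
 * Editorial aside -- illustrative sketch, not part of this patch: the index
 * arithmetic used by bna_rx_vlan_add()/bna_rx_vlan_del() above and by
 * bna_rxf_vlan_cfg_apply().  A VLAN id selects a bit inside one u32 word of
 * vlan_filter_table[] and a block (group) whose dirty bit is set in
 * vlan_pending_bitmask; cfg_apply then uploads the lowest-numbered dirty
 * block first.  The shift/mask values below (32 ids per word, 512 ids per
 * block) are assumptions chosen to match the 8-bit pending bitmask, not
 * quoted from the bfi headers.
 */
#include <stdint.h>
#include <stdio.h>

#define VLAN_WORD_SHIFT   5             /* assumed: log2(32)  */
#define VLAN_WORD_MASK    0x1f          /* assumed            */
#define VLAN_BLOCK_SHIFT  9             /* assumed: log2(512) */

int main(void)
{
        unsigned int vlan_id = 1234;    /* example id */
        uint8_t pending_bitmask = 0;

        unsigned int index    = vlan_id >> VLAN_WORD_SHIFT;       /* word in table    */
        unsigned int bit      = 1u << (vlan_id & VLAN_WORD_MASK); /* bit in that word */
        unsigned int group_id = vlan_id >> VLAN_BLOCK_SHIFT;      /* 512-id block     */

        printf("vlan %u -> table word %u, bit 0x%08x, block %u\n",
               vlan_id, index, bit, group_id);

        /* Mark the block dirty, as vlan_add/vlan_del do. */
        pending_bitmask |= 1u << group_id;

        /* Scan for the lowest dirty block, as bna_rxf_vlan_cfg_apply does. */
        unsigned int block_idx = 0;
        uint8_t scan = pending_bitmask;
        while (!(scan & 0x1)) {
                block_idx++;
                scan >>= 1;
        }
        pending_bitmask &= ~(1u << block_idx);
        printf("next block to upload: %u (pending now 0x%02x)\n",
               block_idx, (unsigned int)pending_bitmask);
        return 0;
}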
bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +static int +bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) +{ + struct bna_mac *mac = NULL; + struct list_head *qe; + + /* Delete MAC addresses previousely added */ + if (!list_empty(&rxf->ucast_pending_del_q)) { + bfa_q_deq(&rxf->ucast_pending_del_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); + return 1; + } + + /* Set default unicast MAC */ + if (rxf->ucast_pending_set) { + rxf->ucast_pending_set = 0; + memcpy(rxf->ucast_active_mac.addr, + rxf->ucast_pending_mac->addr, ETH_ALEN); + rxf->ucast_active_set = 1; + bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac, + BFI_ENET_H2I_MAC_UCAST_SET_REQ); + return 1; + } + + /* Add additional MAC entries */ + if (!list_empty(&rxf->ucast_pending_add_q)) { + bfa_q_deq(&rxf->ucast_pending_add_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + list_add_tail(&mac->qe, &rxf->ucast_active_q); + bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ); + return 1; + } + + return 0; +} + +static int +bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct list_head *qe; + struct bna_mac *mac; + + /* Throw away delete pending ucast entries */ + while (!list_empty(&rxf->ucast_pending_del_q)) { + bfa_q_deq(&rxf->ucast_pending_del_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + if (cleanup == BNA_SOFT_CLEANUP) + bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); + else { + bna_bfi_ucast_req(rxf, mac, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); + return 1; + } + } + + /* Move active ucast entries to pending_add_q */ + while (!list_empty(&rxf->ucast_active_q)) { + bfa_q_deq(&rxf->ucast_active_q, &qe); + bfa_q_qe_init(qe); + list_add_tail(qe, &rxf->ucast_pending_add_q); + if (cleanup == BNA_HARD_CLEANUP) { + mac = (struct bna_mac *)qe; + bna_bfi_ucast_req(rxf, mac, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + return 1; + } + } + + if (rxf->ucast_active_set) { + rxf->ucast_pending_set = 1; + rxf->ucast_active_set = 0; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac, + BFI_ENET_H2I_MAC_UCAST_CLR_REQ); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + + /* Enable/disable promiscuous mode */ + if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move promisc configuration from pending -> active */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active |= BNA_RXMODE_PROMISC; + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } else if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move promisc configuration from pending -> active */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + bna->promisc_rid = BFI_INVALID_RID; + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + + return 0; +} + +static int +bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct bna *bna = rxf->rx->bna; + + /* Clear pending promisc mode disable */ + if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + bna->promisc_rid = 
BFI_INVALID_RID; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + } + + /* Move promisc mode config from active -> pending */ + if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { + promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf) +{ + /* Enable/disable allmulti mode */ + if (is_allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move allmulti configuration from pending -> active */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active |= BNA_RXMODE_ALLMULTI; + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } else if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move allmulti configuration from pending -> active */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + + return 0; +} + +static int +bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + /* Clear pending allmulti mode disable */ + if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + } + + /* Move allmulti mode config from active -> pending */ + if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { + allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_promisc_enable(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + int ret = 0; + + if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (rxf->rxmode_active & BNA_RXMODE_PROMISC)) { + /* Do nothing if pending enable or already enabled */ + } else if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending disable command */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else { + /* Schedule enable */ + promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + bna->promisc_rid = rxf->rx->rid; + ret = 1; + } + + return ret; +} + +static int +bna_rxf_promisc_disable(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + int ret = 0; + + if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) { + /* Do nothing if pending disable or already disabled */ + } else if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending enable command */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + bna->promisc_rid = BFI_INVALID_RID; + } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { + /* Schedule disable */ + promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int +bna_rxf_allmulti_enable(struct bna_rxf *rxf) +{ + int ret = 0; + + if 
(is_allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) { + /* Do nothing if pending enable or already enabled */ + } else if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending disable command */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else { + /* Schedule enable */ + allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int +bna_rxf_allmulti_disable(struct bna_rxf *rxf) +{ + int ret = 0; + + if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) { + /* Do nothing if pending disable or already disabled */ + } else if (is_allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending enable command */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { + /* Schedule disable */ + allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int +bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf) +{ + if (rxf->vlan_strip_pending) { + rxf->vlan_strip_pending = false; + bna_bfi_vlan_strip_enable(rxf); + return 1; + } + + return 0; +} + +/** + * RX + */ + +#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \ + (qcfg)->num_paths : ((qcfg)->num_paths * 2)) + +#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\ + (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)) + +#define call_rx_stop_cbfn(rx) \ +do { \ + if ((rx)->stop_cbfn) { \ + void (*cbfn)(void *, struct bna_rx *); \ + void *cbarg; \ + cbfn = (rx)->stop_cbfn; \ + cbarg = (rx)->stop_cbarg; \ + (rx)->stop_cbfn = NULL; \ + (rx)->stop_cbarg = NULL; \ + cbfn(cbarg, rx); \ + } \ +} while (0) + +#define call_rx_stall_cbfn(rx) \ +do { \ + if ((rx)->rx_stall_cbfn) \ + (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \ +} while (0) + +#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \ +do { \ + struct bna_dma_addr cur_q_addr = \ + *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \ + (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \ + (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \ + (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \ + (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \ + (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \ + (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\ +} while (0) + +static void bna_bfi_rx_enet_start(struct bna_rx *rx); +static void bna_rx_enet_stop(struct bna_rx *rx); +static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx); + +bfa_fsm_state_decl(bna_rx, stopped, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, start_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, rxf_start_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, started, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, rxf_stop_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, stop_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, cleanup_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, failed, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, quiesce_wait, + struct bna_rx, enum bna_rx_event); + +static void bna_rx_sm_stopped_entry(struct bna_rx *rx) +{ + call_rx_stop_cbfn(rx); +} + +static void 
bna_rx_sm_stopped(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_START: + bfa_fsm_set_state(rx, bna_rx_sm_start_wait); + break; + + case RX_E_STOP: + call_rx_stop_cbfn(rx); + break; + + case RX_E_FAIL: + /* no-op */ + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_start_wait_entry(struct bna_rx *rx) +{ + bna_bfi_rx_enet_start(rx); +} + +void +bna_rx_sm_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_STARTED: + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_start_wait(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + case RX_E_STARTED: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx) +{ + rx->rx_post_cbfn(rx->bna->bnad, rx); + bna_rxf_start(&rx->rxf); +} + +void +bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_RXF_STARTED: + bna_rxf_stop(&rx->rxf); + break; + + case RX_E_RXF_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); + call_rx_stall_cbfn(rx); + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + break; + } + +} + +void +bna_rx_sm_started_entry(struct bna_rx *rx) +{ + struct bna_rxp *rxp; + struct list_head *qe_rxp; + int is_regular = (rx->type == BNA_RX_T_REGULAR); + + /* Start IB */ + list_for_each(qe_rxp, &rx->rxp_q) { + rxp = (struct bna_rxp *)qe_rxp; + bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); + } + + bna_ethport_cb_rx_started(&rx->bna->ethport); +} + +static void +bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); + bna_ethport_cb_rx_stopped(&rx->bna->ethport); + bna_rxf_stop(&rx->rxf); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_failed); + bna_ethport_cb_rx_stopped(&rx->bna->ethport); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_failed); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_RXF_STARTED: + bfa_fsm_set_state(rx, bna_rx_sm_started); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +void +bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) +{ +} + +void +bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_RXF_STOPPED: + /* No-op */ + break; + + case RX_E_CLEANUP_DONE: + 
bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void +bna_rx_sm_failed_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_START: + bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait); + break; + + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + break; + + case RX_E_FAIL: + case RX_E_RXF_STARTED: + case RX_E_RXF_STOPPED: + /* No-op */ + break; + + case RX_E_CLEANUP_DONE: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + break; +} } + +static void +bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_failed); + break; + + case RX_E_CLEANUP_DONE: + bfa_fsm_set_state(rx, bna_rx_sm_start_wait); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void +bna_bfi_rx_enet_start(struct bna_rx *rx) +{ + struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req; + struct bna_rxp *rxp = NULL; + struct bna_rxq *q0 = NULL, *q1 = NULL; + struct list_head *rxp_qe; + int i; + + bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid); + cfg_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); + + cfg_req->num_queue_sets = rx->num_paths; + for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); + i < rx->num_paths; + i++, rxp_qe = bfa_q_next(rxp_qe)) { + rxp = (struct bna_rxp *)rxp_qe; + + GET_RXQS(rxp, q0, q1); + switch (rxp->type) { + case BNA_RXP_SLR: + case BNA_RXP_HDS: + /* Small RxQ */ + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q, + &q1->qpt); + cfg_req->q_cfg[i].qs.rx_buffer_size = + htons((u16)q1->buffer_size); + /* Fall through */ + + case BNA_RXP_SINGLE: + /* Large/Single RxQ */ + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, + &q0->qpt); + q0->buffer_size = + bna_enet_mtu_get(&rx->bna->enet); + cfg_req->q_cfg[i].ql.rx_buffer_size = + htons((u16)q0->buffer_size); + break; + + default: + BUG_ON(1); + } + + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, + &rxp->cq.qpt); + + cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = + rxp->cq.ib.ib_seg_host_addr.lsb; + cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = + rxp->cq.ib.ib_seg_host_addr.msb; + cfg_req->q_cfg[i].ib.intr.msix_index = + htons((u16)rxp->cq.ib.intr_vector); + } + + cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) + ? 
BNA_STATUS_T_ENABLED : + BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.coalescing_timeout = + htonl((u32)rxp->cq.ib.coalescing_timeo); + cfg_req->ib_cfg.inter_pkt_timeout = + htonl((u32)rxp->cq.ib.interpkt_timeo); + cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count; + + switch (rxp->type) { + case BNA_RXP_SLR: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL; + break; + + case BNA_RXP_HDS: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS; + cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type; + cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset; + cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset; + break; + + case BNA_RXP_SINGLE: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE; + break; + + default: + BUG_ON(1); + } + cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status; + + bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh); + bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); +} + +static void +bna_bfi_rx_enet_stop(struct bna_rx *rx) +{ + struct bfi_enet_req *req = &rx->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); + bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), + &req->mh); + bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); +} + +static void +bna_rx_enet_stop(struct bna_rx *rx) +{ + struct bna_rxp *rxp; + struct list_head *qe_rxp; + + /* Stop IB */ + list_for_each(qe_rxp, &rx->rxp_q) { + rxp = (struct bna_rxp *)qe_rxp; + bna_ib_stop(rx->bna, &rxp->cq.ib); + } + + bna_bfi_rx_enet_stop(rx); +} + +static int +bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg) +{ + if ((rx_mod->rx_free_count == 0) || + (rx_mod->rxp_free_count == 0) || + (rx_mod->rxq_free_count == 0)) + return 0; + + if (rx_cfg->rxp_type == BNA_RXP_SINGLE) { + if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || + (rx_mod->rxq_free_count < rx_cfg->num_paths)) + return 0; + } else { + if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || + (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) + return 0; + } + + return 1; +} + +static struct bna_rxq * +bna_rxq_get(struct bna_rx_mod *rx_mod) +{ + struct bna_rxq *rxq = NULL; + struct list_head *qe = NULL; + + bfa_q_deq(&rx_mod->rxq_free_q, &qe); + rx_mod->rxq_free_count--; + rxq = (struct bna_rxq *)qe; + bfa_q_qe_init(&rxq->qe); + + return rxq; +} + +static void +bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) +{ + bfa_q_qe_init(&rxq->qe); + list_add_tail(&rxq->qe, &rx_mod->rxq_free_q); + rx_mod->rxq_free_count++; +} + +static struct bna_rxp * +bna_rxp_get(struct bna_rx_mod *rx_mod) +{ + struct list_head *qe = NULL; + struct bna_rxp *rxp = NULL; + + bfa_q_deq(&rx_mod->rxp_free_q, &qe); + rx_mod->rxp_free_count--; + rxp = (struct bna_rxp *)qe; + bfa_q_qe_init(&rxp->qe); + + return rxp; +} + +static void +bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) +{ + bfa_q_qe_init(&rxp->qe); + list_add_tail(&rxp->qe, &rx_mod->rxp_free_q); + rx_mod->rxp_free_count++; +} + +static struct bna_rx * +bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct list_head *qe = NULL; + struct bna_rx *rx = NULL; + + if (type == BNA_RX_T_REGULAR) { + bfa_q_deq(&rx_mod->rx_free_q, &qe); + } else + bfa_q_deq_tail(&rx_mod->rx_free_q, &qe); + + rx_mod->rx_free_count--; + rx = (struct bna_rx *)qe; + bfa_q_qe_init(&rx->qe); + list_add_tail(&rx->qe, &rx_mod->rx_active_q); + rx->type = type; + + return 
rx; +} + +static void +bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx) +{ + struct list_head *prev_qe = NULL; + struct list_head *qe; + + bfa_q_qe_init(&rx->qe); + + list_for_each(qe, &rx_mod->rx_free_q) { + if (((struct bna_rx *)qe)->rid < rx->rid) + prev_qe = qe; + else + break; + } + + if (prev_qe == NULL) { + /* This is the first entry */ + bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe); + } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) { + /* This is the last entry */ + list_add_tail(&rx->qe, &rx_mod->rx_free_q); + } else { + /* Somewhere in the middle */ + bfa_q_next(&rx->qe) = bfa_q_next(prev_qe); + bfa_q_prev(&rx->qe) = prev_qe; + bfa_q_next(prev_qe) = &rx->qe; + bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe; + } + + rx_mod->rx_free_count++; +} + +static void +bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0, + struct bna_rxq *q1) +{ + switch (rxp->type) { + case BNA_RXP_SINGLE: + rxp->rxq.single.only = q0; + rxp->rxq.single.reserved = NULL; + break; + case BNA_RXP_SLR: + rxp->rxq.slr.large = q0; + rxp->rxq.slr.small = q1; + break; + case BNA_RXP_HDS: + rxp->rxq.hds.data = q0; + rxp->rxq.hds.hdr = q1; + break; + default: + break; + } +} + +static void +bna_rxq_qpt_setup(struct bna_rxq *rxq, + struct bna_rxp *rxp, + u32 page_count, + u32 page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + int i; + + rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + rxq->qpt.kv_qpt_ptr = qpt_mem->kva; + rxq->qpt.page_count = page_count; + rxq->qpt.page_size = page_size; + + rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; + + for (i = 0; i < rxq->qpt.page_count; i++) { + rxq->rcb->sw_qpt[i] = page_mem[i].kva; + ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = + page_mem[i].dma.lsb; + ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = + page_mem[i].dma.msb; + } +} + +static void +bna_rxp_cqpt_setup(struct bna_rxp *rxp, + u32 page_count, + u32 page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + int i; + + rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; + rxp->cq.qpt.page_count = page_count; + rxp->cq.qpt.page_size = page_size; + + rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; + + for (i = 0; i < rxp->cq.qpt.page_count; i++) { + rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva; + + ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = + page_mem[i].dma.lsb; + ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = + page_mem[i].dma.msb; + } +} + +static void +bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx) +{ + struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; + + bfa_wc_down(&rx_mod->rx_stop_wc); +} + +static void +bna_rx_mod_cb_rx_stopped_all(void *arg) +{ + struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; + + if (rx_mod->stop_cbfn) + rx_mod->stop_cbfn(&rx_mod->bna->enet); + rx_mod->stop_cbfn = NULL; +} + +static void +bna_rx_start(struct bna_rx *rx) +{ + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + if (rx->rx_flags & BNA_RX_F_ENABLED) + bfa_fsm_send_event(rx, RX_E_START); +} + +static void +bna_rx_stop(struct bna_rx *rx) +{ + rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; + if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped) + bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); + else { + rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; + rx->stop_cbarg = &rx->bna->rx_mod; + bfa_fsm_send_event(rx, RX_E_STOP); + } +} + +static void 
+bna_rx_fail(struct bna_rx *rx) +{ + /* Indicate Enet is not enabled, and failed */ + rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; + bfa_fsm_send_event(rx, RX_E_FAIL); +} + +void +bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct bna_rx *rx; + struct list_head *qe; + + rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED; + if (type == BNA_RX_T_LOOPBACK) + rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; + + list_for_each(qe, &rx_mod->rx_active_q) { + rx = (struct bna_rx *)qe; + if (rx->type == type) + bna_rx_start(rx); + } +} + +void +bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct bna_rx *rx; + struct list_head *qe; + + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; + + rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; + + bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod); + + list_for_each(qe, &rx_mod->rx_active_q) { + rx = (struct bna_rx *)qe; + if (rx->type == type) { + bfa_wc_up(&rx_mod->rx_stop_wc); + bna_rx_stop(rx); + } + } + + bfa_wc_wait(&rx_mod->rx_stop_wc); +} + +void +bna_rx_mod_fail(struct bna_rx_mod *rx_mod) +{ + struct bna_rx *rx; + struct list_head *qe; + + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; + + list_for_each(qe, &rx_mod->rx_active_q) { + rx = (struct bna_rx *)qe; + bna_rx_fail(rx); + } +} + +void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int index; + struct bna_rx *rx_ptr; + struct bna_rxp *rxp_ptr; + struct bna_rxq *rxq_ptr; + + rx_mod->bna = bna; + rx_mod->flags = 0; + + rx_mod->rx = (struct bna_rx *) + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva; + rx_mod->rxp = (struct bna_rxp *) + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva; + rx_mod->rxq = (struct bna_rxq *) + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva; + + /* Initialize the queues */ + INIT_LIST_HEAD(&rx_mod->rx_free_q); + rx_mod->rx_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rxq_free_q); + rx_mod->rxq_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rxp_free_q); + rx_mod->rxp_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rx_active_q); + + /* Build RX queues */ + for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { + rx_ptr = &rx_mod->rx[index]; + + bfa_q_qe_init(&rx_ptr->qe); + INIT_LIST_HEAD(&rx_ptr->rxp_q); + rx_ptr->bna = NULL; + rx_ptr->rid = index; + rx_ptr->stop_cbfn = NULL; + rx_ptr->stop_cbarg = NULL; + + list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q); + rx_mod->rx_free_count++; + } + + /* build RX-path queue */ + for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { + rxp_ptr = &rx_mod->rxp[index]; + bfa_q_qe_init(&rxp_ptr->qe); + list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q); + rx_mod->rxp_free_count++; + } + + /* build RXQ queue */ + for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { + rxq_ptr = &rx_mod->rxq[index]; + bfa_q_qe_init(&rxq_ptr->qe); + list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q); + rx_mod->rxq_free_count++; + } +} + +void +bna_rx_mod_uninit(struct bna_rx_mod *rx_mod) +{ + struct list_head *qe; + int i; + + i = 0; + list_for_each(qe, &rx_mod->rx_free_q) + i++; + + i = 0; + list_for_each(qe, &rx_mod->rxp_free_q) + i++; + + i = 0; + list_for_each(qe, &rx_mod->rxq_free_q) + i++; + + rx_mod->bna = NULL; +} + +void +bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; + struct bna_rxp *rxp = NULL; + struct 
bna_rxq *q0 = NULL, *q1 = NULL; + struct list_head *rxp_qe; + int i; + + bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp, + sizeof(struct bfi_enet_rx_cfg_rsp)); + + rx->hw_id = cfg_rsp->hw_id; + + for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); + i < rx->num_paths; + i++, rxp_qe = bfa_q_next(rxp_qe)) { + rxp = (struct bna_rxp *)rxp_qe; + GET_RXQS(rxp, q0, q1); + + /* Setup doorbells */ + rxp->cq.ccb->i_dbell->doorbell_addr = + rx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].i_dbell); + rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; + q0->rcb->q_dbell = + rx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].ql_dbell); + q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; + if (q1) { + q1->rcb->q_dbell = + rx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].qs_dbell); + q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; + } + + /* Initialize producer/consumer indexes */ + (*rxp->cq.ccb->hw_producer_index) = 0; + rxp->cq.ccb->producer_index = 0; + q0->rcb->producer_index = q0->rcb->consumer_index = 0; + if (q1) + q1->rcb->producer_index = q1->rcb->consumer_index = 0; + } + + bfa_fsm_send_event(rx, RX_E_STARTED); +} + +void +bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(rx, RX_E_STOPPED); +} + +void +bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) +{ + u32 cq_size, hq_size, dq_size; + u32 cpage_count, hpage_count, dpage_count; + struct bna_mem_info *mem_info; + u32 cq_depth; + u32 hq_depth; + u32 dq_depth; + + dq_depth = q_cfg->q_depth; + hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth); + cq_depth = dq_depth + hq_depth; + + BNA_TO_POWER_OF_2_HIGH(cq_depth); + cq_size = cq_depth * BFI_CQ_WI_SIZE; + cq_size = ALIGN(cq_size, PAGE_SIZE); + cpage_count = SIZE_TO_PAGES(cq_size); + + BNA_TO_POWER_OF_2_HIGH(dq_depth); + dq_size = dq_depth * BFI_RXQ_WI_SIZE; + dq_size = ALIGN(dq_size, PAGE_SIZE); + dpage_count = SIZE_TO_PAGES(dq_size); + + if (BNA_RXP_SINGLE != q_cfg->rxp_type) { + BNA_TO_POWER_OF_2_HIGH(hq_depth); + hq_size = hq_depth * BFI_RXQ_WI_SIZE; + hq_size = ALIGN(hq_size, PAGE_SIZE); + hpage_count = SIZE_TO_PAGES(hq_size); + } else + hpage_count = 0; + + res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_ccb); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_rcb); + mem_info->num = BNA_GET_RXQS(q_cfg); + + res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = cpage_count * sizeof(struct bna_dma_addr); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = cpage_count * sizeof(void *); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE; + mem_info->num = cpage_count * q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; + 
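/*
 * Editorial aside -- illustrative sketch, not part of this patch: the sizing
 * arithmetic bna_rx_res_req() performs above.  A requested queue depth is
 * rounded up to a power of two, multiplied by the work-item size, and the
 * resulting byte count is converted to whole DMA pages (what SIZE_TO_PAGES
 * boils down to).  The page size and work-item sizes below are assumed
 * example values, not the real BFI_*_WI_SIZE constants.
 */
#include <stdio.h>

#define EX_PAGE_SIZE    4096u   /* assumed page size for the example        */
#define EX_CQ_WI_SIZE   16u     /* assumed completion work-item size        */
#define EX_RXQ_WI_SIZE  8u      /* assumed RxQ work-item size               */

/* Round up to the next power of two (like BNA_TO_POWER_OF_2_HIGH). */
static unsigned int pow2_roundup(unsigned int v)
{
        unsigned int p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

/* Bytes -> whole pages, rounding up. */
static unsigned int size_to_pages(unsigned int size)
{
        return (size + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;
}

int main(void)
{
        unsigned int q_depth = 600;                     /* requested data-queue depth   */
        unsigned int dq_depth = pow2_roundup(q_depth);
        unsigned int cq_depth = pow2_roundup(q_depth);  /* single-RxQ case: no header Q */

        unsigned int dq_size = dq_depth * EX_RXQ_WI_SIZE;
        unsigned int cq_size = cq_depth * EX_CQ_WI_SIZE;

        printf("dq: depth %u -> %u pages\n", dq_depth, size_to_pages(dq_size));
        printf("cq: depth %u -> %u pages\n", cq_depth, size_to_pages(cq_size));
        return 0;
}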
mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = dpage_count * sizeof(struct bna_dma_addr); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = dpage_count * sizeof(void *); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE; + mem_info->num = dpage_count * q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = hpage_count * sizeof(struct bna_dma_addr); + mem_info->num = (hpage_count ? q_cfg->num_paths : 0); + + res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = hpage_count * sizeof(void *); + mem_info->num = (hpage_count ? q_cfg->num_paths : 0); + + res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = (hpage_count ? PAGE_SIZE : 0); + mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0); + + res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = BFI_IBIDX_SIZE; + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = BFI_ENET_RSS_RIT_MAX; + mem_info->num = 1; + + res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR; + res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX; + res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; +} + +struct bna_rx * +bna_rx_create(struct bna *bna, struct bnad *bnad, + struct bna_rx_config *rx_cfg, + const struct bna_rx_event_cbfn *rx_cbfn, + struct bna_res_info *res_info, + void *priv) +{ + struct bna_rx_mod *rx_mod = &bna->rx_mod; + struct bna_rx *rx; + struct bna_rxp *rxp; + struct bna_rxq *q0; + struct bna_rxq *q1; + struct bna_intr_info *intr_info; + u32 page_count; + struct bna_mem_descr *ccb_mem; + struct bna_mem_descr *rcb_mem; + struct bna_mem_descr *unmapq_mem; + struct bna_mem_descr *cqpt_mem; + struct bna_mem_descr *cswqpt_mem; + struct bna_mem_descr *cpage_mem; + struct bna_mem_descr *hqpt_mem; + struct bna_mem_descr *dqpt_mem; + struct bna_mem_descr *hsqpt_mem; + struct bna_mem_descr *dsqpt_mem; + struct bna_mem_descr *hpage_mem; + struct bna_mem_descr *dpage_mem; + int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0; + int dpage_count, hpage_count, rcb_idx; + + if (!bna_rx_res_check(rx_mod, rx_cfg)) + return NULL; + + intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; + ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; + rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; + unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0]; + cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; + cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; + cpage_mem = 
&res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; + hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0]; + dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0]; + hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0]; + dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0]; + hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; + dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; + + page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num / + rx_cfg->num_paths; + + dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num / + rx_cfg->num_paths; + + hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num / + rx_cfg->num_paths; + + rx = bna_rx_get(rx_mod, rx_cfg->rx_type); + rx->bna = bna; + rx->rx_flags = 0; + INIT_LIST_HEAD(&rx->rxp_q); + rx->stop_cbfn = NULL; + rx->stop_cbarg = NULL; + rx->priv = priv; + + rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; + rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; + rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; + rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; + rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; + /* Following callbacks are mandatory */ + rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; + rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; + + if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { + switch (rx->type) { + case BNA_RX_T_REGULAR: + if (!(rx->bna->rx_mod.flags & + BNA_RX_MOD_F_ENET_LOOPBACK)) + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + break; + case BNA_RX_T_LOOPBACK: + if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + break; + } + } + + rx->num_paths = rx_cfg->num_paths; + for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) { + rxp = bna_rxp_get(rx_mod); + list_add_tail(&rxp->qe, &rx->rxp_q); + rxp->type = rx_cfg->rxp_type; + rxp->rx = rx; + rxp->cq.rx = rx; + + q0 = bna_rxq_get(rx_mod); + if (BNA_RXP_SINGLE == rx_cfg->rxp_type) + q1 = NULL; + else + q1 = bna_rxq_get(rx_mod); + + if (1 == intr_info->num) + rxp->vector = intr_info->idl[0].vector; + else + rxp->vector = intr_info->idl[i].vector; + + /* Setup IB */ + + rxp->cq.ib.ib_seg_host_addr.lsb = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; + rxp->cq.ib.ib_seg_host_addr.msb = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; + rxp->cq.ib.ib_seg_host_addr_kva = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; + rxp->cq.ib.intr_type = intr_info->intr_type; + if (intr_info->intr_type == BNA_INTR_T_MSIX) + rxp->cq.ib.intr_vector = rxp->vector; + else + rxp->cq.ib.intr_vector = (1 << rxp->vector); + rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; + rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; + rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; + + bna_rxp_add_rxqs(rxp, q0, q1); + + /* Setup large Q */ + + q0->rx = rx; + q0->rxp = rxp; + + q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; + q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; + rcb_idx++; + q0->rcb->q_depth = rx_cfg->q_depth; + q0->rcb->rxq = q0; + q0->rcb->bnad = bna->bnad; + q0->rcb->id = 0; + q0->rx_packets = q0->rx_bytes = 0; + q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; + + bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, + &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]); + q0->rcb->page_idx = dpage_idx; + q0->rcb->page_count = dpage_count; + dpage_idx += dpage_count; + + if (rx->rcb_setup_cbfn) + rx->rcb_setup_cbfn(bnad, q0->rcb); + + /* Setup small Q */ + + 
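/* q1 is the small/header RxQ; it exists only for the SLR and HDS receive path types and is NULL for BNA_RXP_SINGLE, so this block is skipped in that case. */ +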
if (q1) { + q1->rx = rx; + q1->rxp = rxp; + + q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; + q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; + rcb_idx++; + q1->rcb->q_depth = rx_cfg->q_depth; + q1->rcb->rxq = q1; + q1->rcb->bnad = bna->bnad; + q1->rcb->id = 1; + q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? + rx_cfg->hds_config.forced_offset + : rx_cfg->small_buff_size; + q1->rx_packets = q1->rx_bytes = 0; + q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; + + bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, + &hqpt_mem[i], &hsqpt_mem[i], + &hpage_mem[hpage_idx]); + q1->rcb->page_idx = hpage_idx; + q1->rcb->page_count = hpage_count; + hpage_idx += hpage_count; + + if (rx->rcb_setup_cbfn) + rx->rcb_setup_cbfn(bnad, q1->rcb); + } + + /* Setup CQ */ + + rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; + rxp->cq.ccb->q_depth = rx_cfg->q_depth + + ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? + 0 : rx_cfg->q_depth); + rxp->cq.ccb->cq = &rxp->cq; + rxp->cq.ccb->rcb[0] = q0->rcb; + q0->rcb->ccb = rxp->cq.ccb; + if (q1) { + rxp->cq.ccb->rcb[1] = q1->rcb; + q1->rcb->ccb = rxp->cq.ccb; + } + rxp->cq.ccb->hw_producer_index = + (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; + rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; + rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; + rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; + rxp->cq.ccb->rx_coalescing_timeo = + rxp->cq.ib.coalescing_timeo; + rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; + rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; + rxp->cq.ccb->bnad = bna->bnad; + rxp->cq.ccb->id = i; + + bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, + &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]); + rxp->cq.ccb->page_idx = cpage_idx; + rxp->cq.ccb->page_count = page_count; + cpage_idx += page_count; + + if (rx->ccb_setup_cbfn) + rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); + } + + rx->hds_cfg = rx_cfg->hds_config; + + bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); + + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + + rx_mod->rid_mask |= (1 << rx->rid); + + return rx; +} + +void +bna_rx_destroy(struct bna_rx *rx) +{ + struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; + struct bna_rxq *q0 = NULL; + struct bna_rxq *q1 = NULL; + struct bna_rxp *rxp; + struct list_head *qe; + + bna_rxf_uninit(&rx->rxf); + + while (!list_empty(&rx->rxp_q)) { + bfa_q_deq(&rx->rxp_q, &rxp); + GET_RXQS(rxp, q0, q1); + if (rx->rcb_destroy_cbfn) + rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); + q0->rcb = NULL; + q0->rxp = NULL; + q0->rx = NULL; + bna_rxq_put(rx_mod, q0); + + if (q1) { + if (rx->rcb_destroy_cbfn) + rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); + q1->rcb = NULL; + q1->rxp = NULL; + q1->rx = NULL; + bna_rxq_put(rx_mod, q1); + } + rxp->rxq.slr.large = NULL; + rxp->rxq.slr.small = NULL; + + if (rx->ccb_destroy_cbfn) + rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); + rxp->cq.ccb = NULL; + rxp->rx = NULL; + bna_rxp_put(rx_mod, rxp); + } + + list_for_each(qe, &rx_mod->rx_active_q) { + if (qe == &rx->qe) { + list_del(&rx->qe); + bfa_q_qe_init(&rx->qe); + break; + } + } + + rx_mod->rid_mask &= ~(1 << rx->rid); + + rx->bna = NULL; + rx->priv = NULL; + bna_rx_put(rx_mod, rx); +} + +void +bna_rx_enable(struct bna_rx *rx) +{ + if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped) + return; + + rx->rx_flags |= BNA_RX_F_ENABLED; + if (rx->rx_flags & BNA_RX_F_ENET_STARTED) + bfa_fsm_send_event(rx, RX_E_START); +} + +void +bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_rx *)) +{ + if (type == BNA_SOFT_CLEANUP) { + /* h/w should not be accessed. 
Treat we're stopped */ + (*cbfn)(rx->bna->bnad, rx); + } else { + rx->stop_cbfn = cbfn; + rx->stop_cbarg = rx->bna->bnad; + + rx->rx_flags &= ~BNA_RX_F_ENABLED; + + bfa_fsm_send_event(rx, RX_E_STOP); + } +} + +void +bna_rx_cleanup_complete(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); +} + +enum bna_cb_status +bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, + enum bna_rxmode bitmask, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + int need_hw_config = 0; + + /* Error checks */ + + if (is_promisc_enable(new_mode, bitmask)) { + /* If promisc mode is already enabled elsewhere in the system */ + if ((rx->bna->promisc_rid != BFI_INVALID_RID) && + (rx->bna->promisc_rid != rxf->rx->rid)) + goto err_return; + + /* If default mode is already enabled in the system */ + if (rx->bna->default_mode_rid != BFI_INVALID_RID) + goto err_return; + + /* Trying to enable promiscuous and default mode together */ + if (is_default_enable(new_mode, bitmask)) + goto err_return; + } + + if (is_default_enable(new_mode, bitmask)) { + /* If default mode is already enabled elsewhere in the system */ + if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && + (rx->bna->default_mode_rid != rxf->rx->rid)) { + goto err_return; + } + + /* If promiscuous mode is already enabled in the system */ + if (rx->bna->promisc_rid != BFI_INVALID_RID) + goto err_return; + } + + /* Process the commands */ + + if (is_promisc_enable(new_mode, bitmask)) { + if (bna_rxf_promisc_enable(rxf)) + need_hw_config = 1; + } else if (is_promisc_disable(new_mode, bitmask)) { + if (bna_rxf_promisc_disable(rxf)) + need_hw_config = 1; + } + + if (is_allmulti_enable(new_mode, bitmask)) { + if (bna_rxf_allmulti_enable(rxf)) + need_hw_config = 1; + } else if (is_allmulti_disable(new_mode, bitmask)) { + if (bna_rxf_allmulti_disable(rxf)) + need_hw_config = 1; + } + + /* Trigger h/w if needed */ + + if (need_hw_config) { + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } else if (cbfn) + (*cbfn)(rx->bna->bnad, rx); + + return BNA_CB_SUCCESS; + +err_return: + return BNA_CB_FAIL; +} + +void +bna_rx_vlanfilter_enable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { + rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; + rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) +{ + struct bna_rxp *rxp; + struct list_head *qe; + + list_for_each(qe, &rx->rxp_q) { + rxp = (struct bna_rxp *)qe; + rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; + bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); + } +} + +void +bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) +{ + int i, j; + + for (i = 0; i < BNA_LOAD_T_MAX; i++) + for (j = 0; j < BNA_BIAS_T_MAX; j++) + bna->rx_mod.dim_vector[i][j] = vector[i][j]; +} + +void +bna_rx_dim_update(struct bna_ccb *ccb) +{ + struct bna *bna = ccb->cq->rx->bna; + u32 load, bias; + u32 pkt_rt, small_rt, large_rt; + u8 coalescing_timeo; + + if ((ccb->pkt_rate.small_pkt_cnt == 0) && + (ccb->pkt_rate.large_pkt_cnt == 0)) + return; + + /* Arrive at preconfigured coalescing timeo value based on pkt rate */ + + small_rt = ccb->pkt_rate.small_pkt_cnt; + large_rt = ccb->pkt_rate.large_pkt_cnt; + + pkt_rt = small_rt + large_rt; + + if (pkt_rt < BNA_PKT_RATE_10K) + load = BNA_LOAD_T_LOW_4; + else if 
(pkt_rt < BNA_PKT_RATE_20K) + load = BNA_LOAD_T_LOW_3; + else if (pkt_rt < BNA_PKT_RATE_30K) + load = BNA_LOAD_T_LOW_2; + else if (pkt_rt < BNA_PKT_RATE_40K) + load = BNA_LOAD_T_LOW_1; + else if (pkt_rt < BNA_PKT_RATE_50K) + load = BNA_LOAD_T_HIGH_1; + else if (pkt_rt < BNA_PKT_RATE_60K) + load = BNA_LOAD_T_HIGH_2; + else if (pkt_rt < BNA_PKT_RATE_80K) + load = BNA_LOAD_T_HIGH_3; + else + load = BNA_LOAD_T_HIGH_4; + + if (small_rt > (large_rt << 1)) + bias = 0; + else + bias = 1; + + ccb->pkt_rate.small_pkt_cnt = 0; + ccb->pkt_rate.large_pkt_cnt = 0; + + coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; + ccb->rx_coalescing_timeo = coalescing_timeo; + + /* Set it to IB */ + bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); +} + +const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { + {12, 12}, + {6, 10}, + {5, 10}, + {4, 8}, + {3, 6}, + {3, 6}, + {2, 4}, + {1, 2}, +}; + +/** + * TX + */ +#define call_tx_stop_cbfn(tx) \ +do { \ + if ((tx)->stop_cbfn) { \ + void (*cbfn)(void *, struct bna_tx *); \ + void *cbarg; \ + cbfn = (tx)->stop_cbfn; \ + cbarg = (tx)->stop_cbarg; \ + (tx)->stop_cbfn = NULL; \ + (tx)->stop_cbarg = NULL; \ + cbfn(cbarg, (tx)); \ + } \ +} while (0) + +#define call_tx_prio_change_cbfn(tx) \ +do { \ + if ((tx)->prio_change_cbfn) { \ + void (*cbfn)(struct bnad *, struct bna_tx *); \ + cbfn = (tx)->prio_change_cbfn; \ + (tx)->prio_change_cbfn = NULL; \ + cbfn((tx)->bna->bnad, (tx)); \ + } \ +} while (0) + +static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); +static void bna_bfi_tx_enet_start(struct bna_tx *tx); +static void bna_tx_enet_stop(struct bna_tx *tx); + +enum bna_tx_event { + TX_E_START = 1, + TX_E_STOP = 2, + TX_E_FAIL = 3, + TX_E_STARTED = 4, + TX_E_STOPPED = 5, + TX_E_PRIO_CHANGE = 6, + TX_E_CLEANUP_DONE = 7, + TX_E_BW_UPDATE = 8, +}; + +bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx, + enum bna_tx_event); + +static void +bna_tx_sm_stopped_entry(struct bna_tx *tx) +{ + call_tx_stop_cbfn(tx); +} + +static void +bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_START: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + case TX_E_STOP: + call_tx_stop_cbfn(tx); + break; + + case TX_E_FAIL: + /* No-op */ + break; + + case TX_E_PRIO_CHANGE: + call_tx_prio_change_cbfn(tx); + break; + + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_start_wait_entry(struct bna_tx *tx) +{ + bna_bfi_tx_enet_start(tx); +} + +static void +bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + break; + + case TX_E_FAIL: + tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + case TX_E_STARTED: + if 
(tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) { + tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | + BNA_TX_F_BW_UPDATED); + bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); + } else + bfa_fsm_set_state(tx, bna_tx_sm_started); + break; + + case TX_E_PRIO_CHANGE: + tx->flags |= BNA_TX_F_PRIO_CHANGED; + break; + + case TX_E_BW_UPDATE: + tx->flags |= BNA_TX_F_BW_UPDATED; + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_started_entry(struct bna_tx *tx) +{ + struct bna_txq *txq; + struct list_head *qe; + int is_regular = (tx->type == BNA_TX_T_REGULAR); + + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + txq->tcb->priority = txq->priority; + /* Start IB */ + bna_ib_start(tx->bna, &txq->ib, is_regular); + } + tx->tx_resume_cbfn(tx->bna->bnad, tx); +} + +static void +bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + tx->tx_stall_cbfn(tx->bna->bnad, tx); + bna_tx_enet_stop(tx); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + tx->tx_stall_cbfn(tx->bna->bnad, tx); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_stop_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_FAIL: + case TX_E_STOPPED: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_STARTED: + /** + * We are here due to start_wait -> stop_wait transition on + * TX_E_STOP event + */ + bna_tx_enet_stop(tx); + break; + + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_FAIL: + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) +{ + tx->tx_stall_cbfn(tx->bna->bnad, tx); + bna_tx_enet_stop(tx); +} + +static void +bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + call_tx_prio_change_cbfn(tx); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_STOPPED: + bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); + break; + + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) +{ + call_tx_prio_change_cbfn(tx); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); +} + +static void +bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + break; + + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + default: + 
bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_failed_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_START: + bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); + break; + + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; + + case TX_E_FAIL: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_tx_enet_start(struct bna_tx *tx) +{ + struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; + struct bna_txq *txq = NULL; + struct list_head *qe; + int i; + + bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); + cfg_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req))); + + cfg_req->num_queues = tx->num_txq; + for (i = 0, qe = bfa_q_first(&tx->txq_q); + i < tx->num_txq; + i++, qe = bfa_q_next(qe)) { + txq = (struct bna_txq *)qe; + + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); + cfg_req->q_cfg[i].q.priority = txq->priority; + + cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = + txq->ib.ib_seg_host_addr.lsb; + cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = + txq->ib.ib_seg_host_addr.msb; + cfg_req->q_cfg[i].ib.intr.msix_index = + htons((u16)txq->ib.intr_vector); + } + + cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) + ? 
BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.coalescing_timeout = + htonl((u32)txq->ib.coalescing_timeo); + cfg_req->ib_cfg.inter_pkt_timeout = + htonl((u32)txq->ib.interpkt_timeo); + cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; + + cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; + cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); + cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED; + cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); + bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); +} + +static void +bna_bfi_tx_enet_stop(struct bna_tx *tx) +{ + struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); + bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), + &req->mh); + bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); +} + +static void +bna_tx_enet_stop(struct bna_tx *tx) +{ + struct bna_txq *txq; + struct list_head *qe; + + /* Stop IB */ + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + bna_ib_stop(tx->bna, &txq->ib); + } + + bna_bfi_tx_enet_stop(tx); +} + +static void +bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + int i; + + txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + txq->qpt.kv_qpt_ptr = qpt_mem->kva; + txq->qpt.page_count = page_count; + txq->qpt.page_size = page_size; + + txq->tcb->sw_qpt = (void **) swqpt_mem->kva; + + for (i = 0; i < page_count; i++) { + txq->tcb->sw_qpt[i] = page_mem[i].kva; + + ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = + page_mem[i].dma.lsb; + ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = + page_mem[i].dma.msb; + } +} + +static struct bna_tx * +bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct list_head *qe = NULL; + struct bna_tx *tx = NULL; + + if (list_empty(&tx_mod->tx_free_q)) + return NULL; + if (type == BNA_TX_T_REGULAR) { + bfa_q_deq(&tx_mod->tx_free_q, &qe); + } else { + bfa_q_deq_tail(&tx_mod->tx_free_q, &qe); + } + tx = (struct bna_tx *)qe; + bfa_q_qe_init(&tx->qe); + tx->type = type; + + return tx; +} + +static void +bna_tx_free(struct bna_tx *tx) +{ + struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; + struct bna_txq *txq; + struct list_head *prev_qe; + struct list_head *qe; + + while (!list_empty(&tx->txq_q)) { + bfa_q_deq(&tx->txq_q, &txq); + bfa_q_qe_init(&txq->qe); + txq->tcb = NULL; + txq->tx = NULL; + list_add_tail(&txq->qe, &tx_mod->txq_free_q); + } + + list_for_each(qe, &tx_mod->tx_active_q) { + if (qe == &tx->qe) { + list_del(&tx->qe); + bfa_q_qe_init(&tx->qe); + break; + } + } + + tx->bna = NULL; + tx->priv = NULL; + + prev_qe = NULL; + list_for_each(qe, &tx_mod->tx_free_q) { + if (((struct bna_tx *)qe)->rid < tx->rid) + prev_qe = qe; + else { + break; + } + } + + if (prev_qe == NULL) { + /* This is the first entry */ + bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe); + } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) { + /* This is the last entry */ + list_add_tail(&tx->qe, &tx_mod->tx_free_q); + } else { + /* Somewhere in the middle */ + bfa_q_next(&tx->qe) = bfa_q_next(prev_qe); + bfa_q_prev(&tx->qe) = prev_qe; + bfa_q_next(prev_qe) = &tx->qe; + 
bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe; + } +} + +static void +bna_tx_start(struct bna_tx *tx) +{ + tx->flags |= BNA_TX_F_ENET_STARTED; + if (tx->flags & BNA_TX_F_ENABLED) + bfa_fsm_send_event(tx, TX_E_START); +} + +static void +bna_tx_stop(struct bna_tx *tx) +{ + tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; + tx->stop_cbarg = &tx->bna->tx_mod; + + tx->flags &= ~BNA_TX_F_ENET_STARTED; + bfa_fsm_send_event(tx, TX_E_STOP); +} + +static void +bna_tx_fail(struct bna_tx *tx) +{ + tx->flags &= ~BNA_TX_F_ENET_STARTED; + bfa_fsm_send_event(tx, TX_E_FAIL); +} + +void +bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; + struct bna_txq *txq = NULL; + struct list_head *qe; + int i; + + bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, + sizeof(struct bfi_enet_tx_cfg_rsp)); + + tx->hw_id = cfg_rsp->hw_id; + + for (i = 0, qe = bfa_q_first(&tx->txq_q); + i < tx->num_txq; i++, qe = bfa_q_next(qe)) { + txq = (struct bna_txq *)qe; + + /* Setup doorbells */ + txq->tcb->i_dbell->doorbell_addr = + tx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].i_dbell); + txq->tcb->q_dbell = + tx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].q_dbell); + txq->hw_id = cfg_rsp->q_handles[i].hw_qid; + + /* Initialize producer/consumer indexes */ + (*txq->tcb->hw_consumer_index) = 0; + txq->tcb->producer_index = txq->tcb->consumer_index = 0; + } + + bfa_fsm_send_event(tx, TX_E_STARTED); +} + +void +bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(tx, TX_E_STOPPED); +} + +void +bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod) +{ + struct bna_tx *tx; + struct list_head *qe; + + list_for_each(qe, &tx_mod->tx_active_q) { + tx = (struct bna_tx *)qe; + bfa_fsm_send_event(tx, TX_E_BW_UPDATE); + } +} + +void +bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) +{ + u32 q_size; + u32 page_count; + struct bna_mem_info *mem_info; + + res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_tcb); + mem_info->num = num_txq; + + q_size = txq_depth * BFI_TXQ_WI_SIZE; + q_size = ALIGN(q_size, PAGE_SIZE); + page_count = q_size >> PAGE_SHIFT; + + res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = page_count * sizeof(struct bna_dma_addr); + mem_info->num = num_txq; + + res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = page_count * sizeof(void *); + mem_info->num = num_txq; + + res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE; + mem_info->num = num_txq * page_count; + + res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = BFI_IBIDX_SIZE; + mem_info->num = num_txq; + + res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR; + res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type = + BNA_INTR_T_MSIX; + res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; +} + +struct bna_tx * +bna_tx_create(struct bna *bna, 
struct bnad *bnad, + struct bna_tx_config *tx_cfg, + const struct bna_tx_event_cbfn *tx_cbfn, + struct bna_res_info *res_info, void *priv) +{ + struct bna_intr_info *intr_info; + struct bna_tx_mod *tx_mod = &bna->tx_mod; + struct bna_tx *tx; + struct bna_txq *txq; + struct list_head *qe; + int page_count; + int page_size; + int page_idx; + int i; + + intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; + page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) / + tx_cfg->num_txq; + page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len; + + /** + * Get resources + */ + + if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) + return NULL; + + /* Tx */ + + tx = bna_tx_get(tx_mod, tx_cfg->tx_type); + if (!tx) + return NULL; + tx->bna = bna; + tx->priv = priv; + + /* TxQs */ + + INIT_LIST_HEAD(&tx->txq_q); + for (i = 0; i < tx_cfg->num_txq; i++) { + if (list_empty(&tx_mod->txq_free_q)) + goto err_return; + + bfa_q_deq(&tx_mod->txq_free_q, &txq); + bfa_q_qe_init(&txq->qe); + list_add_tail(&txq->qe, &tx->txq_q); + txq->tx = tx; + } + + /* + * Initialize + */ + + /* Tx */ + + tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; + tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; + /* Following callbacks are mandatory */ + tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; + tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; + tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; + + list_add_tail(&tx->qe, &tx_mod->tx_active_q); + + tx->num_txq = tx_cfg->num_txq; + + tx->flags = 0; + if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { + switch (tx->type) { + case BNA_TX_T_REGULAR: + if (!(tx->bna->tx_mod.flags & + BNA_TX_MOD_F_ENET_LOOPBACK)) + tx->flags |= BNA_TX_F_ENET_STARTED; + break; + case BNA_TX_T_LOOPBACK: + if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) + tx->flags |= BNA_TX_F_ENET_STARTED; + break; + } + } + + /* TxQ */ + + i = 0; + page_idx = 0; + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + txq->tcb = (struct bna_tcb *) + res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; + txq->tx_packets = 0; + txq->tx_bytes = 0; + + /* IB */ + txq->ib.ib_seg_host_addr.lsb = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; + txq->ib.ib_seg_host_addr.msb = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; + txq->ib.ib_seg_host_addr_kva = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; + txq->ib.intr_type = intr_info->intr_type; + txq->ib.intr_vector = (intr_info->num == 1) ? + intr_info->idl[0].vector : + intr_info->idl[i].vector; + if (intr_info->intr_type == BNA_INTR_T_INTX) + txq->ib.intr_vector = (1 << txq->ib.intr_vector); + txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; + txq->ib.interpkt_timeo = 0; /* Not used */ + txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; + + /* TCB */ + + txq->tcb->q_depth = tx_cfg->txq_depth; + txq->tcb->unmap_q = (void *) + res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; + txq->tcb->hw_consumer_index = + (u32 *)txq->ib.ib_seg_host_addr_kva; + txq->tcb->i_dbell = &txq->ib.door_bell; + txq->tcb->intr_type = txq->ib.intr_type; + txq->tcb->intr_vector = txq->ib.intr_vector; + txq->tcb->txq = txq; + txq->tcb->bnad = bnad; + txq->tcb->id = i; + + /* QPT, SWQPT, Pages */ + bna_txq_qpt_setup(txq, page_count, page_size, + &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], + &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], + &res_info[BNA_TX_RES_MEM_T_PAGE]. 
+ res_u.mem_info.mdl[page_idx]); + txq->tcb->page_idx = page_idx; + txq->tcb->page_count = page_count; + page_idx += page_count; + + /* Callback to bnad for setting up TCB */ + if (tx->tcb_setup_cbfn) + (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); + + if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) + txq->priority = txq->tcb->id; + else + txq->priority = tx_mod->default_prio; + + i++; + } + + tx->txf_vlan_id = 0; + + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + + tx_mod->rid_mask |= (1 << tx->rid); + + return tx; + +err_return: + bna_tx_free(tx); + return NULL; +} + +void +bna_tx_destroy(struct bna_tx *tx) +{ + struct bna_txq *txq; + struct list_head *qe; + + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + if (tx->tcb_destroy_cbfn) + (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); + } + + tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid); + bna_tx_free(tx); +} + +void +bna_tx_enable(struct bna_tx *tx) +{ + if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) + return; + + tx->flags |= BNA_TX_F_ENABLED; + + if (tx->flags & BNA_TX_F_ENET_STARTED) + bfa_fsm_send_event(tx, TX_E_START); +} + +void +bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_tx *)) +{ + if (type == BNA_SOFT_CLEANUP) { + (*cbfn)(tx->bna->bnad, tx); + return; + } + + tx->stop_cbfn = cbfn; + tx->stop_cbarg = tx->bna->bnad; + + tx->flags &= ~BNA_TX_F_ENABLED; + + bfa_fsm_send_event(tx, TX_E_STOP); +} + +void +bna_tx_cleanup_complete(struct bna_tx *tx) +{ + bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); +} + +static void +bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) +{ + struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; + + bfa_wc_down(&tx_mod->tx_stop_wc); +} + +static void +bna_tx_mod_cb_tx_stopped_all(void *arg) +{ + struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; + + if (tx_mod->stop_cbfn) + tx_mod->stop_cbfn(&tx_mod->bna->enet); + tx_mod->stop_cbfn = NULL; +} + +void +bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + tx_mod->bna = bna; + tx_mod->flags = 0; + + tx_mod->tx = (struct bna_tx *) + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; + tx_mod->txq = (struct bna_txq *) + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&tx_mod->tx_free_q); + INIT_LIST_HEAD(&tx_mod->tx_active_q); + + INIT_LIST_HEAD(&tx_mod->txq_free_q); + + for (i = 0; i < bna->ioceth.attr.num_txq; i++) { + tx_mod->tx[i].rid = i; + bfa_q_qe_init(&tx_mod->tx[i].qe); + list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); + bfa_q_qe_init(&tx_mod->txq[i].qe); + list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); + } + + tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; + tx_mod->default_prio = 0; + tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; + tx_mod->iscsi_prio = -1; +} + +void +bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) +{ + struct list_head *qe; + int i; + + i = 0; + list_for_each(qe, &tx_mod->tx_free_q) + i++; + + i = 0; + list_for_each(qe, &tx_mod->txq_free_q) + i++; + + tx_mod->bna = NULL; +} + +void +bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct bna_tx *tx; + struct list_head *qe; + + tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; + if (type == BNA_TX_T_LOOPBACK) + tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; + + list_for_each(qe, &tx_mod->tx_active_q) { + tx = (struct bna_tx *)qe; + if (tx->type == type) + bna_tx_start(tx); + } +} + +void +bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct bna_tx *tx; + struct list_head *qe; + + 
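/* Fan the stop out through a wait counter: bfa_wc_up() is called for every matching Tx, bna_tx_mod_cb_tx_stopped() drops the count as each Tx reports stopped, and bna_tx_mod_cb_tx_stopped_all() invokes stop_cbfn only once all of them are done. */ +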
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; + + tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; + + bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod); + + list_for_each(qe, &tx_mod->tx_active_q) { + tx = (struct bna_tx *)qe; + if (tx->type == type) { + bfa_wc_up(&tx_mod->tx_stop_wc); + bna_tx_stop(tx); + } + } + + bfa_wc_wait(&tx_mod->tx_stop_wc); +} + +void +bna_tx_mod_fail(struct bna_tx_mod *tx_mod) +{ + struct bna_tx *tx; + struct list_head *qe; + + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; + + list_for_each(qe, &tx_mod->tx_active_q) { + tx = (struct bna_tx *)qe; + bna_tx_fail(tx); + } +} + +void +bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) +{ + struct bna_txq *txq; + struct list_head *qe; + + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo); + } +} diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h new file mode 100644 index 000000000000..d090fbfb12fa --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -0,0 +1,987 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ +#ifndef __BNA_TYPES_H__ +#define __BNA_TYPES_H__ + +#include "cna.h" +#include "bna_hw_defs.h" +#include "bfa_cee.h" +#include "bfa_msgq.h" + +/** + * + * Forward declarations + * + */ + +struct bna_mcam_handle; +struct bna_txq; +struct bna_tx; +struct bna_rxq; +struct bna_cq; +struct bna_rx; +struct bna_rxf; +struct bna_enet; +struct bna; +struct bnad; + +/** + * + * Enums, primitive data types + * + */ + +enum bna_status { + BNA_STATUS_T_DISABLED = 0, + BNA_STATUS_T_ENABLED = 1 +}; + +enum bna_cleanup_type { + BNA_HARD_CLEANUP = 0, + BNA_SOFT_CLEANUP = 1 +}; + +enum bna_cb_status { + BNA_CB_SUCCESS = 0, + BNA_CB_FAIL = 1, + BNA_CB_INTERRUPT = 2, + BNA_CB_BUSY = 3, + BNA_CB_INVALID_MAC = 4, + BNA_CB_MCAST_LIST_FULL = 5, + BNA_CB_UCAST_CAM_FULL = 6, + BNA_CB_WAITING = 7, + BNA_CB_NOT_EXEC = 8 +}; + +enum bna_res_type { + BNA_RES_T_MEM = 1, + BNA_RES_T_INTR = 2 +}; + +enum bna_mem_type { + BNA_MEM_T_KVA = 1, + BNA_MEM_T_DMA = 2 +}; + +enum bna_intr_type { + BNA_INTR_T_INTX = 1, + BNA_INTR_T_MSIX = 2 +}; + +enum bna_res_req_type { + BNA_RES_MEM_T_COM = 0, + BNA_RES_MEM_T_ATTR = 1, + BNA_RES_MEM_T_FWTRC = 2, + BNA_RES_MEM_T_STATS = 3, + BNA_RES_T_MAX +}; + +enum bna_mod_res_req_type { + BNA_MOD_RES_MEM_T_TX_ARRAY = 0, + BNA_MOD_RES_MEM_T_TXQ_ARRAY = 1, + BNA_MOD_RES_MEM_T_RX_ARRAY = 2, + BNA_MOD_RES_MEM_T_RXP_ARRAY = 3, + BNA_MOD_RES_MEM_T_RXQ_ARRAY = 4, + BNA_MOD_RES_MEM_T_UCMAC_ARRAY = 5, + BNA_MOD_RES_MEM_T_MCMAC_ARRAY = 6, + BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7, + BNA_MOD_RES_T_MAX +}; + +enum bna_tx_res_req_type { + BNA_TX_RES_MEM_T_TCB = 0, + BNA_TX_RES_MEM_T_UNMAPQ = 1, + BNA_TX_RES_MEM_T_QPT = 2, + BNA_TX_RES_MEM_T_SWQPT = 3, + BNA_TX_RES_MEM_T_PAGE = 4, + BNA_TX_RES_MEM_T_IBIDX = 5, + BNA_TX_RES_INTR_T_TXCMPL = 6, + BNA_TX_RES_T_MAX, +}; + +enum bna_rx_mem_type { + BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */ + BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */ + BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */ + BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */ + BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */ + BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */ + BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */ + BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */ + BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */ + BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */ + BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */ + BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */ + BNA_RX_RES_MEM_T_IBIDX = 12, + BNA_RX_RES_MEM_T_RIT = 13, + BNA_RX_RES_T_INTR = 14, /* Rx interrupts */ + BNA_RX_RES_T_MAX = 15 +}; + +enum bna_tx_type { + BNA_TX_T_REGULAR = 0, + BNA_TX_T_LOOPBACK = 1, +}; + +enum bna_tx_flags { + BNA_TX_F_ENET_STARTED = 1, + BNA_TX_F_ENABLED = 2, + BNA_TX_F_PRIO_CHANGED = 4, + BNA_TX_F_BW_UPDATED = 8, +}; + +enum bna_tx_mod_flags { + BNA_TX_MOD_F_ENET_STARTED = 1, + BNA_TX_MOD_F_ENET_LOOPBACK = 2, +}; + +enum bna_rx_type { + BNA_RX_T_REGULAR = 0, + BNA_RX_T_LOOPBACK = 1, +}; + +enum bna_rxp_type { + BNA_RXP_SINGLE = 1, + BNA_RXP_SLR = 2, + BNA_RXP_HDS = 3 +}; + +enum bna_rxmode { + BNA_RXMODE_PROMISC = 1, + BNA_RXMODE_DEFAULT = 2, + BNA_RXMODE_ALLMULTI = 4 +}; + +enum bna_rx_event { + RX_E_START = 1, + RX_E_STOP = 2, + RX_E_FAIL = 3, + RX_E_STARTED = 4, + RX_E_STOPPED = 5, + RX_E_RXF_STARTED = 6, + RX_E_RXF_STOPPED = 7, + RX_E_CLEANUP_DONE = 8, +}; + +enum bna_rx_flags { + BNA_RX_F_ENET_STARTED = 1, + BNA_RX_F_ENABLED = 2, +}; + +enum bna_rx_mod_flags { + BNA_RX_MOD_F_ENET_STARTED = 1, + BNA_RX_MOD_F_ENET_LOOPBACK = 2, +}; + +enum bna_rxf_flags { + BNA_RXF_F_PAUSED = 1, +}; + +enum 
bna_rxf_event { + RXF_E_START = 1, + RXF_E_STOP = 2, + RXF_E_FAIL = 3, + RXF_E_CONFIG = 4, + RXF_E_PAUSE = 5, + RXF_E_RESUME = 6, + RXF_E_FW_RESP = 7, +}; + +enum bna_enet_type { + BNA_ENET_T_REGULAR = 0, + BNA_ENET_T_LOOPBACK_INTERNAL = 1, + BNA_ENET_T_LOOPBACK_EXTERNAL = 2, +}; + +enum bna_link_status { + BNA_LINK_DOWN = 0, + BNA_LINK_UP = 1, + BNA_CEE_UP = 2 +}; + +enum bna_ethport_flags { + BNA_ETHPORT_F_ADMIN_UP = 1, + BNA_ETHPORT_F_PORT_ENABLED = 2, + BNA_ETHPORT_F_RX_STARTED = 4, +}; + +enum bna_enet_flags { + BNA_ENET_F_IOCETH_READY = 1, + BNA_ENET_F_ENABLED = 2, + BNA_ENET_F_PAUSE_CHANGED = 4, + BNA_ENET_F_MTU_CHANGED = 8 +}; + +enum bna_rss_flags { + BNA_RSS_F_RIT_PENDING = 1, + BNA_RSS_F_CFG_PENDING = 2, + BNA_RSS_F_STATUS_PENDING = 4, +}; + +enum bna_mod_flags { + BNA_MOD_F_INIT_DONE = 1, +}; + +enum bna_pkt_rates { + BNA_PKT_RATE_10K = 10000, + BNA_PKT_RATE_20K = 20000, + BNA_PKT_RATE_30K = 30000, + BNA_PKT_RATE_40K = 40000, + BNA_PKT_RATE_50K = 50000, + BNA_PKT_RATE_60K = 60000, + BNA_PKT_RATE_70K = 70000, + BNA_PKT_RATE_80K = 80000, +}; + +enum bna_dim_load_types { + BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */ + BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */ + BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */ + BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */ + BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */ + BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */ + BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */ + BNA_LOAD_T_LOW_4 = 7, /* r < 10K */ + BNA_LOAD_T_MAX = 8 +}; + +enum bna_dim_bias_types { + BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */ + BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */ + BNA_BIAS_T_MAX = 2 +}; + +#define BNA_MAX_NAME_SIZE 64 +struct bna_ident { + int id; + char name[BNA_MAX_NAME_SIZE]; +}; + +struct bna_mac { + /* This should be the first one */ + struct list_head qe; + u8 addr[ETH_ALEN]; + struct bna_mcam_handle *handle; +}; + +struct bna_mem_descr { + u32 len; + void *kva; + struct bna_dma_addr dma; +}; + +struct bna_mem_info { + enum bna_mem_type mem_type; + u32 len; + u32 num; + u32 align_sz; /* 0/1 = no alignment */ + struct bna_mem_descr *mdl; + void *cookie; /* For bnad to unmap dma later */ +}; + +struct bna_intr_descr { + int vector; +}; + +struct bna_intr_info { + enum bna_intr_type intr_type; + int num; + struct bna_intr_descr *idl; +}; + +union bna_res_u { + struct bna_mem_info mem_info; + struct bna_intr_info intr_info; +}; + +struct bna_res_info { + enum bna_res_type res_type; + union bna_res_u res_u; +}; + +/* HW QPT */ +struct bna_qpt { + struct bna_dma_addr hw_qpt_ptr; + void *kv_qpt_ptr; + u32 page_count; + u32 page_size; +}; + +struct bna_attr { + bool fw_query_complete; + int num_txq; + int num_rxp; + int num_ucmac; + int num_mcmac; + int max_rit_size; +}; + +/** + * + * IOCEth + * + */ + +struct bna_ioceth { + bfa_fsm_t fsm; + struct bfa_ioc ioc; + + struct bna_attr attr; + struct bfa_msgq_cmd_entry msgq_cmd; + struct bfi_enet_attr_req attr_req; + + void (*stop_cbfn)(struct bnad *bnad); + struct bnad *stop_cbarg; + + struct bna *bna; +}; + +/** + * + * Enet + * + */ + +/* Pause configuration */ +struct bna_pause_config { + enum bna_status tx_pause; + enum bna_status rx_pause; +}; + +struct bna_enet { + bfa_fsm_t fsm; + enum bna_enet_flags flags; + + enum bna_enet_type type; + + struct bna_pause_config pause_config; + int mtu; + + /* Callback for bna_enet_disable(), enet_stop() */ + void (*stop_cbfn)(void *); + void *stop_cbarg; + + /* Callback for bna_enet_pause_config() */ + void (*pause_cbfn)(struct bnad *); + + /* Callback for bna_enet_mtu_set() */ 
+ void (*mtu_cbfn)(struct bnad *); + + struct bfa_wc chld_stop_wc; + + struct bfa_msgq_cmd_entry msgq_cmd; + struct bfi_enet_set_pause_req pause_req; + + struct bna *bna; +}; + +/** + * + * Ethport + * + */ + +struct bna_ethport { + bfa_fsm_t fsm; + enum bna_ethport_flags flags; + + enum bna_link_status link_status; + + int rx_started_count; + + void (*stop_cbfn)(struct bna_enet *); + + void (*adminup_cbfn)(struct bnad *, enum bna_cb_status); + + void (*link_cbfn)(struct bnad *, enum bna_link_status); + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_enable_req admin_req; + struct bfi_enet_diag_lb_req lpbk_req; + } bfi_enet_cmd; + + struct bna *bna; +}; + +/** + * + * Interrupt Block + * + */ + +/* Doorbell structure */ +struct bna_ib_dbell { + void *__iomem doorbell_addr; + u32 doorbell_ack; +}; + +/* IB structure */ +struct bna_ib { + struct bna_dma_addr ib_seg_host_addr; + void *ib_seg_host_addr_kva; + + struct bna_ib_dbell door_bell; + + enum bna_intr_type intr_type; + int intr_vector; + + u8 coalescing_timeo; /* Unit is 5usec. */ + + int interpkt_count; + int interpkt_timeo; +}; + +/** + * + * Tx object + * + */ + +/* Tx datapath control structure */ +#define BNA_Q_NAME_SIZE 16 +struct bna_tcb { + /* Fast path */ + void **sw_qpt; + void *unmap_q; + u32 producer_index; + u32 consumer_index; + volatile u32 *hw_consumer_index; + u32 q_depth; + void *__iomem q_dbell; + struct bna_ib_dbell *i_dbell; + int page_idx; + int page_count; + /* Control path */ + struct bna_txq *txq; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + enum bna_intr_type intr_type; + int intr_vector; + u8 priority; /* Current priority */ + unsigned long flags; /* Used by bnad as required */ + int id; + char name[BNA_Q_NAME_SIZE]; +}; + +/* TxQ QPT and configuration */ +struct bna_txq { + /* This should be the first one */ + struct list_head qe; + + u8 priority; + + struct bna_qpt qpt; + struct bna_tcb *tcb; + struct bna_ib ib; + + struct bna_tx *tx; + + int hw_id; + + u64 tx_packets; + u64 tx_bytes; +}; + +/* Tx object */ +struct bna_tx { + /* This should be the first one */ + struct list_head qe; + int rid; + int hw_id; + + bfa_fsm_t fsm; + enum bna_tx_flags flags; + + enum bna_tx_type type; + int num_txq; + + struct list_head txq_q; + u16 txf_vlan_id; + + /* Tx event handlers */ + void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *); + void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *); + void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *); + + /* callback for bna_tx_disable(), bna_tx_stop() */ + void (*stop_cbfn)(void *arg, struct bna_tx *tx); + void *stop_cbarg; + + /* callback for bna_tx_prio_set() */ + void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx); + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_tx_cfg_req cfg_req; + struct bfi_enet_req req; + struct bfi_enet_tx_cfg_rsp cfg_rsp; + } bfi_enet_cmd; + + struct bna *bna; + void *priv; /* bnad's cookie */ +}; + +/* Tx object configuration used during creation */ +struct bna_tx_config { + int num_txq; + int txq_depth; + int coalescing_timeo; + enum bna_tx_type tx_type; +}; + +struct bna_tx_event_cbfn { + /* Optional */ + void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *); + void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *); + /* Mandatory */ + void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *); + void 
(*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *); +}; + +/* Tx module - keeps track of free, active tx objects */ +struct bna_tx_mod { + struct bna_tx *tx; /* BFI_MAX_TXQ entries */ + struct bna_txq *txq; /* BFI_MAX_TXQ entries */ + + struct list_head tx_free_q; + struct list_head tx_active_q; + + struct list_head txq_free_q; + + /* callback for bna_tx_mod_stop() */ + void (*stop_cbfn)(struct bna_enet *enet); + + struct bfa_wc tx_stop_wc; + + enum bna_tx_mod_flags flags; + + u8 prio_map; + int default_prio; + int iscsi_over_cee; + int iscsi_prio; + int prio_reconfigured; + + u32 rid_mask; + + struct bna *bna; +}; + +/** + * + * Rx object + * + */ + +/* Rx datapath control structure */ +struct bna_rcb { + /* Fast path */ + void **sw_qpt; + void *unmap_q; + u32 producer_index; + u32 consumer_index; + u32 q_depth; + void *__iomem q_dbell; + int page_idx; + int page_count; + /* Control path */ + struct bna_rxq *rxq; + struct bna_ccb *ccb; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + unsigned long flags; + int id; +}; + +/* RxQ structure - QPT, configuration */ +struct bna_rxq { + struct list_head qe; + + int buffer_size; + int q_depth; + + struct bna_qpt qpt; + struct bna_rcb *rcb; + + struct bna_rxp *rxp; + struct bna_rx *rx; + + int hw_id; + + u64 rx_packets; + u64 rx_bytes; + u64 rx_packets_with_error; + u64 rxbuf_alloc_failed; +}; + +/* RxQ pair */ +union bna_rxq_u { + struct { + struct bna_rxq *hdr; + struct bna_rxq *data; + } hds; + struct { + struct bna_rxq *small; + struct bna_rxq *large; + } slr; + struct { + struct bna_rxq *only; + struct bna_rxq *reserved; + } single; +}; + +/* Packet rate for Dynamic Interrupt Moderation */ +struct bna_pkt_rate { + u32 small_pkt_cnt; + u32 large_pkt_cnt; +}; + +/* Completion control structure */ +struct bna_ccb { + /* Fast path */ + void **sw_qpt; + u32 producer_index; + volatile u32 *hw_producer_index; + u32 q_depth; + struct bna_ib_dbell *i_dbell; + struct bna_rcb *rcb[2]; + void *ctrl; /* For bnad */ + struct bna_pkt_rate pkt_rate; + int page_idx; + int page_count; + + /* Control path */ + struct bna_cq *cq; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + enum bna_intr_type intr_type; + int intr_vector; + u8 rx_coalescing_timeo; /* For NAPI */ + int id; + char name[BNA_Q_NAME_SIZE]; +}; + +/* CQ QPT, configuration */ +struct bna_cq { + struct bna_qpt qpt; + struct bna_ccb *ccb; + + struct bna_ib ib; + + struct bna_rx *rx; +}; + +struct bna_rss_config { + enum bfi_enet_rss_type hash_type; + u8 hash_mask; + u32 toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN]; +}; + +struct bna_hds_config { + enum bfi_enet_hds_type hdr_type; + int forced_offset; +}; + +/* Rx object configuration used during creation */ +struct bna_rx_config { + enum bna_rx_type rx_type; + int num_paths; + enum bna_rxp_type rxp_type; + int paused; + int q_depth; + int coalescing_timeo; + /* + * Small/Large (or Header/Data) buffer size to be configured + * for SLR and HDS queue type. Large buffer size comes from + * enet->mtu. 
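+ * The small (or header) buffer size is taken from small_buff_size for SLR and from hds_config.forced_offset for HDS; see bna_rx_create().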
+ */ + int small_buff_size; + + enum bna_status rss_status; + struct bna_rss_config rss_config; + + struct bna_hds_config hds_config; + + enum bna_status vlan_strip_status; +}; + +/* Rx Path structure - one per MSIX vector/CPU */ +struct bna_rxp { + /* This should be the first one */ + struct list_head qe; + + enum bna_rxp_type type; + union bna_rxq_u rxq; + struct bna_cq cq; + + struct bna_rx *rx; + + /* MSI-x vector number for configuring RSS */ + int vector; + int hw_id; +}; + +/* RxF structure (hardware Rx Function) */ +struct bna_rxf { + bfa_fsm_t fsm; + enum bna_rxf_flags flags; + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_enable_req req; + struct bfi_enet_rss_cfg_req rss_req; + struct bfi_enet_rit_req rit_req; + struct bfi_enet_rx_vlan_req vlan_req; + struct bfi_enet_mcast_add_req mcast_add_req; + struct bfi_enet_mcast_del_req mcast_del_req; + struct bfi_enet_ucast_req ucast_req; + } bfi_enet_cmd; + + /* callback for bna_rxf_start() */ + void (*start_cbfn) (struct bna_rx *rx); + struct bna_rx *start_cbarg; + + /* callback for bna_rxf_stop() */ + void (*stop_cbfn) (struct bna_rx *rx); + struct bna_rx *stop_cbarg; + + /* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */ + void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx); + struct bnad *oper_state_cbarg; + + /** + * callback for: + * bna_rxf_ucast_set() + * bna_rxf_{ucast/mcast}_add(), + * bna_rxf_{ucast/mcast}_del(), + * bna_rxf_mode_set() + */ + void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx); + struct bnad *cam_fltr_cbarg; + + /* List of unicast addresses yet to be applied to h/w */ + struct list_head ucast_pending_add_q; + struct list_head ucast_pending_del_q; + struct bna_mac *ucast_pending_mac; + int ucast_pending_set; + /* ucast addresses applied to the h/w */ + struct list_head ucast_active_q; + struct bna_mac ucast_active_mac; + int ucast_active_set; + + /* List of multicast addresses yet to be applied to h/w */ + struct list_head mcast_pending_add_q; + struct list_head mcast_pending_del_q; + /* multicast addresses applied to the h/w */ + struct list_head mcast_active_q; + struct list_head mcast_handle_q; + + /* Rx modes yet to be applied to h/w */ + enum bna_rxmode rxmode_pending; + enum bna_rxmode rxmode_pending_bitmask; + /* Rx modes applied to h/w */ + enum bna_rxmode rxmode_active; + + u8 vlan_pending_bitmask; + enum bna_status vlan_filter_status; + u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32]; + bool vlan_strip_pending; + enum bna_status vlan_strip_status; + + enum bna_rss_flags rss_pending; + enum bna_status rss_status; + struct bna_rss_config rss_cfg; + u8 *rit; + int rit_size; + + struct bna_rx *rx; +}; + +/* Rx object */ +struct bna_rx { + /* This should be the first one */ + struct list_head qe; + int rid; + int hw_id; + + bfa_fsm_t fsm; + + enum bna_rx_type type; + + int num_paths; + struct list_head rxp_q; + + struct bna_hds_config hds_cfg; + + struct bna_rxf rxf; + + enum bna_rx_flags rx_flags; + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_rx_cfg_req cfg_req; + struct bfi_enet_req req; + struct bfi_enet_rx_cfg_rsp cfg_rsp; + } bfi_enet_cmd; + + /* Rx event handlers */ + void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *); + void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *); + void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *); + void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *); + void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *); + void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *); + 
void (*rx_post_cbfn)(struct bnad *, struct bna_rx *); + + /* callback for bna_rx_disable(), bna_rx_stop() */ + void (*stop_cbfn)(void *arg, struct bna_rx *rx); + void *stop_cbarg; + + struct bna *bna; + void *priv; /* bnad's cookie */ +}; + +struct bna_rx_event_cbfn { + /* Optional */ + void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *); + void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *); + void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *); + void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *); + void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *); + /* Mandatory */ + void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *); + void (*rx_post_cbfn)(struct bnad *, struct bna_rx *); +}; + +/* Rx module - keeps track of free, active rx objects */ +struct bna_rx_mod { + struct bna *bna; /* back pointer to parent */ + struct bna_rx *rx; /* BFI_MAX_RXQ entries */ + struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */ + struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */ + + struct list_head rx_free_q; + struct list_head rx_active_q; + int rx_free_count; + + struct list_head rxp_free_q; + int rxp_free_count; + + struct list_head rxq_free_q; + int rxq_free_count; + + enum bna_rx_mod_flags flags; + + /* callback for bna_rx_mod_stop() */ + void (*stop_cbfn)(struct bna_enet *enet); + + struct bfa_wc rx_stop_wc; + u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX]; + u32 rid_mask; +}; + +/** + * + * CAM + * + */ + +struct bna_ucam_mod { + struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */ + struct list_head free_q; + + struct bna *bna; +}; + +struct bna_mcam_handle { + /* This should be the first one */ + struct list_head qe; + int handle; + int refcnt; +}; + +struct bna_mcam_mod { + struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */ + struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */ + struct list_head free_q; + struct list_head free_handle_q; + + struct bna *bna; +}; + +/** + * + * Statistics + * + */ + +struct bna_stats { + struct bna_dma_addr hw_stats_dma; + struct bfi_enet_stats *hw_stats_kva; + struct bfi_enet_stats hw_stats; +}; + +struct bna_stats_mod { + bool ioc_ready; + bool stats_get_busy; + bool stats_clr_busy; + struct bfa_msgq_cmd_entry stats_get_cmd; + struct bfa_msgq_cmd_entry stats_clr_cmd; + struct bfi_enet_stats_req stats_get; + struct bfi_enet_stats_req stats_clr; +}; + +/** + * + * BNA + * + */ + +struct bna { + struct bna_ident ident; + struct bfa_pcidev pcidev; + + struct bna_reg regs; + struct bna_bit_defn bits; + + struct bna_stats stats; + + struct bna_ioceth ioceth; + struct bfa_cee cee; + struct bfa_msgq msgq; + + struct bna_ethport ethport; + struct bna_enet enet; + struct bna_stats_mod stats_mod; + + struct bna_tx_mod tx_mod; + struct bna_rx_mod rx_mod; + struct bna_ucam_mod ucam_mod; + struct bna_mcam_mod mcam_mod; + + enum bna_mod_flags mod_flags; + + int default_mode_rid; + int promisc_rid; + + struct bnad *bnad; +}; +#endif /* __BNA_TYPES_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c new file mode 100644 index 000000000000..5d7872ecff52 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -0,0 +1,3510 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ +#include <linux/bitops.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/etherdevice.h> +#include <linux/in.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/if_ether.h> +#include <linux/ip.h> +#include <linux/prefetch.h> + +#include "bnad.h" +#include "bna.h" +#include "cna.h" + +static DEFINE_MUTEX(bnad_fwimg_mutex); + +/* + * Module params + */ +static uint bnad_msix_disable; +module_param(bnad_msix_disable, uint, 0444); +MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode"); + +static uint bnad_ioc_auto_recover = 1; +module_param(bnad_ioc_auto_recover, uint, 0444); +MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery"); + +/* + * Global variables + */ +u32 bnad_rxqs_per_cq = 2; + +static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +/* + * Local MACROS + */ +#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2) + +#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth) + +#define BNAD_GET_MBOX_IRQ(_bnad) \ + (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \ + ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \ + ((_bnad)->pcidev->irq)) + +#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \ +do { \ + (_res_info)->res_type = BNA_RES_T_MEM; \ + (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \ + (_res_info)->res_u.mem_info.num = (_num); \ + (_res_info)->res_u.mem_info.len = \ + sizeof(struct bnad_unmap_q) + \ + (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ +} while (0) + +#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */ + +/* + * Reinitialize completions in CQ, once Rx is taken down + */ +static void +bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bna_cq_entry *cmpl, *next_cmpl; + unsigned int wi_range, wis = 0, ccb_prod = 0; + int i; + + BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl, + wi_range); + + for (i = 0; i < ccb->q_depth; i++) { + wis++; + if (likely(--wi_range)) + next_cmpl = cmpl + 1; + else { + BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth); + wis = 0; + BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, + next_cmpl, wi_range); + } + cmpl->valid = 0; + cmpl = next_cmpl; + } +} + +static u32 +bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array, + u32 index, u32 depth, struct sk_buff *skb, u32 frag) +{ + int j; + array[index].skb = NULL; + + dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr), + skb_headlen(skb), DMA_TO_DEVICE); + dma_unmap_addr_set(&array[index], dma_addr, 0); + BNA_QE_INDX_ADD(index, 1, depth); + + for (j = 0; j < frag; j++) { + dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), + skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE); + dma_unmap_addr_set(&array[index], dma_addr, 0); + BNA_QE_INDX_ADD(index, 1, depth); + } + + return index; +} + +/* + * Frees all pending Tx Bufs + * At this point no activity is expected on the Q, + * so DMA unmap & freeing is fine. 
+ */ +static void +bnad_free_all_txbufs(struct bnad *bnad, + struct bna_tcb *tcb) +{ + u32 unmap_cons; + struct bnad_unmap_q *unmap_q = tcb->unmap_q; + struct bnad_skb_unmap *unmap_array; + struct sk_buff *skb = NULL; + int q; + + unmap_array = unmap_q->unmap_array; + + for (q = 0; q < unmap_q->q_depth; q++) { + skb = unmap_array[q].skb; + if (!skb) + continue; + + unmap_cons = q; + unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array, + unmap_cons, unmap_q->q_depth, skb, + skb_shinfo(skb)->nr_frags); + + dev_kfree_skb_any(skb); + } +} + +/* Data Path Handlers */ + +/* + * bnad_free_txbufs : Frees the Tx bufs on Tx completion + * Can be called in a) Interrupt context + * b) Sending context + * c) Tasklet context + */ +static u32 +bnad_free_txbufs(struct bnad *bnad, + struct bna_tcb *tcb) +{ + u32 unmap_cons, sent_packets = 0, sent_bytes = 0; + u16 wis, updated_hw_cons; + struct bnad_unmap_q *unmap_q = tcb->unmap_q; + struct bnad_skb_unmap *unmap_array; + struct sk_buff *skb; + + /* + * Just return if TX is stopped. This check is useful + * when bnad_free_txbufs() runs out of a tasklet scheduled + * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit + * but this routine runs actually after the cleanup has been + * executed. + */ + if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + return 0; + + updated_hw_cons = *(tcb->hw_consumer_index); + + wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index, + updated_hw_cons, tcb->q_depth); + + BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); + + unmap_array = unmap_q->unmap_array; + unmap_cons = unmap_q->consumer_index; + + prefetch(&unmap_array[unmap_cons + 1]); + while (wis) { + skb = unmap_array[unmap_cons].skb; + + sent_packets++; + sent_bytes += skb->len; + wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags); + + unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array, + unmap_cons, unmap_q->q_depth, skb, + skb_shinfo(skb)->nr_frags); + + dev_kfree_skb_any(skb); + } + + /* Update consumer pointers. */ + tcb->consumer_index = updated_hw_cons; + unmap_q->consumer_index = unmap_cons; + + tcb->txq->tx_packets += sent_packets; + tcb->txq->tx_bytes += sent_bytes; + + return sent_packets; +} + +/* Tx Free Tasklet function */ +/* Frees for all the tcb's in all the Tx's */ +/* + * Scheduled from sending context, so that + * the fat Tx lock is not held for too long + * in the sending context. + */ +static void +bnad_tx_free_tasklet(unsigned long bnad_ptr) +{ + struct bnad *bnad = (struct bnad *)bnad_ptr; + struct bna_tcb *tcb; + u32 acked = 0; + int i, j; + + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) { + tcb = bnad->tx_info[i].tcb[j]; + if (!tcb) + continue; + if (((u16) (*tcb->hw_consumer_index) != + tcb->consumer_index) && + (!test_and_set_bit(BNAD_TXQ_FREE_SENT, + &tcb->flags))) { + acked = bnad_free_txbufs(bnad, tcb); + if (likely(test_bit(BNAD_TXQ_TX_STARTED, + &tcb->flags))) + bna_ib_ack(tcb->i_dbell, acked); + smp_mb__before_clear_bit(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + } + if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, + &tcb->flags))) + continue; + if (netif_queue_stopped(bnad->netdev)) { + if (acked && netif_carrier_ok(bnad->netdev) && + BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= + BNAD_NETIF_WAKE_THRESHOLD) { + netif_wake_queue(bnad->netdev); + /* TODO */ + /* Counters for individual TxQs? 
*/ + BNAD_UPDATE_CTR(bnad, + netif_queue_wakeup); + } + } + } + } +} + +static u32 +bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct net_device *netdev = bnad->netdev; + u32 sent = 0; + + if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) + return 0; + + sent = bnad_free_txbufs(bnad, tcb); + if (sent) { + if (netif_queue_stopped(netdev) && + netif_carrier_ok(netdev) && + BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= + BNAD_NETIF_WAKE_THRESHOLD) { + if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { + netif_wake_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + } + + if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + bna_ib_ack(tcb->i_dbell, sent); + + smp_mb__before_clear_bit(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + + return sent; +} + +/* MSIX Tx Completion Handler */ +static irqreturn_t +bnad_msix_tx(int irq, void *data) +{ + struct bna_tcb *tcb = (struct bna_tcb *)data; + struct bnad *bnad = tcb->bnad; + + bnad_tx(bnad, tcb); + + return IRQ_HANDLED; +} + +static void +bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_unmap_q *unmap_q = rcb->unmap_q; + + rcb->producer_index = 0; + rcb->consumer_index = 0; + + unmap_q->producer_index = 0; + unmap_q->consumer_index = 0; +} + +static void +bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_unmap_q *unmap_q; + struct bnad_skb_unmap *unmap_array; + struct sk_buff *skb; + int unmap_cons; + + unmap_q = rcb->unmap_q; + unmap_array = unmap_q->unmap_array; + for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { + skb = unmap_array[unmap_cons].skb; + if (!skb) + continue; + unmap_array[unmap_cons].skb = NULL; + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap_array[unmap_cons], + dma_addr), + rcb->rxq->buffer_size, + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + } + bnad_reset_rcb(bnad, rcb); +} + +static void +bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) +{ + u16 to_alloc, alloced, unmap_prod, wi_range; + struct bnad_unmap_q *unmap_q = rcb->unmap_q; + struct bnad_skb_unmap *unmap_array; + struct bna_rxq_entry *rxent; + struct sk_buff *skb; + dma_addr_t dma_addr; + + alloced = 0; + to_alloc = + BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth); + + unmap_array = unmap_q->unmap_array; + unmap_prod = unmap_q->producer_index; + + BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range); + + while (to_alloc--) { + if (!wi_range) + BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, + wi_range); + skb = netdev_alloc_skb_ip_align(bnad->netdev, + rcb->rxq->buffer_size); + if (unlikely(!skb)) { + BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); + rcb->rxq->rxbuf_alloc_failed++; + goto finishing; + } + unmap_array[unmap_prod].skb = skb; + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + rcb->rxq->buffer_size, + DMA_FROM_DEVICE); + dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, + dma_addr); + BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); + BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); + + rxent++; + wi_range--; + alloced++; + } + +finishing: + if (likely(alloced)) { + unmap_q->producer_index = unmap_prod; + rcb->producer_index = unmap_prod; + smp_mb(); + if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) + bna_rxq_prod_indx_doorbell(rcb); + } +} + +static inline void +bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_unmap_q *unmap_q = rcb->unmap_q; + + if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { + if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) + >> 
BNAD_RXQ_REFILL_THRESHOLD_SHIFT) + bnad_alloc_n_post_rxbufs(bnad, rcb); + smp_mb__before_clear_bit(); + clear_bit(BNAD_RXQ_REFILL, &rcb->flags); + } +} + +static u32 +bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) +{ + struct bna_cq_entry *cmpl, *next_cmpl; + struct bna_rcb *rcb = NULL; + unsigned int wi_range, packets = 0, wis = 0; + struct bnad_unmap_q *unmap_q; + struct bnad_skb_unmap *unmap_array; + struct sk_buff *skb; + u32 flags, unmap_cons; + struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; + struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); + + set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); + + if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) { + clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); + return 0; + } + + prefetch(bnad->netdev); + BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, + wi_range); + BUG_ON(!(wi_range <= ccb->q_depth)); + while (cmpl->valid && packets < budget) { + packets++; + BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); + + if (bna_is_small_rxq(cmpl->rxq_id)) + rcb = ccb->rcb[1]; + else + rcb = ccb->rcb[0]; + + unmap_q = rcb->unmap_q; + unmap_array = unmap_q->unmap_array; + unmap_cons = unmap_q->consumer_index; + + skb = unmap_array[unmap_cons].skb; + BUG_ON(!(skb)); + unmap_array[unmap_cons].skb = NULL; + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap_array[unmap_cons], + dma_addr), + rcb->rxq->buffer_size, + DMA_FROM_DEVICE); + BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); + + /* Should be more efficient ? Performance ? */ + BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth); + + wis++; + if (likely(--wi_range)) + next_cmpl = cmpl + 1; + else { + BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); + wis = 0; + BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, + next_cmpl, wi_range); + BUG_ON(!(wi_range <= ccb->q_depth)); + } + prefetch(next_cmpl); + + flags = ntohl(cmpl->flags); + if (unlikely + (flags & + (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR | + BNA_CQ_EF_TOO_LONG))) { + dev_kfree_skb_any(skb); + rcb->rxq->rx_packets_with_error++; + goto next; + } + + skb_put(skb, ntohs(cmpl->length)); + if (likely + ((bnad->netdev->features & NETIF_F_RXCSUM) && + (((flags & BNA_CQ_EF_IPV4) && + (flags & BNA_CQ_EF_L3_CKSUM_OK)) || + (flags & BNA_CQ_EF_IPV6)) && + (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) && + (flags & BNA_CQ_EF_L4_CKSUM_OK))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + rcb->rxq->rx_packets++; + rcb->rxq->rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, bnad->netdev); + + if (flags & BNA_CQ_EF_VLAN) + __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); + + if (skb->ip_summed == CHECKSUM_UNNECESSARY) + napi_gro_receive(&rx_ctrl->napi, skb); + else { + netif_receive_skb(skb); + } + +next: + cmpl->valid = 0; + cmpl = next_cmpl; + } + + BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); + + if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) + bna_ib_ack_disable_irq(ccb->i_dbell, packets); + + bnad_refill_rxq(bnad, ccb->rcb[0]); + if (ccb->rcb[1]) + bnad_refill_rxq(bnad, ccb->rcb[1]); + + clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); + + return packets; +} + +static void +bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); + struct napi_struct *napi = &rx_ctrl->napi; + + if (likely(napi_schedule_prep(napi))) { + __napi_schedule(napi); + rx_ctrl->rx_schedule++; + } +} + +/* MSIX Rx Path Handler */ +static 
irqreturn_t +bnad_msix_rx(int irq, void *data) +{ + struct bna_ccb *ccb = (struct bna_ccb *)data; + + if (ccb) { + ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++; + bnad_netif_rx_schedule_poll(ccb->bnad, ccb); + } + + return IRQ_HANDLED; +} + +/* Interrupt handlers */ + +/* Mbox Interrupt Handlers */ +static irqreturn_t +bnad_msix_mbox_handler(int irq, void *data) +{ + u32 intr_status; + unsigned long flags; + struct bnad *bnad = (struct bnad *)data; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_HANDLED; + } + + bna_intr_status_get(&bnad->bna, intr_status); + + if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) + bna_mbox_handler(&bnad->bna, intr_status); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t +bnad_isr(int irq, void *data) +{ + int i, j; + u32 intr_status; + unsigned long flags; + struct bnad *bnad = (struct bnad *)data; + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + struct bna_tcb *tcb = NULL; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_NONE; + } + + bna_intr_status_get(&bnad->bna, intr_status); + + if (unlikely(!intr_status)) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_NONE; + } + + if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) + bna_mbox_handler(&bnad->bna, intr_status); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (!BNA_IS_INTX_DATA_INTR(intr_status)) + return IRQ_HANDLED; + + /* Process data interrupts */ + /* Tx processing */ + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) { + tcb = bnad->tx_info[i].tcb[j]; + if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + bnad_tx(bnad, bnad->tx_info[i].tcb[j]); + } + } + /* Rx processing */ + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (rx_ctrl->ccb) + bnad_netif_rx_schedule_poll(bnad, + rx_ctrl->ccb); + } + } + return IRQ_HANDLED; +} + +/* + * Called in interrupt / callback context + * with bna_lock held, so cfg_flags access is OK + */ +static void +bnad_enable_mbox_irq(struct bnad *bnad) +{ + clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); +} + +/* + * Called with bnad->bna_lock held b'cos of + * bnad->cfg_flags access. 
+ */ +static void +bnad_disable_mbox_irq(struct bnad *bnad) +{ + set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); +} + +static void +bnad_set_netdev_perm_addr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + + memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); + if (is_zero_ether_addr(netdev->dev_addr)) + memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); +} + +/* Control Path Handlers */ + +/* Callbacks */ +void +bnad_cb_mbox_intr_enable(struct bnad *bnad) +{ + bnad_enable_mbox_irq(bnad); +} + +void +bnad_cb_mbox_intr_disable(struct bnad *bnad) +{ + bnad_disable_mbox_irq(bnad); +} + +void +bnad_cb_ioceth_ready(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.ioc_comp); +} + +void +bnad_cb_ioceth_failed(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; + complete(&bnad->bnad_completions.ioc_comp); +} + +void +bnad_cb_ioceth_disabled(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.ioc_comp); +} + +static void +bnad_cb_enet_disabled(void *arg) +{ + struct bnad *bnad = (struct bnad *)arg; + + netif_carrier_off(bnad->netdev); + complete(&bnad->bnad_completions.enet_comp); +} + +void +bnad_cb_ethport_link_status(struct bnad *bnad, + enum bna_link_status link_status) +{ + bool link_up = 0; + + link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); + + if (link_status == BNA_CEE_UP) { + if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) + BNAD_UPDATE_CTR(bnad, cee_toggle); + set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); + } else { + if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) + BNAD_UPDATE_CTR(bnad, cee_toggle); + clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); + } + + if (link_up) { + if (!netif_carrier_ok(bnad->netdev)) { + uint tx_id, tcb_id; + printk(KERN_WARNING "bna: %s link up\n", + bnad->netdev->name); + netif_carrier_on(bnad->netdev); + BNAD_UPDATE_CTR(bnad, link_toggle); + for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { + for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; + tcb_id++) { + struct bna_tcb *tcb = + bnad->tx_info[tx_id].tcb[tcb_id]; + u32 txq_id; + if (!tcb) + continue; + + txq_id = tcb->id; + + if (test_bit(BNAD_TXQ_TX_STARTED, + &tcb->flags)) { + /* + * Force an immediate + * Transmit Schedule */ + printk(KERN_INFO "bna: %s %d " + "TXQ_STARTED\n", + bnad->netdev->name, + txq_id); + netif_wake_subqueue( + bnad->netdev, + txq_id); + BNAD_UPDATE_CTR(bnad, + netif_queue_wakeup); + } else { + netif_stop_subqueue( + bnad->netdev, + txq_id); + BNAD_UPDATE_CTR(bnad, + netif_queue_stop); + } + } + } + } + } else { + if (netif_carrier_ok(bnad->netdev)) { + printk(KERN_WARNING "bna: %s link down\n", + bnad->netdev->name); + netif_carrier_off(bnad->netdev); + BNAD_UPDATE_CTR(bnad, link_toggle); + } + } +} + +static void +bnad_cb_tx_disabled(void *arg, struct bna_tx *tx) +{ + struct bnad *bnad = (struct bnad *)arg; + + complete(&bnad->bnad_completions.tx_comp); +} + +static void +bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct bnad_tx_info *tx_info = + (struct bnad_tx_info *)tcb->txq->tx->priv; + struct bnad_unmap_q *unmap_q = tcb->unmap_q; + + tx_info->tcb[tcb->id] = tcb; + unmap_q->producer_index = 0; + unmap_q->consumer_index = 0; + unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH; +} + +static void +bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct bnad_tx_info 
*tx_info = + (struct bnad_tx_info *)tcb->txq->tx->priv; + struct bnad_unmap_q *unmap_q = tcb->unmap_q; + + while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) + cpu_relax(); + + bnad_free_all_txbufs(bnad, tcb); + + unmap_q->producer_index = 0; + unmap_q->consumer_index = 0; + + smp_mb__before_clear_bit(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + + tx_info->tcb[tcb->id] = NULL; +} + +static void +bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_unmap_q *unmap_q = rcb->unmap_q; + + unmap_q->producer_index = 0; + unmap_q->consumer_index = 0; + unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH; +} + +static void +bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb) +{ + bnad_free_all_rxbufs(bnad, rcb); +} + +static void +bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_info *rx_info = + (struct bnad_rx_info *)ccb->cq->rx->priv; + + rx_info->rx_ctrl[ccb->id].ccb = ccb; + ccb->ctrl = &rx_info->rx_ctrl[ccb->id]; +} + +static void +bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_info *rx_info = + (struct bnad_rx_info *)ccb->cq->rx->priv; + + rx_info->rx_ctrl[ccb->id].ccb = NULL; +} + +static void +bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = + (struct bnad_tx_info *)tx->priv; + struct bna_tcb *tcb; + u32 txq_id; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + txq_id = tcb->id; + clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); + netif_stop_subqueue(bnad->netdev, txq_id); + printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n", + bnad->netdev->name, txq_id); + } +} + +static void +bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; + struct bna_tcb *tcb; + struct bnad_unmap_q *unmap_q; + u32 txq_id; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + txq_id = tcb->id; + + unmap_q = tcb->unmap_q; + + if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + continue; + + while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) + cpu_relax(); + + bnad_free_all_txbufs(bnad, tcb); + + unmap_q->producer_index = 0; + unmap_q->consumer_index = 0; + + smp_mb__before_clear_bit(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + + set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); + + if (netif_carrier_ok(bnad->netdev)) { + printk(KERN_INFO "bna: %s %d TXQ_STARTED\n", + bnad->netdev->name, txq_id); + netif_wake_subqueue(bnad->netdev, txq_id); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + + /* + * Workaround for first ioceth enable failure & we + * get a 0 MAC address. We try to get the MAC address + * again here. 
+ */ + if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) { + bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr); + bnad_set_netdev_perm_addr(bnad); + } +} + +static void +bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; + struct bna_tcb *tcb; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + } + + mdelay(BNAD_TXRX_SYNC_MDELAY); + bna_tx_cleanup_complete(tx); +} + +static void +bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bna_ccb *ccb; + struct bnad_rx_ctrl *rx_ctrl; + int i; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags); + + if (ccb->rcb[1]) + clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags); + } +} + +static void +bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bna_ccb *ccb; + struct bnad_rx_ctrl *rx_ctrl; + int i; + + mdelay(BNAD_TXRX_SYNC_MDELAY); + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); + + if (ccb->rcb[1]) + clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); + + while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags)) + cpu_relax(); + } + + bna_rx_cleanup_complete(rx); +} + +static void +bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bna_ccb *ccb; + struct bna_rcb *rcb; + struct bnad_rx_ctrl *rx_ctrl; + struct bnad_unmap_q *unmap_q; + int i; + int j; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + bnad_cq_cmpl_init(bnad, ccb); + + for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { + rcb = ccb->rcb[j]; + if (!rcb) + continue; + bnad_free_all_rxbufs(bnad, rcb); + + set_bit(BNAD_RXQ_STARTED, &rcb->flags); + set_bit(BNAD_RXQ_POST_OK, &rcb->flags); + unmap_q = rcb->unmap_q; + + /* Now allocate & post buffers for this RCB */ + /* !!Allocation in callback context */ + if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { + if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) + >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) + bnad_alloc_n_post_rxbufs(bnad, rcb); + smp_mb__before_clear_bit(); + clear_bit(BNAD_RXQ_REFILL, &rcb->flags); + } + } + } +} + +static void +bnad_cb_rx_disabled(void *arg, struct bna_rx *rx) +{ + struct bnad *bnad = (struct bnad *)arg; + + complete(&bnad->bnad_completions.rx_comp); +} + +static void +bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx) +{ + bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.mcast_comp); +} + +void +bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, + struct bna_stats *stats) +{ + if (status == BNA_CB_SUCCESS) + BNAD_UPDATE_CTR(bnad, hw_stats_updates); + + if (!netif_running(bnad->netdev) || + !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + return; + + mod_timer(&bnad->stats_timer, + jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); +} + +static void +bnad_cb_enet_mtu_set(struct bnad *bnad) +{ + bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.mtu_comp); +} + +/* Resource allocation, free functions */ 
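The allocators that follow fill an array of bna_mem_descr entries: bnad_mem_alloc() uses dma_alloc_coherent() for BNA_MEM_T_DMA regions and kzalloc() otherwise, and on any failure it unwinds through bnad_mem_free(), releasing every block allocated so far. A minimal userspace sketch of that allocate-or-roll-back pattern is shown below; the names blk_descr, blk_array_alloc and blk_array_free are invented for illustration and are not part of the driver.

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for the driver's per-block kva/len bookkeeping. */
struct blk_descr {
	void   *kva;	/* virtual address of the block */
	size_t  len;	/* length of the block in bytes */
};

/*
 * Allocate 'num' blocks of 'len' bytes each and record them in a freshly
 * allocated descriptor array; on any failure, free everything allocated
 * so far and return NULL.
 */
static struct blk_descr *blk_array_alloc(size_t num, size_t len)
{
	struct blk_descr *mdl = calloc(num, sizeof(*mdl));
	size_t i;

	if (!mdl)
		return NULL;

	for (i = 0; i < num; i++) {
		mdl[i].len = len;
		mdl[i].kva = calloc(1, len);
		if (!mdl[i].kva)
			goto err_return;
	}
	return mdl;

err_return:
	while (i--)		/* roll back blocks 0 .. i-1 */
		free(mdl[i].kva);
	free(mdl);
	return NULL;
}

static void blk_array_free(struct blk_descr *mdl, size_t num)
{
	size_t i;

	if (!mdl)
		return;
	for (i = 0; i < num; i++)
		free(mdl[i].kva);
	free(mdl);
}

int main(void)
{
	struct blk_descr *mdl = blk_array_alloc(8, 256);

	printf("allocation %s\n", mdl ? "succeeded" : "failed");
	blk_array_free(mdl, 8);
	return 0;
}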
+ +static void +bnad_mem_free(struct bnad *bnad, + struct bna_mem_info *mem_info) +{ + int i; + dma_addr_t dma_pa; + + if (mem_info->mdl == NULL) + return; + + for (i = 0; i < mem_info->num; i++) { + if (mem_info->mdl[i].kva != NULL) { + if (mem_info->mem_type == BNA_MEM_T_DMA) { + BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), + dma_pa); + dma_free_coherent(&bnad->pcidev->dev, + mem_info->mdl[i].len, + mem_info->mdl[i].kva, dma_pa); + } else + kfree(mem_info->mdl[i].kva); + } + } + kfree(mem_info->mdl); + mem_info->mdl = NULL; +} + +static int +bnad_mem_alloc(struct bnad *bnad, + struct bna_mem_info *mem_info) +{ + int i; + dma_addr_t dma_pa; + + if ((mem_info->num == 0) || (mem_info->len == 0)) { + mem_info->mdl = NULL; + return 0; + } + + mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr), + GFP_KERNEL); + if (mem_info->mdl == NULL) + return -ENOMEM; + + if (mem_info->mem_type == BNA_MEM_T_DMA) { + for (i = 0; i < mem_info->num; i++) { + mem_info->mdl[i].len = mem_info->len; + mem_info->mdl[i].kva = + dma_alloc_coherent(&bnad->pcidev->dev, + mem_info->len, &dma_pa, + GFP_KERNEL); + + if (mem_info->mdl[i].kva == NULL) + goto err_return; + + BNA_SET_DMA_ADDR(dma_pa, + &(mem_info->mdl[i].dma)); + } + } else { + for (i = 0; i < mem_info->num; i++) { + mem_info->mdl[i].len = mem_info->len; + mem_info->mdl[i].kva = kzalloc(mem_info->len, + GFP_KERNEL); + if (mem_info->mdl[i].kva == NULL) + goto err_return; + } + } + + return 0; + +err_return: + bnad_mem_free(bnad, mem_info); + return -ENOMEM; +} + +/* Free IRQ for Mailbox */ +static void +bnad_mbox_irq_free(struct bnad *bnad) +{ + int irq; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_disable_mbox_irq(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + irq = BNAD_GET_MBOX_IRQ(bnad); + free_irq(irq, bnad); +} + +/* + * Allocates IRQ for Mailbox, but keep it disabled + * This will be enabled once we get the mbox enable callback + * from bna + */ +static int +bnad_mbox_irq_alloc(struct bnad *bnad) +{ + int err = 0; + unsigned long irq_flags, flags; + u32 irq; + irq_handler_t irq_handler; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_MSIX) { + irq_handler = (irq_handler_t)bnad_msix_mbox_handler; + irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; + irq_flags = 0; + } else { + irq_handler = (irq_handler_t)bnad_isr; + irq = bnad->pcidev->irq; + irq_flags = IRQF_SHARED; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); + + /* + * Set the Mbox IRQ disable flag, so that the IRQ handler + * called from request_irq() for SHARED IRQs do not execute + */ + set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); + + err = request_irq(irq, irq_handler, irq_flags, + bnad->mbox_irq_name, bnad); + + return err; +} + +static void +bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info) +{ + kfree(intr_info->idl); + intr_info->idl = NULL; +} + +/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */ +static int +bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, + u32 txrx_id, struct bna_intr_info *intr_info) +{ + int i, vector_start = 0; + u32 cfg_flags; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + cfg_flags = bnad->cfg_flags; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (cfg_flags & BNAD_CF_MSIX) { + intr_info->intr_type = BNA_INTR_T_MSIX; + intr_info->idl = kcalloc(intr_info->num, + 
sizeof(struct bna_intr_descr), + GFP_KERNEL); + if (!intr_info->idl) + return -ENOMEM; + + switch (src) { + case BNAD_INTR_TX: + vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id; + break; + + case BNAD_INTR_RX: + vector_start = BNAD_MAILBOX_MSIX_VECTORS + + (bnad->num_tx * bnad->num_txq_per_tx) + + txrx_id; + break; + + default: + BUG(); + } + + for (i = 0; i < intr_info->num; i++) + intr_info->idl[i].vector = vector_start + i; + } else { + intr_info->intr_type = BNA_INTR_T_INTX; + intr_info->num = 1; + intr_info->idl = kcalloc(intr_info->num, + sizeof(struct bna_intr_descr), + GFP_KERNEL); + if (!intr_info->idl) + return -ENOMEM; + + switch (src) { + case BNAD_INTR_TX: + intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK; + break; + + case BNAD_INTR_RX: + intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK; + break; + } + } + return 0; +} + +/** + * NOTE: Should be called for MSIX only + * Unregisters Tx MSIX vector(s) from the kernel + */ +static void +bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, + int num_txqs) +{ + int i; + int vector_num; + + for (i = 0; i < num_txqs; i++) { + if (tx_info->tcb[i] == NULL) + continue; + + vector_num = tx_info->tcb[i]->intr_vector; + free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]); + } +} + +/** + * NOTE: Should be called for MSIX only + * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel + */ +static int +bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, + u32 tx_id, int num_txqs) +{ + int i; + int err; + int vector_num; + + for (i = 0; i < num_txqs; i++) { + vector_num = tx_info->tcb[i]->intr_vector; + sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, + tx_id + tx_info->tcb[i]->id); + err = request_irq(bnad->msix_table[vector_num].vector, + (irq_handler_t)bnad_msix_tx, 0, + tx_info->tcb[i]->name, + tx_info->tcb[i]); + if (err) + goto err_return; + } + + return 0; + +err_return: + if (i > 0) + bnad_tx_msix_unregister(bnad, tx_info, (i - 1)); + return -1; +} + +/** + * NOTE: Should be called for MSIX only + * Unregisters Rx MSIX vector(s) from the kernel + */ +static void +bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, + int num_rxps) +{ + int i; + int vector_num; + + for (i = 0; i < num_rxps; i++) { + if (rx_info->rx_ctrl[i].ccb == NULL) + continue; + + vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; + free_irq(bnad->msix_table[vector_num].vector, + rx_info->rx_ctrl[i].ccb); + } +} + +/** + * NOTE: Should be called for MSIX only + * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel + */ +static int +bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, + u32 rx_id, int num_rxps) +{ + int i; + int err; + int vector_num; + + for (i = 0; i < num_rxps; i++) { + vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; + sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", + bnad->netdev->name, + rx_id + rx_info->rx_ctrl[i].ccb->id); + err = request_irq(bnad->msix_table[vector_num].vector, + (irq_handler_t)bnad_msix_rx, 0, + rx_info->rx_ctrl[i].ccb->name, + rx_info->rx_ctrl[i].ccb); + if (err) + goto err_return; + } + + return 0; + +err_return: + if (i > 0) + bnad_rx_msix_unregister(bnad, rx_info, (i - 1)); + return -1; +} + +/* Free Tx object Resources */ +static void +bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info) +{ + int i; + + for (i = 0; i < BNA_TX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); + else if (res_info[i].res_type 
== BNA_RES_T_INTR) + bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); + } +} + +/* Allocates memory and interrupt resources for Tx object */ +static int +bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + u32 tx_id) +{ + int i, err = 0; + + for (i = 0; i < BNA_TX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + err = bnad_mem_alloc(bnad, + &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id, + &res_info[i].res_u.intr_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_tx_res_free(bnad, res_info); + return err; +} + +/* Free Rx object Resources */ +static void +bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info) +{ + int i; + + for (i = 0; i < BNA_RX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); + } +} + +/* Allocates memory and interrupt resources for Rx object */ +static int +bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + uint rx_id) +{ + int i, err = 0; + + /* All memory needs to be allocated before setup_ccbs */ + for (i = 0; i < BNA_RX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + err = bnad_mem_alloc(bnad, + &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id, + &res_info[i].res_u.intr_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_rx_res_free(bnad, res_info); + return err; +} + +/* Timer callbacks */ +/* a) IOC timer */ +static void +bnad_ioc_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_ioc_hb_check(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_iocpf_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_iocpf_sem_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * All timer routines use bnad->bna_lock to protect against + * the following race, which may occur in case of no locking: + * Time CPU m CPU n + * 0 1 = test_bit + * 1 clear_bit + * 2 del_timer_sync + * 3 mod_timer + */ + +/* b) Dynamic Interrupt Moderation Timer */ +static void +bnad_dim_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + int i, j; + unsigned long flags; + + if (!netif_carrier_ok(bnad->netdev)) + return; + + spin_lock_irqsave(&bnad->bna_lock, flags); + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < 
bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (!rx_ctrl->ccb) + continue; + bna_rx_dim_update(rx_ctrl->ccb); + } + } + + /* Check for BNAD_CF_DIM_ENABLED, does not eleminate a race */ + if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) + mod_timer(&bnad->dim_timer, + jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* c) Statistics Timer */ +static void +bnad_stats_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + if (!netif_running(bnad->netdev) || + !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + return; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_hw_stats_get(&bnad->bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * Set up timer for DIM + * Called with bnad->bna_lock held + */ +void +bnad_dim_timer_start(struct bnad *bnad) +{ + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { + setup_timer(&bnad->dim_timer, bnad_dim_timeout, + (unsigned long)bnad); + set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); + mod_timer(&bnad->dim_timer, + jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); + } +} + +/* + * Set up timer for statistics + * Called with mutex_lock(&bnad->conf_mutex) held + */ +static void +bnad_stats_timer_start(struct bnad *bnad) +{ + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { + setup_timer(&bnad->stats_timer, bnad_stats_timeout, + (unsigned long)bnad); + mod_timer(&bnad->stats_timer, + jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * Stops the stats timer + * Called with mutex_lock(&bnad->conf_mutex) held + */ +static void +bnad_stats_timer_stop(struct bnad *bnad) +{ + int to_del = 0; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + to_del = 1; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->stats_timer); +} + +/* Utilities */ + +static void +bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list) +{ + int i = 1; /* Index 0 has broadcast address */ + struct netdev_hw_addr *mc_addr; + + netdev_for_each_mc_addr(mc_addr, netdev) { + memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0], + ETH_ALEN); + i++; + } +} + +static int +bnad_napi_poll_rx(struct napi_struct *napi, int budget) +{ + struct bnad_rx_ctrl *rx_ctrl = + container_of(napi, struct bnad_rx_ctrl, napi); + struct bnad *bnad = rx_ctrl->bnad; + int rcvd = 0; + + rx_ctrl->rx_poll_ctr++; + + if (!netif_carrier_ok(bnad->netdev)) + goto poll_exit; + + rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget); + if (rcvd >= budget) + return rcvd; + +poll_exit: + napi_complete(napi); + + rx_ctrl->rx_complete++; + + if (rx_ctrl->ccb) + bnad_enable_rx_irq_unsafe(rx_ctrl->ccb); + + return rcvd; +} + +#define BNAD_NAPI_POLL_QUOTA 64 +static void +bnad_napi_init(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_ctrl *rx_ctrl; + int i; + + /* Initialize & enable NAPI */ + for (i = 0; i < bnad->num_rxp_per_rx; i++) { + rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; + netif_napi_add(bnad->netdev, &rx_ctrl->napi, + bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA); + } +} + +static void +bnad_napi_enable(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_ctrl *rx_ctrl; + int i; + + /* Initialize & enable NAPI */ + for (i = 0; 
i < bnad->num_rxp_per_rx; i++) { + rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; + + napi_enable(&rx_ctrl->napi); + } +} + +static void +bnad_napi_disable(struct bnad *bnad, u32 rx_id) +{ + int i; + + /* First disable and then clean up */ + for (i = 0; i < bnad->num_rxp_per_rx; i++) { + napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi); + netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); + } +} + +/* Should be held with conf_lock held */ +void +bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) +{ + struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; + struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; + unsigned long flags; + + if (!tx_info->tx) + return; + + init_completion(&bnad->bnad_completions.tx_comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&bnad->bnad_completions.tx_comp); + + if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX) + bnad_tx_msix_unregister(bnad, tx_info, + bnad->num_txq_per_tx); + + if (0 == tx_id) + tasklet_kill(&bnad->tx_free_tasklet); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_destroy(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + tx_info->tx = NULL; + tx_info->tx_id = 0; + + bnad_tx_res_free(bnad, res_info); +} + +/* Should be held with conf_lock held */ +int +bnad_setup_tx(struct bnad *bnad, u32 tx_id) +{ + int err; + struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; + struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; + struct bna_intr_info *intr_info = + &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; + struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; + static const struct bna_tx_event_cbfn tx_cbfn = { + .tcb_setup_cbfn = bnad_cb_tcb_setup, + .tcb_destroy_cbfn = bnad_cb_tcb_destroy, + .tx_stall_cbfn = bnad_cb_tx_stall, + .tx_resume_cbfn = bnad_cb_tx_resume, + .tx_cleanup_cbfn = bnad_cb_tx_cleanup, + }; + + struct bna_tx *tx; + unsigned long flags; + + tx_info->tx_id = tx_id; + + /* Initialize the Tx object configuration */ + tx_config->num_txq = bnad->num_txq_per_tx; + tx_config->txq_depth = bnad->txq_depth; + tx_config->tx_type = BNA_TX_T_REGULAR; + tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; + + /* Get BNA's resource requirement for one tx object */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_res_req(bnad->num_txq_per_tx, + bnad->txq_depth, res_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Fill Unmap Q memory requirements */ + BNAD_FILL_UNMAPQ_MEM_REQ( + &res_info[BNA_TX_RES_MEM_T_UNMAPQ], + bnad->num_txq_per_tx, + BNAD_TX_UNMAPQ_DEPTH); + + /* Allocate resources */ + err = bnad_tx_res_alloc(bnad, res_info, tx_id); + if (err) + return err; + + /* Ask BNA to create one Tx object, supplying required resources */ + spin_lock_irqsave(&bnad->bna_lock, flags); + tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, + tx_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (!tx) + goto err_return; + tx_info->tx = tx; + + /* Register ISR for the Tx object */ + if (intr_info->intr_type == BNA_INTR_T_MSIX) { + err = bnad_tx_msix_register(bnad, tx_info, + tx_id, bnad->num_txq_per_tx); + if (err) + goto err_return; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_enable(tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return 0; + +err_return: + bnad_tx_res_free(bnad, res_info); + return err; +} + +/* Setup the rx config for bna_rx_create */ +/* bnad 
decides the configuration */ +static void +bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) +{ + rx_config->rx_type = BNA_RX_T_REGULAR; + rx_config->num_paths = bnad->num_rxp_per_rx; + rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; + + if (bnad->num_rxp_per_rx > 1) { + rx_config->rss_status = BNA_STATUS_T_ENABLED; + rx_config->rss_config.hash_type = + (BFI_ENET_RSS_IPV6 | + BFI_ENET_RSS_IPV6_TCP | + BFI_ENET_RSS_IPV4 | + BFI_ENET_RSS_IPV4_TCP); + rx_config->rss_config.hash_mask = + bnad->num_rxp_per_rx - 1; + get_random_bytes(rx_config->rss_config.toeplitz_hash_key, + sizeof(rx_config->rss_config.toeplitz_hash_key)); + } else { + rx_config->rss_status = BNA_STATUS_T_DISABLED; + memset(&rx_config->rss_config, 0, + sizeof(rx_config->rss_config)); + } + rx_config->rxp_type = BNA_RXP_SLR; + rx_config->q_depth = bnad->rxq_depth; + + rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE; + + rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; +} + +static void +bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + int i; + + for (i = 0; i < bnad->num_rxp_per_rx; i++) + rx_info->rx_ctrl[i].bnad = bnad; +} + +/* Called with mutex_lock(&bnad->conf_mutex) held */ +void +bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; + struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; + unsigned long flags; + int to_del = 0; + + if (!rx_info->rx) + return; + + if (0 == rx_id) { + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { + clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); + to_del = 1; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->dim_timer); + } + + init_completion(&bnad->bnad_completions.rx_comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&bnad->bnad_completions.rx_comp); + + if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) + bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); + + bnad_napi_disable(bnad, rx_id); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_destroy(rx_info->rx); + + rx_info->rx = NULL; + rx_info->rx_id = 0; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad_rx_res_free(bnad, res_info); +} + +/* Called with mutex_lock(&bnad->conf_mutex) held */ +int +bnad_setup_rx(struct bnad *bnad, u32 rx_id) +{ + int err; + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; + struct bna_intr_info *intr_info = + &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; + struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; + static const struct bna_rx_event_cbfn rx_cbfn = { + .rcb_setup_cbfn = bnad_cb_rcb_setup, + .rcb_destroy_cbfn = bnad_cb_rcb_destroy, + .ccb_setup_cbfn = bnad_cb_ccb_setup, + .ccb_destroy_cbfn = bnad_cb_ccb_destroy, + .rx_stall_cbfn = bnad_cb_rx_stall, + .rx_cleanup_cbfn = bnad_cb_rx_cleanup, + .rx_post_cbfn = bnad_cb_rx_post, + }; + struct bna_rx *rx; + unsigned long flags; + + rx_info->rx_id = rx_id; + + /* Initialize the Rx object configuration */ + bnad_init_rx_config(bnad, rx_config); + + /* Get BNA's resource requirement for one Rx object */ + 
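	/*
	 * bna_rx_res_req() fills res_info[] with the memory and interrupt
	 * requirements for one Rx object, derived from rx_config; it is
	 * called here under bna_lock.  With rxp_type set to BNA_RXP_SLR by
	 * bnad_init_rx_config() (i.e. not BNA_RXP_SINGLE), each path
	 * carries a large and a small RxQ (cf. ccb->rcb[0]/rcb[1] in
	 * bnad_poll_cq()), which is why the unmap-queue request below asks
	 * for num_paths * 2 queues.
	 */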
spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_res_req(rx_config, res_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Fill Unmap Q memory requirements */ + BNAD_FILL_UNMAPQ_MEM_REQ( + &res_info[BNA_RX_RES_MEM_T_UNMAPQ], + rx_config->num_paths + + ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 : + rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH); + + /* Allocate resource */ + err = bnad_rx_res_alloc(bnad, res_info, rx_id); + if (err) + return err; + + bnad_rx_ctrl_init(bnad, rx_id); + + /* Ask BNA to create one Rx object, supplying required resources */ + spin_lock_irqsave(&bnad->bna_lock, flags); + rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, + rx_info); + if (!rx) { + err = -ENOMEM; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto err_return; + } + rx_info->rx = rx; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* + * Init NAPI, so that state is set to NAPI_STATE_SCHED, + * so that IRQ handler cannot schedule NAPI at this point. + */ + bnad_napi_init(bnad, rx_id); + + /* Register ISR for the Rx object */ + if (intr_info->intr_type == BNA_INTR_T_MSIX) { + err = bnad_rx_msix_register(bnad, rx_info, rx_id, + rx_config->num_paths); + if (err) + goto err_return; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (0 == rx_id) { + /* Set up Dynamic Interrupt Moderation Vector */ + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) + bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); + + /* Enable VLAN filtering only on the default Rx */ + bna_rx_vlanfilter_enable(rx); + + /* Start the DIM timer */ + bnad_dim_timer_start(bnad); + } + + bna_rx_enable(rx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Enable scheduling of NAPI */ + bnad_napi_enable(bnad, rx_id); + + return 0; + +err_return: + bnad_cleanup_rx(bnad, rx_id); + return err; +} + +/* Called with conf_lock & bnad->bna_lock held */ +void +bnad_tx_coalescing_timeo_set(struct bnad *bnad) +{ + struct bnad_tx_info *tx_info; + + tx_info = &bnad->tx_info[0]; + if (!tx_info->tx) + return; + + bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); +} + +/* Called with conf_lock & bnad->bna_lock held */ +void +bnad_rx_coalescing_timeo_set(struct bnad *bnad) +{ + struct bnad_rx_info *rx_info; + int i; + + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + bna_rx_coalescing_timeo_set(rx_info->rx, + bnad->rx_coalescing_timeo); + } +} + +/* + * Called with bnad->bna_lock held + */ +int +bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr) +{ + int ret; + + if (!is_valid_ether_addr(mac_addr)) + return -EADDRNOTAVAIL; + + /* If datapath is down, pretend everything went through */ + if (!bnad->rx_info[0].rx) + return 0; + + ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL); + if (ret != BNA_CB_SUCCESS) + return -EADDRNOTAVAIL; + + return 0; +} + +/* Should be called with conf_lock held */ +int +bnad_enable_default_bcast(struct bnad *bnad) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[0]; + int ret; + unsigned long flags; + + init_completion(&bnad->bnad_completions.mcast_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr, + bnad_cb_rx_mcast_add); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (ret == BNA_CB_SUCCESS) + wait_for_completion(&bnad->bnad_completions.mcast_comp); + else + return -ENODEV; + + if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) + return -ENODEV; + + return 0; +} + +/* Called with 
mutex_lock(&bnad->conf_mutex) held */ +void +bnad_restore_vlans(struct bnad *bnad, u32 rx_id) +{ + u16 vid; + unsigned long flags; + + for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } +} + +/* Statistics utilities */ +void +bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) +{ + int i, j; + + for (i = 0; i < bnad->num_rx; i++) { + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + if (bnad->rx_info[i].rx_ctrl[j].ccb) { + stats->rx_packets += bnad->rx_info[i]. + rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; + stats->rx_bytes += bnad->rx_info[i]. + rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1]->rxq) { + stats->rx_packets += + bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]->rxq->rx_packets; + stats->rx_bytes += + bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]->rxq->rx_bytes; + } + } + } + } + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) { + if (bnad->tx_info[i].tcb[j]) { + stats->tx_packets += + bnad->tx_info[i].tcb[j]->txq->tx_packets; + stats->tx_bytes += + bnad->tx_info[i].tcb[j]->txq->tx_bytes; + } + } + } +} + +/* + * Must be called with the bna_lock held. + */ +void +bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) +{ + struct bfi_enet_stats_mac *mac_stats; + u32 bmap; + int i; + + mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; + stats->rx_errors = + mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + + mac_stats->rx_frame_length_error + mac_stats->rx_code_error + + mac_stats->rx_undersize; + stats->tx_errors = mac_stats->tx_fcs_error + + mac_stats->tx_undersize; + stats->rx_dropped = mac_stats->rx_drop; + stats->tx_dropped = mac_stats->tx_drop; + stats->multicast = mac_stats->rx_multicast; + stats->collisions = mac_stats->tx_total_collision; + + stats->rx_length_errors = mac_stats->rx_frame_length_error; + + /* receive ring buffer overflow ?? */ + + stats->rx_crc_errors = mac_stats->rx_fcs_error; + stats->rx_frame_errors = mac_stats->rx_alignment_error; + /* recv'r fifo overrun */ + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats->rx_fifo_errors += + bnad->stats.bna_stats-> + hw_stats.rxf_stats[i].frame_drops; + break; + } + bmap >>= 1; + } +} + +static void +bnad_mbox_irq_sync(struct bnad *bnad) +{ + u32 irq; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_MSIX) + irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; + else + irq = bnad->pcidev->irq; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + synchronize_irq(irq); +} + +/* Utility used by bnad_start_xmit, for doing TSO */ +static int +bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) +{ + int err; + + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) { + BNAD_UPDATE_CTR(bnad, tso_err); + return err; + } + } + + /* + * For TSO, the TCP checksum field is seeded with pseudo-header sum + * excluding the length field. + */ + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + /* Do we really need these? 
*/ + iph->tot_len = 0; + iph->check = 0; + + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, + IPPROTO_TCP, 0); + BNAD_UPDATE_CTR(bnad, tso4); + } else { + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + + ipv6h->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0, + IPPROTO_TCP, 0); + BNAD_UPDATE_CTR(bnad, tso6); + } + + return 0; +} + +/* + * Initialize Q numbers depending on Rx Paths + * Called with bnad->bna_lock held, because of cfg_flags + * access. + */ +static void +bnad_q_num_init(struct bnad *bnad) +{ + int rxps; + + rxps = min((uint)num_online_cpus(), + (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)); + + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) + rxps = 1; /* INTx */ + + bnad->num_rx = 1; + bnad->num_tx = 1; + bnad->num_rxp_per_rx = rxps; + bnad->num_txq_per_tx = BNAD_TXQ_NUM; +} + +/* + * Adjusts the Q numbers, given a number of msix vectors + * Give preference to RSS as opposed to Tx priority Queues, + * in such a case, just use 1 Tx Q + * Called with bnad->bna_lock held b'cos of cfg_flags access + */ +static void +bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) +{ + bnad->num_txq_per_tx = 1; + if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + + bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) && + (bnad->cfg_flags & BNAD_CF_MSIX)) { + bnad->num_rxp_per_rx = msix_vectors - + (bnad->num_tx * bnad->num_txq_per_tx) - + BNAD_MAILBOX_MSIX_VECTORS; + } else + bnad->num_rxp_per_rx = 1; +} + +/* Enable / disable ioceth */ +static int +bnad_ioceth_disable(struct bnad *bnad) +{ + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&bnad->bna_lock, flags); + init_completion(&bnad->bnad_completions.ioc_comp); + bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, + msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); + + err = bnad->bnad_completions.ioc_comp_status; + return err; +} + +static int +bnad_ioceth_enable(struct bnad *bnad) +{ + int err = 0; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + init_completion(&bnad->bnad_completions.ioc_comp); + bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; + bna_ioceth_enable(&bnad->bna.ioceth); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, + msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); + + err = bnad->bnad_completions.ioc_comp_status; + + return err; +} + +/* Free BNA resources */ +static void +bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, + u32 res_val_max) +{ + int i; + + for (i = 0; i < res_val_max; i++) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); +} + +/* Allocates memory and interrupt resources for BNA */ +static int +bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + u32 res_val_max) +{ + int i, err; + + for (i = 0; i < res_val_max; i++) { + err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_res_free(bnad, res_info, res_val_max); + return err; +} + +/* Interrupt enable / disable */ +static void +bnad_enable_msix(struct bnad *bnad) +{ + int i, ret; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (bnad->msix_table) + return; + + bnad->msix_table = + 
kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); + + if (!bnad->msix_table) + goto intx_mode; + + for (i = 0; i < bnad->msix_num; i++) + bnad->msix_table[i].entry = i; + + ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); + if (ret > 0) { + /* Not enough MSI-X vectors. */ + pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", + ret, bnad->msix_num); + + spin_lock_irqsave(&bnad->bna_lock, flags); + /* ret = #of vectors that we got */ + bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, + (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + + BNAD_MAILBOX_MSIX_VECTORS; + + if (bnad->msix_num > ret) + goto intx_mode; + + /* Try once more with adjusted numbers */ + /* If this fails, fall back to INTx */ + ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, + bnad->msix_num); + if (ret) + goto intx_mode; + + } else if (ret < 0) + goto intx_mode; + + pci_intx(bnad->pcidev, 0); + + return; + +intx_mode: + pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n"); + + kfree(bnad->msix_table); + bnad->msix_table = NULL; + bnad->msix_num = 0; + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad->cfg_flags &= ~BNAD_CF_MSIX; + bnad_q_num_init(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_disable_msix(struct bnad *bnad) +{ + u32 cfg_flags; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + cfg_flags = bnad->cfg_flags; + if (bnad->cfg_flags & BNAD_CF_MSIX) + bnad->cfg_flags &= ~BNAD_CF_MSIX; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (cfg_flags & BNAD_CF_MSIX) { + pci_disable_msix(bnad->pcidev); + kfree(bnad->msix_table); + bnad->msix_table = NULL; + } +} + +/* Netdev entry points */ +static int +bnad_open(struct net_device *netdev) +{ + int err; + struct bnad *bnad = netdev_priv(netdev); + struct bna_pause_config pause_config; + int mtu; + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + + /* Tx */ + err = bnad_setup_tx(bnad, 0); + if (err) + goto err_return; + + /* Rx */ + err = bnad_setup_rx(bnad, 0); + if (err) + goto cleanup_tx; + + /* Port */ + pause_config.tx_pause = 0; + pause_config.rx_pause = 0; + + mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL); + bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); + bna_enet_enable(&bnad->bna.enet); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Enable broadcast */ + bnad_enable_default_bcast(bnad); + + /* Restore VLANs, if any */ + bnad_restore_vlans(bnad, 0); + + /* Set the UCAST address */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Start the stats timer */ + bnad_stats_timer_start(bnad); + + mutex_unlock(&bnad->conf_mutex); + + return 0; + +cleanup_tx: + bnad_cleanup_tx(bnad, 0); + +err_return: + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static int +bnad_stop(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + + /* Stop the stats timer */ + bnad_stats_timer_stop(bnad); + + init_completion(&bnad->bnad_completions.enet_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, + bnad_cb_enet_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + 
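+	/*
+	 * Illustrative sketch (not from this patch): bna_enet_disable() is
+	 * asynchronous, so the stop path blocks on enet_comp below; the
+	 * bnad_cb_enet_disabled callback passed above presumably signals it
+	 * along these lines:
+	 *
+	 *	static void
+	 *	bnad_cb_enet_disabled(void *arg)
+	 *	{
+	 *		struct bnad *bnad = arg;
+	 *
+	 *		netif_carrier_off(bnad->netdev);
+	 *		complete(&bnad->bnad_completions.enet_comp);
+	 *	}
+	 */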
wait_for_completion(&bnad->bnad_completions.enet_comp); + + bnad_cleanup_tx(bnad, 0); + bnad_cleanup_rx(bnad, 0); + + /* Synchronize mailbox IRQ */ + bnad_mbox_irq_sync(bnad); + + mutex_unlock(&bnad->conf_mutex); + + return 0; +} + +/* TX */ +/* + * bnad_start_xmit : Netdev entry point for Transmit + * Called under lock held by net_device + */ +static netdev_tx_t +bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + u32 txq_id = 0; + struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id]; + + u16 txq_prod, vlan_tag = 0; + u32 unmap_prod, wis, wis_used, wi_range; + u32 vectors, vect_id, i, acked; + int err; + unsigned int len; + u32 gso_size; + + struct bnad_unmap_q *unmap_q = tcb->unmap_q; + dma_addr_t dma_addr; + struct bna_txq_entry *txqent; + u16 flags; + + if (unlikely(skb->len <= ETH_HLEN)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_too_short); + return NETDEV_TX_OK; + } + if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long); + return NETDEV_TX_OK; + } + if (unlikely(skb_headlen(skb) == 0)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); + return NETDEV_TX_OK; + } + + /* + * Takes care of the Tx that is scheduled between clearing the flag + * and the netif_tx_stop_all_queues() call. + */ + if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_stopping); + return NETDEV_TX_OK; + } + + vectors = 1 + skb_shinfo(skb)->nr_frags; + if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); + return NETDEV_TX_OK; + } + wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ + acked = 0; + if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || + vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { + if ((u16) (*tcb->hw_consumer_index) != + tcb->consumer_index && + !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { + acked = bnad_free_txbufs(bnad, tcb); + if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + bna_ib_ack(tcb->i_dbell, acked); + smp_mb__before_clear_bit(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + } else { + netif_stop_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_stop); + } + + smp_mb(); + /* + * Check again to deal with race condition between + * netif_stop_queue here, and netif_wake_queue in + * interrupt handler which is not inside netif tx lock. 
+ */ + if (likely + (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || + vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { + BNAD_UPDATE_CTR(bnad, netif_queue_stop); + return NETDEV_TX_BUSY; + } else { + netif_wake_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + + unmap_prod = unmap_q->producer_index; + flags = 0; + + txq_prod = tcb->producer_index; + BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range); + txqent->hdr.wi.reserved = 0; + txqent->hdr.wi.num_vectors = vectors; + + if (vlan_tx_tag_present(skb)) { + vlan_tag = (u16) vlan_tx_tag_get(skb); + flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); + } + if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { + vlan_tag = + (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff); + flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); + } + + txqent->hdr.wi.vlan_tag = htons(vlan_tag); + + if (skb_is_gso(skb)) { + gso_size = skb_shinfo(skb)->gso_size; + + if (unlikely(gso_size > netdev->mtu)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); + return NETDEV_TX_OK; + } + if (unlikely((gso_size + skb_transport_offset(skb) + + tcp_hdrlen(skb)) >= skb->len)) { + txqent->hdr.wi.opcode = + __constant_htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.lso_mss = 0; + BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); + } else { + txqent->hdr.wi.opcode = + __constant_htons(BNA_TXQ_WI_SEND_LSO); + txqent->hdr.wi.lso_mss = htons(gso_size); + } + + err = bnad_tso_prepare(bnad, skb); + if (unlikely(err)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); + return NETDEV_TX_OK; + } + flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (tcp_hdrlen(skb) >> 2, + skb_transport_offset(skb))); + } else { + txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.lso_mss = 0; + + if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); + return NETDEV_TX_OK; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 proto = 0; + + if (skb->protocol == __constant_htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; + else if (skb->protocol == + __constant_htons(ETH_P_IPV6)) { + /* nexthdr may not be TCP immediately. 
*/ + proto = ipv6_hdr(skb)->nexthdr; + } + if (proto == IPPROTO_TCP) { + flags |= BNA_TXQ_WI_CF_TCP_CKSUM; + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (0, skb_transport_offset(skb))); + + BNAD_UPDATE_CTR(bnad, tcpcsum_offload); + + if (unlikely(skb_headlen(skb) < + skb_transport_offset(skb) + tcp_hdrlen(skb))) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); + return NETDEV_TX_OK; + } + + } else if (proto == IPPROTO_UDP) { + flags |= BNA_TXQ_WI_CF_UDP_CKSUM; + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (0, skb_transport_offset(skb))); + + BNAD_UPDATE_CTR(bnad, udpcsum_offload); + if (unlikely(skb_headlen(skb) < + skb_transport_offset(skb) + + sizeof(struct udphdr))) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); + return NETDEV_TX_OK; + } + } else { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); + return NETDEV_TX_OK; + } + } else { + txqent->hdr.wi.l4_hdr_size_n_offset = 0; + } + } + + txqent->hdr.wi.flags = htons(flags); + + txqent->hdr.wi.frame_length = htonl(skb->len); + + unmap_q->unmap_array[unmap_prod].skb = skb; + len = skb_headlen(skb); + txqent->vector[0].length = htons(len); + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, + dma_addr); + + BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); + BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); + + vect_id = 0; + wis_used = 1; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + u16 size = skb_frag_size(frag); + + if (unlikely(size == 0)) { + unmap_prod = unmap_q->producer_index; + + unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, + unmap_q->unmap_array, + unmap_prod, unmap_q->q_depth, skb, + i); + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); + return NETDEV_TX_OK; + } + + len += size; + + if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) { + vect_id = 0; + if (--wi_range) + txqent++; + else { + BNA_QE_INDX_ADD(txq_prod, wis_used, + tcb->q_depth); + wis_used = 0; + BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, + txqent, wi_range); + } + wis_used++; + txqent->hdr.wi_ext.opcode = + __constant_htons(BNA_TXQ_WI_EXTENSION); + } + + BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); + txqent->vector[vect_id].length = htons(size); + dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, + 0, size, DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, + dma_addr); + BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); + BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); + } + + if (unlikely(len != skb->len)) { + unmap_prod = unmap_q->producer_index; + + unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, + unmap_q->unmap_array, unmap_prod, + unmap_q->q_depth, skb, + skb_shinfo(skb)->nr_frags); + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); + return NETDEV_TX_OK; + } + + unmap_q->producer_index = unmap_prod; + BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth); + tcb->producer_index = txq_prod; + + smp_mb(); + + if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + return NETDEV_TX_OK; + + bna_txq_prod_indx_doorbell(tcb); + smp_mb(); + + if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) + tasklet_schedule(&bnad->tx_free_tasklet); + + return NETDEV_TX_OK; +} + +/* + * Used spin_lock to synchronize reading of stats structures, which + * is written by BNA under 
the same lock. + */ +static struct rtnl_link_stats64 * +bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + bnad_netdev_qstats_fill(bnad, stats); + bnad_netdev_hwstats_fill(bnad, stats); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return stats; +} + +void +bnad_set_rx_mode(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + u32 new_mask, valid_mask; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + new_mask = valid_mask = 0; + + if (netdev->flags & IFF_PROMISC) { + if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) { + new_mask = BNAD_RXMODE_PROMISC_DEFAULT; + valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; + bnad->cfg_flags |= BNAD_CF_PROMISC; + } + } else { + if (bnad->cfg_flags & BNAD_CF_PROMISC) { + new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT; + valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; + bnad->cfg_flags &= ~BNAD_CF_PROMISC; + } + } + + if (netdev->flags & IFF_ALLMULTI) { + if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) { + new_mask |= BNA_RXMODE_ALLMULTI; + valid_mask |= BNA_RXMODE_ALLMULTI; + bnad->cfg_flags |= BNAD_CF_ALLMULTI; + } + } else { + if (bnad->cfg_flags & BNAD_CF_ALLMULTI) { + new_mask &= ~BNA_RXMODE_ALLMULTI; + valid_mask |= BNA_RXMODE_ALLMULTI; + bnad->cfg_flags &= ~BNAD_CF_ALLMULTI; + } + } + + if (bnad->rx_info[0].rx == NULL) + goto unlock; + + bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL); + + if (!netdev_mc_empty(netdev)) { + u8 *mcaddr_list; + int mc_count = netdev_mc_count(netdev); + + /* Index 0 holds the broadcast address */ + mcaddr_list = + kzalloc((mc_count + 1) * ETH_ALEN, + GFP_ATOMIC); + if (!mcaddr_list) + goto unlock; + + memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN); + + /* Copy rest of the MC addresses */ + bnad_netdev_mc_list_get(netdev, mcaddr_list); + + bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, + mcaddr_list, NULL); + + /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */ + kfree(mcaddr_list); + } +unlock: + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * bna_lock is used to sync writes to netdev->addr + * conf_lock cannot be used since this call may be made + * in a non-blocking context. 
+ */ +static int +bnad_set_mac_address(struct net_device *netdev, void *mac_addr) +{ + int err; + struct bnad *bnad = netdev_priv(netdev); + struct sockaddr *sa = (struct sockaddr *)mac_addr; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + err = bnad_mac_addr_set_locked(bnad, sa->sa_data); + + if (!err) + memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return err; +} + +static int +bnad_mtu_set(struct bnad *bnad, int mtu) +{ + unsigned long flags; + + init_completion(&bnad->bnad_completions.mtu_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion(&bnad->bnad_completions.mtu_comp); + + return bnad->bnad_completions.mtu_comp_status; +} + +static int +bnad_change_mtu(struct net_device *netdev, int new_mtu) +{ + int err, mtu = netdev->mtu; + struct bnad *bnad = netdev_priv(netdev); + + if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) + return -EINVAL; + + mutex_lock(&bnad->conf_mutex); + + netdev->mtu = new_mtu; + + mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN; + err = bnad_mtu_set(bnad, mtu); + if (err) + err = -EBUSY; + + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static void +bnad_vlan_rx_add_vid(struct net_device *netdev, + unsigned short vid) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + if (!bnad->rx_info[0].rx) + return; + + mutex_lock(&bnad->conf_mutex); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_vlan_add(bnad->rx_info[0].rx, vid); + set_bit(vid, bnad->active_vlans); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); +} + +static void +bnad_vlan_rx_kill_vid(struct net_device *netdev, + unsigned short vid) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + if (!bnad->rx_info[0].rx) + return; + + mutex_lock(&bnad->conf_mutex); + + spin_lock_irqsave(&bnad->bna_lock, flags); + clear_bit(vid, bnad->active_vlans); + bna_rx_vlan_del(bnad->rx_info[0].rx, vid); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void +bnad_netpoll(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + u32 curr_mask; + int i, j; + + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { + bna_intx_disable(&bnad->bna, curr_mask); + bnad_isr(bnad->pcidev->irq, netdev); + bna_intx_enable(&bnad->bna, curr_mask); + } else { + /* + * Tx processing may happen in sending context, so no need + * to explicitly process completions here + */ + + /* Rx processing */ + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (rx_ctrl->ccb) + bnad_netif_rx_schedule_poll(bnad, + rx_ctrl->ccb); + } + } + } +} +#endif + +static const struct net_device_ops bnad_netdev_ops = { + .ndo_open = bnad_open, + .ndo_stop = bnad_stop, + .ndo_start_xmit = bnad_start_xmit, + .ndo_get_stats64 = bnad_get_stats64, + .ndo_set_rx_mode = bnad_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = bnad_set_mac_address, + .ndo_change_mtu = bnad_change_mtu, + .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + 
.ndo_poll_controller = bnad_netpoll +#endif +}; + +static void +bnad_netdev_init(struct bnad *bnad, bool using_dac) +{ + struct net_device *netdev = bnad->netdev; + + netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX; + + netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + + netdev->features |= netdev->hw_features | + NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + + if (using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->mem_start = bnad->mmio_start; + netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; + + netdev->netdev_ops = &bnad_netdev_ops; + bnad_set_ethtool_ops(netdev); +} + +/* + * 1. Initialize the bnad structure + * 2. Setup netdev pointer in pci_dev + * 3. Initialze Tx free tasklet + * 4. Initialize no. of TxQ & CQs & MSIX vectors + */ +static int +bnad_init(struct bnad *bnad, + struct pci_dev *pdev, struct net_device *netdev) +{ + unsigned long flags; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + bnad->netdev = netdev; + bnad->pcidev = pdev; + bnad->mmio_start = pci_resource_start(pdev, 0); + bnad->mmio_len = pci_resource_len(pdev, 0); + bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); + if (!bnad->bar0) { + dev_err(&pdev->dev, "ioremap for bar0 failed\n"); + pci_set_drvdata(pdev, NULL); + return -ENOMEM; + } + pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, + (unsigned long long) bnad->mmio_len); + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!bnad_msix_disable) + bnad->cfg_flags = BNAD_CF_MSIX; + + bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; + + bnad_q_num_init(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + + (bnad->num_rx * bnad->num_rxp_per_rx) + + BNAD_MAILBOX_MSIX_VECTORS; + + bnad->txq_depth = BNAD_TXQ_DEPTH; + bnad->rxq_depth = BNAD_RXQ_DEPTH; + + bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; + bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; + + tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet, + (unsigned long)bnad); + + return 0; +} + +/* + * Must be called after bnad_pci_uninit() + * so that iounmap() and pci_set_drvdata(NULL) + * happens only after PCI uninitialization. 
+ */ +static void +bnad_uninit(struct bnad *bnad) +{ + if (bnad->bar0) + iounmap(bnad->bar0); + pci_set_drvdata(bnad->pcidev, NULL); +} + +/* + * Initialize locks + a) Per ioceth mutes used for serializing configuration + changes from OS interface + b) spin lock used to protect bna state machine + */ +static void +bnad_lock_init(struct bnad *bnad) +{ + spin_lock_init(&bnad->bna_lock); + mutex_init(&bnad->conf_mutex); +} + +static void +bnad_lock_uninit(struct bnad *bnad) +{ + mutex_destroy(&bnad->conf_mutex); +} + +/* PCI Initialization */ +static int +bnad_pci_init(struct bnad *bnad, + struct pci_dev *pdev, bool *using_dac) +{ + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + err = pci_request_regions(pdev, BNAD_NAME); + if (err) + goto disable_device; + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + *using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) + goto release_regions; + } + *using_dac = 0; + } + pci_set_master(pdev); + return 0; + +release_regions: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); + + return err; +} + +static void +bnad_pci_uninit(struct pci_dev *pdev) +{ + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int __devinit +bnad_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pcidev_id) +{ + bool using_dac; + int err; + struct bnad *bnad; + struct bna *bna; + struct net_device *netdev; + struct bfa_pcidev pcidev_info; + unsigned long flags; + + pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n", + pdev, pcidev_id, PCI_FUNC(pdev->devfn)); + + mutex_lock(&bnad_fwimg_mutex); + if (!cna_get_firmware_buf(pdev)) { + mutex_unlock(&bnad_fwimg_mutex); + pr_warn("Failed to load Firmware Image!\n"); + return -ENODEV; + } + mutex_unlock(&bnad_fwimg_mutex); + + /* + * Allocates sizeof(struct net_device + struct bnad) + * bnad = netdev->priv + */ + netdev = alloc_etherdev(sizeof(struct bnad)); + if (!netdev) { + dev_err(&pdev->dev, "netdev allocation failed\n"); + err = -ENOMEM; + return err; + } + bnad = netdev_priv(netdev); + + bnad_lock_init(bnad); + + mutex_lock(&bnad->conf_mutex); + /* + * PCI initialization + * Output : using_dac = 1 for 64 bit DMA + * = 0 for 32 bit DMA + */ + err = bnad_pci_init(bnad, pdev, &using_dac); + if (err) + goto unlock_mutex; + + /* + * Initialize bnad structure + * Setup relation between pci_dev & netdev + * Init Tx free tasklet + */ + err = bnad_init(bnad, pdev, netdev); + if (err) + goto pci_uninit; + + /* Initialize netdev structure, set up ethtool ops */ + bnad_netdev_init(bnad, using_dac); + + /* Set link to down state */ + netif_carrier_off(netdev); + + /* Get resource requirement form bna */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_res_req(&bnad->res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Allocate resources from bna */ + err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); + if (err) + goto drv_uninit; + + bna = &bnad->bna; + + /* Setup pcidev_info for bna_init() */ + pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); + pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); + pcidev_info.device_id = bnad->pcidev->device; + pcidev_info.pci_bar_kva = bnad->bar0; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + 
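+	/*
+	 * Minimal sketch (not from this patch) of the 64/32-bit DMA-mask
+	 * fallback implemented by bnad_pci_init() above, written with the
+	 * combined helper, assuming dma_set_mask_and_coherent() exists on
+	 * the target kernel; the rest of the probe path only consumes the
+	 * resulting *using_dac flag:
+	 *
+	 *	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+	 *		*using_dac = 1;
+	 *	else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+	 *		*using_dac = 0;
+	 *	else
+	 *		goto release_regions;
+	 */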
bnad->stats.bna_stats = &bna->stats; + + bnad_enable_msix(bnad); + err = bnad_mbox_irq_alloc(bnad); + if (err) + goto res_free; + + + /* Set up timers */ + setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, + ((unsigned long)bnad)); + setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, + ((unsigned long)bnad)); + setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, + ((unsigned long)bnad)); + setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, + ((unsigned long)bnad)); + + /* Now start the timer before calling IOC */ + mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer, + jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); + + /* + * Start the chip + * If the call back comes with error, we bail out. + * This is a catastrophic error. + */ + err = bnad_ioceth_enable(bnad); + if (err) { + pr_err("BNA: Initialization failed err=%d\n", + err); + goto probe_success; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || + bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) { + bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, + bna_attr(bna)->num_rxp - 1); + if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || + bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) + err = -EIO; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (err) + goto disable_ioceth; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); + if (err) { + err = -EIO; + goto disable_ioceth; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Get the burnt-in mac */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr); + bnad_set_netdev_perm_addr(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + + /* Finally, reguister with net_device layer */ + err = register_netdev(netdev); + if (err) { + pr_err("BNA : Registering with netdev failed\n"); + goto probe_uninit; + } + set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); + + return 0; + +probe_success: + mutex_unlock(&bnad->conf_mutex); + return 0; + +probe_uninit: + mutex_lock(&bnad->conf_mutex); + bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); +disable_ioceth: + bnad_ioceth_disable(bnad); + del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_uninit(bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad_mbox_irq_free(bnad); + bnad_disable_msix(bnad); +res_free: + bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); +drv_uninit: + bnad_uninit(bnad); +pci_uninit: + bnad_pci_uninit(pdev); +unlock_mutex: + mutex_unlock(&bnad->conf_mutex); + bnad_lock_uninit(bnad); + free_netdev(netdev); + return err; +} + +static void __devexit +bnad_pci_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct bnad *bnad; + struct bna *bna; + unsigned long flags; + + if (!netdev) + return; + + pr_info("%s bnad_pci_remove\n", netdev->name); + bnad = netdev_priv(netdev); + bna = &bnad->bna; + + if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) + unregister_netdev(netdev); + + mutex_lock(&bnad->conf_mutex); + bnad_ioceth_disable(bnad); + 
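+	/*
+	 * Illustrative sketch (not from this patch): the IOC timer handlers
+	 * registered with setup_timer() in the probe path above are
+	 * presumably thin wrappers that recover the bnad pointer from the
+	 * timer data and kick the IOC state machine under bna_lock, e.g.:
+	 *
+	 *	static void
+	 *	bnad_ioc_timeout(unsigned long data)
+	 *	{
+	 *		struct bnad *bnad = (struct bnad *)data;
+	 *		unsigned long flags;
+	 *
+	 *		spin_lock_irqsave(&bnad->bna_lock, flags);
+	 *		bfa_nw_ioc_timeout((void *)&bnad->bna.ioceth.ioc);
+	 *		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	 *	}
+	 */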
del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_uninit(bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); + bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); + bnad_mbox_irq_free(bnad); + bnad_disable_msix(bnad); + bnad_pci_uninit(pdev); + mutex_unlock(&bnad->conf_mutex); + bnad_lock_uninit(bnad); + bnad_uninit(bnad); + free_netdev(netdev); +} + +static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = { + { + PCI_DEVICE(PCI_VENDOR_ID_BROCADE, + PCI_DEVICE_ID_BROCADE_CT), + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00 + }, + { + PCI_DEVICE(PCI_VENDOR_ID_BROCADE, + BFA_PCI_DEVICE_ID_CT2), + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00 + }, + {0, }, +}; + +MODULE_DEVICE_TABLE(pci, bnad_pci_id_table); + +static struct pci_driver bnad_pci_driver = { + .name = BNAD_NAME, + .id_table = bnad_pci_id_table, + .probe = bnad_pci_probe, + .remove = __devexit_p(bnad_pci_remove), +}; + +static int __init +bnad_module_init(void) +{ + int err; + + pr_info("Brocade 10G Ethernet driver - version: %s\n", + BNAD_VERSION); + + bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); + + err = pci_register_driver(&bnad_pci_driver); + if (err < 0) { + pr_err("bna : PCI registration failed in module init " + "(%d)\n", err); + return err; + } + + return 0; +} + +static void __exit +bnad_module_exit(void) +{ + pci_unregister_driver(&bnad_pci_driver); + + if (bfi_fw) + release_firmware(bfi_fw); +} + +module_init(bnad_module_init); +module_exit(bnad_module_exit); + +MODULE_AUTHOR("Brocade"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver"); +MODULE_VERSION(BNAD_VERSION); +MODULE_FIRMWARE(CNA_FW_FILE_CT); +MODULE_FIRMWARE(CNA_FW_FILE_CT2); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h new file mode 100644 index 000000000000..5487ca42d018 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -0,0 +1,380 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + */ +#ifndef __BNAD_H__ +#define __BNAD_H__ + +#include <linux/rtnetlink.h> +#include <linux/workqueue.h> +#include <linux/ipv6.h> +#include <linux/etherdevice.h> +#include <linux/mutex.h> +#include <linux/firmware.h> +#include <linux/if_vlan.h> + +/* Fix for IA64 */ +#include <asm/checksum.h> +#include <net/ip6_checksum.h> + +#include <net/ip.h> +#include <net/tcp.h> + +#include "bna.h" + +#define BNAD_TXQ_DEPTH 2048 +#define BNAD_RXQ_DEPTH 2048 + +#define BNAD_MAX_TX 1 +#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */ +#define BNAD_TXQ_NUM 1 + +#define BNAD_MAX_RX 1 +#define BNAD_MAX_RXP_PER_RX 16 +#define BNAD_MAX_RXQ_PER_RXP 2 + +/* + * Control structure pointed to ccb->ctrl, which + * determines the NAPI / LRO behavior CCB + * There is 1:1 corres. between ccb & ctrl + */ +struct bnad_rx_ctrl { + struct bna_ccb *ccb; + struct bnad *bnad; + unsigned long flags; + struct napi_struct napi; + u64 rx_intr_ctr; + u64 rx_poll_ctr; + u64 rx_schedule; + u64 rx_keep_poll; + u64 rx_complete; +}; + +#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC + +/* + * GLOBAL #defines (CONSTANTS) + */ +#define BNAD_NAME "bna" +#define BNAD_NAME_LEN 64 + +#define BNAD_VERSION "3.0.2.2" + +#define BNAD_MAILBOX_MSIX_INDEX 0 +#define BNAD_MAILBOX_MSIX_VECTORS 1 +#define BNAD_INTX_TX_IB_BITMASK 0x1 +#define BNAD_INTX_RX_IB_BITMASK 0x2 + +#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */ +#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */ + +#define BNAD_IOCETH_TIMEOUT 10000 + +#define BNAD_MAX_Q_DEPTH 0x10000 +#define BNAD_MIN_Q_DEPTH 0x200 + +#define BNAD_MAX_RXQ_DEPTH (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq) +/* keeping MAX TX and RX Q depth equal */ +#define BNAD_MAX_TXQ_DEPTH BNAD_MAX_RXQ_DEPTH + +#define BNAD_JUMBO_MTU 9000 + +#define BNAD_NETIF_WAKE_THRESHOLD 8 + +#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3 + +/* Bit positions for tcb->flags */ +#define BNAD_TXQ_FREE_SENT 0 +#define BNAD_TXQ_TX_STARTED 1 + +/* Bit positions for rcb->flags */ +#define BNAD_RXQ_REFILL 0 +#define BNAD_RXQ_STARTED 1 +#define BNAD_RXQ_POST_OK 2 + +/* Resource limits */ +#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx) +#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx) + +/* + * DATA STRUCTURES + */ + +/* enums */ +enum bnad_intr_source { + BNAD_INTR_TX = 1, + BNAD_INTR_RX = 2 +}; + +enum bnad_link_state { + BNAD_LS_DOWN = 0, + BNAD_LS_UP = 1 +}; + +struct bnad_completion { + struct completion ioc_comp; + struct completion ucast_comp; + struct completion mcast_comp; + struct completion tx_comp; + struct completion rx_comp; + struct completion stats_comp; + struct completion enet_comp; + struct completion mtu_comp; + + u8 ioc_comp_status; + u8 ucast_comp_status; + u8 mcast_comp_status; + u8 tx_comp_status; + u8 rx_comp_status; + u8 stats_comp_status; + u8 port_comp_status; + u8 mtu_comp_status; +}; + +/* Tx Rx Control Stats */ +struct bnad_drv_stats { + u64 netif_queue_stop; + u64 netif_queue_wakeup; + u64 netif_queue_stopped; + u64 tso4; + u64 tso6; + u64 tso_err; + u64 tcpcsum_offload; + u64 udpcsum_offload; + u64 csum_help; + u64 tx_skb_too_short; + u64 tx_skb_stopping; + u64 tx_skb_max_vectors; + u64 tx_skb_mss_too_long; + u64 tx_skb_tso_too_short; + u64 tx_skb_tso_prepare; + u64 tx_skb_non_tso_too_long; + u64 tx_skb_tcp_hdr; + u64 tx_skb_udp_hdr; + u64 tx_skb_csum_err; + u64 tx_skb_headlen_too_long; + u64 tx_skb_headlen_zero; + u64 tx_skb_frag_zero; + u64 tx_skb_len_mismatch; + + u64 hw_stats_updates; + u64 netif_rx_dropped; + + u64 link_toggle; + u64 
cee_toggle; + + u64 rxp_info_alloc_failed; + u64 mbox_intr_disabled; + u64 mbox_intr_enabled; + u64 tx_unmap_q_alloc_failed; + u64 rx_unmap_q_alloc_failed; + + u64 rxbuf_alloc_failed; +}; + +/* Complete driver stats */ +struct bnad_stats { + struct bnad_drv_stats drv_stats; + struct bna_stats *bna_stats; +}; + +/* Tx / Rx Resources */ +struct bnad_tx_res_info { + struct bna_res_info res_info[BNA_TX_RES_T_MAX]; +}; + +struct bnad_rx_res_info { + struct bna_res_info res_info[BNA_RX_RES_T_MAX]; +}; + +struct bnad_tx_info { + struct bna_tx *tx; /* 1:1 between tx_info & tx */ + struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX]; + u32 tx_id; +} ____cacheline_aligned; + +struct bnad_rx_info { + struct bna_rx *rx; /* 1:1 between rx_info & rx */ + + struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX]; + u32 rx_id; +} ____cacheline_aligned; + +/* Unmap queues for Tx / Rx cleanup */ +struct bnad_skb_unmap { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma_addr); +}; + +struct bnad_unmap_q { + u32 producer_index; + u32 consumer_index; + u32 q_depth; + /* This should be the last one */ + struct bnad_skb_unmap unmap_array[1]; +}; + +/* Bit mask values for bnad->cfg_flags */ +#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */ +#define BNAD_CF_PROMISC 0x02 +#define BNAD_CF_ALLMULTI 0x04 +#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */ + +/* Defines for run_flags bit-mask */ +/* Set, tested & cleared using xxx_bit() functions */ +/* Values indicated bit positions */ +#define BNAD_RF_CEE_RUNNING 0 +#define BNAD_RF_MTU_SET 1 +#define BNAD_RF_MBOX_IRQ_DISABLED 2 +#define BNAD_RF_NETDEV_REGISTERED 3 +#define BNAD_RF_DIM_TIMER_RUNNING 4 +#define BNAD_RF_STATS_TIMER_RUNNING 5 +#define BNAD_RF_TX_PRIO_SET 6 + + +/* Define for Fast Path flags */ +/* Defined as bit positions */ +#define BNAD_FP_IN_RX_PATH 0 + +struct bnad { + struct net_device *netdev; + + /* Data path */ + struct bnad_tx_info tx_info[BNAD_MAX_TX]; + struct bnad_rx_info rx_info[BNAD_MAX_RX]; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* + * These q numbers are global only because + * they are used to calculate MSIx vectors. + * Actually the exact # of queues are per Tx/Rx + * object. 
+ */ + u32 num_tx; + u32 num_rx; + u32 num_txq_per_tx; + u32 num_rxp_per_rx; + + u32 txq_depth; + u32 rxq_depth; + + u8 tx_coalescing_timeo; + u8 rx_coalescing_timeo; + + struct bna_rx_config rx_config[BNAD_MAX_RX]; + struct bna_tx_config tx_config[BNAD_MAX_TX]; + + void __iomem *bar0; /* BAR0 address */ + + struct bna bna; + + u32 cfg_flags; + unsigned long run_flags; + + struct pci_dev *pcidev; + u64 mmio_start; + u64 mmio_len; + + u32 msix_num; + struct msix_entry *msix_table; + + struct mutex conf_mutex; + spinlock_t bna_lock ____cacheline_aligned; + + /* Timers */ + struct timer_list ioc_timer; + struct timer_list dim_timer; + struct timer_list stats_timer; + + /* Control path resources, memory & irq */ + struct bna_res_info res_info[BNA_RES_T_MAX]; + struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX]; + struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX]; + struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX]; + + struct bnad_completion bnad_completions; + + /* Burnt in MAC address */ + mac_t perm_addr; + + struct tasklet_struct tx_free_tasklet; + + /* Statistics */ + struct bnad_stats stats; + + struct bnad_diag *diag; + + char adapter_name[BNAD_NAME_LEN]; + char port_name[BNAD_NAME_LEN]; + char mbox_irq_name[BNAD_NAME_LEN]; +}; + +/* + * EXTERN VARIABLES + */ +extern struct firmware *bfi_fw; +extern u32 bnad_rxqs_per_cq; + +/* + * EXTERN PROTOTYPES + */ +extern u32 *cna_get_firmware_buf(struct pci_dev *pdev); +/* Netdev entry point prototypes */ +extern void bnad_set_rx_mode(struct net_device *netdev); +extern struct net_device_stats *bnad_get_netdev_stats( + struct net_device *netdev); +extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); +extern int bnad_enable_default_bcast(struct bnad *bnad); +extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); +extern void bnad_set_ethtool_ops(struct net_device *netdev); + +/* Configuration & setup */ +extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad); +extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad); + +extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id); +extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id); +extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id); +extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id); + +/* Timer start/stop protos */ +extern void bnad_dim_timer_start(struct bnad *bnad); + +/* Statistics */ +extern void bnad_netdev_qstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); +extern void bnad_netdev_hwstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); + +/** + * MACROS + */ +/* To set & get the stats counters */ +#define BNAD_UPDATE_CTR(_bnad, _ctr) \ + (((_bnad)->stats.drv_stats._ctr)++) + +#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr) + +#define bnad_enable_rx_irq_unsafe(_ccb) \ +{ \ + if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\ + bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ + (_ccb)->rx_coalescing_timeo); \ + bna_ib_ack((_ccb)->i_dbell, 0); \ + } \ +} + +#endif /* __BNAD_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c new file mode 100644 index 000000000000..fd3dcc1e9145 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -0,0 +1,958 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +#include "cna.h" + +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/rtnetlink.h> + +#include "bna.h" + +#include "bnad.h" + +#define BNAD_NUM_TXF_COUNTERS 12 +#define BNAD_NUM_RXF_COUNTERS 10 +#define BNAD_NUM_CQ_COUNTERS (3 + 5) +#define BNAD_NUM_RXQ_COUNTERS 6 +#define BNAD_NUM_TXQ_COUNTERS 5 + +#define BNAD_ETHTOOL_STATS_NUM \ + (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \ + sizeof(struct bnad_drv_stats) / sizeof(u64) + \ + offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64)) + +static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", + "tx_errors", + "rx_dropped", + "tx_dropped", + "multicast", + "collisions", + + "rx_length_errors", + "rx_over_errors", + "rx_crc_errors", + "rx_frame_errors", + "rx_fifo_errors", + "rx_missed_errors", + + "tx_aborted_errors", + "tx_carrier_errors", + "tx_fifo_errors", + "tx_heartbeat_errors", + "tx_window_errors", + + "rx_compressed", + "tx_compressed", + + "netif_queue_stop", + "netif_queue_wakeup", + "netif_queue_stopped", + "tso4", + "tso6", + "tso_err", + "tcpcsum_offload", + "udpcsum_offload", + "csum_help", + "tx_skb_too_short", + "tx_skb_stopping", + "tx_skb_max_vectors", + "tx_skb_mss_too_long", + "tx_skb_tso_too_short", + "tx_skb_tso_prepare", + "tx_skb_non_tso_too_long", + "tx_skb_tcp_hdr", + "tx_skb_udp_hdr", + "tx_skb_csum_err", + "tx_skb_headlen_too_long", + "tx_skb_headlen_zero", + "tx_skb_frag_zero", + "tx_skb_len_mismatch", + "hw_stats_updates", + "netif_rx_dropped", + + "link_toggle", + "cee_toggle", + + "rxp_info_alloc_failed", + "mbox_intr_disabled", + "mbox_intr_enabled", + "tx_unmap_q_alloc_failed", + "rx_unmap_q_alloc_failed", + "rxbuf_alloc_failed", + + "mac_frame_64", + "mac_frame_65_127", + "mac_frame_128_255", + "mac_frame_256_511", + "mac_frame_512_1023", + "mac_frame_1024_1518", + "mac_frame_1518_1522", + "mac_rx_bytes", + "mac_rx_packets", + "mac_rx_fcs_error", + "mac_rx_multicast", + "mac_rx_broadcast", + "mac_rx_control_frames", + "mac_rx_pause", + "mac_rx_unknown_opcode", + "mac_rx_alignment_error", + "mac_rx_frame_length_error", + "mac_rx_code_error", + "mac_rx_carrier_sense_error", + "mac_rx_undersize", + "mac_rx_oversize", + "mac_rx_fragments", + "mac_rx_jabber", + "mac_rx_drop", + + "mac_tx_bytes", + "mac_tx_packets", + "mac_tx_multicast", + "mac_tx_broadcast", + "mac_tx_pause", + "mac_tx_deferral", + "mac_tx_excessive_deferral", + "mac_tx_single_collision", + "mac_tx_muliple_collision", + "mac_tx_late_collision", + "mac_tx_excessive_collision", + "mac_tx_total_collision", + "mac_tx_pause_honored", + "mac_tx_drop", + "mac_tx_jabber", + "mac_tx_fcs_error", + "mac_tx_control_frame", + "mac_tx_oversize", + "mac_tx_undersize", + "mac_tx_fragments", + + "bpc_tx_pause_0", + "bpc_tx_pause_1", + "bpc_tx_pause_2", + "bpc_tx_pause_3", + "bpc_tx_pause_4", + "bpc_tx_pause_5", + "bpc_tx_pause_6", + "bpc_tx_pause_7", 
+ "bpc_tx_zero_pause_0", + "bpc_tx_zero_pause_1", + "bpc_tx_zero_pause_2", + "bpc_tx_zero_pause_3", + "bpc_tx_zero_pause_4", + "bpc_tx_zero_pause_5", + "bpc_tx_zero_pause_6", + "bpc_tx_zero_pause_7", + "bpc_tx_first_pause_0", + "bpc_tx_first_pause_1", + "bpc_tx_first_pause_2", + "bpc_tx_first_pause_3", + "bpc_tx_first_pause_4", + "bpc_tx_first_pause_5", + "bpc_tx_first_pause_6", + "bpc_tx_first_pause_7", + + "bpc_rx_pause_0", + "bpc_rx_pause_1", + "bpc_rx_pause_2", + "bpc_rx_pause_3", + "bpc_rx_pause_4", + "bpc_rx_pause_5", + "bpc_rx_pause_6", + "bpc_rx_pause_7", + "bpc_rx_zero_pause_0", + "bpc_rx_zero_pause_1", + "bpc_rx_zero_pause_2", + "bpc_rx_zero_pause_3", + "bpc_rx_zero_pause_4", + "bpc_rx_zero_pause_5", + "bpc_rx_zero_pause_6", + "bpc_rx_zero_pause_7", + "bpc_rx_first_pause_0", + "bpc_rx_first_pause_1", + "bpc_rx_first_pause_2", + "bpc_rx_first_pause_3", + "bpc_rx_first_pause_4", + "bpc_rx_first_pause_5", + "bpc_rx_first_pause_6", + "bpc_rx_first_pause_7", + + "rad_rx_frames", + "rad_rx_octets", + "rad_rx_vlan_frames", + "rad_rx_ucast", + "rad_rx_ucast_octets", + "rad_rx_ucast_vlan", + "rad_rx_mcast", + "rad_rx_mcast_octets", + "rad_rx_mcast_vlan", + "rad_rx_bcast", + "rad_rx_bcast_octets", + "rad_rx_bcast_vlan", + "rad_rx_drops", + + "rlb_rad_rx_frames", + "rlb_rad_rx_octets", + "rlb_rad_rx_vlan_frames", + "rlb_rad_rx_ucast", + "rlb_rad_rx_ucast_octets", + "rlb_rad_rx_ucast_vlan", + "rlb_rad_rx_mcast", + "rlb_rad_rx_mcast_octets", + "rlb_rad_rx_mcast_vlan", + "rlb_rad_rx_bcast", + "rlb_rad_rx_bcast_octets", + "rlb_rad_rx_bcast_vlan", + "rlb_rad_rx_drops", + + "fc_rx_ucast_octets", + "fc_rx_ucast", + "fc_rx_ucast_vlan", + "fc_rx_mcast_octets", + "fc_rx_mcast", + "fc_rx_mcast_vlan", + "fc_rx_bcast_octets", + "fc_rx_bcast", + "fc_rx_bcast_vlan", + + "fc_tx_ucast_octets", + "fc_tx_ucast", + "fc_tx_ucast_vlan", + "fc_tx_mcast_octets", + "fc_tx_mcast", + "fc_tx_mcast_vlan", + "fc_tx_bcast_octets", + "fc_tx_bcast", + "fc_tx_bcast_vlan", + "fc_tx_parity_errors", + "fc_tx_timeout", + "fc_tx_fid_parity_errors", +}; + +static int +bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + cmd->supported = SUPPORTED_10000baseT_Full; + cmd->advertising = ADVERTISED_10000baseT_Full; + cmd->autoneg = AUTONEG_DISABLE; + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->phy_address = 0; + + if (netif_carrier_ok(netdev)) { + ethtool_cmd_speed_set(cmd, SPEED_10000); + cmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(cmd, -1); + cmd->duplex = -1; + } + cmd->transceiver = XCVR_EXTERNAL; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + + return 0; +} + +static int +bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + /* 10G full duplex setting supported only */ + if (cmd->autoneg == AUTONEG_ENABLE) + return -EOPNOTSUPP; else { + if ((ethtool_cmd_speed(cmd) == SPEED_10000) + && (cmd->duplex == DUPLEX_FULL)) + return 0; + } + + return -EOPNOTSUPP; +} + +static void +bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bfa_ioc_attr *ioc_attr; + unsigned long flags; + + strcpy(drvinfo->driver, BNAD_NAME); + strcpy(drvinfo->version, BNAD_VERSION); + + ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); + if (ioc_attr) { + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + strncpy(drvinfo->fw_version, 
ioc_attr->adapter_attr.fw_ver, + sizeof(drvinfo->fw_version) - 1); + kfree(ioc_attr); + } + + strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN); +} + +static void +bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo) +{ + wolinfo->supported = 0; + wolinfo->wolopts = 0; +} + +static int +bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + /* Lock rqd. to access bnad->bna_lock */ + spin_lock_irqsave(&bnad->bna_lock, flags); + coalesce->use_adaptive_rx_coalesce = + (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo * + BFI_COALESCING_TIMER_UNIT; + coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo * + BFI_COALESCING_TIMER_UNIT; + coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT; + + return 0; +} + +static int +bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + int to_del = 0; + + if (coalesce->rx_coalesce_usecs == 0 || + coalesce->rx_coalesce_usecs > + BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) + return -EINVAL; + + if (coalesce->tx_coalesce_usecs == 0 || + coalesce->tx_coalesce_usecs > + BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) + return -EINVAL; + + mutex_lock(&bnad->conf_mutex); + /* + * Do not need to store rx_coalesce_usecs here + * Every time DIM is disabled, we can get it from the + * stack. + */ + spin_lock_irqsave(&bnad->bna_lock, flags); + if (coalesce->use_adaptive_rx_coalesce) { + if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) { + bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; + bnad_dim_timer_start(bnad); + } + } else { + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) { + bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED; + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + test_bit(BNAD_RF_DIM_TIMER_RUNNING, + &bnad->run_flags)) { + clear_bit(BNAD_RF_DIM_TIMER_RUNNING, + &bnad->run_flags); + to_del = 1; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->dim_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_rx_coalescing_timeo_set(bnad); + } + } + if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT) { + bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT; + bnad_tx_coalescing_timeo_set(bnad); + } + + if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT) { + bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT; + + if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) + bnad_rx_coalescing_timeo_set(bnad); + + } + + /* Add Tx Inter-pkt DMA count? 
*/ + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + return 0; +} + +static void +bnad_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + struct bnad *bnad = netdev_priv(netdev); + + ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH; + ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH; + + ringparam->rx_pending = bnad->rxq_depth; + ringparam->tx_pending = bnad->txq_depth; +} + +static int +bnad_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + int i, current_err, err = 0; + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + if (ringparam->rx_pending == bnad->rxq_depth && + ringparam->tx_pending == bnad->txq_depth) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + + if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH || + ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH || + !BNA_POWER_OF_2(ringparam->rx_pending)) { + mutex_unlock(&bnad->conf_mutex); + return -EINVAL; + } + if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH || + ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH || + !BNA_POWER_OF_2(ringparam->tx_pending)) { + mutex_unlock(&bnad->conf_mutex); + return -EINVAL; + } + + if (ringparam->rx_pending != bnad->rxq_depth) { + bnad->rxq_depth = ringparam->rx_pending; + if (!netif_running(netdev)) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + bnad_cleanup_rx(bnad, i); + current_err = bnad_setup_rx(bnad, i); + if (current_err && !err) + err = current_err; + } + + if (!err && bnad->rx_info[0].rx) { + /* restore rx configuration */ + bnad_restore_vlans(bnad, 0); + bnad_enable_default_bcast(bnad); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | + BNAD_CF_PROMISC); + bnad_set_rx_mode(netdev); + } + } + if (ringparam->tx_pending != bnad->txq_depth) { + bnad->txq_depth = ringparam->tx_pending; + if (!netif_running(netdev)) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + bnad_cleanup_tx(bnad, i); + current_err = bnad_setup_tx(bnad, i); + if (current_err && !err) + err = current_err; + } + } + + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static void +bnad_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct bnad *bnad = netdev_priv(netdev); + + pauseparam->autoneg = 0; + pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause; + pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause; +} + +static int +bnad_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bna_pause_config pause_config; + unsigned long flags; + + if (pauseparam->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + mutex_lock(&bnad->conf_mutex); + if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause || + pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) { + pause_config.rx_pause = pauseparam->rx_pause; + pause_config.tx_pause = pauseparam->tx_pause; + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } + mutex_unlock(&bnad->conf_mutex); + return 0; +} + +static void +bnad_get_strings(struct net_device *netdev, u32 
stringset, u8 * string) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, q_num; + u32 bmap; + + mutex_lock(&bnad->conf_mutex); + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { + BUG_ON(!(strlen(bnad_net_stats_strings[i]) < + ETH_GSTRING_LEN)); + memcpy(string, bnad_net_stats_strings[i], + ETH_GSTRING_LEN); + string += ETH_GSTRING_LEN; + } + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + sprintf(string, "txf%d_ucast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_ucast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_ucast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_mcast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_mcast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_mcast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_bcast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_bcast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_bcast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_errors", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_filter_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_filter_mac_sa", i); + string += ETH_GSTRING_LEN; + } + bmap >>= 1; + } + + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + sprintf(string, "rxf%d_ucast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_ucast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_ucast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_mcast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_mcast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_mcast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_bcast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_bcast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_bcast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_frame_drops", i); + string += ETH_GSTRING_LEN; + } + bmap >>= 1; + } + + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + sprintf(string, "cq%d_producer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_consumer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_hw_producer_index", + q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_intr", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_poll", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_schedule", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_keep_poll", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_complete", q_num); + string += ETH_GSTRING_LEN; + q_num++; + } + } + + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + sprintf(string, "rxq%d_packets", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_bytes", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_packets_with_error", + q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_allocbuf_failed", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_producer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_consumer_index", q_num); + string += ETH_GSTRING_LEN; + q_num++; + if 
(bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1]->rxq) { + sprintf(string, "rxq%d_packets", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_bytes", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, + "rxq%d_packets_with_error", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_allocbuf_failed", + q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_producer_index", + q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_consumer_index", + q_num); + string += ETH_GSTRING_LEN; + q_num++; + } + } + } + + q_num = 0; + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + for (j = 0; j < bnad->num_txq_per_tx; j++) { + sprintf(string, "txq%d_packets", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "txq%d_bytes", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "txq%d_producer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "txq%d_consumer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "txq%d_hw_consumer_index", + q_num); + string += ETH_GSTRING_LEN; + q_num++; + } + } + + break; + + default: + break; + } + + mutex_unlock(&bnad->conf_mutex); +} + +static int +bnad_get_stats_count_locked(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0; + u32 bmap; + + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + txf_active_num++; + bmap >>= 1; + } + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + rxf_active_num++; + bmap >>= 1; + } + count = BNAD_ETHTOOL_STATS_NUM + + txf_active_num * BNAD_NUM_TXF_COUNTERS + + rxf_active_num * BNAD_NUM_RXF_COUNTERS; + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS; + count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) + count += BNAD_NUM_RXQ_COUNTERS; + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS; + } + return count; +} + +static int +bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) +{ + int i, j; + struct bna_rcb *rcb = NULL; + struct bna_tcb *tcb = NULL; + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { + buf[bi++] = bnad->rx_info[i].rx_ctrl[j]. + ccb->producer_index; + buf[bi++] = 0; /* ccb->consumer_index */ + buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. + ccb->hw_producer_index); + + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_intr_ctr; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_poll_ctr; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_schedule; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_keep_poll; + buf[bi++] = bnad->rx_info[i]. 
+ rx_ctrl[j].rx_complete; + } + } + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb) { + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[0]->rxq) { + rcb = bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[0]; + buf[bi++] = rcb->rxq->rx_packets; + buf[bi++] = rcb->rxq->rx_bytes; + buf[bi++] = rcb->rxq-> + rx_packets_with_error; + buf[bi++] = rcb->rxq-> + rxbuf_alloc_failed; + buf[bi++] = rcb->producer_index; + buf[bi++] = rcb->consumer_index; + } + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1]->rxq) { + rcb = bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]; + buf[bi++] = rcb->rxq->rx_packets; + buf[bi++] = rcb->rxq->rx_bytes; + buf[bi++] = rcb->rxq-> + rx_packets_with_error; + buf[bi++] = rcb->rxq-> + rxbuf_alloc_failed; + buf[bi++] = rcb->producer_index; + buf[bi++] = rcb->consumer_index; + } + } + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + for (j = 0; j < bnad->num_txq_per_tx; j++) + if (bnad->tx_info[i].tcb[j] && + bnad->tx_info[i].tcb[j]->txq) { + tcb = bnad->tx_info[i].tcb[j]; + buf[bi++] = tcb->txq->tx_packets; + buf[bi++] = tcb->txq->tx_bytes; + buf[bi++] = tcb->producer_index; + buf[bi++] = tcb->consumer_index; + buf[bi++] = *(tcb->hw_consumer_index); + } + } + + return bi; +} + +static void +bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, + u64 *buf) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, bi; + unsigned long flags; + struct rtnl_link_stats64 *net_stats64; + u64 *stats64; + u32 bmap; + + mutex_lock(&bnad->conf_mutex); + if (bnad_get_stats_count_locked(netdev) != stats->n_stats) { + mutex_unlock(&bnad->conf_mutex); + return; + } + + /* + * Used bna_lock to sync reads from bna_stats, which is written + * under the same lock + */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bi = 0; + memset(buf, 0, stats->n_stats * sizeof(u64)); + + net_stats64 = (struct rtnl_link_stats64 *)buf; + bnad_netdev_qstats_fill(bnad, net_stats64); + bnad_netdev_hwstats_fill(bnad, net_stats64); + + bi = sizeof(*net_stats64) / sizeof(u64); + + /* Get netif_queue_stopped from stack */ + bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); + + /* Fill driver stats into ethtool buffers */ + stats64 = (u64 *)&bnad->stats.drv_stats; + for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) + buf[bi++] = stats64[i]; + + /* Fill hardware stats excluding the rxf/txf into ethtool bufs */ + stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats; + for (i = 0; + i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / + sizeof(u64); + i++) + buf[bi++] = stats64[i]; + + /* Fill txf stats into ethtool buffers */ + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats64 = (u64 *)&bnad->stats.bna_stats-> + hw_stats.txf_stats[i]; + for (j = 0; j < sizeof(struct bfi_enet_stats_txf) / + sizeof(u64); j++) + buf[bi++] = stats64[j]; + } + bmap >>= 1; + } + + /* Fill rxf stats into ethtool buffers */ + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats64 = (u64 *)&bnad->stats.bna_stats-> + hw_stats.rxf_stats[i]; + for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) / + sizeof(u64); j++) + buf[bi++] = stats64[j]; + } + bmap >>= 1; + } + + /* Fill per Q stats into ethtool buffers */ + bi = bnad_per_q_stats_fill(bnad, buf, bi); + + spin_unlock_irqrestore(&bnad->bna_lock, 
flags); + + mutex_unlock(&bnad->conf_mutex); +} + +static int +bnad_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return bnad_get_stats_count_locked(netdev); + default: + return -EOPNOTSUPP; + } +} + +static struct ethtool_ops bnad_ethtool_ops = { + .get_settings = bnad_get_settings, + .set_settings = bnad_set_settings, + .get_drvinfo = bnad_get_drvinfo, + .get_wol = bnad_get_wol, + .get_link = ethtool_op_get_link, + .get_coalesce = bnad_get_coalesce, + .set_coalesce = bnad_set_coalesce, + .get_ringparam = bnad_get_ringparam, + .set_ringparam = bnad_set_ringparam, + .get_pauseparam = bnad_get_pauseparam, + .set_pauseparam = bnad_set_pauseparam, + .get_strings = bnad_get_strings, + .get_ethtool_stats = bnad_get_ethtool_stats, + .get_sset_count = bnad_get_sset_count +}; + +void +bnad_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops); +} diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h new file mode 100644 index 000000000000..1b3e90dfbd9a --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -0,0 +1,107 @@ +/* + * Linux network driver for Brocade Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2006-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ + +#ifndef __CNA_H__ +#define __CNA_H__ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/if_vlan.h> +#include <linux/if_ether.h> + +#define bfa_sm_fault(__event) do { \ + pr_err("SM Assertion failure: %s: %d: event = %d\n", \ + __FILE__, __LINE__, __event); \ +} while (0) + +extern char bfa_version[]; + +#define CNA_FW_FILE_CT "ctfw.bin" +#define CNA_FW_FILE_CT2 "ct2fw.bin" +#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ + +#pragma pack(1) + +#define MAC_ADDRLEN (6) +typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t; + +#pragma pack() + +#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next)) +#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next) +#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev) + +/* + * bfa_q_qe_init - to initialize a queue element + */ +#define bfa_q_qe_init(_qe) { \ + bfa_q_next(_qe) = (struct list_head *) NULL; \ + bfa_q_prev(_qe) = (struct list_head *) NULL; \ +} + +/* + * bfa_q_deq - dequeue an element from head of the queue + */ +#define bfa_q_deq(_q, _qe) { \ + if (!list_empty(_q)) { \ + (*((struct list_head **) (_qe))) = bfa_q_next(_q); \ + bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ + (struct list_head *) (_q); \ + bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \ + bfa_q_qe_init(*((struct list_head **) _qe)); \ + } else { \ + *((struct list_head **)(_qe)) = NULL; \ + } \ +} + +/* + * bfa_q_deq_tail - dequeue an element from tail of the queue + */ +#define bfa_q_deq_tail(_q, _qe) { \ + if (!list_empty(_q)) { \ + *((struct list_head **) 
(_qe)) = bfa_q_prev(_q); \
+		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
+			(struct list_head *) (_q); \
+		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
+		bfa_q_qe_init(*((struct list_head **) _qe)); \
+	} else { \
+		*((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+	} \
+}
+
+/*
+ * bfa_q_enq_head - enqueue an element at the head of queue
+ */
+#define bfa_q_enq_head(_q, _qe) { \
+	if (!((bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL))) \
+		pr_err("Assertion failure: %s:%d: %d", \
+			__FILE__, __LINE__, \
+			(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
+	bfa_q_next(_qe) = bfa_q_next(_q); \
+	bfa_q_prev(_qe) = (struct list_head *) (_q); \
+	bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe); \
+	bfa_q_next(_q) = (struct list_head *) (_qe); \
+}
+
+#endif /* __CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c new file mode 100644 index 000000000000..725b9fff337f --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -0,0 +1,92 @@ +/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/firmware.h>
+#include "bfi.h"
+#include "cna.h"
+
+const struct firmware *bfi_fw;
+static u32 *bfi_image_ct_cna, *bfi_image_ct2_cna;
+static u32 bfi_image_ct_cna_size, bfi_image_ct2_cna_size;
+
+static u32 *
+cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+			u32 *bfi_image_size, char *fw_name)
+{
+	const struct firmware *fw;
+
+	if (request_firmware(&fw, fw_name, &pdev->dev)) {
+		pr_alert("Can't locate firmware %s\n", fw_name);
+		goto error;
+	}
+
+	*bfi_image = (u32 *)fw->data;
+	*bfi_image_size = fw->size/sizeof(u32);
+	bfi_fw = fw;
+
+	return *bfi_image;
+error:
+	return NULL;
+}
+
+u32 *
+cna_get_firmware_buf(struct pci_dev *pdev)
+{
+	if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
+		if (bfi_image_ct2_cna_size == 0)
+			cna_read_firmware(pdev, &bfi_image_ct2_cna,
+				&bfi_image_ct2_cna_size, CNA_FW_FILE_CT2);
+		return bfi_image_ct2_cna;
+	} else if (bfa_asic_id_ct(pdev->device)) {
+		if (bfi_image_ct_cna_size == 0)
+			cna_read_firmware(pdev, &bfi_image_ct_cna,
+				&bfi_image_ct_cna_size, CNA_FW_FILE_CT);
+		return bfi_image_ct_cna;
+	}
+
+	return NULL;
+}
+
+u32 *
+bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
+{
+	switch (asic_gen) {
+	case BFI_ASIC_GEN_CT:
+		return (u32 *)(bfi_image_ct_cna + off);
+		break;
+	case BFI_ASIC_GEN_CT2:
+		return (u32 *)(bfi_image_ct2_cna + off);
+		break;
+	default:
+		return NULL;
+	}
+}
+
+u32
+bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
+{
+	switch (asic_gen) {
+	case BFI_ASIC_GEN_CT:
+		return bfi_image_ct_cna_size;
+		break;
+	case BFI_ASIC_GEN_CT2:
+		return bfi_image_ct2_cna_size;
+		break;
+	default:
+		return 0;
+	}
+}
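
The ethtool callbacks in bnad_ethtool.c only work if bnad_get_sset_count(), bnad_get_strings() and bnad_get_ethtool_stats() agree on the number and the order of entries; bnad_get_stats_count_locked() is the single source of that count, and bnad_get_ethtool_stats() bails out if the caller's n_stats does not match it. The following is a minimal userspace sketch of that contract, not driver code: the demo_* names, DEMO_GSTRING_LEN and the three-counter layout are illustrative assumptions only.

/*
 * Sketch of the ethtool stats contract: count, string table and value
 * fill must stay in lockstep. demo_* identifiers are illustrative.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_GSTRING_LEN 32		/* stands in for ETH_GSTRING_LEN */

static const char *demo_stats_strings[] = {
	"rx_packets", "tx_packets", "netif_queue_stopped",
};

static int demo_get_sset_count(void)
{
	/* one entry per string; per-queue counters would be added here */
	return sizeof(demo_stats_strings) / sizeof(demo_stats_strings[0]);
}

static void demo_get_strings(char *string)
{
	int i;

	for (i = 0; i < demo_get_sset_count(); i++) {
		/* fixed-width string slots, as ethtool expects */
		strncpy(string, demo_stats_strings[i], DEMO_GSTRING_LEN - 1);
		string[DEMO_GSTRING_LEN - 1] = '\0';
		string += DEMO_GSTRING_LEN;
	}
}

static void demo_get_stats(unsigned long long *buf)
{
	/* values must be written in exactly the same order as the strings */
	buf[0] = 1234;	/* rx_packets */
	buf[1] = 5678;	/* tx_packets */
	buf[2] = 0;	/* netif_queue_stopped */
}

int main(void)
{
	char strings[3 * DEMO_GSTRING_LEN];
	unsigned long long vals[3];
	int i, n = demo_get_sset_count();

	demo_get_strings(strings);
	demo_get_stats(vals);
	for (i = 0; i < n; i++)
		printf("%-32s %llu\n", &strings[i * DEMO_GSTRING_LEN], vals[i]);
	return 0;
}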
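
cna_fwimg.c caches the requested firmware image as an array of u32 words: cna_read_firmware() stores fw->size divided by sizeof(u32), so bfa_cb_image_get_chunk() takes its offset in words rather than bytes and bfa_cb_image_get_size() reports a word count. A small userspace sketch of that word-offset addressing follows; the sample_* identifiers are illustrative assumptions, not driver symbols.

/*
 * Sketch of word-offset addressing: the image is held as u32 words,
 * so offsets and sizes count 32-bit words, not bytes.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sample_image[] = { 0xdeadbeef, 0xcafef00d, 0x12345678 };
static uint32_t sample_image_size =
	sizeof(sample_image) / sizeof(uint32_t);	/* size in words */

static uint32_t *sample_get_chunk(uint32_t off)
{
	/* pointer arithmetic on a u32 pointer advances in 4-byte steps */
	return sample_image + off;
}

int main(void)
{
	uint32_t off;

	for (off = 0; off < sample_image_size; off++)
		printf("word %" PRIu32 " = 0x%08" PRIx32 "\n",
		       off, *sample_get_chunk(off));
	return 0;
}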