Diffstat (limited to 'drivers/scsi/lpfc')
28 files changed, 5145 insertions, 469 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 17028861234b..befeb7c34290 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -114,6 +114,12 @@ struct lpfc_sli2_slim;
 #define LPFC_MBX_NO_WAIT	0
 #define LPFC_MBX_WAIT		1
 
+#define LPFC_CFG_PARAM_MAGIC_NUM	0xFEAA0005
+#define LPFC_PORT_CFG_NAME		"/cfg/port.cfg"
+
+#define lpfc_rangecheck(val, min, max) \
+	((uint)(val) >= (uint)(min) && (val) <= (max))
+
 enum lpfc_polling_flags {
 	ENABLE_FCP_RING_POLLING = 0x1,
 	DISABLE_FCP_RING_INT    = 0x2
@@ -403,6 +409,160 @@ struct lpfc_trunk_link {
 		     link3;
 };
 
+/* Format of congestion module parameters */
+struct lpfc_cgn_param {
+	uint32_t cgn_param_magic;
+	uint8_t  cgn_param_version;	/* version 1 */
+	uint8_t  cgn_param_mode;	/* 0=off 1=managed 2=monitor only */
+#define LPFC_CFG_OFF		0
+#define LPFC_CFG_MANAGED	1
+#define LPFC_CFG_MONITOR	2
+	uint8_t  cgn_rsvd1;
+	uint8_t  cgn_rsvd2;
+	uint8_t  cgn_param_level0;
+	uint8_t  cgn_param_level1;
+	uint8_t  cgn_param_level2;
+	uint8_t  byte11;
+	uint8_t  byte12;
+	uint8_t  byte13;
+	uint8_t  byte14;
+	uint8_t  byte15;
+};
+
+/* Max number of days of congestion data */
+#define LPFC_MAX_CGN_DAYS	10
+
+/* Format of congestion buffer info
+ * This structure defines memory that's allocated and registered with
+ * the HBA firmware. When adding or removing fields from this structure
+ * the alignment must match the HBA firmware.
+ */
+
+struct lpfc_cgn_info {
+	/* Header */
+	__le16   cgn_info_size;		/* is sizeof(struct lpfc_cgn_info) */
+	uint8_t  cgn_info_version;	/* represents format of structure */
+#define LPFC_CGN_INFO_V1	1
+#define LPFC_CGN_INFO_V2	2
+#define LPFC_CGN_INFO_V3	3
+	uint8_t  cgn_info_mode;		/* 0=off 1=managed 2=monitor only */
+	uint8_t  cgn_info_detect;
+	uint8_t  cgn_info_action;
+	uint8_t  cgn_info_level0;
+	uint8_t  cgn_info_level1;
+	uint8_t  cgn_info_level2;
+
+	/* Start Time */
+	uint8_t  cgn_info_month;
+	uint8_t  cgn_info_day;
+	uint8_t  cgn_info_year;
+	uint8_t  cgn_info_hour;
+	uint8_t  cgn_info_minute;
+	uint8_t  cgn_info_second;
+
+	/* minute / hours / daily indices */
+	uint8_t  cgn_index_minute;
+	uint8_t  cgn_index_hour;
+	uint8_t  cgn_index_day;
+
+	__le16   cgn_warn_freq;
+	__le16   cgn_alarm_freq;
+	__le16   cgn_lunq;
+	uint8_t  cgn_pad1[8];
+
+	/* Driver Information */
+	__le16   cgn_drvr_min[60];
+	__le32   cgn_drvr_hr[24];
+	__le32   cgn_drvr_day[LPFC_MAX_CGN_DAYS];
+
+	/* Congestion Warnings */
+	__le16   cgn_warn_min[60];
+	__le32   cgn_warn_hr[24];
+	__le32   cgn_warn_day[LPFC_MAX_CGN_DAYS];
+
+	/* Latency Information */
+	__le32   cgn_latency_min[60];
+	__le32   cgn_latency_hr[24];
+	__le32   cgn_latency_day[LPFC_MAX_CGN_DAYS];
+
+	/* Bandwidth Information */
+	__le16   cgn_bw_min[60];
+	__le16   cgn_bw_hr[24];
+	__le16   cgn_bw_day[LPFC_MAX_CGN_DAYS];
+
+	/* Congestion Alarms */
+	__le16   cgn_alarm_min[60];
+	__le32   cgn_alarm_hr[24];
+	__le32   cgn_alarm_day[LPFC_MAX_CGN_DAYS];
+
+	/* Start of congestion statistics */
+	uint8_t  cgn_stat_npm;		/* Notifications per minute */
+
+	/* Start Time */
+	uint8_t  cgn_stat_month;
+	uint8_t  cgn_stat_day;
+	uint8_t  cgn_stat_year;
+	uint8_t  cgn_stat_hour;
+	uint8_t  cgn_stat_minute;
+	uint8_t  cgn_pad2[2];
+
+	__le32   cgn_notification;
+	__le32   cgn_peer_notification;
+	__le32   link_integ_notification;
+	__le32   delivery_notification;
+
+	uint8_t  cgn_stat_cgn_month;	/* Last congestion notification FPIN */
+	uint8_t  cgn_stat_cgn_day;
+	uint8_t  cgn_stat_cgn_year;
+	uint8_t  cgn_stat_cgn_hour;
+	uint8_t  cgn_stat_cgn_min;
+	uint8_t  cgn_stat_cgn_sec;
+
+	uint8_t  cgn_stat_peer_month;	/* Last peer congestion FPIN */
+	uint8_t  cgn_stat_peer_day;
+	uint8_t  cgn_stat_peer_year;
+	uint8_t  cgn_stat_peer_hour;
+	uint8_t  cgn_stat_peer_min;
+	uint8_t  cgn_stat_peer_sec;
+
+	uint8_t  cgn_stat_lnk_month;	/* Last link integrity FPIN */
+	uint8_t  cgn_stat_lnk_day;
+	uint8_t  cgn_stat_lnk_year;
+	uint8_t  cgn_stat_lnk_hour;
+	uint8_t  cgn_stat_lnk_min;
+	uint8_t  cgn_stat_lnk_sec;
+
+	uint8_t  cgn_stat_del_month;	/* Last delivery notification FPIN */
+	uint8_t  cgn_stat_del_day;
+	uint8_t  cgn_stat_del_year;
+	uint8_t  cgn_stat_del_hour;
+	uint8_t  cgn_stat_del_min;
+	uint8_t  cgn_stat_del_sec;
+#define LPFC_CGN_STAT_SIZE	48
+#define LPFC_CGN_DATA_SIZE	(sizeof(struct lpfc_cgn_info) -  \
+				LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
+
+	__le32   cgn_info_crc;
+#define LPFC_CGN_CRC32_MAGIC_NUMBER	0x1EDC6F41
+#define LPFC_CGN_CRC32_SEED		0xFFFFFFFF
+};
+
+#define LPFC_CGN_INFO_SZ	(sizeof(struct lpfc_cgn_info) -  \
+				sizeof(uint32_t))
+
+struct lpfc_cgn_stat {
+	atomic64_t total_bytes;
+	atomic64_t rcv_bytes;
+	atomic64_t rx_latency;
+#define LPFC_CGN_NOT_SENT	0xFFFFFFFFFFFFFFFFLL
+	atomic_t rx_io_cnt;
+};
+
+struct lpfc_cgn_acqe_stat {
+	atomic64_t alarm;
+	atomic64_t warn;
+};
+
 struct lpfc_vport {
 	struct lpfc_hba *phba;
 	struct list_head listentry;
@@ -869,7 +1029,10 @@ struct lpfc_hba {
 					 * capability
 					 */
 #define HBA_FLOGI_ISSUED	0x100000 /* FLOGI was issued */
+#define HBA_CGN_RSVD1		0x200000 /* Reserved CGN flag */
+#define HBA_CGN_DAY_WRAP	0x400000 /* HBA Congestion info day wraps */
 #define HBA_DEFER_FLOGI		0x800000 /* Defer FLOGI till read_sparm cmpl */
+#define HBA_SETUP		0x1000000 /* Signifies HBA setup is completed */
 #define HBA_NEEDS_CFG_PORT	0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
 #define HBA_HBEAT_INP		0x4000000 /* mbox HBEAT is in progress */
 #define HBA_HBEAT_TMO		0x8000000 /* HBEAT initiated after timeout */
@@ -922,7 +1085,6 @@ struct lpfc_hba {
 	uint8_t  wwpn[8];
 	uint32_t RandomData[7];
 	uint8_t  fcp_embed_io;
-	uint8_t  nvme_support;	/* Firmware supports NVME */
 	uint8_t  nvmet_support;	/* driver supports NVMET */
 #define LPFC_NVMET_MAX_PORTS	32
 	uint8_t  mds_diags_support;
@@ -1121,6 +1283,7 @@ struct lpfc_hba {
 	uint32_t total_iocbq_bufs;
 	struct list_head active_rrq_list;
 	spinlock_t hbalock;
+	struct work_struct unblock_request_work; /* SCSI layer unblock IOs */
 
 	/* dma_mem_pools */
 	struct dma_pool *lpfc_sg_dma_buf_pool;
@@ -1194,6 +1357,8 @@ struct lpfc_hba {
 #ifdef LPFC_HDWQ_LOCK_STAT
 	struct dentry *debug_lockstat;
 #endif
+	struct dentry *debug_cgn_buffer;
+	struct dentry *debug_rx_monitor;
 	struct dentry *debug_ras_log;
 	atomic_t nvmeio_trc_cnt;
 	uint32_t nvmeio_trc_size;
@@ -1344,6 +1509,76 @@ struct lpfc_hba {
 	uint64_t ktime_seg10_min;
 	uint64_t ktime_seg10_max;
 #endif
+	/* CMF objects */
+	struct lpfc_cgn_stat __percpu *cmf_stat;
+	uint32_t cmf_interval_rate;	/* timer interval limit in ms */
+	uint32_t cmf_timer_cnt;
+#define LPFC_CMF_INTERVAL 90
+	uint64_t cmf_link_byte_count;
+	uint64_t cmf_max_line_rate;
+	uint64_t cmf_max_bytes_per_interval;
+	uint64_t cmf_last_sync_bw;
+#define  LPFC_CMF_BLK_SIZE 512
+	struct hrtimer cmf_timer;
+	atomic_t cmf_bw_wait;
+	atomic_t cmf_busy;
+	atomic_t cmf_stop_io;		/* To block request and stop IO's */
+	uint32_t cmf_active_mode;
+	uint32_t cmf_info_per_interval;
+#define LPFC_MAX_CMF_INFO 32
+	struct timespec64 cmf_latency;	/* Interval congestion timestamp */
+	uint32_t cmf_last_ts;		/* Interval congestion time (ms) */
+	uint32_t cmf_active_info;
+
+	/* Signal / FPIN handling for Congestion Mgmt */
+	u8 cgn_reg_fpin;		/* Negotiated value from RDF */
+	u8 cgn_init_reg_fpin;		/* Initial value from READ_CONFIG */
+#define LPFC_CGN_FPIN_NONE	0x0
+#define LPFC_CGN_FPIN_WARN	0x1
+#define LPFC_CGN_FPIN_ALARM	0x2
+#define LPFC_CGN_FPIN_BOTH	(LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM)
+
+	u8 cgn_reg_signal;		/* Negotiated value from EDC */
+	u8 cgn_init_reg_signal;		/* Initial value from READ_CONFIG */
+	/* cgn_reg_signal and cgn_init_reg_signal use
+	 * enum fc_edc_cg_signal_cap_types
+	 */
+	u16 cgn_fpin_frequency;
+#define LPFC_FPIN_INIT_FREQ	0xffff
+	u32 cgn_sig_freq;
+	u32 cgn_acqe_cnt;
+
+	/* RX monitor handling for CMF */
+	struct rxtable_entry *rxtable;	/* RX_monitor information */
+	atomic_t rxtable_idx_head;
+#define LPFC_RXMONITOR_TABLE_IN_USE	(LPFC_MAX_RXMONITOR_ENTRY + 73)
+	atomic_t rxtable_idx_tail;
+	atomic_t rx_max_read_cnt;	/* Maximum read bytes */
+	uint64_t rx_block_cnt;
+
+	/* Congestion parameters from flash */
+	struct lpfc_cgn_param cgn_p;
+
+	/* Statistics counter for ACQE cgn alarms and warnings */
+	struct lpfc_cgn_acqe_stat cgn_acqe_stat;
+
+	/* Congestion buffer information */
+	struct lpfc_dmabuf *cgn_i;	/* Congestion Info buffer */
+	atomic_t cgn_fabric_warn_cnt;	/* Total warning cgn events for info */
+	atomic_t cgn_fabric_alarm_cnt;	/* Total alarm cgn events for info */
+	atomic_t cgn_sync_warn_cnt;	/* Total warning events for SYNC wqe */
+	atomic_t cgn_sync_alarm_cnt;	/* Total alarm events for SYNC wqe */
+	atomic_t cgn_driver_evt_cnt;	/* Total driver cgn events for fmw */
+	atomic_t cgn_latency_evt_cnt;
+	struct timespec64 cgn_daily_ts;
+	atomic64_t cgn_latency_evt;	/* Avg latency per minute */
+	unsigned long cgn_evt_timestamp;
+#define LPFC_CGN_TIMER_TO_MIN	60000	/* ms in a minute */
+	uint32_t cgn_evt_minute;
+#define LPFC_SEC_MIN		60
+#define LPFC_MIN_HOUR		60
+#define LPFC_HOUR_DAY		24
+#define LPFC_MIN_DAY		(LPFC_MIN_HOUR * LPFC_HOUR_DAY)
 
 	struct hlist_node cpuhp;	/* used for cpuhp per hba callback */
 	struct timer_list cpuhp_poll_timer;
@@ -1364,6 +1599,22 @@ struct lpfc_hba {
 	struct dbg_log_ent dbg_log[DBG_LOG_SZ];
 };
 
+#define LPFC_MAX_RXMONITOR_ENTRY	800
+#define LPFC_MAX_RXMONITOR_DUMP		32
+struct rxtable_entry {
+	uint64_t total_bytes;	/* Total no of read bytes requested */
+	uint64_t rcv_bytes;	/* Total no of read bytes completed */
+	uint64_t avg_io_size;
+	uint64_t avg_io_latency;/* Average io latency in microseconds */
+	uint64_t max_read_cnt;	/* Maximum read bytes */
+	uint64_t max_bytes_per_interval;
+	uint32_t cmf_busy;
+	uint32_t cmf_info;	/* CMF_SYNC_WQE info */
+	uint32_t io_cnt;
+	uint32_t timer_utilization;
+	uint32_t timer_interval;
+};
+
 static inline struct Scsi_Host *
 lpfc_shost_from_vport(struct lpfc_vport *vport)
 {
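A note on the congestion buffer checksum defined above: LPFC_CGN_CRC32_MAGIC_NUMBER (0x1EDC6F41) is the CRC-32C (Castagnoli) polynomial, and LPFC_CGN_INFO_SZ deliberately excludes the trailing cgn_info_crc word, so the CRC covers everything that precedes it. A minimal sketch of such a checksum follows; it is illustrative only (the driver's own helper, lpfc_cgn_calc_crc32(), may differ in bit ordering and final inversion), cgn_crc32c is a hypothetical name, and 0x82F63B78 is the bit-reflected form of 0x1EDC6F41.

	/* Hedged sketch, not the driver's code: bit-reflected CRC-32C. */
	static uint32_t cgn_crc32c(const uint8_t *p, size_t len, uint32_t crc)
	{
		size_t i;
		int bit;

		for (i = 0; i < len; i++) {
			crc ^= p[i];
			for (bit = 0; bit < 8; bit++)
				/* 0x82F63B78 = reflected 0x1EDC6F41 polynomial */
				crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
		}
		return crc;
	}

	/* Usage against the layout above, with cp pointing at a lpfc_cgn_info:
	 *	crc = cgn_crc32c((const uint8_t *)cp, LPFC_CGN_INFO_SZ,
	 *			 LPFC_CGN_CRC32_SEED);
	 */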
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index eb88aaaf36eb..b35bf70a8c0d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -57,6 +57,8 @@
 #define LPFC_MIN_DEVLOSS_TMO	1
 #define LPFC_MAX_DEVLOSS_TMO	255
 
+#define LPFC_MAX_INFO_TMP_LEN	100
+#define LPFC_INFO_MORE_STR	"\nCould be more info...\n"
 /*
  * Write key size should be multiple of 4. If write key is changed
  * make sure that library write key is also changed.
@@ -112,6 +114,186 @@ lpfc_jedec_to_ascii(int incr, char hdw[])
 	return;
 }
 
+static ssize_t
+lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_cgn_info *cp = NULL;
+	struct lpfc_cgn_stat *cgs;
+	int len = 0;
+	int cpu;
+	u64 rcv, total;
+	char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
+
+	if (phba->cgn_i)
+		cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+	scnprintf(tmp, sizeof(tmp),
+		  "Congestion Mgmt Info: E2Eattr %d Ver %d "
+		  "CMF %d cnt %d\n",
+		  phba->sli4_hba.pc_sli4_params.mi_ver,
+		  cp ? cp->cgn_info_version : 0,
+		  phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);
+
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	if (!phba->sli4_hba.pc_sli4_params.cmf)
+		goto buffer_done;
+
+	switch (phba->cgn_init_reg_signal) {
+	case EDC_CG_SIG_WARN_ONLY:
+		scnprintf(tmp, sizeof(tmp),
+			  "Register: Init:  Signal:WARN  ");
+		break;
+	case EDC_CG_SIG_WARN_ALARM:
+		scnprintf(tmp, sizeof(tmp),
+			  "Register: Init:  Signal:WARN|ALARM  ");
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp),
+			  "Register: Init:  Signal:NONE  ");
+		break;
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	switch (phba->cgn_init_reg_fpin) {
+	case LPFC_CGN_FPIN_WARN:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:WARN\n");
+		break;
+	case LPFC_CGN_FPIN_ALARM:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:ALARM\n");
+		break;
+	case LPFC_CGN_FPIN_BOTH:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:WARN|ALARM\n");
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:NONE\n");
+		break;
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	switch (phba->cgn_reg_signal) {
+	case EDC_CG_SIG_WARN_ONLY:
+		scnprintf(tmp, sizeof(tmp),
+			  "  Current:  Signal:WARN  ");
+		break;
+	case EDC_CG_SIG_WARN_ALARM:
+		scnprintf(tmp, sizeof(tmp),
+			  "  Current:  Signal:WARN|ALARM  ");
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp),
+			  "  Current:  Signal:NONE  ");
+		break;
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	switch (phba->cgn_reg_fpin) {
+	case LPFC_CGN_FPIN_WARN:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:WARN  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+		break;
+	case LPFC_CGN_FPIN_ALARM:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:ALARM  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+		break;
+	case LPFC_CGN_FPIN_BOTH:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:WARN|ALARM  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:NONE  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+		break;
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
+		switch (phba->cmf_active_mode) {
+		case LPFC_CFG_OFF:
+			scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
+			break;
+		case LPFC_CFG_MANAGED:
+			scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
+			break;
+		case LPFC_CFG_MONITOR:
+			scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
+			break;
+		default:
+			scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
+		}
+		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+			goto buffer_done;
+	}
+
+	switch (phba->cgn_p.cgn_param_mode) {
+	case LPFC_CFG_OFF:
+		scnprintf(tmp, sizeof(tmp), "Config: Mode:Off  ");
+		break;
+	case LPFC_CFG_MANAGED:
+		scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed ");
+		break;
+	case LPFC_CFG_MONITOR:
+		scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor ");
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown ");
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	total = 0;
+	rcv = 0;
+	for_each_present_cpu(cpu) {
+		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+		total += atomic64_read(&cgs->total_bytes);
+		rcv += atomic64_read(&cgs->rcv_bytes);
+	}
+
+	scnprintf(tmp, sizeof(tmp),
+		  "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
+		  atomic_read(&phba->cmf_busy),
+		  phba->cmf_active_info, rcv, total);
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	scnprintf(tmp, sizeof(tmp),
+		  "Port_speed:%d  Link_byte_cnt:%ld  "
+		  "Max_byte_per_interval:%ld\n",
+		  lpfc_sli_port_speed_get(phba),
+		  (unsigned long)phba->cmf_link_byte_count,
+		  (unsigned long)phba->cmf_max_bytes_per_interval);
+	strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+	len = strnlen(buf, PAGE_SIZE);
+
+	if (unlikely(len >= (PAGE_SIZE - 1))) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+				"6312 Catching potential buffer "
+				"overflow > PAGE_SIZE = %lu bytes\n",
+				PAGE_SIZE);
+		strscpy(buf + PAGE_SIZE - 1 -
+			strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
+			LPFC_INFO_MORE_STR,
+			strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1) + 1);
+	}
+	return len;
+}
+
 /**
  * lpfc_drvr_version_show - Return the Emulex driver string with version number
  * @dev: class unused variable.
@@ -168,7 +350,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	char *statep;
 	int i;
 	int len = 0;
-	char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
+	char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
 
 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
 		len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -512,9 +694,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			"6314 Catching potential buffer "
 			"overflow > PAGE_SIZE = %lu bytes\n",
 			PAGE_SIZE);
-		strlcpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_NVME_INFO_MORE_STR),
-			LPFC_NVME_INFO_MORE_STR,
-			sizeof(LPFC_NVME_INFO_MORE_STR) + 1);
+		strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+			LPFC_INFO_MORE_STR,
+			sizeof(LPFC_INFO_MORE_STR) + 1);
 	}
 
 	return len;
@@ -2248,11 +2430,6 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
 	return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
 }
 
-static inline bool lpfc_rangecheck(uint val, uint min, uint max)
-{
-	return val >= min && val <= max;
-}
-
 /**
  * lpfc_enable_bbcr_set: Sets an attribute value.
  * @phba: pointer to the adapter structure.
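lpfc_cmf_info_show() above follows a common sysfs pattern: format each fragment into a small bounded scratch buffer, append it with strlcat(), and stop as soon as strlcat()'s return value (the total length it tried to create) reaches PAGE_SIZE. It also relies on sysfs handing .show() a zeroed page. A stripped-down sketch of the same pattern, where example_show and the fragment text are hypothetical:

	static ssize_t
	example_show(struct device *dev, struct device_attribute *attr, char *buf)
	{
		char tmp[100];
		int i;

		for (i = 0; i < 4; i++) {
			/* Format one bounded fragment at a time. */
			scnprintf(tmp, sizeof(tmp), "fragment %d\n", i);

			/* strlcat() returns the length it tried to build, so a
			 * value >= PAGE_SIZE means the page is full: stop early.
			 */
			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
				break;
		}
		return strnlen(buf, PAGE_SIZE);
	}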
@@ -2641,6 +2818,7 @@ static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
 		   NULL);
+static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 #define WWN_SZ 8
@@ -4038,6 +4216,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 	const char *val_buf = buf;
 	int err;
 	uint32_t prev_val;
+	u8 sli_family, if_type;
 
 	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
 		nolip = 1;
@@ -4061,13 +4240,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 		/*
 		 * The 'topology' is not a configurable parameter if :
 		 *   - persistent topology enabled
-		 *   - G7/G6 with no private loop support
+		 *   - ASIC_GEN_NUM >= 0xC, with no private loop support
 		 */
-
+		sli_family = bf_get(lpfc_sli_intf_sli_family,
+				    &phba->sli4_hba.sli_intf);
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
 		if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
-		    (!phba->sli4_hba.pc_sli4_params.pls &&
-		     (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
-		     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
+		     (!phba->sli4_hba.pc_sli4_params.pls &&
+		     (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+		      if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
 		    val == 4) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 				"3114 Loop mode not supported\n");
@@ -5412,9 +5594,9 @@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
 
 /*
 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
-# is [0,1]. Default value is 0.
+# is [0,1]. Default value is 1.
 */
-LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
 		   "Use ADISC on rediscovery to authenticate FCP devices");
 
 /*
@@ -6146,6 +6328,19 @@ LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
 */
 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
 
+/* Signaling module parameters */
+int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+module_param(lpfc_fabric_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
+
+int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
+
+int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
+module_param(lpfc_use_cgn_signal, int, 0444);
+MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
+
 /*
  * lpfc_enable_dpp: Enable DPP on G7
  * 0  = DPP on G7 disabled
@@ -6320,6 +6515,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_enable_bbcr,
 	&dev_attr_lpfc_enable_dpp,
 	&dev_attr_lpfc_enable_mi,
+	&dev_attr_cmf_info,
 	&dev_attr_lpfc_max_vmid,
 	&dev_attr_lpfc_vmid_inactivity_timeout,
 	&dev_attr_lpfc_vmid_app_header,
@@ -6350,6 +6546,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
 	&dev_attr_lpfc_static_vport,
+	&dev_attr_cmf_info,
 	NULL,
 };
 
@@ -6741,6 +6938,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
 	case LPFC_LINK_SPEED_128GHZ:
 		fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
 		break;
+	case LPFC_LINK_SPEED_256GHZ:
+		fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
+		break;
 	default:
 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 		break;
@@ -6908,6 +7108,9 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	hs->error_frames = pmb->un.varRdLnk.crcCnt;
+	hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn);
+	hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm);
+
 	hs->link_failure_count -= lso->link_failure_count;
 	hs->loss_of_sync_count -= lso->loss_of_sync_count;
 	hs->loss_of_signal_count -= lso->loss_of_signal_count;
@@ -7019,6 +7222,12 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	else
 		lso->link_events = (phba->fc_eventTag >> 1);
 
+	atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+	atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+
+	memset(&shost_to_fc_host(shost)->fpin_stats, 0,
+	       sizeof(shost_to_fc_host(shost)->fpin_stats));
+
 	psli->stats_start = ktime_get_seconds();
 
 	mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -7452,6 +7661,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
 	lpfc_enable_mi_init(phba, lpfc_enable_mi);
 
+	phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
+	phba->cmf_active_mode = LPFC_CFG_OFF;
+	if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
+	    lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
+		lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+
 	if (phba->sli_rev != LPFC_SLI_REV4) {
 		/* NVME only supported on SLI4 */
 		phba->nvmet_support = 0;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 38cfe1bc6a4d..fdf08cb57207 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5751,6 +5751,92 @@ job_error:
 
 }
 
+static int
+lpfc_get_cgnbuf_info(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct get_cgnbuf_info_req *cgnbuf_req;
+	struct lpfc_cgn_info *cp;
+	uint8_t *cgn_buff;
+	int size, cinfosz;
+	int rc = 0;
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct get_cgnbuf_info_req)) {
+		rc = -ENOMEM;
+		goto job_exit;
+	}
+
+	if (!phba->sli4_hba.pc_sli4_params.cmf) {
+		rc = -ENOENT;
+		goto job_exit;
+	}
+
+	if (!phba->cgn_i || !phba->cgn_i->virt) {
+		rc = -ENOENT;
+		goto job_exit;
+	}
+
+	cp = phba->cgn_i->virt;
+	if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
+		rc = -EPERM;
+		goto job_exit;
+	}
+
+	cgnbuf_req = (struct get_cgnbuf_info_req *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+	/* For reset or size == 0 */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
+		lpfc_init_congestion_stat(phba);
+		goto job_exit;
+	}
+
+	/* We don't want to include the CRC at the end */
+	cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
+
+	size = cgnbuf_req->read_size;
+	if (!size)
+		goto job_exit;
+
+	if (size < cinfosz) {
+		/* Just copy back what we can */
+		cinfosz = size;
+		rc = -E2BIG;
+	}
+
+	/* Allocate memory to read congestion info */
+	cgn_buff = vmalloc(cinfosz);
+	if (!cgn_buff) {
+		rc = -ENOMEM;
+		goto job_exit;
+	}
+
+	memcpy(cgn_buff, cp, cinfosz);
+
+	bsg_reply->reply_payload_rcv_len =
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				    job->reply_payload.sg_cnt,
+				    cgn_buff, cinfosz);
+
+	vfree(cgn_buff);
+
+job_exit:
+	bsg_reply->result = rc;
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+	else
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2724 GET CGNBUF error: %d\n", rc);
+	return rc;
+}
+
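For context, a vendor command like this is reached from user space through the FC transport's bsg node using an SG_IO v4 request whose payload is the get_cgnbuf_info_req defined in lpfc_bsg.h below. The following is a hedged sketch, not part of this patch: the /dev/bsg/fc_host0 path, the buffer size, and the assumption that lpfc dispatches purely on vendor_cmd[0] (ignoring vendor_id) are all assumptions here.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>		/* struct sg_io_v4, BSG_PROTOCOL_SCSI */
	#include <scsi/sg.h>		/* SG_IO */
	#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

	int main(void)
	{
		/* command / read_size / reset, per struct get_cgnbuf_info_req */
		uint32_t vcmd[3] = { 21 /* LPFC_BSG_VENDOR_GET_CGNBUF_INFO */,
				     8192, 0 /* no stat reset */ };
		uint8_t reqbuf[sizeof(struct fc_bsg_request) + sizeof(vcmd)];
		struct fc_bsg_request *req = (struct fc_bsg_request *)reqbuf;
		struct fc_bsg_reply rsp;
		uint8_t cgn[8192];	/* >= sizeof(struct lpfc_cgn_info) */
		struct sg_io_v4 io;
		int fd = open("/dev/bsg/fc_host0", O_RDWR);	/* assumed path */

		if (fd < 0)
			return 1;

		memset(reqbuf, 0, sizeof(reqbuf));
		req->msgcode = FC_BSG_HST_VENDOR;
		/* lpfc_bsg_hst_vendor() dispatches on vendor_cmd[0]. */
		memcpy(req->rqst_data.h_vendor.vendor_cmd, vcmd, sizeof(vcmd));

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)reqbuf;
		io.request_len = sizeof(reqbuf);
		io.response = (uintptr_t)&rsp;
		io.max_response_len = sizeof(rsp);
		io.din_xferp = (uintptr_t)cgn;	/* congestion buffer comes back here */
		io.din_xfer_len = sizeof(cgn);

		if (ioctl(fd, SG_IO, &io) < 0)
			return 1;
		printf("congestion buffer: %u bytes returned\n",
		       rsp.reply_payload_rcv_len);
		return 0;
	}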
 /**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
@@ -5813,6 +5899,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
 	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
 		rc = lpfc_get_trunk_info(job);
 		break;
+	case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
+		rc = lpfc_get_cgnbuf_info(job);
+		break;
 	default:
 		rc = -EINVAL;
 		bsg_reply->reply_payload_rcv_len = 0;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 2dc71243775d..749d6c43cfce 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2010-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -43,6 +43,7 @@
 #define LPFC_BSG_VENDOR_RAS_GET_CONFIG		18
 #define LPFC_BSG_VENDOR_RAS_SET_CONFIG		19
 #define LPFC_BSG_VENDOR_GET_TRUNK_INFO		20
+#define LPFC_BSG_VENDOR_GET_CGNBUF_INFO		21
 
 struct set_ct_event {
 	uint32_t command;
@@ -386,6 +387,13 @@ struct get_trunk_info_req {
 	uint32_t command;
 };
 
+struct get_cgnbuf_info_req {
+	uint32_t command;
+	uint32_t read_size;
+	uint32_t reset;
+#define LPFC_BSG_CGN_RESET_STAT		1
+};
+
 /* driver only */
 #define SLI_CONFIG_NOT_HANDLED		0
 #define SLI_CONFIG_HANDLED		1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 737483c3f01d..c512f4199142 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -58,6 +58,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
 			   uint16_t, uint16_t, bool);
 int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_reg_congestion_buf(struct lpfc_hba *phba);
+int lpfc_unreg_congestion_buf(struct lpfc_hba *phba);
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
 void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -74,6 +76,20 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
 void lpfc_free_iocb_list(struct lpfc_hba *phba);
 int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 			struct lpfc_queue *drq, int count, int idx);
+uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
+void lpfc_cmf_signal_init(struct lpfc_hba *phba);
+void lpfc_cmf_start(struct lpfc_hba *phba);
+void lpfc_cmf_stop(struct lpfc_hba *phba);
+void lpfc_init_congestion_stat(struct lpfc_hba *phba);
+void lpfc_init_congestion_buf(struct lpfc_hba *phba);
+int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba);
+uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed);
+int lpfc_config_cgn_signal(struct lpfc_hba *phba);
+int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total);
+void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
+void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
+void lpfc_unblock_requests(struct lpfc_hba *phba);
+void lpfc_block_requests(struct lpfc_hba *phba);
 
 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -87,6 +103,8 @@ void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
+void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
 void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_set_disctmo(struct lpfc_vport *);
 int lpfc_can_disctmo(struct lpfc_vport *);
@@ -141,6 +159,8 @@ int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry);
 int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
 int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry);
+int lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry);
+void lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -213,6 +233,9 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
 
+int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
+		     uint32_t len);
+
 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
 int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
 void lpfc_sli4_poll_hbtimer(struct timer_list *t);
@@ -459,6 +482,9 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
 void lpfc_create_static_vport(struct lpfc_hba *);
 void lpfc_stop_hba_timers(struct lpfc_hba *);
 void lpfc_stop_port(struct lpfc_hba *);
+int lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t sz);
+int lpfc_update_cmf_cmpl(struct lpfc_hba *phba, uint64_t val, uint32_t sz,
+			 struct Scsi_Host *shost);
 void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
 void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
 void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
@@ -605,6 +631,10 @@ extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
 extern int lpfc_no_hba_reset_cnt;
 extern unsigned long lpfc_no_hba_reset[];
+extern int lpfc_acqe_cgn_frequency;
+extern int lpfc_fabric_cgn_frequency;
+extern int lpfc_use_cgn_signal;
+
 extern union lpfc_wqe128 lpfc_iread_cmd_template;
 extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
 extern union lpfc_wqe128 lpfc_icmnd_cmd_template;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 610b6dabb3b5..dfcb7d4bd7fa 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2288,6 +2288,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		/* No retry on Vendor, RPA only done on physical port */
 		if (phba->link_flag & LS_CT_VEN_RPA) {
 			phba->link_flag &= ~LS_CT_VEN_RPA;
+			if (phba->cmf_active_mode == LPFC_CFG_OFF)
+				return;
 			lpfc_printf_log(phba, KERN_ERR,
 					LOG_DISCOVERY | LOG_ELS,
 					"6460 VEN FDMI RPA failure\n");
@@ -2332,24 +2334,29 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		break;
 	case SLI_MGMT_RPA:
 		if (vport->port_type == LPFC_PHYSICAL_PORT &&
-		    phba->cfg_enable_mi &&
-		    phba->sli4_hba.pc_sli4_params.mi_ver > LPFC_MIB1_SUPPORT) {
+		    phba->sli4_hba.pc_sli4_params.mi_ver) {
 			/* mi is only for the physical port, no vports */
 			if (phba->link_flag & LS_CT_VEN_RPA) {
 				lpfc_printf_vlog(vport, KERN_INFO,
-						 LOG_DISCOVERY | LOG_ELS,
+						 LOG_DISCOVERY | LOG_ELS |
+						 LOG_CGN_MGMT,
 						 "6449 VEN RPA FDMI Success\n");
 				phba->link_flag &= ~LS_CT_VEN_RPA;
 				break;
 			}
 
+			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+					"6210 Issue Vendor MI FDMI %x\n",
Issue Vendor MI FDMI %x\n", + phba->sli4_hba.pc_sli4_params.mi_ver); + + /* CGN is only for the physical port, no vports */ if (lpfc_fdmi_cmd(vport, ndlp, cmd, LPFC_FDMI_VENDOR_ATTR_mi) == 0) phba->link_flag |= LS_CT_VEN_RPA; lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "6458 Send MI FDMI:%x Flag x%x\n", - phba->sli4_hba.pc_sli4_params.mi_value, + phba->sli4_hba.pc_sli4_params.mi_ver, phba->link_flag); } else { lpfc_printf_log(phba, KERN_INFO, @@ -2846,6 +2853,8 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, ae->un.AttrInt = 0; if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (phba->lmt & LMT_256Gb) + ae->un.AttrInt |= HBA_PORTSPEED_256GFC; if (phba->lmt & LMT_128Gb) ae->un.AttrInt |= HBA_PORTSPEED_128GFC; if (phba->lmt & LMT_64Gb) @@ -2927,6 +2936,9 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, case LPFC_LINK_SPEED_128GHZ: ae->un.AttrInt = HBA_PORTSPEED_128GFC; break; + case LPFC_LINK_SPEED_256GHZ: + ae->un.AttrInt = HBA_PORTSPEED_256GFC; + break; default: ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN; break; @@ -3343,7 +3355,7 @@ lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 256); sprintf(mibrevision, "ELXE2EM:%04d", - phba->sli4_hba.pc_sli4_params.mi_value); + phba->sli4_hba.pc_sli4_params.mi_ver); strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString)); len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); len += (len & 3) ? (4 - (len & 3)) : 4; @@ -3884,9 +3896,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /** * lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort * @vport: pointer to a host virtual N_Port data structure. - * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID) - * cmdcode: FDMI command to send - * mask: Mask of HBA or PORT Attributes to send + * @cmdcode: application server command code to send + * @vmid: pointer to vmid info structure * * Builds and sends a FDMI command using the CT subsystem. 
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 6ff85ae57e79..bd6d459afce5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -5429,6 +5429,180 @@ lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }
 
+static int
+lpfc_cgn_buffer_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	debug->buffer = vmalloc(LPFC_CGN_BUF_SIZE);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
+		     loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char *buffer = debug->buffer;
+	uint32_t *ptr;
+	int cnt, len = 0;
+
+	if (!phba->sli4_hba.pc_sli4_params.mi_ver || !phba->cgn_i) {
+		len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+				 "Congestion Mgmt is not supported\n");
+		goto out;
+	}
+	ptr = (uint32_t *)phba->cgn_i->virt;
+	len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+			 "Congestion Buffer Header\n");
+	/* Dump the first 32 bytes */
+	cnt = 32;
+	len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+			 "000: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+			 *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7));
+	ptr += 8;
+	len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+			 "Congestion Buffer Data\n");
+	while (cnt < sizeof(struct lpfc_cgn_info)) {
+		if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+			len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+					 "Truncated . . .\n");
.\n"); + break; + } + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "%03x: %08x %08x %08x %08x " + "%08x %08x %08x %08x\n", + cnt, *ptr, *(ptr + 1), *(ptr + 2), + *(ptr + 3), *(ptr + 4), *(ptr + 5), + *(ptr + 6), *(ptr + 7)); + cnt += 32; + ptr += 8; + } +out: + return simple_read_from_buffer(buf, nbytes, ppos, buffer, len); +} + +static int +lpfc_cgn_buffer_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + +static int +lpfc_rx_monitor_open(struct inode *inode, struct file *file) +{ + struct lpfc_rx_monitor_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_rx_monitor_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *buffer = debug->buffer; + struct rxtable_entry *entry; + int i, len = 0, head, tail, last, start; + + head = atomic_read(&phba->rxtable_idx_head); + while (head == LPFC_RXMONITOR_TABLE_IN_USE) { + /* Table is getting updated */ + msleep(20); + head = atomic_read(&phba->rxtable_idx_head); + } + + tail = atomic_xchg(&phba->rxtable_idx_tail, head); + if (!phba->rxtable || head == tail) { + len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len, + "Rxtable is empty\n"); + goto out; + } + last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY; + start = tail; + + len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len, + " MaxBPI\t Total Data Cmd Total Data Cmpl " + " Latency(us) Avg IO Size\tMax IO Size IO cnt " + "Info BWutil(ms)\n"); +get_table: + for (i = start; i < last; i++) { + entry = &phba->rxtable[i]; + len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len, + "%3d:%12lld %12lld\t%12lld\t" + "%8lldus\t%8lld\t%10lld " + "%8d %2d %2d(%2d)\n", + i, entry->max_bytes_per_interval, + entry->total_bytes, + entry->rcv_bytes, + entry->avg_io_latency, + entry->avg_io_size, + entry->max_read_cnt, + entry->io_cnt, + entry->cmf_info, + entry->timer_utilization, + entry->timer_interval); + } + + if (head != last) { + start = 0; + last = head; + goto get_table; + } +out: + return simple_read_from_buffer(buf, nbytes, ppos, buffer, len); +} + +static int +lpfc_rx_monitor_release(struct inode *inode, struct file *file) +{ + struct lpfc_rx_monitor_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + #undef lpfc_debugfs_op_disc_trc static const struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, @@ -5657,6 +5831,23 @@ static const struct file_operations lpfc_idiag_op_extAcc = { .write = lpfc_idiag_extacc_write, .release = lpfc_idiag_cmd_release, }; +#undef lpfc_cgn_buffer_op +static const struct file_operations lpfc_cgn_buffer_op = { + .owner = THIS_MODULE, + .open = lpfc_cgn_buffer_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_cgn_buffer_read, + .release = lpfc_cgn_buffer_release, +}; + +#undef lpfc_rx_monitor_op +static const struct file_operations lpfc_rx_monitor_op = { + .owner = THIS_MODULE, + .open = lpfc_rx_monitor_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_rx_monitor_read, + .release = lpfc_rx_monitor_release, 
 #endif
 
 /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
@@ -5907,6 +6098,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			goto debug_failed;
 		}
 
+		/* Congestion Info Buffer */
+		scnprintf(name, sizeof(name), "cgn_buffer");
+		phba->debug_cgn_buffer =
+			debugfs_create_file(name, S_IFREG | 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_cgn_buffer_op);
+		if (!phba->debug_cgn_buffer) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "6527 Cannot create debugfs "
+					 "cgn_buffer\n");
+			goto debug_failed;
+		}
+
+		/* RX Monitor */
+		scnprintf(name, sizeof(name), "rx_monitor");
+		phba->debug_rx_monitor =
+			debugfs_create_file(name, S_IFREG | 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_rx_monitor_op);
+		if (!phba->debug_rx_monitor) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "6528 Cannot create debugfs "
+					 "rx_monitor\n");
+			goto debug_failed;
+		}
+
 		/* RAS log */
 		snprintf(name, sizeof(name), "ras_log");
 		phba->debug_ras_log =
@@ -6335,6 +6552,12 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
 		debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
 		phba->debug_hbqinfo = NULL;
 
+		debugfs_remove(phba->debug_cgn_buffer);
+		phba->debug_cgn_buffer = NULL;
+
+		debugfs_remove(phba->debug_rx_monitor);
+		phba->debug_rx_monitor = NULL;
+
 		debugfs_remove(phba->debug_ras_log);
 		phba->debug_ras_log = NULL;
 
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 7ab6d3b08698..a5bf71b34972 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -52,6 +52,9 @@
 /* scsistat output buffer size */
 #define LPFC_SCSISTAT_SIZE 8192
 
+/* Congestion Info Buffer size */
+#define LPFC_CGN_BUF_SIZE 8192
+
 #define LPFC_DEBUG_OUT_LINE_SZ	80
 
 /*
@@ -279,6 +282,12 @@ struct lpfc_idiag {
 	void *ptr_private;
 };
 
+#define MAX_DEBUGFS_RX_TABLE_SIZE	(100 * LPFC_MAX_RXMONITOR_ENTRY)
+struct lpfc_rx_monitor_debug {
+	char *i_private;
+	char *buffer;
+};
+
 #else
 
 #define lpfc_nvmeio_data(phba, fmt, arg...) \
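The rx_monitor read above drains a fixed-size circular table: the producer parks rxtable_idx_head at LPFC_RXMONITOR_TABLE_IN_USE while an entry is being written, and the reader claims the [tail, head) window with atomic_xchg() and then walks it in up to two segments (the goto get_table pass handles the wrap). The same walk can be written as a single loop; a hedged sketch, where dump_rxtable and print_entry() are hypothetical stand-ins:

	static void dump_rxtable(struct rxtable_entry *ring, int head, int tail)
	{
		int i = tail;

		/* Walk [tail, head) over a ring of LPFC_MAX_RXMONITOR_ENTRY
		 * slots, wrapping at most once; head == tail means empty.
		 */
		while (i != head) {
			print_entry(&ring[i]);	/* hypothetical consumer */
			if (++i == LPFC_MAX_RXMONITOR_ENTRY)
				i = 0;
		}
	}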
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 131374a61d7e..871b665bd72e 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -78,10 +78,11 @@ struct lpfc_node_rrqs {
 };
 
 enum lpfc_fc4_xpt_flags {
-	NLP_WAIT_FOR_UNREG = 0x1,
-	SCSI_XPT_REGD      = 0x2,
-	NVME_XPT_REGD      = 0x4,
-	NLP_XPT_HAS_HH     = 0x8,
+	NLP_XPT_REGD        = 0x1,
+	SCSI_XPT_REGD       = 0x2,
+	NVME_XPT_REGD       = 0x4,
+	NVME_XPT_UNREG_WAIT = 0x8,
+	NLP_XPT_HAS_HH      = 0x10
 };
 
 struct lpfc_nodelist {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e481f5fe29d7..1254a575fd47 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -56,6 +56,9 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
 				struct lpfc_nodelist *ndlp, uint8_t retry);
 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
 				  struct lpfc_iocbq *iocb);
+static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
+			      struct lpfc_iocbq *cmdiocb,
+			      struct lpfc_iocbq *rspiocb);
 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
 			       struct lpfc_iocbq *);
 
@@ -1664,6 +1667,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 	if (!new_ndlp || (new_ndlp == ndlp))
 		return ndlp;
 
+	/*
+	 * Unregister from backend if not done yet. Could have been skipped
+	 * due to ADISC
+	 */
+	lpfc_nlp_unreg_node(vport, new_ndlp);
+
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
 						       GFP_KERNEL);
@@ -2025,9 +2034,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				 irsp->un.ulpWord[4]);
 
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-		if (lpfc_error_lost_link(irsp))
-			goto check_plogi;
-		else
+		if (!lpfc_error_lost_link(irsp))
 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 						NLP_EVT_CMPL_PLOGI);
 
@@ -2080,7 +2087,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 					   NLP_EVT_CMPL_PLOGI);
 	}
 
- check_plogi:
 	if (disc && vport->num_disc_nodes) {
 		/* Check to see if there are more PLOGIs to be sent */
 		lpfc_more_plogi(vport);
@@ -2607,6 +2613,14 @@ lpfc_adisc_done(struct lpfc_vport *vport)
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 	    !(vport->fc_flag & FC_RSCN_MODE) &&
 	    (phba->sli_rev < LPFC_SLI_REV4)) {
+
+		/*
+		 * If link is down, clear_la and reg_vpi will be done after
+		 * flogi following a link up event
+		 */
+		if (!lpfc_is_link_up(phba))
+			return;
+
 		/* The ADISCs are complete.  Doesn't matter if they
 		 * succeeded or failed because the ADISC completion
 		 * routine guarantees to call the state machine and
@@ -2749,12 +2763,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
 				 ndlp->nlp_DID, irsp->ulpStatus,
 				 irsp->un.ulpWord[4]);
-		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-		if (lpfc_error_lost_link(irsp))
-			goto check_adisc;
-		else
-			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
-						NLP_EVT_CMPL_ADISC);
+
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_ADISC);
 
 		/* As long as this node is not registered with the SCSI or NVMe
 		 * transport, it is no longer an active node.  Otherwise
@@ -2772,7 +2783,6 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 					NLP_EVT_CMPL_ADISC);
 
- check_adisc:
 	/* Check to see if there are more ADISCs to be sent */
 	if (disc && vport->num_disc_nodes)
 		lpfc_more_adisc(vport);
@@ -3253,7 +3263,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		irsp->ulpStatus, irsp->un.ulpWord[4],
 		irsp->un.elsreq64.remoteID);
 	/* ELS cmd tag <ulpIoTag> completes */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
 			 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
 			 "x%x\n",
 			 irsp->ulpIoTag, irsp->ulpStatus,
@@ -3279,6 +3289,9 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			case ELS_CMD_SCR:
 				lpfc_issue_els_scr(vport, cmdiocb->retry);
 				break;
+			case ELS_CMD_EDC:
+				lpfc_issue_els_edc(vport, cmdiocb->retry);
+				break;
 			case ELS_CMD_RDF:
 				cmdiocb->context1 = NULL; /* save ndlp refcnt */
 				lpfc_issue_els_rdf(vport, cmdiocb->retry);
@@ -3288,6 +3301,11 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		}
 		phba->fc_stat.elsRetryExceeded++;
 	}
+	if (cmd == ELS_CMD_EDC) {
+		/* must be called before checking ulpStatus and returning */
+		lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
+		return;
+	}
 	if (irsp->ulpStatus) {
 		/* ELS discovery cmd completes with error */
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
@@ -3312,11 +3330,14 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
 			    i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
-			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-				 "4677 Fabric RDF Notification Grant Data: "
-				 "0x%08x\n",
-				 be32_to_cpu(
-					prdf->reg_d1.desc_tags[i]));
+			lpfc_printf_vlog(vport, KERN_INFO,
+					 LOG_ELS | LOG_CGN_MGMT,
+					 "4677 Fabric RDF Notification Grant "
+					 "Data: 0x%08x Reg: %x %x\n",
+					 be32_to_cpu(
+						prdf->reg_d1.desc_tags[i]),
+					 phba->cgn_reg_signal,
+					 phba->cgn_reg_fpin);
 	}
 
 out:
@@ -3375,6 +3396,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
 		if (rc) {
+			lpfc_els_free_iocb(phba, elsiocb);
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
 					 "0937 %s: Failed to reg fc node, rc %d\n",
 					 __func__, rc);
@@ -3413,7 +3435,6 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
 		return 1;
 	}
 
-	/* Keep the ndlp just in case RDF is being sent */
 	return 0;
 }
 
@@ -3657,28 +3678,15 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
 		lpfc_enqueue_node(vport, ndlp);
 	}
 
-	/* RDF ELS is not required on an NPIV VN_Port. */
-	if (vport->port_type == LPFC_NPIV_PORT) {
-		lpfc_nlp_put(ndlp);
+	/* RDF ELS is not required on an NPIV VN_Port. */
+	if (vport->port_type == LPFC_NPIV_PORT)
 		return -EACCES;
-	}
 
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_RDF);
 	if (!elsiocb)
 		return -ENOMEM;
 
-	if (phba->sli_rev == LPFC_SLI_REV4 &&
-	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
-				 "0939 %s: FC_NODE x%x RPI x%x flag x%x "
-				 "ste x%x type x%x Not registered\n",
-				 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
-				 ndlp->nlp_flag, ndlp->nlp_state,
-				 ndlp->nlp_type);
-		return -ENODEV;
-	}
-
 	/* Configure the payload for the supported FPIN events. */
 	prdf = (struct lpfc_els_rdf_req *)
 		(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
@@ -3695,10 +3703,12 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
 	prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
 	prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
 
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "6444 Xmit RDF to remote NPORT x%x\n",
-			 ndlp->nlp_DID);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+			 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
+			 ndlp->nlp_DID, phba->cgn_reg_signal,
+			 phba->cgn_reg_fpin);
 
+	phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
 	elsiocb->context1 = lpfc_nlp_get(ndlp);
 	if (!elsiocb->context1) {
@@ -3739,7 +3749,7 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 {
 	/* Send LS_ACC */
 	if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
 				 "1623 Failed to RDF_ACC from x%x for x%x\n",
 				 ndlp->nlp_DID, vport->fc_myDID);
 		return -EIO;
@@ -3747,7 +3757,7 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 
 	/* Issue new RDF for reregistering */
 	if (lpfc_issue_els_rdf(vport, 0)) {
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
 				 "2623 Failed to re register RDF for x%x\n",
 				 vport->fc_myDID);
 		return -EIO;
@@ -3757,6 +3767,448 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 }
 
 /**
+ * lpfc_least_capable_settings - helper function for EDC rsp processing
+ * @phba: pointer to lpfc hba data structure.
+ * @pcgd: pointer to congestion detection descriptor in EDC rsp.
+ *
+ * This helper routine determines the least capable setting for
+ * congestion signals, signal freq, including scale, from the
+ * congestion detection descriptor in the EDC rsp.  The routine
+ * sets @phba values in preparation for a set_features mailbox.
+ **/
+static void
+lpfc_least_capable_settings(struct lpfc_hba *phba,
+			    struct fc_diag_cg_sig_desc *pcgd)
+{
+	u32 rsp_sig_cap = 0, drv_sig_cap = 0;
+	u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
+	struct lpfc_cgn_info *cp;
+	u32 crc;
+	u16 sig_freq;
+
+	/* Get rsp signal and frequency capabilities.  */
+	rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
+	rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
+	rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
+
+	/* If the Fport does not support signals, set FPIN only */
+	if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
+		goto out_no_support;
+
+	/* Apply the xmt scale to the xmt cycle to get the correct frequency.
+	 * Adapter default is 100 milliseconds.  Convert all xmt cycle values
+	 * to milliseconds.
+	 */
+	switch (rsp_sig_freq_scale) {
+	case EDC_CG_SIGFREQ_SEC:
+		rsp_sig_freq_cyc *= MSEC_PER_SEC;
+		break;
+	case EDC_CG_SIGFREQ_MSEC:
+		rsp_sig_freq_cyc = 1;
+		break;
+	default:
+		goto out_no_support;
+	}
+
+	/* Convenient shorthand. */
+	drv_sig_cap = phba->cgn_reg_signal;
+
+	/* Choose the least capable frequency. */
+	if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
+		phba->cgn_sig_freq = rsp_sig_freq_cyc;
+
+	/* Should be some common signals support. Settle on least capable
+	 * signal and adjust FPIN values. Initialize defaults to ease the
+	 * decision.
+	 */
+	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+	if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY &&
+	    (drv_sig_cap == EDC_CG_SIG_WARN_ONLY ||
+	     drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) {
+		phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+		phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+	}
+	if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) {
+		if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) {
+			phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM;
+			phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE;
+		}
+		if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) {
+			phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+			phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+		}
+	}
+
+	if (!phba->cgn_i)
+		return;
+
+	/* Update signal frequency in congestion info buffer */
+	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+	/* Frequency (in ms) Signal Warning/Signal Congestion Notifications
+	 * are received by the HBA
+	 */
+	sig_freq = phba->cgn_sig_freq;
+
+	if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
+		cp->cgn_warn_freq = cpu_to_le16(sig_freq);
+	if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+		cp->cgn_alarm_freq = cpu_to_le16(sig_freq);
+		cp->cgn_warn_freq = cpu_to_le16(sig_freq);
+	}
+	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+	cp->cgn_info_crc = cpu_to_le32(crc);
+	return;
+
+out_no_support:
+	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+	phba->cgn_sig_freq = 0;
+	phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+}
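The negotiated outcome above reduces to taking the weaker of the two sides' signal capabilities, with FPIN registration backfilling whatever was negotiated away (WARN-only signaling clears only the WARN FPIN bit; full WARN|ALARM signaling clears both). The frequency choice is analogous: the larger (slower) millisecond count wins. An illustrative reduction of the capability choice, not the driver's code:

	static u8 least_capable_signal(u8 fport_cap, u8 nport_cap)
	{
		if (fport_cap == EDC_CG_SIG_NOTSUPPORTED ||
		    nport_cap == EDC_CG_SIG_NOTSUPPORTED)
			return EDC_CG_SIG_NOTSUPPORTED;
		if (fport_cap == EDC_CG_SIG_WARN_ONLY ||
		    nport_cap == EDC_CG_SIG_WARN_ONLY)
			return EDC_CG_SIG_WARN_ONLY;
		return EDC_CG_SIG_WARN_ALARM;	/* both sides do WARN|ALARM */
	}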
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
+			FC_LS_TLV_DTAG_INIT);
+
+/**
+ * lpfc_cmpl_els_edc - Completion callback function for EDC
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Exchange
+ * Diagnostic Capabilities (EDC) command. The driver issues an EDC to
+ * notify the FPort of its Congestion and Link Fault capabilities.  This
+ * routine parses the FPort's response and decides on the least common
+ * values applicable to both FPort and NPort for Warnings and Alarms that
+ * are communicated via hardware signals.
+ **/
+static void
+lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp;
+	struct fc_els_edc_resp *edc_rsp;
+	struct fc_tlv_desc *tlv;
+	struct fc_diag_cg_sig_desc *pcgd;
+	struct fc_diag_lnkflt_desc *plnkflt;
+	struct lpfc_dmabuf *pcmd, *prsp;
+	const char *dtag_nm;
+	u32 *pdata, dtag;
+	int desc_cnt = 0, bytes_remain;
+	bool rcv_cap_desc = false;
+	struct lpfc_nodelist *ndlp;
+
+	irsp = &rspiocb->iocb;
+	ndlp = cmdiocb->context1;
+
+	lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
+			      "EDC cmpl:    status:x%x/x%x did:x%x",
+			      irsp->ulpStatus, irsp->un.ulpWord[4],
+			      irsp->un.elsreq64.remoteID);
+
+	/* ELS cmd tag <ulpIoTag> completes */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+			"4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
+			irsp->ulpIoTag, irsp->ulpStatus,
+			irsp->un.ulpWord[4], irsp->ulpTimeout);
+
+	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+	if (!pcmd)
+		goto out;
+
+	pdata = (u32 *)pcmd->virt;
+	if (!pdata)
+		goto out;
+
+	/* Need to clear signal values, send features MB and RDF with FPIN. */
+	if (irsp->ulpStatus)
+		goto out;
+
+	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	if (!prsp)
+		goto out;
+
+	edc_rsp = prsp->virt;
+	if (!edc_rsp)
+		goto out;
+
+	/* ELS cmd tag <ulpIoTag> completes */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+			"4676 Fabric EDC Rsp: "
+			"0x%02x, 0x%08x\n",
+			edc_rsp->acc_hdr.la_cmd,
+			be32_to_cpu(edc_rsp->desc_list_len));
+
+	/*
+	 * Payload length in bytes is the response descriptor list
+	 * length minus the 12 bytes of Link Service Request
+	 * Information descriptor in the reply.
+	 */
+	bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) -
+				   sizeof(struct fc_els_lsri_desc);
+	if (bytes_remain <= 0)
+		goto out;
+
+	tlv = edc_rsp->desc;
+
+	/*
+	 * cycle through EDC diagnostic descriptors to find the
+	 * congestion signaling capability descriptor
+	 */
+	while (bytes_remain) {
+		if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+					"6461 Truncated TLV hdr on "
+					"Diagnostic descriptor[%d]\n",
+					desc_cnt);
+			goto out;
+		}
+
+		dtag = be32_to_cpu(tlv->desc_tag);
+		switch (dtag) {
+		case ELS_DTAG_LNK_FAULT_CAP:
+			if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+			    FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+				sizeof(struct fc_diag_lnkflt_desc)) {
+				lpfc_printf_log(
+					phba, KERN_WARNING, LOG_CGN_MGMT,
+					"6462 Truncated Link Fault Diagnostic "
+					"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+					desc_cnt, bytes_remain,
+					FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+					sizeof(struct fc_diag_cg_sig_desc));
+				goto out;
+			}
+			plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+			lpfc_printf_log(
+				phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+				"4617 Link Fault Desc Data: 0x%08x 0x%08x "
+				"0x%08x 0x%08x 0x%08x\n",
+				be32_to_cpu(plnkflt->desc_tag),
+				be32_to_cpu(plnkflt->desc_len),
+				be32_to_cpu(
+					plnkflt->degrade_activate_threshold),
+				be32_to_cpu(
+					plnkflt->degrade_deactivate_threshold),
+				be32_to_cpu(plnkflt->fec_degrade_interval));
+			break;
+		case ELS_DTAG_CG_SIGNAL_CAP:
+			if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+			    FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+				sizeof(struct fc_diag_cg_sig_desc)) {
+				lpfc_printf_log(
+					phba, KERN_WARNING, LOG_CGN_MGMT,
+					"6463 Truncated Cgn Signal Diagnostic "
+					"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+					desc_cnt, bytes_remain,
+					FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+					sizeof(struct fc_diag_cg_sig_desc));
+				goto out;
+			}
+
+			pcgd = (struct fc_diag_cg_sig_desc *)tlv;
+			lpfc_printf_log(
+				phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+				"4616 CGN Desc Data: 0x%08x 0x%08x "
+				"0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n",
+				be32_to_cpu(pcgd->desc_tag),
+				be32_to_cpu(pcgd->desc_len),
+				be32_to_cpu(pcgd->xmt_signal_capability),
+				be32_to_cpu(pcgd->xmt_signal_frequency.count),
+				be32_to_cpu(pcgd->xmt_signal_frequency.units),
+				be32_to_cpu(pcgd->rcv_signal_capability),
+				be32_to_cpu(pcgd->rcv_signal_frequency.count),
+				be32_to_cpu(pcgd->rcv_signal_frequency.units));
+
+			/* Compare driver and Fport capabilities and choose
+			 * least common.
+			 */
+			lpfc_least_capable_settings(phba, pcgd);
+			rcv_cap_desc = true;
+			break;
+		default:
+			dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+					"4919 unknown Diagnostic "
+					"Descriptor[%d]: tag x%x (%s)\n",
+					desc_cnt, dtag, dtag_nm);
+		}
+
+		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+		tlv = fc_tlv_next_desc(tlv);
+		desc_cnt++;
+	}
+
+out:
+	if (!rcv_cap_desc) {
+		phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+		phba->cgn_sig_freq = 0;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
+				"4202 EDC rsp error - sending RDF "
+				"for FPIN only.\n");
+	}
+
+	lpfc_config_cgn_signal(phba);
+
+	/* Check to see if link went down during discovery */
+	lpfc_els_chk_latt(phba->pport);
+	lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
+			      "EDC Cmpl:     did:x%x refcnt %d",
+			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+	lpfc_els_free_iocb(phba, cmdiocb);
+	lpfc_nlp_put(ndlp);
+}
+
+static void
+lpfc_format_edc_cgn_desc(struct lpfc_hba *phba,
+			 struct fc_diag_cg_sig_desc *cgd)
+{
+	/* We are assuming cgd was zero'ed before calling this routine */
+
+	/* Configure the congestion detection capability */
+	cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP);
+
+	/* Descriptor len doesn't include the tag or len fields. */
+	cgd->desc_len = cpu_to_be32(
+		FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc));
+
+	/* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
+	 * xmt_signal_frequency.count already set to 0.
+	 * xmt_signal_frequency.units already set to 0.
+	 */
+
+	if (phba->cmf_active_mode == LPFC_CFG_OFF) {
+		/* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
+		 * rcv_signal_frequency.count already set to 0.
+		 * rcv_signal_frequency.units already set to 0.
+		 */
+		phba->cgn_sig_freq = 0;
+		return;
+	}
+	switch (phba->cgn_reg_signal) {
+	case EDC_CG_SIG_WARN_ONLY:
+		cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY);
+		break;
+	case EDC_CG_SIG_WARN_ALARM:
+		cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM);
+		break;
+	default:
+		/* rcv_signal_capability left 0 thus no support */
+		break;
+	}
+
+	/* We start negotiation with lpfc_fabric_cgn_frequency, after
+	 * the completion we settle on the higher frequency.
+	 */
+	cgd->rcv_signal_frequency.count =
+		cpu_to_be16(lpfc_fabric_cgn_frequency);
+	cgd->rcv_signal_frequency.units =
+		cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
+}
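Both the EDC completion above and the request builder lean on the generic ELS TLV helpers from include/uapi/scsi/fc/fc_els.h (fc_tlv_next_desc(), FC_TLV_DESC_HDR_SZ, FC_TLV_DESC_SZ_FROM_LENGTH()). The walk itself is the reusable part; a minimal, hedged sketch with the per-tag dispatch elided, where walk_edc_tlvs is a hypothetical name:

	static void walk_edc_tlvs(struct fc_tlv_desc *tlv, u32 bytes_remain)
	{
		u32 dtag, dlen;

		while (bytes_remain >= FC_TLV_DESC_HDR_SZ) {
			dtag = be32_to_cpu(tlv->desc_tag);
			dlen = FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
			if (dlen > bytes_remain)
				break;		/* truncated descriptor: stop */

			/* switch (dtag) { ... } per-descriptor handling here */

			bytes_remain -= dlen;
			tlv = fc_tlv_next_desc(tlv);
		}
	}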
+ * + * Return code + * 0 - Successfully issued edc command + * 1 - Failed to issue edc command + **/ +int +lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_els_edc_req *edc_req; + struct fc_diag_cg_sig_desc *cgn_desc; + u16 cmdsize; + struct lpfc_nodelist *ndlp; + u8 *pcmd = NULL; + u32 edc_req_size, cgn_desc_size; + int rc; + + if (vport->port_type == LPFC_NPIV_PORT) + return -EACCES; + + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENODEV; + + /* If HBA doesn't support signals, drop into RDF */ + if (!phba->cgn_init_reg_signal) + goto try_rdf; + + edc_req_size = sizeof(struct fc_els_edc); + cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); + cmdsize = edc_req_size + cgn_desc_size; + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_EDC); + if (!elsiocb) + goto try_rdf; + + /* Configure the payload for the supported Diagnostics capabilities. */ + pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + memset(pcmd, 0, cmdsize); + edc_req = (struct lpfc_els_edc_req *)pcmd; + edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size); + edc_req->edc.edc_cmd = ELS_EDC; + + cgn_desc = &edc_req->cgn_desc; + + lpfc_format_edc_cgn_desc(phba, cgn_desc); + + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT, + "4623 Xmit EDC to remote " + "NPORT x%x reg_sig x%x reg_fpin:x%x\n", + ndlp->nlp_DID, phba->cgn_reg_signal, + phba->cgn_reg_fpin); + + elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; + elsiocb->context1 = lpfc_nlp_get(ndlp); + if (!elsiocb->context1) { + lpfc_els_free_iocb(phba, elsiocb); + return -EIO; + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue EDC: did:x%x refcnt %d", + ndlp->nlp_DID, kref_read(&ndlp->kref), 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + /* The additional lpfc_nlp_put will cause the following + * lpfc_els_free_iocb routine to trigger the release of + * the node. + */ + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + goto try_rdf; + } + return 0; +try_rdf: + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + rc = lpfc_issue_els_rdf(vport, 0); + return rc; +} + +/** * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry * @vport: pointer to a host virtual N_Port data structure. * @nlp: pointer to a node-list data structure. @@ -4378,7 +4830,7 @@ out_retry: (cmd == ELS_CMD_NVMEPRLI)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); - else + else if (cmd != ELS_CMD_ADISC) lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); ndlp->nlp_last_elscmd = cmd; @@ -4520,7 +4972,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) { struct lpfc_dmabuf *buf_ptr, *buf_ptr1; - /* The I/O job is complete. Clear the context1 data. */ + /* The I/O iocb is complete. Clear the context1 data. */ elsiocb->context1 = NULL; /* context2 = cmd, context2->next = rsp, context3 = bpl */ @@ -4612,6 +5064,15 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { + + /* If PLOGI is being retried, PLOGI completion will clean up the + * node. The NLP_NPR_2B_DISC flag needs to be retained to make + * progress on nodes discovered from last RSCN. 
+ */ + if ((ndlp->nlp_flag & NLP_DELAY_TMO) && + (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) + goto out; + /* NPort Recovery mode or node is just allocated */ if (!lpfc_nlp_not_used(ndlp)) { /* A LOGO is completing and the node is in NPR state. @@ -5158,6 +5619,86 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, return 0; } + /** + * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: NPort to where rsp is directed + * + * This routine issues an EDC ACC RSP to the F-Port Controller to communicate + * this N_Port's support of hardware signals in its Congestion + * Capabilities Descriptor. + * + * Return code + * 0 - Successfully issued edc rsp command + * 1 - Failed to issue edc rsp command + **/ +static int +lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_els_edc_rsp *edc_rsp; + struct lpfc_iocbq *elsiocb; + IOCB_t *icmd, *cmd; + uint8_t *pcmd; + int cmdsize, rc; + + cmdsize = sizeof(struct lpfc_els_edc_rsp); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + icmd = &elsiocb->iocb; + cmd = &cmdiocb->iocb; + icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; + pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); + memset(pcmd, 0, cmdsize); + + edc_rsp = (struct lpfc_els_edc_rsp *)pcmd; + edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC; + edc_rsp->edc_rsp.desc_list_len = cpu_to_be32( + FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp)); + edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); + edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32( + FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); + edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC; + lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue EDC ACC: did:x%x flg:x%x refcnt %d", + ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref)); + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + + phba->fc_stat.elsXmitACC++; + elsiocb->context1 = lpfc_nlp_get(ndlp); + if (!elsiocb->context1) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return 1; + } + + /* Xmit ELS ACC response tag <ulpIoTag> */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " + "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "RPI: x%x, fc_flag x%x\n", + rc, elsiocb->iotag, elsiocb->sli4_xritag, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, vport->fc_flag); + + return 0; +} + /** * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd * @vport: pointer to a virtual N_Port data structure. 
@@ -5657,25 +6198,40 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS ADISCs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { - if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); - ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(vport, ndlp, 0); - sentadisc++; - vport->num_disc_nodes++; - if (vport->num_disc_nodes >= - vport->cfg_discovery_threads) { - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_NLP_MORE; - spin_unlock_irq(shost->host_lock); - break; - } + + if (ndlp->nlp_state != NLP_STE_NPR_NODE || + !(ndlp->nlp_flag & NLP_NPR_ADISC)) + continue; + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + + if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + /* This node was marked for ADISC but was not picked + * for discovery. This is possible if the node was + * missing in gidft response. + * + * At time of marking node for ADISC, we skipped unreg + * from backend + */ + lpfc_nlp_unreg_node(vport, ndlp); + continue; + } + + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_issue_els_adisc(vport, ndlp, 0); + sentadisc++; + vport->num_disc_nodes++; + if (vport->num_disc_nodes >= + vport->cfg_discovery_threads) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); + break; } + } if (sentadisc == 0) { spin_lock_irq(shost->host_lock); @@ -6087,6 +6643,12 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) case LPFC_LINK_SPEED_64GHZ: rdp_speed = RDP_PS_64GB; break; + case LPFC_LINK_SPEED_128GHZ: + rdp_speed = RDP_PS_128GB; + break; + case LPFC_LINK_SPEED_256GHZ: + rdp_speed = RDP_PS_256GB; + break; default: rdp_speed = RDP_PS_UNKNOWN; break; @@ -6094,6 +6656,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) desc->info.port_speed.speed = cpu_to_be16(rdp_speed); + if (phba->lmt & LMT_256Gb) + rdp_cap |= RDP_PS_256GB; if (phba->lmt & LMT_128Gb) rdp_cap |= RDP_PS_128GB; if (phba->lmt & LMT_64Gb) @@ -6886,13 +7450,6 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) continue; } - /* Check to see if we need to NVME rescan this target - * remoteport. - */ - if (ndlp->nlp_fc4_type & NLP_FC4_NVME && - ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) - lpfc_nvme_rescan_port(vport, ndlp); - lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_cancel_retry_delay_tmo(vport, ndlp); @@ -8212,6 +8769,125 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } /** + * lpfc_els_rcv_edc - Process an unsolicited EDC iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. 
+ * + * Return code + * 0 - Successfully processed edc iocb (currently always returns 0) + **/ +static int +lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct fc_els_edc *edc_req; + struct fc_tlv_desc *tlv; + uint8_t *payload; + uint32_t *ptr, dtag; + const char *dtag_nm; + int desc_cnt = 0, bytes_remain; + bool rcv_cap_desc = false; + + payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt; + + edc_req = (struct fc_els_edc *)payload; + bytes_remain = be32_to_cpu(edc_req->desc_len); + + ptr = (uint32_t *)payload; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "3319 Rcv EDC payload len %d: x%x x%x x%x\n", + bytes_remain, be32_to_cpu(*ptr), + be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); + + /* No signal support unless there is a congestion descriptor */ + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + phba->cgn_sig_freq = 0; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; + + if (bytes_remain <= 0) + goto out; + + tlv = edc_req->desc; + + /* + * cycle through EDC diagnostic descriptors to find the + * congestion signaling capability descriptor + */ + while (bytes_remain && !rcv_cap_desc) { + if (bytes_remain < FC_TLV_DESC_HDR_SZ) { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6464 Truncated TLV hdr on " + "Diagnostic descriptor[%d]\n", + desc_cnt); + goto out; + } + + dtag = be32_to_cpu(tlv->desc_tag); + switch (dtag) { + case ELS_DTAG_LNK_FAULT_CAP: + if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || + FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != + sizeof(struct fc_diag_lnkflt_desc)) { + lpfc_printf_log( + phba, KERN_WARNING, LOG_CGN_MGMT, + "6465 Truncated Link Fault Diagnostic " + "descriptor[%d]: %d vs 0x%zx 0x%zx\n", + desc_cnt, bytes_remain, + FC_TLV_DESC_SZ_FROM_LENGTH(tlv), + sizeof(struct fc_diag_lnkflt_desc)); + goto out; + } + /* No action for Link Fault descriptor for now */ + break; + case ELS_DTAG_CG_SIGNAL_CAP: + if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || + FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != + sizeof(struct fc_diag_cg_sig_desc)) { + lpfc_printf_log( + phba, KERN_WARNING, LOG_CGN_MGMT, + "6466 Truncated cgn signal Diagnostic " + "descriptor[%d]: %d vs 0x%zx 0x%zx\n", + desc_cnt, bytes_remain, + FC_TLV_DESC_SZ_FROM_LENGTH(tlv), + sizeof(struct fc_diag_cg_sig_desc)); + goto out; + } + + phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = phba->cgn_init_reg_signal; + + /* We start negotiation with lpfc_fabric_cgn_frequency. + * When we process the EDC, we will settle on the + * higher frequency. + */ + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + + lpfc_least_capable_settings( + phba, (struct fc_diag_cg_sig_desc *)tlv); + rcv_cap_desc = true; + break; + default: + dtag_nm = lpfc_get_tlv_dtag_nm(dtag); + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6467 unknown Diagnostic " + "Descriptor[%d]: tag x%x (%s)\n", + desc_cnt, dtag, dtag_nm); + } + bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); + tlv = fc_tlv_next_desc(tlv); + desc_cnt++; + } +out: + /* Need to send back an ACC */ + lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); + + lpfc_config_cgn_signal(phba); + return 0; +} + +/** * lpfc_els_timeout - Handler function for the els timer * @t: timer context used to obtain the vport. 
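Both EDC parsers above, the solicited response completion (message 4676) and the unsolicited request handler (message 3319), walk the descriptor list with the same bounded TLV pattern. A condensed sketch of that pattern, using only the helpers visible in this patch and with the per-tag bodies reduced to stubs:

```c
/* Condensed sketch of the TLV walk shared by the EDC parsers above;
 * dispatch bodies are stubbed out.
 */
static void
sketch_walk_edc_tlvs(struct fc_tlv_desc *tlv, int bytes_remain)
{
	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
		switch (be32_to_cpu(tlv->desc_tag)) {
		case ELS_DTAG_LNK_FAULT_CAP:
			/* validate length against fc_diag_lnkflt_desc */
			break;
		case ELS_DTAG_CG_SIGNAL_CAP:
			/* validate length against fc_diag_cg_sig_desc */
			break;
		default:
			/* unknown tag: log it and keep walking */
			break;
		}
		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		tlv = fc_tlv_next_desc(tlv);
	}
}
```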
* @@ -8668,50 +9344,304 @@ lpfc_send_els_event(struct lpfc_vport *vport, } -DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, - FC_LS_TLV_DTAG_INIT); - DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, FC_FPIN_LI_EVT_TYPES_INIT); +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, + FC_FPIN_DELI_EVT_TYPES_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, + FC_FPIN_CONGN_EVT_TYPES_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, + fc_fpin_congn_severity_types, + FC_FPIN_CONGN_SEVERITY_INIT); + + +/** + * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port + * @phba: Pointer to phba object. + * @wwnlist: Pointer to list of WWPNs in FPIN payload + * @cnt: count of WWPNs in FPIN payload + * + * This routine is called when processing LI and PC descriptors. + * Output is limited to 6 log messages, with 6 WWPNs per message. + */ +static void +lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) +{ + char buf[LPFC_FPIN_WWPN_LINE_SZ]; + __be64 wwn; + u64 wwpn; + int i, len; + int line = 0; + int wcnt = 0; + bool endit = false; + + len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); + for (i = 0; i < cnt; i++) { + /* Are we on the last WWPN */ + if (i == (cnt - 1)) + endit = true; + + /* Extract the next WWPN from the payload */ + wwn = *wwnlist++; + wwpn = be64_to_cpu(wwn); + len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, + " %016llx", wwpn); + + /* Log a message if we are on the last WWPN + * or if we hit the max allowed per message. + */ + wcnt++; + if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { + buf[len] = 0; + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4686 %s\n", buf); + + /* Check if we reached the last WWPN */ + if (endit) + return; + + /* Limit the number of log messages displayed per FPIN */ + line++; + if (line == LPFC_FPIN_WWPN_NUM_LINE) { + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4687 %d WWPNs Truncated\n", + cnt - i - 1); + return; + } + + /* Start over with next log message */ + wcnt = 0; + len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, + "Additional WWPNs:"); + } + } +} + /** * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. - * @vport: Pointer to vport object. + * @phba: Pointer to phba object. * @tlv: Pointer to the Link Integrity Notification Descriptor. * - * This function processes a link integrity FPIN event by - * logging a message + * This function processes a Link Integrity FPIN event by logging a message. 
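 * A worked example of the limits enforced by lpfc_display_fpin_wwpn()
 * above: with cnt = 40 WWPNs, the routine emits six 4686 messages of
 * six WWPNs each (36 total); line then reaches LPFC_FPIN_WWPN_NUM_LINE
 * at i = 35, so it logs "4687 4 WWPNs Truncated" for the
 * cnt - i - 1 = 4 entries it never printed.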
**/ static void -lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv) +lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) { struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; const char *li_evt_str; - u32 li_evt; + u32 li_evt, cnt; li_evt = be16_to_cpu(li->event_type); li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); + cnt = be32_to_cpu(li->pname_count); - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "4680 FPIN Link Integrity %s (x%x) " - "Detecting PN x%016llx Attached PN x%016llx " - "Duration %d mSecs Count %d Port Cnt %d\n", - li_evt_str, li_evt, - be64_to_cpu(li->detecting_wwpn), - be64_to_cpu(li->attached_wwpn), - be32_to_cpu(li->event_threshold), - be32_to_cpu(li->event_count), - be32_to_cpu(li->pname_count)); + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4680 FPIN Link Integrity %s (x%x) " + "Detecting PN x%016llx Attached PN x%016llx " + "Duration %d mSecs Count %d Port Cnt %d\n", + li_evt_str, li_evt, + be64_to_cpu(li->detecting_wwpn), + be64_to_cpu(li->attached_wwpn), + be32_to_cpu(li->event_threshold), + be32_to_cpu(li->event_count), cnt); + + lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); +} + +/** + * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. + * @phba: Pointer to hba object. + * @tlv: Pointer to the Delivery Notification Descriptor TLV + * + * This function processes a Delivery FPIN event by logging a message. + **/ +static void +lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; + const char *del_rsn_str; + u32 del_rsn; + __be32 *frame; + + del_rsn = be16_to_cpu(del->deli_reason_code); + del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); + + /* Skip over desc_tag/desc_len header to payload */ + frame = (__be32 *)(del + 1); + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4681 FPIN Delivery %s (x%x) " + "Detecting PN x%016llx Attached PN x%016llx " + "DiscHdr0 x%08x " + "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " + "DiscHdr4 x%08x DiscHdr5 x%08x\n", + del_rsn_str, del_rsn, + be64_to_cpu(del->detecting_wwpn), + be64_to_cpu(del->attached_wwpn), + be32_to_cpu(frame[0]), + be32_to_cpu(frame[1]), + be32_to_cpu(frame[2]), + be32_to_cpu(frame[3]), + be32_to_cpu(frame[4]), + be32_to_cpu(frame[5])); } +/** + * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. + * @phba: Pointer to hba object. + * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV + * + * This function processes a Peer Congestion FPIN event by logging a message. + **/ static void -lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin, - u32 fpin_length) +lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) { - struct fc_tlv_desc *tlv; + struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; + const char *pc_evt_str; + u32 pc_evt, cnt; + + pc_evt = be16_to_cpu(pc->event_type); + pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); + cnt = be32_to_cpu(pc->pname_count); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, + "4684 FPIN Peer Congestion %s (x%x) " + "Duration %d mSecs " + "Detecting PN x%016llx Attached PN x%016llx " + "Impacted Port Cnt %d\n", + pc_evt_str, pc_evt, + be32_to_cpu(pc->event_period), + be64_to_cpu(pc->detecting_wwpn), + be64_to_cpu(pc->attached_wwpn), + cnt); + + lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); +} + +/** + * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification + * @phba: Pointer to hba object. 
+ * @tlv: Pointer to the Congestion Notification Descriptor TLV + * + * This function processes an FPIN Congestion Notification. The notification + * could be an Alarm or Warning. This routine feeds that data into the driver's + * running congestion algorithm. It also processes the FPIN by + * logging a message. It returns 1 to indicate the message should be + * delivered to the upper layer, or 0 to indicate it should not. + **/ +static int +lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct lpfc_cgn_info *cp; + struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; + const char *cgn_evt_str; + u32 cgn_evt; + const char *cgn_sev_str; + u32 cgn_sev; + uint16_t value; + u32 crc; + bool nm_log = false; + int rc = 1; + + cgn_evt = be16_to_cpu(cgn->event_type); + cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); + cgn_sev = cgn->severity; + cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); + + /* The driver only takes action on a Credit Stall or Oversubscription + * event type to engage the IO algorithm. The driver prints an + * unmaskable message only for Lost Credit and Credit Stall. + * TODO: Still need to have definition of host action on clear, + * lost credit and device specific event types. + */ + switch (cgn_evt) { + case FPIN_CONGN_LOST_CREDIT: + nm_log = true; + break; + case FPIN_CONGN_CREDIT_STALL: + nm_log = true; + fallthrough; + case FPIN_CONGN_OVERSUBSCRIPTION: + if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) + nm_log = false; + switch (cgn_sev) { + case FPIN_CONGN_SEVERITY_ERROR: + /* Take action here for an Alarm event */ + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { + /* Keep track of alarm cnt for cgn_info */ + atomic_inc(&phba->cgn_fabric_alarm_cnt); + /* Keep track of alarm cnt for SYNC_WQE */ + atomic_inc(&phba->cgn_sync_alarm_cnt); + } + goto cleanup; + } + break; + case FPIN_CONGN_SEVERITY_WARNING: + /* Take action here for a Warning event */ + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { + /* Keep track of warning cnt for cgn_info */ + atomic_inc(&phba->cgn_fabric_warn_cnt); + /* Keep track of warning cnt for SYNC_WQE */ + atomic_inc(&phba->cgn_sync_warn_cnt); + } +cleanup: + /* Save frequency in ms */ + phba->cgn_fpin_frequency = + be32_to_cpu(cgn->event_period); + value = phba->cgn_fpin_frequency; + if (phba->cgn_i) { + cp = (struct lpfc_cgn_info *) + phba->cgn_i->virt; + if (phba->cgn_reg_fpin & + LPFC_CGN_FPIN_ALARM) + cp->cgn_alarm_freq = + cpu_to_le16(value); + if (phba->cgn_reg_fpin & + LPFC_CGN_FPIN_WARN) + cp->cgn_warn_freq = + cpu_to_le16(value); + crc = lpfc_cgn_calc_crc32 + (cp, + LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + } + + /* Don't deliver to upper layer since + * driver took action on this tlv. + */ + rc = 0; + } + break; + } + break; + } + + /* Change the log level to unmaskable for the following event types. */ + lpfc_printf_log(phba, (nm_log ? 
KERN_WARNING : KERN_INFO), + LOG_CGN_MGMT | LOG_ELS, + "4683 FPIN CONGESTION %s type %s (x%x) Event " + "Duration %d mSecs\n", + cgn_sev_str, cgn_evt_str, cgn_evt, + be32_to_cpu(cgn->event_period)); + return rc; +} + +void +lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) +{ + struct lpfc_hba *phba = vport->phba; + struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; + struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; const char *dtag_nm; - uint32_t desc_cnt = 0, bytes_remain; - u32 dtag; + int desc_cnt = 0, bytes_remain, cnt; + u32 dtag, deliver = 0; + int len; /* FPINs handled only if we are in the right discovery state */ if (vport->port_state < LPFC_DISC_AUTH) @@ -8721,35 +9651,92 @@ lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin, if (fpin_length < sizeof(struct fc_els_fpin)) return; + /* Sanity check descriptor length. The desc_len value does not + * include space for the ELS command and the desc_len fields. + */ + len = be32_to_cpu(fpin->desc_len); + if (fpin_length < len + sizeof(struct fc_els_fpin)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "4671 Bad ELS FPIN length %d: %d\n", + len, fpin_length); + return; + } + tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; + first_tlv = tlv; bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); - /* process each descriptor */ + /* process each descriptor separately */ while (bytes_remain >= FC_TLV_DESC_HDR_SZ && bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { - dtag = be32_to_cpu(tlv->desc_tag); switch (dtag) { case ELS_DTAG_LNK_INTEGRITY: - lpfc_els_rcv_fpin_li(vport, tlv); + lpfc_els_rcv_fpin_li(phba, tlv); + deliver = 1; + break; + case ELS_DTAG_DELIVERY: + lpfc_els_rcv_fpin_del(phba, tlv); + deliver = 1; + break; + case ELS_DTAG_PEER_CONGEST: + lpfc_els_rcv_fpin_peer_cgn(phba, tlv); + deliver = 1; + break; + case ELS_DTAG_CONGESTION: + deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); break; default: dtag_nm = lpfc_get_tlv_dtag_nm(dtag); - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "4678 skipped FPIN descriptor[%d]: " - "tag x%x (%s)\n", - desc_cnt, dtag, dtag_nm); - break; + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "4678 unknown FPIN descriptor[%d]: " + "tag x%x (%s)\n", + desc_cnt, dtag, dtag_nm); + + /* If descriptor is bad, drop the rest of the data */ + return; } + lpfc_cgn_update_stat(phba, dtag); + cnt = be32_to_cpu(tlv->desc_len); - desc_cnt++; + /* Sanity check descriptor length. The desc_len value does not + * include space for the desc_tag and the desc_len fields. + */ + len -= (cnt + sizeof(struct fc_tlv_desc)); + if (len < 0) { + dtag_nm = lpfc_get_tlv_dtag_nm(dtag); + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "4672 Bad FPIN descriptor TLV length " + "%d: %d %d %s\n", + cnt, len, fpin_length, dtag_nm); + return; + } + + current_tlv = tlv; bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); tlv = fc_tlv_next_desc(tlv); - } - fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length, - (char *)fpin); + /* Format payload such that the FPIN delivered to the + * upper layer is a single descriptor FPIN. + */ + if (desc_cnt) + memcpy(first_tlv, current_tlv, + (cnt + sizeof(struct fc_els_fpin))); + + /* Adjust the length so that it only reflects a + * single descriptor FPIN. 
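 * Worked example of the repackaging arithmetic below, assuming one
 * Link Integrity descriptor whose payload length (cnt) is 32 bytes
 * and sizeof(struct fc_els_fpin) == 8 on this ABI: fpin->desc_len
 * becomes 32 + 8 = 40, the TLV including its tag/len header, and the
 * fpin_length handed to fc_host_fpin_rcv() becomes 40 + 8 = 48, the
 * size of the whole single-descriptor FPIN.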
+ */ + fpin_length = cnt + sizeof(struct fc_els_fpin); + fpin->desc_len = cpu_to_be32(fpin_length); + fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ + + /* Send every descriptor individually to the upper layer */ + if (deliver) + fc_host_fpin_rcv(lpfc_shost_from_vport(vport), + fpin_length, (char *)fpin); + desc_cnt++; + } } /** @@ -8948,6 +9935,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); break; case ELS_CMD_PRLO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -9137,6 +10127,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* There are no replies, so no rjt codes */ break; + case ELS_CMD_EDC: + lpfc_els_rcv_edc(vport, elsiocb, ndlp); + break; case ELS_CMD_RDF: phba->fc_stat.elsRcvRDF++; /* Accept RDF only from fabric controller */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 7cc5920979f8..7195ca0275f9 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -3331,6 +3331,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) case LPFC_LINK_SPEED_32GHZ: case LPFC_LINK_SPEED_64GHZ: case LPFC_LINK_SPEED_128GHZ: + case LPFC_LINK_SPEED_256GHZ: break; default: phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; @@ -3646,6 +3647,10 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) phba->wait_4_mlo_maint_flg); } lpfc_mbx_process_link_up(phba, la); + + if (phba->cmf_active_mode != LPFC_CFG_OFF) + lpfc_cmf_signal_init(phba); + } else if (attn_type == LPFC_ATT_LINK_DOWN || attn_type == LPFC_ATT_UNEXP_WWPN) { phba->fc_stat.LinkDown++; @@ -4208,6 +4213,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; struct lpfc_vport *vport = pmb->vport; + int rc; pmb->ctx_buf = NULL; pmb->ctx_ndlp = NULL; @@ -4283,9 +4289,23 @@ out: /* Issue SCR just before NameServer GID_FT Query */ lpfc_issue_els_scr(vport, 0); - if (!phba->cfg_enable_mi || - phba->sli4_hba.pc_sli4_params.mi_ver < LPFC_MIB3_SUPPORT) + /* Link was bounced or a Fabric LOGO occurred. Start EDC + * with initial FW values provided the congestion mode is + * not off. Note that signals may or may not be supported + * by the adapter, but FPIN is enabled by default to cover + * one or both missing signals. 
+ */ + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = phba->cgn_init_reg_signal; + rc = lpfc_issue_els_edc(vport, 0); + lpfc_printf_log(phba, KERN_INFO, + LOG_INIT | LOG_ELS | LOG_DISCOVERY, + "4220 EDC issue error x%x, Data: x%x\n", + rc, phba->cgn_init_reg_signal); + } else { lpfc_issue_els_rdf(vport, 0); + } } vport->fc_ns_retry = 0; @@ -4501,10 +4521,152 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) spin_unlock_irqrestore(shost->host_lock, iflags); } +/* Register a node with backend if not already done */ +void +lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + + unsigned long iflags; + + spin_lock_irqsave(&ndlp->lock, iflags); + if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { + /* Already registered with backend, trigger rescan */ + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (ndlp->fc4_xpt_flags & NVME_XPT_REGD && + ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) { + lpfc_nvme_rescan_port(vport, ndlp); + } + return; + } + + ndlp->fc4_xpt_flags |= NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (lpfc_valid_xpt_node(ndlp)) { + vport->phba->nport_event_cnt++; + /* + * Tell the fc transport about the port, if we haven't + * already. If we have, and it's a scsi entity, be + */ + lpfc_register_remote_port(vport, ndlp); + } + + /* We are done if we do not have any NVME remote node */ + if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME)) + return; + + /* Notify the NVME transport of this new rport. */ + if (vport->phba->sli_rev >= LPFC_SLI_REV4 && + ndlp->nlp_fc4_type & NLP_FC4_NVME) { + if (vport->phba->nvmet_support == 0) { + /* Register this rport with the transport. + * Only NVME Target Rports are registered with + * the transport. + */ + if (ndlp->nlp_type & NLP_NVME_TARGET) { + vport->phba->nport_event_cnt++; + lpfc_nvme_register_port(vport, ndlp); + } + } else { + /* Just take an NDLP ref count since the + * target does not register rports. + */ + lpfc_nlp_get(ndlp); + } + } +} + +/* Unregister a node with backend if not already done */ +void +lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + unsigned long iflags; + + spin_lock_irqsave(&ndlp->lock, iflags); + if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { + spin_unlock_irqrestore(&ndlp->lock, iflags); + return; + } + + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (ndlp->rport && + ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { + vport->phba->nport_event_cnt++; + lpfc_unregister_remote_port(ndlp); + } + + if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { + vport->phba->nport_event_cnt++; + if (vport->phba->nvmet_support == 0) { + /* Start devloss if target. */ + if (ndlp->nlp_type & NLP_NVME_TARGET) + lpfc_nvme_unregister_port(vport, ndlp); + } else { + /* NVMET has no upcall. 
*/ + lpfc_nlp_put(ndlp); + } + } + +} + +/* + * Adisc state change handling + */ +static void +lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int new_state) +{ + switch (new_state) { + /* + * Any state to ADISC_ISSUE + * Do nothing, adisc cmpl handling will trigger state changes + */ + case NLP_STE_ADISC_ISSUE: + break; + + /* + * ADISC_ISSUE to mapped states + * Trigger a registration with backend, it will be nop if + * already registered + */ + case NLP_STE_UNMAPPED_NODE: + ndlp->nlp_type |= NLP_FC_NODE; + fallthrough; + case NLP_STE_MAPPED_NODE: + ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + lpfc_nlp_reg_node(vport, ndlp); + break; + + /* + * ADISC_ISSUE to non-mapped states + * We are moving from ADISC_ISSUE to a non-mapped state because + * ADISC failed, we would have skipped unregistering with + * backend, attempt it now + */ + case NLP_STE_NPR_NODE: + ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + fallthrough; + default: + lpfc_nlp_unreg_node(vport, ndlp); + break; + } + +} + static void lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int old_state, int new_state) { + /* Trap ADISC changes here */ + if (new_state == NLP_STE_ADISC_ISSUE || + old_state == NLP_STE_ADISC_ISSUE) { + lpfc_handle_adisc_state(vport, ndlp, new_state); + return; + } + if (new_state == NLP_STE_UNMAPPED_NODE) { ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; ndlp->nlp_type |= NLP_FC_NODE; @@ -4514,60 +4676,17 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (new_state == NLP_STE_NPR_NODE) ndlp->nlp_flag &= ~NLP_RCV_PLOGI; - /* FCP and NVME Transport interface */ + /* Reg/Unreg for FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || old_state == NLP_STE_UNMAPPED_NODE)) { - if (ndlp->rport && - lpfc_valid_xpt_node(ndlp)) { - vport->phba->nport_event_cnt++; - lpfc_unregister_remote_port(ndlp); - } - - if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { - vport->phba->nport_event_cnt++; - if (vport->phba->nvmet_support == 0) { - /* Start devloss if target. */ - if (ndlp->nlp_type & NLP_NVME_TARGET) - lpfc_nvme_unregister_port(vport, ndlp); - } else { - /* NVMET has no upcall. */ - lpfc_nlp_put(ndlp); - } - } + /* For nodes marked for ADISC, Handle unreg in ADISC cmpl */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) + lpfc_nlp_unreg_node(vport, ndlp); } - /* FCP and NVME Transport interfaces */ - if (new_state == NLP_STE_MAPPED_NODE || - new_state == NLP_STE_UNMAPPED_NODE) { - if (lpfc_valid_xpt_node(ndlp)) { - vport->phba->nport_event_cnt++; - /* - * Tell the fc transport about the port, if we haven't - * already. If we have, and it's a scsi entity, be - */ - lpfc_register_remote_port(vport, ndlp); - } - /* Notify the NVME transport of this new rport. */ - if (vport->phba->sli_rev >= LPFC_SLI_REV4 && - ndlp->nlp_fc4_type & NLP_FC4_NVME) { - if (vport->phba->nvmet_support == 0) { - /* Register this rport with the transport. - * Only NVME Target Rports are registered with - * the transport. - */ - if (ndlp->nlp_type & NLP_NVME_TARGET) { - vport->phba->nport_event_cnt++; - lpfc_nvme_register_port(vport, ndlp); - } - } else { - /* Just take an NDLP ref count since the - * target does not register rports. 
- */ - lpfc_nlp_get(ndlp); - } - } - } + if (new_state == NLP_STE_MAPPED_NODE || + new_state == NLP_STE_UNMAPPED_NODE) + lpfc_nlp_reg_node(vport, ndlp); if ((new_state == NLP_STE_MAPPED_NODE) && (vport->stat_data_enabled)) { diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 4a5a85ed42ec..634f8fff7425 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -608,6 +608,7 @@ struct fc_vft_header { #define ELS_CMD_LIRR 0x7A000000 #define ELS_CMD_LCB 0x81000000 #define ELS_CMD_FPIN 0x16000000 +#define ELS_CMD_EDC 0x17000000 #define ELS_CMD_QFPA 0xB0000000 #define ELS_CMD_UVEM 0xB1000000 #else /* __LITTLE_ENDIAN_BITFIELD */ @@ -652,6 +653,7 @@ struct fc_vft_header { #define ELS_CMD_LIRR 0x7A #define ELS_CMD_LCB 0x81 #define ELS_CMD_FPIN ELS_FPIN +#define ELS_CMD_EDC ELS_EDC #define ELS_CMD_QFPA 0xB0 #define ELS_CMD_UVEM 0xB1 #endif @@ -1694,6 +1696,7 @@ struct lpfc_fdmi_reg_portattr { #define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268 #define PCI_DEVICE_ID_LANCER_G6_FC 0xe300 #define PCI_DEVICE_ID_LANCER_G7_FC 0xf400 +#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500 #define PCI_DEVICE_ID_SAT_SMB 0xf011 #define PCI_DEVICE_ID_SAT_MID 0xf015 #define PCI_DEVICE_ID_RFLY 0xf095 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index eb8c735a243b..79a4872c2edb 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -20,6 +20,7 @@ * included with this package. * *******************************************************************/ +#include <uapi/scsi/fc/fc_fs.h> #include <uapi/scsi/fc/fc_els.h> /* Macros to deal with bit fields. Each bit field must have 3 #defines @@ -94,6 +95,9 @@ struct lpfc_sli_intf { #define LPFC_SLI_INTF_FAMILY_BE3 0x1 #define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa #define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb +#define LPFC_SLI_INTF_FAMILY_G6 0xc +#define LPFC_SLI_INTF_FAMILY_G7 0xd +#define LPFC_SLI_INTF_FAMILY_G7P 0xe #define lpfc_sli_intf_slirev_SHIFT 4 #define lpfc_sli_intf_slirev_MASK 0x0000000F #define lpfc_sli_intf_slirev_WORD word0 @@ -393,6 +397,12 @@ struct lpfc_wcqe_complete { #define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF #define lpfc_wcqe_c_ersp0_WORD word0 uint32_t total_data_placed; +#define lpfc_wcqe_c_cmf_cg_SHIFT 31 +#define lpfc_wcqe_c_cmf_cg_MASK 0x00000001 +#define lpfc_wcqe_c_cmf_cg_WORD total_data_placed +#define lpfc_wcqe_c_cmf_bw_SHIFT 0 +#define lpfc_wcqe_c_cmf_bw_MASK 0x0FFFFFFF +#define lpfc_wcqe_c_cmf_bw_WORD total_data_placed uint32_t parameter; #define lpfc_wcqe_c_bg_edir_SHIFT 5 #define lpfc_wcqe_c_bg_edir_MASK 0x00000001 @@ -687,6 +697,7 @@ struct lpfc_register { #define lpfc_sliport_eqdelay_id_MASK 0xfff #define lpfc_sliport_eqdelay_id_WORD word0 #define LPFC_SEC_TO_USEC 1000000 +#define LPFC_SEC_TO_MSEC 1000 /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically * reside in BAR 2. 
@@ -959,6 +970,12 @@ union lpfc_sli4_cfg_shdr { #define lpfc_mbox_hdr_add_status_SHIFT 8 #define lpfc_mbox_hdr_add_status_MASK 0x000000FF #define lpfc_mbox_hdr_add_status_WORD word7 +#define LPFC_ADD_STATUS_INCOMPAT_OBJ 0xA2 +#define lpfc_mbox_hdr_add_status_2_SHIFT 16 +#define lpfc_mbox_hdr_add_status_2_MASK 0x000000FF +#define lpfc_mbox_hdr_add_status_2_WORD word7 +#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH 0x01 +#define LPFC_ADD_STATUS_2_INCORRECT_ASIC 0x02 uint32_t response_length; uint32_t actual_response_length; } response; @@ -1015,6 +1032,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D #define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73 #define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74 +#define LPFC_MBOX_OPCODE_REG_CONGESTION_BUF 0x8E #define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A #define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B #define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C @@ -1123,6 +1141,12 @@ struct lpfc_mbx_sge { uint32_t length; }; +struct lpfc_mbx_host_buf { + uint32_t length; + uint32_t pa_lo; + uint32_t pa_hi; +}; + struct lpfc_mbx_nembed_cmd { struct lpfc_sli4_cfg_mhdr cfg_mhdr; #define LPFC_SLI4_MBX_SGE_MAX_PAGES 19 @@ -1133,6 +1157,31 @@ struct lpfc_mbx_nembed_sge_virt { void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES]; }; +#define LPFC_MBX_OBJECT_NAME_LEN_DW 26 +struct lpfc_mbx_read_object { /* Version 0 */ + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rd_object_rlen_SHIFT 0 +#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF +#define lpfc_mbx_rd_object_rlen_WORD word0 + uint32_t rd_object_offset; + uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; +#define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */ + uint32_t rd_object_cnt; + struct lpfc_mbx_host_buf rd_object_hbuf[4]; + } request; + struct { + uint32_t rd_object_actual_rlen; + uint32_t word1; +#define lpfc_mbx_rd_object_eof_SHIFT 31 +#define lpfc_mbx_rd_object_eof_MASK 0x1 +#define lpfc_mbx_rd_object_eof_WORD word1 + } response; + } u; +}; + struct lpfc_mbx_eq_create { struct mbox_header header; union { @@ -1555,7 +1604,7 @@ struct rq_context { #define lpfc_rq_context_hdr_size_WORD word1 uint32_t word2; #define lpfc_rq_context_cq_id_SHIFT 16 -#define lpfc_rq_context_cq_id_MASK 0x000003FF +#define lpfc_rq_context_cq_id_MASK 0x0000FFFF #define lpfc_rq_context_cq_id_WORD word2 #define lpfc_rq_context_buf_size_SHIFT 0 #define lpfc_rq_context_buf_size_MASK 0x0000FFFF @@ -2328,6 +2377,7 @@ struct lpfc_mbx_redisc_fcf_tbl { #define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67 #define ADD_STATUS_FW_NOT_SUPPORTED 0xEB #define ADD_STATUS_INVALID_REQUEST 0x4B +#define ADD_STATUS_INVALID_OBJECT_NAME 0xA0 #define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58 struct lpfc_mbx_sli4_config { @@ -2803,6 +2853,12 @@ struct lpfc_mbx_read_config { #define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31 #define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001 #define lpfc_mbx_rd_conf_extnts_inuse_WORD word1 +#define lpfc_mbx_rd_conf_wcs_SHIFT 28 /* warning signaling */ +#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001 +#define lpfc_mbx_rd_conf_wcs_WORD word1 +#define lpfc_mbx_rd_conf_acs_SHIFT 27 /* alarm signaling */ +#define lpfc_mbx_rd_conf_acs_MASK 0x00000001 +#define lpfc_mbx_rd_conf_acs_WORD word1 uint32_t word2; #define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0 #define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F @@ -3328,17 +3384,20 @@ struct lpfc_sli4_parameters { #define cfg_nosr_SHIFT 9 #define cfg_nosr_MASK 0x00000001 #define cfg_nosr_WORD word19 - #define cfg_bv1s_SHIFT 10 #define cfg_bv1s_MASK 
0x00000001 #define cfg_bv1s_WORD word19 -#define cfg_pvl_SHIFT 13 -#define cfg_pvl_MASK 0x00000001 -#define cfg_pvl_WORD word19 #define cfg_nsler_SHIFT 12 #define cfg_nsler_MASK 0x00000001 #define cfg_nsler_WORD word19 +#define cfg_pvl_SHIFT 13 +#define cfg_pvl_MASK 0x00000001 +#define cfg_pvl_WORD word19 + +#define cfg_pbde_SHIFT 20 +#define cfg_pbde_MASK 0x00000001 +#define cfg_pbde_WORD word19 uint32_t word20; #define cfg_max_tow_xri_SHIFT 0 @@ -3346,12 +3405,13 @@ struct lpfc_sli4_parameters { #define cfg_max_tow_xri_WORD word20 uint32_t word21; -#define cfg_mib_bde_cnt_SHIFT 16 -#define cfg_mib_bde_cnt_MASK 0x000000ff -#define cfg_mib_bde_cnt_WORD word21 #define cfg_mi_ver_SHIFT 0 #define cfg_mi_ver_MASK 0x0000ffff #define cfg_mi_ver_WORD word21 +#define cfg_cmf_SHIFT 24 +#define cfg_cmf_MASK 0x000000ff +#define cfg_cmf_WORD word21 + uint32_t mib_size; uint32_t word23; /* RESERVED */ @@ -3380,7 +3440,10 @@ struct lpfc_sli4_parameters { #define LPFC_SET_UE_RECOVERY 0x10 #define LPFC_SET_MDS_DIAGS 0x12 +#define LPFC_SET_CGN_SIGNAL 0x1f #define LPFC_SET_DUAL_DUMP 0x1e +#define LPFC_SET_ENABLE_MI 0x21 +#define LPFC_SET_ENABLE_CMF 0x24 struct lpfc_mbx_set_feature { struct mbox_header header; uint32_t feature; @@ -3395,6 +3458,9 @@ struct lpfc_mbx_set_feature { #define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1 #define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001 #define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6 +#define lpfc_mbx_set_feature_CGN_warn_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_warn_freq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_CGN_warn_freq_WORD word6 #define lpfc_mbx_set_feature_dd_SHIFT 0 #define lpfc_mbx_set_feature_dd_MASK 0x00000001 #define lpfc_mbx_set_feature_dd_WORD word6 @@ -3404,6 +3470,15 @@ struct lpfc_mbx_set_feature { #define LPFC_DISABLE_DUAL_DUMP 0 #define LPFC_ENABLE_DUAL_DUMP 1 #define LPFC_QUERY_OP_DUAL_DUMP 2 +#define lpfc_mbx_set_feature_cmf_SHIFT 0 +#define lpfc_mbx_set_feature_cmf_MASK 0x00000001 +#define lpfc_mbx_set_feature_cmf_WORD word6 +#define lpfc_mbx_set_feature_mi_SHIFT 0 +#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff +#define lpfc_mbx_set_feature_mi_WORD word6 +#define lpfc_mbx_set_feature_milunq_SHIFT 16 +#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_milunq_WORD word6 uint32_t word7; #define lpfc_mbx_set_feature_UERP_SHIFT 0 #define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff @@ -3411,16 +3486,51 @@ struct lpfc_mbx_set_feature { #define lpfc_mbx_set_feature_UESR_SHIFT 16 #define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff #define lpfc_mbx_set_feature_UESR_WORD word7 +#define lpfc_mbx_set_feature_CGN_alarm_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_alarm_freq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_CGN_alarm_freq_WORD word7 + u32 word8; +#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff +#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8 }; #define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2 +#define LPFC_SET_HOST_DATE_TIME 0x4 + +struct lpfc_mbx_set_host_date_time { + uint32_t word6; +#define lpfc_mbx_set_host_month_WORD word6 +#define lpfc_mbx_set_host_month_SHIFT 16 +#define lpfc_mbx_set_host_month_MASK 0xFF +#define lpfc_mbx_set_host_day_WORD word6 +#define lpfc_mbx_set_host_day_SHIFT 8 +#define lpfc_mbx_set_host_day_MASK 0xFF +#define lpfc_mbx_set_host_year_WORD word6 +#define lpfc_mbx_set_host_year_SHIFT 0 +#define lpfc_mbx_set_host_year_MASK 0xFF + uint32_t word7; +#define lpfc_mbx_set_host_hour_WORD word7 
+#define lpfc_mbx_set_host_hour_SHIFT 16 +#define lpfc_mbx_set_host_hour_MASK 0xFF +#define lpfc_mbx_set_host_min_WORD word7 +#define lpfc_mbx_set_host_min_SHIFT 8 +#define lpfc_mbx_set_host_min_MASK 0xFF +#define lpfc_mbx_set_host_sec_WORD word7 +#define lpfc_mbx_set_host_sec_SHIFT 0 +#define lpfc_mbx_set_host_sec_MASK 0xFF +}; + struct lpfc_mbx_set_host_data { #define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48 struct mbox_header header; uint32_t param_id; uint32_t param_len; - uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE]; + union { + uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE]; + struct lpfc_mbx_set_host_date_time tm; + } un; }; struct lpfc_mbx_set_trunk_mode { @@ -3438,6 +3548,21 @@ struct lpfc_mbx_get_sli4_parameters { struct lpfc_sli4_parameters sli4_parameters; }; +struct lpfc_mbx_reg_congestion_buf { + struct mbox_header header; + uint32_t word0; +#define lpfc_mbx_reg_cgn_buf_type_WORD word0 +#define lpfc_mbx_reg_cgn_buf_type_SHIFT 0 +#define lpfc_mbx_reg_cgn_buf_type_MASK 0xFF +#define lpfc_mbx_reg_cgn_buf_cnt_WORD word0 +#define lpfc_mbx_reg_cgn_buf_cnt_SHIFT 16 +#define lpfc_mbx_reg_cgn_buf_cnt_MASK 0xFF + uint32_t word1; + uint32_t length; + uint32_t addr_lo; + uint32_t addr_hi; +}; + struct lpfc_rscr_desc_generic { #define LPFC_RSRC_DESC_WSIZE 22 uint32_t desc[LPFC_RSRC_DESC_WSIZE]; @@ -3603,6 +3728,9 @@ struct lpfc_controller_attribute { #define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8 #define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff #define lpfc_cntl_attr_eprom_ver_hi_WORD word17 +#define lpfc_cntl_attr_flash_id_SHIFT 16 +#define lpfc_cntl_attr_flash_id_MASK 0x000000ff +#define lpfc_cntl_attr_flash_id_WORD word17 uint32_t mbx_da_struct_ver; uint32_t ep_fw_da_struct_ver; uint32_t ncsi_ver_str[3]; @@ -3744,6 +3872,7 @@ struct lpfc_mbx_get_port_name { #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 #define MB_CQE_STATUS_DMA_FAILED 0x5 + #define LPFC_MBX_WR_CONFIG_MAX_BDE 1 struct lpfc_mbx_wr_object { struct mbox_header header; @@ -3760,7 +3889,7 @@ struct lpfc_mbx_wr_object { #define lpfc_wr_object_write_length_MASK 0x00FFFFFF #define lpfc_wr_object_write_length_WORD word4 uint32_t write_offset; - uint32_t object_name[26]; + uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; uint32_t bde_count; struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE]; } request; @@ -3809,6 +3938,7 @@ struct lpfc_mqe { struct lpfc_mbx_unreg_fcfi unreg_fcfi; struct lpfc_mbx_mq_create mq_create; struct lpfc_mbx_mq_create_ext mq_create_ext; + struct lpfc_mbx_read_object read_object; struct lpfc_mbx_eq_create eq_create; struct lpfc_mbx_modify_eq_delay eq_delay; struct lpfc_mbx_cq_create cq_create; @@ -3834,6 +3964,7 @@ struct lpfc_mqe { struct lpfc_mbx_query_fw_config query_fw_cfg; struct lpfc_mbx_set_beacon_config beacon_config; struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; + struct lpfc_mbx_reg_congestion_buf reg_congestion_buf; struct lpfc_mbx_set_link_diag_state link_diag_state; struct lpfc_mbx_set_link_diag_loopback link_diag_loopback; struct lpfc_mbx_run_link_diag_test link_diag_test; @@ -3888,6 +4019,7 @@ struct lpfc_mcqe { #define LPFC_TRAILER_CODE_GRP5 0x5 #define LPFC_TRAILER_CODE_FC 0x10 #define LPFC_TRAILER_CODE_SLI 0x11 +#define LPFC_TRAILER_CODE_CMSTAT 0x13 }; struct lpfc_acqe_link { @@ -4122,6 +4254,19 @@ struct lpfc_acqe_misconfigured_event { #define LPFC_SLI_EVENT_STATUS_UNCERTIFIED 0x05 }; +struct lpfc_acqe_cgn_signal { + u32 word0; +#define lpfc_warn_acqe_SHIFT 0 +#define lpfc_warn_acqe_MASK 0x7FFFFFFF +#define lpfc_warn_acqe_WORD word0 +#define lpfc_imm_acqe_SHIFT 31 +#define 
lpfc_imm_acqe_MASK 0x1 +#define lpfc_imm_acqe_WORD word0 + u32 alarm_cnt; + u32 word2; + u32 trailer; +}; + struct lpfc_acqe_sli { uint32_t event_data1; uint32_t event_data2; @@ -4134,8 +4279,10 @@ struct lpfc_acqe_sli { #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 #define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 #define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA +#define LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG 0xE #define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF #define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10 +#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11 }; /* @@ -4543,6 +4690,69 @@ struct create_xri_wqe { #define T_REQUEST_TAG 3 #define T_XRI_TAG 1 +struct cmf_sync_wqe { + uint32_t rsrvd[3]; + uint32_t word3; +#define cmf_sync_interval_SHIFT 0 +#define cmf_sync_interval_MASK 0x00000ffff +#define cmf_sync_interval_WORD word3 +#define cmf_sync_afpin_SHIFT 16 +#define cmf_sync_afpin_MASK 0x000000001 +#define cmf_sync_afpin_WORD word3 +#define cmf_sync_asig_SHIFT 17 +#define cmf_sync_asig_MASK 0x000000001 +#define cmf_sync_asig_WORD word3 +#define cmf_sync_op_SHIFT 20 +#define cmf_sync_op_MASK 0x00000000f +#define cmf_sync_op_WORD word3 +#define cmf_sync_ver_SHIFT 24 +#define cmf_sync_ver_MASK 0x0000000ff +#define cmf_sync_ver_WORD word3 +#define LPFC_CMF_SYNC_VER 1 + uint32_t event_tag; + uint32_t word5; +#define cmf_sync_wsigmax_SHIFT 0 +#define cmf_sync_wsigmax_MASK 0x00000ffff +#define cmf_sync_wsigmax_WORD word5 +#define cmf_sync_wsigcnt_SHIFT 16 +#define cmf_sync_wsigcnt_MASK 0x00000ffff +#define cmf_sync_wsigcnt_WORD word5 + uint32_t word6; + uint32_t word7; +#define cmf_sync_cmnd_SHIFT 8 +#define cmf_sync_cmnd_MASK 0x0000000ff +#define cmf_sync_cmnd_WORD word7 + uint32_t word8; + uint32_t word9; +#define cmf_sync_reqtag_SHIFT 0 +#define cmf_sync_reqtag_MASK 0x00000ffff +#define cmf_sync_reqtag_WORD word9 +#define cmf_sync_wfpinmax_SHIFT 16 +#define cmf_sync_wfpinmax_MASK 0x0000000ff +#define cmf_sync_wfpinmax_WORD word9 +#define cmf_sync_wfpincnt_SHIFT 24 +#define cmf_sync_wfpincnt_MASK 0x0000000ff +#define cmf_sync_wfpincnt_WORD word9 + uint32_t word10; +#define cmf_sync_qosd_SHIFT 9 +#define cmf_sync_qosd_MASK 0x00000001 +#define cmf_sync_qosd_WORD word10 + uint32_t word11; +#define cmf_sync_cmd_type_SHIFT 0 +#define cmf_sync_cmd_type_MASK 0x0000000f +#define cmf_sync_cmd_type_WORD word11 +#define cmf_sync_wqec_SHIFT 7 +#define cmf_sync_wqec_MASK 0x00000001 +#define cmf_sync_wqec_WORD word11 +#define cmf_sync_cqid_SHIFT 16 +#define cmf_sync_cqid_MASK 0x0000ffff +#define cmf_sync_cqid_WORD word11 + uint32_t read_bytes; + uint32_t word13; + uint32_t word14; + uint32_t word15; +}; + struct abort_cmd_wqe { uint32_t rsrvd[3]; uint32_t word3; @@ -4672,6 +4882,7 @@ union lpfc_wqe { struct fcp_iread64_wqe fcp_iread; struct fcp_iwrite64_wqe fcp_iwrite; struct abort_cmd_wqe abort_cmd; + struct cmf_sync_wqe cmf_sync; struct create_xri_wqe create_xri; struct xmit_bcast64_wqe xmit_bcast64; struct xmit_seq64_wqe xmit_sequence; @@ -4692,6 +4903,7 @@ union lpfc_wqe128 { struct fcp_iread64_wqe fcp_iread; struct fcp_iwrite64_wqe fcp_iwrite; struct abort_cmd_wqe abort_cmd; + struct cmf_sync_wqe cmf_sync; struct create_xri_wqe create_xri; struct xmit_bcast64_wqe xmit_bcast64; struct xmit_seq64_wqe xmit_sequence; @@ -4707,6 +4919,7 @@ union lpfc_wqe128 { #define MAGIC_NUMBER_G6 0xFEAA0003 #define MAGIC_NUMBER_G7 0xFEAA0005 +#define MAGIC_NUMBER_G7P 0xFEAA0020 struct lpfc_grp_hdr { uint32_t size; @@ -4734,6 +4947,7 @@ struct lpfc_grp_hdr { #define FCP_COMMAND_TRSP 0x3 #define FCP_COMMAND_TSEND 0x7 #define OTHER_COMMAND 0x8 
+#define CMF_SYNC_COMMAND 0xA #define ELS_COMMAND_NON_FIP 0xC #define ELS_COMMAND_FIP 0xD @@ -4755,6 +4969,7 @@ struct lpfc_grp_hdr { #define CMD_FCP_TRECEIVE64_WQE 0xA1 #define CMD_FCP_TRSP64_WQE 0xA3 #define CMD_GEN_REQUEST64_WQE 0xC2 +#define CMD_CMF_SYNC_WQE 0xE8 #define CMD_WQE_MASK 0xff @@ -4762,3 +4977,43 @@ struct lpfc_grp_hdr { #define LPFC_FW_DUMP 1 #define LPFC_FW_RESET 2 #define LPFC_DV_RESET 3 + +/* On some kernels, enum fc_ls_tlv_dtag does not have + * these 2 enums defined; on other kernels it does. + * To get around this we need to add these 2 defines here. + */ +#ifndef ELS_DTAG_LNK_FAULT_CAP +#define ELS_DTAG_LNK_FAULT_CAP 0x0001000D +#endif +#ifndef ELS_DTAG_CG_SIGNAL_CAP +#define ELS_DTAG_CG_SIGNAL_CAP 0x0001000F +#endif + +/* + * Initializer useful for decoding FPIN string table. + */ +#define FC_FPIN_CONGN_SEVERITY_INIT { \ + { FPIN_CONGN_SEVERITY_WARNING, "Warning" }, \ + { FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \ +} + +/* EDC supports two descriptors. When allocated, it is the + * size of this structure plus each supported descriptor. + */ +struct lpfc_els_edc_req { + struct fc_els_edc edc; /* hdr up to descriptors */ + struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */ +}; + +/* Minimum structure defines for the EDC response. + * Balance is in buffer. + */ +struct lpfc_els_edc_rsp { + struct fc_els_edc_resp edc_rsp; /* hdr up to descriptors */ + struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */ +}; + +/* Used for logging FPIN messages */ +#define LPFC_FPIN_WWPN_LINE_SZ 128 +#define LPFC_FPIN_WWPN_LINE_CNT 6 +#define LPFC_FPIN_WWPN_NUM_LINE 6 diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h index d48414e295a0..6a90e6e53d09 100644 --- a/drivers/scsi/lpfc/lpfc_ids.h +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
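The cmf_sync_wqe bitfields and the CMD_CMF_SYNC_WQE / CMF_SYNC_COMMAND opcodes defined above are consumed by lpfc_issue_cmf_sync_wqe(), whose body is outside this diff. A minimal sketch of how such a WQE could be packed with the driver's bf_set() accessor; the chosen fields and the omission of queue submission are assumptions for illustration:

```c
/* Illustrative only: lpfc_issue_cmf_sync_wqe() is not shown in this
 * diff, so the field values below are assumptions.
 */
static void
sketch_fill_cmf_sync_wqe(union lpfc_wqe128 *wqe, u16 interval_ms)
{
	memset(wqe, 0, sizeof(*wqe));
	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
	bf_set(cmf_sync_interval, &wqe->cmf_sync, interval_ms);
	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
	bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
	bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);	/* request a WCQE */
}
```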
* @@ -118,6 +118,8 @@ const struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC, PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC, + PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 5983e05b648f..0ec322f0e3cb 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -93,6 +93,7 @@ static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); +static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -1243,7 +1244,8 @@ lpfc_idle_stat_delay_work(struct work_struct *work) return; if (phba->link_state == LPFC_HBA_ERROR || - phba->pport->fc_flag & FC_OFFLINE_MODE) + phba->pport->fc_flag & FC_OFFLINE_MODE || + phba->cmf_active_mode != LPFC_CFG_OFF) goto requeue; for_each_present_cpu(i) { @@ -1852,6 +1854,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, { int rc; uint32_t intr_mode; + LPFC_MBOXQ_t *mboxq; if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) { @@ -1871,11 +1874,19 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, "Recovery...\n"); /* If we are no wait, the HBA has been reset and is not - * functional, thus we should clear LPFC_SLI_ACTIVE flag. + * functional, thus we should clear + * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. */ if (mbx_action == LPFC_MBX_NO_WAIT) { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; + if (phba->sli.mbox_active) { + mboxq = phba->sli.mbox_active; + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + } spin_unlock_irq(&phba->hbalock); } @@ -2590,6 +2601,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) case PCI_DEVICE_ID_LANCER_G7_FC: m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; break; + case PCI_DEVICE_ID_LANCER_G7P_FC: + m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; + break; case PCI_DEVICE_ID_SKYHAWK: case PCI_DEVICE_ID_SKYHAWK_VF: oneConnect = 1; @@ -3008,6 +3022,123 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) } /** + * lpfc_cmf_stop - Stop CMF processing + * @phba: pointer to lpfc hba data structure. + * + * This is called when the link goes down or if CMF mode is turned OFF. + * It is also called when going offline or unloaded just before the + * congestion info buffer is unregistered. 
+ **/ +void +lpfc_cmf_stop(struct lpfc_hba *phba) +{ + int cpu; + struct lpfc_cgn_stat *cgs; + + /* We only do something if CMF is enabled */ + if (!phba->sli4_hba.pc_sli4_params.cmf) + return; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6221 Stop CMF / Cancel Timer\n"); + + /* Cancel the CMF timer */ + hrtimer_cancel(&phba->cmf_timer); + + /* Zero CMF counters */ + atomic_set(&phba->cmf_busy, 0); + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + atomic64_set(&cgs->total_bytes, 0); + atomic64_set(&cgs->rcv_bytes, 0); + atomic_set(&cgs->rx_io_cnt, 0); + atomic64_set(&cgs->rx_latency, 0); + } + atomic_set(&phba->cmf_bw_wait, 0); + + /* Resume any blocked IO - Queue unblock on workqueue */ + queue_work(phba->wq, &phba->unblock_request_work); +} + +static inline uint64_t +lpfc_get_max_line_rate(struct lpfc_hba *phba) +{ + uint64_t rate = lpfc_sli_port_speed_get(phba); + + return ((((unsigned long)rate) * 1024 * 1024) / 10); +} + +void +lpfc_cmf_signal_init(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6223 Signal CMF init\n"); + + /* Use the new fc_linkspeed to recalculate */ + phba->cmf_interval_rate = LPFC_CMF_INTERVAL; + phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); + phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * + phba->cmf_interval_rate, 1000); + phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; + + /* This is a signal to firmware to sync up CMF BW with link speed */ + lpfc_issue_cmf_sync_wqe(phba, 0, 0); +} + +/** + * lpfc_cmf_start - Start CMF processing + * @phba: pointer to lpfc hba data structure. + * + * This is called when the link comes up or if CMF mode is turned OFF + * to Monitor or Managed. + **/ +void +lpfc_cmf_start(struct lpfc_hba *phba) +{ + struct lpfc_cgn_stat *cgs; + int cpu; + + /* We only do something if CMF is enabled */ + if (!phba->sli4_hba.pc_sli4_params.cmf || + phba->cmf_active_mode == LPFC_CFG_OFF) + return; + + /* Reinitialize congestion buffer info */ + lpfc_init_congestion_buf(phba); + + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + + atomic_set(&phba->cmf_busy, 0); + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + atomic64_set(&cgs->total_bytes, 0); + atomic64_set(&cgs->rcv_bytes, 0); + atomic_set(&cgs->rx_io_cnt, 0); + atomic64_set(&cgs->rx_latency, 0); + } + phba->cmf_latency.tv_sec = 0; + phba->cmf_latency.tv_nsec = 0; + + lpfc_cmf_signal_init(phba); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6222 Start CMF / Timer\n"); + + phba->cmf_timer_cnt = 0; + hrtimer_start(&phba->cmf_timer, + ktime_set(0, LPFC_CMF_INTERVAL * 1000000), + HRTIMER_MODE_REL); + /* Setup for latency check in IO cmpl routines */ + ktime_get_real_ts64(&phba->cmf_latency); + + atomic_set(&phba->cmf_bw_wait, 0); + atomic_set(&phba->cmf_stop_io, 0); +} + +/** * lpfc_stop_hba_timers - Stop all the timers associated with an HBA * @phba: pointer to lpfc hba data structure. * @@ -3541,6 +3672,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); + + lpfc_unreg_rpi(vports[i], ndlp); /* * Whenever an SLI4 port goes offline, free the * RPI. 
Get a new RPI when the adapter port @@ -3556,7 +3689,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } - lpfc_unreg_rpi(vports[i], ndlp); if (ndlp->nlp_type & NLP_FABRIC) { lpfc_disc_state_machine(vports[i], ndlp, @@ -4666,6 +4798,8 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) if (phba->hba_flag & HBA_FCOE_MODE) return; + if (phba->lmt & LMT_256Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; if (phba->lmt & LMT_128Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; if (phba->lmt & LMT_64Gb) @@ -4845,7 +4979,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) /** * lpfc_vmid_poll - VMID timeout detection - * @ptr: Map to lpfc_hba data structure pointer. + * @t: Timer context used to obtain the pointer to lpfc hba data structure. * * This routine is invoked when there is no I/O on by a VM for the specified * amount of time. When this situation is detected, the VMID has to be @@ -5074,6 +5208,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, case LPFC_FC_LA_SPEED_128G: port_speed = 128000; break; + case LPFC_FC_LA_SPEED_256G: + port_speed = 256000; + break; default: port_speed = 0; } @@ -5267,6 +5404,645 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) return port_speed; } +void +lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) +{ + struct rxtable_entry *entry; + int cnt = 0, head, tail, last, start; + + head = atomic_read(&phba->rxtable_idx_head); + tail = atomic_read(&phba->rxtable_idx_tail); + if (!phba->rxtable || head == tail) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "4411 Rxtable is empty\n"); + return; + } + last = tail; + start = head; + + /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */ + while (start != last) { + if (start) + start--; + else + start = LPFC_MAX_RXMONITOR_ENTRY - 1; + entry = &phba->rxtable[start]; + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld " + "Lat %lld ASz %lld Info %02d BWUtil %d " + "Int %d slot %d\n", + cnt, entry->max_bytes_per_interval, + entry->total_bytes, entry->rcv_bytes, + entry->avg_io_latency, entry->avg_io_size, + entry->cmf_info, entry->timer_utilization, + entry->timer_interval, start); + cnt++; + if (cnt >= LPFC_MAX_RXMONITOR_DUMP) + return; + } +} + +/** + * lpfc_cgn_update_stat - Save data into congestion stats buffer + * @phba: pointer to lpfc hba data structure. + * @dtag: FPIN descriptor received + * + * Increment the FPIN received counter/time when it happens. 
+ */ +void +lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) +{ + struct lpfc_cgn_info *cp; + struct tm broken; + struct timespec64 cur_time; + u32 cnt; + u16 value; + + /* Make sure we have a congestion info buffer */ + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &broken); + + /* Update congestion statistics */ + switch (dtag) { + case ELS_DTAG_LNK_INTEGRITY: + cnt = le32_to_cpu(cp->link_integ_notification); + cnt++; + cp->link_integ_notification = cpu_to_le32(cnt); + + cp->cgn_stat_lnk_month = broken.tm_mon + 1; + cp->cgn_stat_lnk_day = broken.tm_mday; + cp->cgn_stat_lnk_year = broken.tm_year - 100; + cp->cgn_stat_lnk_hour = broken.tm_hour; + cp->cgn_stat_lnk_min = broken.tm_min; + cp->cgn_stat_lnk_sec = broken.tm_sec; + break; + case ELS_DTAG_DELIVERY: + cnt = le32_to_cpu(cp->delivery_notification); + cnt++; + cp->delivery_notification = cpu_to_le32(cnt); + + cp->cgn_stat_del_month = broken.tm_mon + 1; + cp->cgn_stat_del_day = broken.tm_mday; + cp->cgn_stat_del_year = broken.tm_year - 100; + cp->cgn_stat_del_hour = broken.tm_hour; + cp->cgn_stat_del_min = broken.tm_min; + cp->cgn_stat_del_sec = broken.tm_sec; + break; + case ELS_DTAG_PEER_CONGEST: + cnt = le32_to_cpu(cp->cgn_peer_notification); + cnt++; + cp->cgn_peer_notification = cpu_to_le32(cnt); + + cp->cgn_stat_peer_month = broken.tm_mon + 1; + cp->cgn_stat_peer_day = broken.tm_mday; + cp->cgn_stat_peer_year = broken.tm_year - 100; + cp->cgn_stat_peer_hour = broken.tm_hour; + cp->cgn_stat_peer_min = broken.tm_min; + cp->cgn_stat_peer_sec = broken.tm_sec; + break; + case ELS_DTAG_CONGESTION: + cnt = le32_to_cpu(cp->cgn_notification); + cnt++; + cp->cgn_notification = cpu_to_le32(cnt); + + cp->cgn_stat_cgn_month = broken.tm_mon + 1; + cp->cgn_stat_cgn_day = broken.tm_mday; + cp->cgn_stat_cgn_year = broken.tm_year - 100; + cp->cgn_stat_cgn_hour = broken.tm_hour; + cp->cgn_stat_cgn_min = broken.tm_min; + cp->cgn_stat_cgn_sec = broken.tm_sec; + } + if (phba->cgn_fpin_frequency && + phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { + value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; + cp->cgn_stat_npm = cpu_to_le32(value); + } + value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(value); +} + +/** + * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer + * @phba: pointer to lpfc hba data structure. + * + * Save the congestion event data every minute. + * On the hour collapse all the minute data into hour data. Every day + * collapse all the hour data into daily data. Separate driver + * and fabric congestion event counters that will be saved out + * to the registered congestion buffer every minute.
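+ *
+ * Rollup sketch (indices taken from the code below, not from the
+ * firmware spec): minute N lands in slot N % LPFC_MIN_HOUR of the
+ * cgn_*_min[] arrays; on each hour boundary the minute slots are
+ * summed (averaged for latency and bandwidth) into slot
+ * H % LPFC_HOUR_DAY of cgn_*_hr[]; on each day boundary the hour
+ * slots are collapsed into slot D % LPFC_MAX_CGN_DAYS of
+ * cgn_*_day[], overwriting the oldest day once the index wraps.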
+ */ +static void +lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) +{ + struct lpfc_cgn_info *cp; + struct tm broken; + struct timespec64 cur_time; + uint32_t i, index; + uint16_t value, mvalue; + uint64_t bps; + uint32_t mbps; + uint32_t dvalue, wvalue, lvalue, avalue; + uint64_t latsum; + uint16_t *ptr; + uint32_t *lptr; + uint16_t *mptr; + + /* Make sure we have a congestion info buffer */ + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + if (time_before(jiffies, phba->cgn_evt_timestamp)) + return; + phba->cgn_evt_timestamp = jiffies + + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); + phba->cgn_evt_minute++; + + /* We should get to this point in the routine on 1 minute intervals */ + + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &broken); + + if (phba->cgn_fpin_frequency && + phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { + value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; + cp->cgn_stat_npm = cpu_to_le32(value); + } + + /* Read and clear the latency counters for this minute */ + lvalue = atomic_read(&phba->cgn_latency_evt_cnt); + latsum = atomic64_read(&phba->cgn_latency_evt); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + + /* We need to store MB/sec bandwidth in the congestion information. + * block_cnt is count of 512 byte blocks for the entire minute, + * bps will get bytes per sec before finally converting to MB/sec. + */ + bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; + phba->rx_block_cnt = 0; + mvalue = bps / (1024 * 1024); /* convert to MB/sec */ + + /* Every minute */ + /* cgn parameters */ + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; + cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; + cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; + + /* Fill in default LUN qdepth */ + value = (uint16_t)(phba->pport->cfg_lun_queue_depth); + cp->cgn_lunq = cpu_to_le16(value); + + /* Record congestion buffer info - every minute + * cgn_driver_evt_cnt (Driver events) + * cgn_fabric_warn_cnt (Congestion Warnings) + * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) + * cgn_fabric_alarm_cnt (Congestion Alarms) + */ + index = ++cp->cgn_index_minute; + if (cp->cgn_index_minute == LPFC_MIN_HOUR) { + cp->cgn_index_minute = 0; + index = 0; + } + + /* Get the number of driver events in this sample and reset counter */ + dvalue = atomic_read(&phba->cgn_driver_evt_cnt); + atomic_set(&phba->cgn_driver_evt_cnt, 0); + + /* Get the number of warning events - FPIN and Signal for this minute */ + wvalue = 0; + if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) + wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + + /* Get the number of alarm events - FPIN and Signal for this minute */ + avalue = 0; + if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) + avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + + /* Collect the driver, warning, alarm and latency counts for this + * minute into the driver congestion buffer. 
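+ *
+ * (Worked example for the MB/s conversion above: 1,474,560 blocks of
+ * 512 bytes over a 60 second window is 12,582,912 bytes/sec, which
+ * stores as 12 MB/s after the divide by 1024 * 1024.)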
+ */ + ptr = &cp->cgn_drvr_min[index]; + value = (uint16_t)dvalue; + *ptr = cpu_to_le16(value); + + ptr = &cp->cgn_warn_min[index]; + value = (uint16_t)wvalue; + *ptr = cpu_to_le16(value); + + ptr = &cp->cgn_alarm_min[index]; + value = (uint16_t)avalue; + *ptr = cpu_to_le16(value); + + lptr = &cp->cgn_latency_min[index]; + if (lvalue) { + lvalue = (uint32_t)div_u64(latsum, lvalue); + *lptr = cpu_to_le32(lvalue); + } else { + *lptr = 0; + } + + /* Collect the bandwidth value into the driver's congestion buffer. */ + mptr = &cp->cgn_bw_min[index]; + *mptr = cpu_to_le16(mvalue); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", + index, dvalue, wvalue, *lptr, mvalue, avalue); + + /* Every hour */ + if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { + /* Record congestion buffer info - every hour + * Collapse all minutes into an hour + */ + index = ++cp->cgn_index_hour; + if (cp->cgn_index_hour == LPFC_HOUR_DAY) { + cp->cgn_index_hour = 0; + index = 0; + } + + dvalue = 0; + wvalue = 0; + lvalue = 0; + avalue = 0; + mvalue = 0; + mbps = 0; + for (i = 0; i < LPFC_MIN_HOUR; i++) { + dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); + wvalue += le16_to_cpu(cp->cgn_warn_min[i]); + lvalue += le32_to_cpu(cp->cgn_latency_min[i]); + mbps += le16_to_cpu(cp->cgn_bw_min[i]); + avalue += le16_to_cpu(cp->cgn_alarm_min[i]); + } + if (lvalue) /* Avg of latency averages */ + lvalue /= LPFC_MIN_HOUR; + if (mbps) /* Avg of Bandwidth averages */ + mvalue = mbps / LPFC_MIN_HOUR; + + lptr = &cp->cgn_drvr_hr[index]; + *lptr = cpu_to_le32(dvalue); + lptr = &cp->cgn_warn_hr[index]; + *lptr = cpu_to_le32(wvalue); + lptr = &cp->cgn_latency_hr[index]; + *lptr = cpu_to_le32(lvalue); + mptr = &cp->cgn_bw_hr[index]; + *mptr = cpu_to_le16(mvalue); + lptr = &cp->cgn_alarm_hr[index]; + *lptr = cpu_to_le32(avalue); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2419 Congestion Info - hour " + "(%d): %d %d %d %d %d\n", + index, dvalue, wvalue, lvalue, mvalue, avalue); + } + + /* Every day */ + if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { + /* Record congestion buffer info - every day + * Collapse all hours into a day. Rotate days + * after LPFC_MAX_CGN_DAYS. + */ + index = ++cp->cgn_index_day; + if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { + cp->cgn_index_day = 0; + index = 0; + } + + /* Anytime we overwrite daily index 0, after we wrap, + * we will be overwriting the oldest day, so we must + * update the congestion data start time for that day. + * That start time should have previously been saved after + * we wrote the last day's worth of data.
+ */ + if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { + time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); + + cp->cgn_info_month = broken.tm_mon + 1; + cp->cgn_info_day = broken.tm_mday; + cp->cgn_info_year = broken.tm_year - 100; + cp->cgn_info_hour = broken.tm_hour; + cp->cgn_info_minute = broken.tm_min; + cp->cgn_info_second = broken.tm_sec; + + lpfc_printf_log + (phba, KERN_INFO, LOG_CGN_MGMT, + "2646 CGNInfo idx0 Start Time: " + "%d/%d/%d %d:%d:%d\n", + cp->cgn_info_day, cp->cgn_info_month, + cp->cgn_info_year, cp->cgn_info_hour, + cp->cgn_info_minute, cp->cgn_info_second); + } + + dvalue = 0; + wvalue = 0; + lvalue = 0; + mvalue = 0; + mbps = 0; + avalue = 0; + for (i = 0; i < LPFC_HOUR_DAY; i++) { + dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); + wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); + lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); + mbps += le32_to_cpu(cp->cgn_bw_hr[i]); + avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); + } + if (lvalue) /* Avg of latency averages */ + lvalue /= LPFC_HOUR_DAY; + if (mbps) /* Avg of Bandwidth averages */ + mvalue = mbps / LPFC_HOUR_DAY; + + lptr = &cp->cgn_drvr_day[index]; + *lptr = cpu_to_le32(dvalue); + lptr = &cp->cgn_warn_day[index]; + *lptr = cpu_to_le32(wvalue); + lptr = &cp->cgn_latency_day[index]; + *lptr = cpu_to_le32(lvalue); + mptr = &cp->cgn_bw_day[index]; + *mptr = cpu_to_le16(mvalue); + lptr = &cp->cgn_alarm_day[index]; + *lptr = cpu_to_le32(avalue); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2420 Congestion Info - daily (%d): " + "%d %d %d %d %d\n", + index, dvalue, wvalue, lvalue, mvalue, avalue); + + /* We just wrote LPFC_MAX_CGN_DAYS of data, + * so we are wrapped on any data after this. + * Save this as the start time for the next day. + */ + if (index == (LPFC_MAX_CGN_DAYS - 1)) { + phba->hba_flag |= HBA_CGN_DAY_WRAP; + ktime_get_real_ts64(&phba->cgn_daily_ts); + } + } + + /* Use the frequency found in the last rcv'ed FPIN */ + value = phba->cgn_fpin_frequency; + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) + cp->cgn_warn_freq = cpu_to_le16(value); + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) + cp->cgn_alarm_freq = cpu_to_le16(value); + + /* Frequency (in ms) at which Signal Warning/Signal Congestion + * Notifications are received by the HBA + */ + value = phba->cgn_sig_freq; + + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) + cp->cgn_warn_freq = cpu_to_le16(value); + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) + cp->cgn_alarm_freq = cpu_to_le16(value); + + lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(lvalue); +} + +/** + * lpfc_calc_cmf_latency - latency from start of rate timer interval + * @phba: The Hba for which this call is being executed. + * + * The routine calculates the latency from the beginning of the CMF timer + * interval to the current point in time. It is called from IO completion + * when we exceed our Bandwidth limitation for the time interval. + */ +uint32_t +lpfc_calc_cmf_latency(struct lpfc_hba *phba) +{ + struct timespec64 cmpl_time; + uint32_t msec = 0; + + ktime_get_real_ts64(&cmpl_time); + + /* This routine works on a ms granularity so sec and usec are + * converted accordingly.
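+ * For example, assuming a timer start stamp of 1.999500000 s and a
+ * completion stamp of 2.000700000 s, the borrow branch below yields
+ * msec = (2 - 1 - 1) * 1000 + (1000000000 - 999500000 + 700000) /
+ * 1000000 = 1 ms.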
+ */ + if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { + msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / + NSEC_PER_MSEC; + } else { + if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { + msec = (cmpl_time.tv_sec - + phba->cmf_latency.tv_sec) * MSEC_PER_SEC; + msec += ((cmpl_time.tv_nsec - + phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); + } else { + msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - + 1) * MSEC_PER_SEC; + msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + + cmpl_time.tv_nsec) / NSEC_PER_MSEC); + } + } + return msec; +} + +/** + * lpfc_cmf_timer - This is the timer function for one congestion + * rate interval. + * @timer: Pointer to the high resolution timer that expired + */ +static enum hrtimer_restart +lpfc_cmf_timer(struct hrtimer *timer) +{ + struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, + cmf_timer); + struct rxtable_entry *entry; + uint32_t io_cnt; + uint32_t head, tail; + uint32_t busy, max_read; + uint64_t total, rcv, lat, mbpi; + int timer_interval = LPFC_CMF_INTERVAL; + uint32_t ms; + struct lpfc_cgn_stat *cgs; + int cpu; + + /* Only restart the timer if congestion mgmt is on */ + if (phba->cmf_active_mode == LPFC_CFG_OFF || + !phba->cmf_latency.tv_sec) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6224 CMF timer exit: %d %lld\n", + phba->cmf_active_mode, + (uint64_t)phba->cmf_latency.tv_sec); + return HRTIMER_NORESTART; + } + + /* If pport is not ready yet, just exit and wait for + * the next timer cycle to hit. + */ + if (!phba->pport) + goto skip; + + /* Do not block SCSI IO while in the timer routine since + * total_bytes will be cleared + */ + atomic_set(&phba->cmf_stop_io, 1); + + /* First we need to calculate the actual ms between + * the last timer interrupt and this one. We ask for + * LPFC_CMF_INTERVAL, however the actual time may + * vary depending on system overhead. + */ + ms = lpfc_calc_cmf_latency(phba); + + + /* Immediately after we calculate the time since the last + * timer interrupt, set the start time for the next + * interrupt + */ + ktime_get_real_ts64(&phba->cmf_latency); + + phba->cmf_link_byte_count = + div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); + + /* Collect all the stats from the prior timer interval */ + total = 0; + io_cnt = 0; + lat = 0; + rcv = 0; + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + total += atomic64_xchg(&cgs->total_bytes, 0); + io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); + lat += atomic64_xchg(&cgs->rx_latency, 0); + rcv += atomic64_xchg(&cgs->rcv_bytes, 0); + } + + /* Before we issue another CMF_SYNC_WQE, retrieve the BW + * returned from the last CMF_SYNC_WQE issued, from + * cmf_last_sync_bw. This will be the target BW for + * this next timer interval. 
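+ * (In Monitor mode, or while the link is down or HBA_SETUP is not
+ * yet set, no sync WQE is issued and mbpi simply stays pinned to
+ * the full link-rate byte count computed above.)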
+ */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED && + phba->link_state != LPFC_LINK_DOWN && + phba->hba_flag & HBA_SETUP) { + mbpi = phba->cmf_last_sync_bw; + phba->cmf_last_sync_bw = 0; + lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total); + } else { + /* For Monitor mode or link down we want mbpi + * to be the full link speed + */ + mbpi = phba->cmf_link_byte_count; + } + phba->cmf_timer_cnt++; + + if (io_cnt) { + /* Update congestion info buffer latency in us */ + atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); + atomic64_add(lat, &phba->cgn_latency_evt); + } + busy = atomic_xchg(&phba->cmf_busy, 0); + max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); + + /* Calculate MBPI for the next timer interval */ + if (mbpi) { + if (mbpi > phba->cmf_link_byte_count || + phba->cmf_active_mode == LPFC_CFG_MONITOR) + mbpi = phba->cmf_link_byte_count; + + /* Change max_bytes_per_interval to what the prior + * CMF_SYNC_WQE cmpl indicated. + */ + if (mbpi != phba->cmf_max_bytes_per_interval) + phba->cmf_max_bytes_per_interval = mbpi; + } + + /* Save rxmonitor information for debug */ + if (phba->rxtable) { + head = atomic_xchg(&phba->rxtable_idx_head, + LPFC_RXMONITOR_TABLE_IN_USE); + entry = &phba->rxtable[head]; + entry->total_bytes = total; + entry->rcv_bytes = rcv; + entry->cmf_busy = busy; + entry->cmf_info = phba->cmf_active_info; + if (io_cnt) { + entry->avg_io_latency = div_u64(lat, io_cnt); + entry->avg_io_size = div_u64(rcv, io_cnt); + } else { + entry->avg_io_latency = 0; + entry->avg_io_size = 0; + } + entry->max_read_cnt = max_read; + entry->io_cnt = io_cnt; + entry->max_bytes_per_interval = mbpi; + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + entry->timer_utilization = phba->cmf_last_ts; + else + entry->timer_utilization = ms; + entry->timer_interval = ms; + phba->cmf_last_ts = 0; + + /* Increment rxtable index */ + head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY; + tail = atomic_read(&phba->rxtable_idx_tail); + if (head == tail) { + tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY; + atomic_set(&phba->rxtable_idx_tail, tail); + } + atomic_set(&phba->rxtable_idx_head, head); + } + + if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { + /* If Monitor mode, check if we are oversubscribed + * against the full line rate. + */ + if (mbpi && total > mbpi) + atomic_inc(&phba->cgn_driver_evt_cnt); + } + phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ + + /* Each minute save Fabric and Driver congestion information */ + lpfc_cgn_save_evt_cnt(phba); + + /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the + * minute, adjust our next timer interval, if needed, to ensure a + * 1 minute granularity when we get the next timer interrupt. + */ + if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), + phba->cgn_evt_timestamp)) { + timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - + jiffies); + if (timer_interval <= 0) + timer_interval = LPFC_CMF_INTERVAL; + + /* If we adjust timer_interval, max_bytes_per_interval + * needs to be adjusted as well. + */ + phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * + timer_interval, 1000); + if (phba->cmf_active_mode == LPFC_CFG_MONITOR) + phba->cmf_max_bytes_per_interval = + phba->cmf_link_byte_count; + } + + /* Since total_bytes has already been zero'ed, its okay to unblock + * after max_bytes_per_interval is setup. 
+ */ + if (atomic_xchg(&phba->cmf_bw_wait, 0)) + queue_work(phba->wq, &phba->unblock_request_work); + + /* SCSI IO is now unblocked */ + atomic_set(&phba->cmf_stop_io, 0); + +skip: + hrtimer_forward_now(timer, + ktime_set(0, timer_interval * NSEC_PER_MSEC)); + return HRTIMER_RESTART; +} + #define trunk_link_status(__idx)\ bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ @@ -5329,6 +6105,9 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba, trunk_link_status(0), trunk_link_status(1), trunk_link_status(2), trunk_link_status(3)); + if (phba->cmf_active_mode != LPFC_CFG_OFF) + lpfc_cmf_signal_init(phba); + if (port_fault) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3202 trunk error:0x%x (%s) seen on port0:%s " @@ -5510,9 +6289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) uint8_t operational = 0; struct temp_event temp_event_data; struct lpfc_acqe_misconfigured_event *misconfigured; + struct lpfc_acqe_cgn_signal *cgn_signal; struct Scsi_Host *shost; struct lpfc_vport **vports; - int rc, i; + int rc, i, cnt; evt_type = bf_get(lpfc_trailer_type, acqe_sli); @@ -5668,6 +6448,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) "Event Data1:x%08x Event Data2: x%08x\n", acqe_sli->event_data1, acqe_sli->event_data2); break; + case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: + /* Call FW to obtain active parms */ + lpfc_sli4_cgn_parm_chg_evt(phba); + break; case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: /* Misconfigured WWN. Reports that the SLI Port is configured * to use FA-WWN, but the attached device doesn’t support it. @@ -5685,6 +6469,40 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) "Event Data1: x%08x Event Data2: x%08x\n", acqe_sli->event_data1, acqe_sli->event_data2); break; + case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: + if (phba->cmf_active_mode == LPFC_CFG_OFF) + break; + cgn_signal = (struct lpfc_acqe_cgn_signal *) + &acqe_sli->event_data1; + phba->cgn_acqe_cnt++; + + cnt = bf_get(lpfc_warn_acqe, cgn_signal); + atomic64_add(cnt, &phba->cgn_acqe_stat.warn); + atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); + + /* no threshold for CMF, even 1 signal will trigger an event */ + + /* Alarm overrides warning, so check that first */ + if (cgn_signal->alarm_cnt) { + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + /* Keep track of alarm cnt for cgn_info */ + atomic_add(cgn_signal->alarm_cnt, + &phba->cgn_fabric_alarm_cnt); + /* Keep track of alarm cnt for CMF_SYNC_WQE */ + atomic_add(cgn_signal->alarm_cnt, + &phba->cgn_sync_alarm_cnt); + } + } else if (cnt) { + /* signal action needs to be taken */ + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + /* Keep track of warning cnt for cgn_info */ + atomic_add(cnt, &phba->cgn_fabric_warn_cnt); + /* Keep track of warning cnt for CMF_SYNC_WQE */ + atomic_add(cnt, &phba->cgn_sync_warn_cnt); + } + } + break; default: lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3193 Unrecognized SLI event, type: 0x%x", @@ -6060,6 +6878,276 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, } /** + * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event + * @phba: pointer to lpfc hba data structure. + * + * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event + * is an asynchronous notification of a request to reset CM stats. 
+ **/ +static void +lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) +{ + if (!phba->cgn_i) + return; + lpfc_init_congestion_stat(phba); +} + +/** + * lpfc_cgn_params_val - Validate FW congestion parameters. + * @phba: pointer to lpfc hba data structure. + * @p_cfg_param: pointer to FW provided congestion parameters. + * + * This routine validates the congestion parameters passed + * by the FW to the driver via an ACQE event. + **/ +static void +lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) +{ + spin_lock_irq(&phba->hbalock); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, + LPFC_CFG_MONITOR)) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6225 CMF mode param out of range: %d\n", + p_cfg_param->cgn_param_mode); + p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; + } + + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_cgn_params_parse - Process a FW cong parm change event + * @phba: pointer to lpfc hba data structure. + * @p_cgn_param: pointer to a data buffer with the FW cong params. + * @len: the size of pdata in bytes. + * + * This routine validates the congestion management buffer signature + * from the FW, validates the contents and makes corrections for + * valid, in-range values. If the signature magic is correct and + * after parameter validation, the contents are copied to the driver's + * @phba structure. If the magic is incorrect, an error message is + * logged. + **/ +static void +lpfc_cgn_params_parse(struct lpfc_hba *phba, + struct lpfc_cgn_param *p_cgn_param, uint32_t len) +{ + struct lpfc_cgn_info *cp; + uint32_t crc, oldmode; + + /* Make sure the FW has encoded the correct magic number to + * validate the congestion parameter in FW memory. + */ + if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "4668 FW cgn parm buffer data: " + "magic 0x%x version %d mode %d " + "level0 %d level1 %d " + "level2 %d byte13 %d " + "byte14 %d byte15 %d " + "byte11 %d byte12 %d activeMode %d\n", + p_cgn_param->cgn_param_magic, + p_cgn_param->cgn_param_version, + p_cgn_param->cgn_param_mode, + p_cgn_param->cgn_param_level0, + p_cgn_param->cgn_param_level1, + p_cgn_param->cgn_param_level2, + p_cgn_param->byte13, + p_cgn_param->byte14, + p_cgn_param->byte15, + p_cgn_param->byte11, + p_cgn_param->byte12, + phba->cmf_active_mode); + + oldmode = phba->cmf_active_mode; + + /* Any parameters out of range are corrected to defaults + * by this routine. No need to fail. 
+ */ + lpfc_cgn_params_val(phba, p_cgn_param); + + /* Parameters are verified, move them into driver storage */ + spin_lock_irq(&phba->hbalock); + memcpy(&phba->cgn_p, p_cgn_param, + sizeof(struct lpfc_cgn_param)); + + /* Update parameters in congestion info buffer now */ + if (phba->cgn_i) { + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; + cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; + cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + } + spin_unlock_irq(&phba->hbalock); + + phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; + + switch (oldmode) { + case LPFC_CFG_OFF: + if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { + /* Turning CMF on */ + lpfc_cmf_start(phba); + + if (phba->link_state >= LPFC_LINK_UP) { + phba->cgn_reg_fpin = + phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = + phba->cgn_init_reg_signal; + lpfc_issue_els_edc(phba->pport, 0); + } + } + break; + case LPFC_CFG_MANAGED: + switch (phba->cgn_p.cgn_param_mode) { + case LPFC_CFG_OFF: + /* Turning CMF off */ + lpfc_cmf_stop(phba); + if (phba->link_state >= LPFC_LINK_UP) + lpfc_issue_els_edc(phba->pport, 0); + break; + case LPFC_CFG_MONITOR: + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4661 Switch from MANAGED to " + "`MONITOR mode\n"); + phba->cmf_max_bytes_per_interval = + phba->cmf_link_byte_count; + + /* Resume blocked IO - unblock on workqueue */ + queue_work(phba->wq, + &phba->unblock_request_work); + break; + } + break; + case LPFC_CFG_MONITOR: + switch (phba->cgn_p.cgn_param_mode) { + case LPFC_CFG_OFF: + /* Turning CMF off */ + lpfc_cmf_stop(phba); + if (phba->link_state >= LPFC_LINK_UP) + lpfc_issue_els_edc(phba->pport, 0); + break; + case LPFC_CFG_MANAGED: + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4662 Switch from MONITOR to " + "MANAGED mode\n"); + lpfc_cmf_signal_init(phba); + break; + } + break; + } + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4669 FW cgn parm buf wrong magic 0x%x " + "version %d\n", p_cgn_param->cgn_param_magic, + p_cgn_param->cgn_param_version); + } +} + +/** + * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. + * @phba: pointer to lpfc hba data structure. + * + * This routine issues a read_object mailbox command to + * get the congestion management parameters from the FW + * parses it and updates the driver maintained values. + * + * Returns + * 0 if the object was empty + * -Eval if an error was encountered + * Count if bytes were read from object + **/ +int +lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) +{ + int ret = 0; + struct lpfc_cgn_param *p_cgn_param = NULL; + u32 *pdata = NULL; + u32 len = 0; + + /* Find out if the FW has a new set of congestion parameters. */ + len = sizeof(struct lpfc_cgn_param); + pdata = kzalloc(len, GFP_KERNEL); + ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, + pdata, len); + + /* 0 means no data. A negative means error. A positive means + * bytes were copied. + */ + if (!ret) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4670 CGN RD OBJ returns no data\n"); + goto rd_obj_err; + } else if (ret < 0) { + /* Some error. 
Just exit and return it to the caller. */ + goto rd_obj_err; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "6234 READ CGN PARAMS Successful %d\n", len); + + /* Parse data pointer over len and update the phba congestion + * parameters with values passed back. The receive rate values + * may have been altered in FW, but take no action here. + */ + p_cgn_param = (struct lpfc_cgn_param *)pdata; + lpfc_cgn_params_parse(phba, p_cgn_param, len); + + rd_obj_err: + kfree(pdata); + return ret; +} + +/** + * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event + * @phba: pointer to lpfc hba data structure. + * + * The FW generated Async ACQE SLI event calls this routine when + * the event type is an SLI Internal Port Event and the Event Code + * indicates a change to the FW maintained congestion parameters. + * + * This routine executes a Read_Object mailbox call to obtain the + * current congestion parameters maintained in FW and corrects + * the driver's active congestion parameters. + * + * The acqe event is not passed because there is no further data + * required. + * + * Returns nonzero error if event processing encountered an error. + * Zero otherwise for success. + **/ +static int +lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) +{ + int ret = 0; + + if (!phba->sli4_hba.pc_sli4_params.cmf) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4664 Cgn Evt when E2E off. Drop event\n"); + return -EACCES; + } + + /* If the event is claiming an empty object, it's ok. A write + * could have cleared it. Only error is a negative return + * status. + */ + ret = lpfc_sli4_cgn_params_read(phba); + if (ret < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4667 Error reading Cgn Params (%d)\n", + ret); + } else if (!ret) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4673 CGN Event empty object.\n"); + } + return ret; +} + +/** * lpfc_sli4_async_event_proc - Process all the pending asynchronous event * @phba: pointer to lpfc hba data structure. * @@ -6107,6 +7195,9 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) case LPFC_TRAILER_CODE_SLI: lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); break; + case LPFC_TRAILER_CODE_CMSTAT: + lpfc_sli4_async_cmstat_evt(phba); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -6391,6 +7482,15 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) return rc; } +static void +lpfc_unblock_requests_work(struct work_struct *work) +{ + struct lpfc_hba *phba = container_of(work, struct lpfc_hba, + unblock_request_work); + + lpfc_unblock_requests(phba); +} + /** * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. * @phba: pointer to lpfc hba data structure. * @@ -6466,7 +7566,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) INIT_DELAYED_WORK(&phba->idle_stat_delay_work, lpfc_idle_stat_delay_work); - + INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); return 0; } @@ -6697,6 +7797,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* FCF rediscover timer */ timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); + /* CMF congestion timer */ + hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + phba->cmf_timer.function = lpfc_cmf_timer; + /* * Control structure for handling external multi-buffer mailbox * command pass-through.
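For reference, the CMF timer wired up in the hunk above is the kernel's standard self-rearming hrtimer pattern. The following is a minimal, self-contained sketch of the same shape, not driver code; the demo_* names and the 90 ms period are assumptions standing in for LPFC_CMF_INTERVAL:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define DEMO_INTERVAL_MS 90	/* assumed stand-in for LPFC_CMF_INTERVAL */

static struct hrtimer demo_timer;

/* Callback runs in timer interrupt context; re-arming relative to now
 * keeps callback latency from accumulating across intervals.
 */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/* per-interval work would go here */
	hrtimer_forward_now(timer, ms_to_ktime(DEMO_INTERVAL_MS));
	return HRTIMER_RESTART;
}

static void demo_timer_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ms_to_ktime(DEMO_INTERVAL_MS),
		      HRTIMER_MODE_REL);
}

Note that lpfc_cmf_timer() instead returns HRTIMER_NORESTART once cmf_active_mode drops back to LPFC_CFG_OFF, which is why lpfc_cmf_start() has to re-arm the timer explicitly when congestion management is re-enabled.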
@@ -7145,6 +8249,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) } #endif + phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); + if (!phba->cmf_stat) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3331 Failed allocating per cpu cgn stats\n"); + rc = -ENOMEM; + goto out_free_hba_hdwq_info; + } + /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. @@ -7164,6 +8276,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; +out_free_hba_hdwq_info: + free_percpu(phba->sli4_hba.c_stat); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS out_free_hba_idle_stat: kfree(phba->sli4_hba.idle_stat); @@ -7211,6 +8325,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS free_percpu(phba->sli4_hba.c_stat); #endif + free_percpu(phba->cmf_stat); kfree(phba->sli4_hba.idle_stat); /* Free memory allocated for msi-x interrupt vector to CPU mapping */ @@ -8537,9 +9652,12 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) } /* FW supports persistent topology - override module parameter value */ phba->hba_flag |= HBA_PERSISTENT_TOPO; - switch (phba->pcidev->device) { - case PCI_DEVICE_ID_LANCER_G7_FC: - case PCI_DEVICE_ID_LANCER_G6_FC: + + /* if ASIC_GEN_NUM >= 0xC */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_6) || + (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_FAMILY_G6)) { if (!tf) { phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) ? FLAGS_TOPOLOGY_MODE_LOOP @@ -8547,8 +9665,7 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) } else { phba->hba_flag &= ~HBA_PERSISTENT_TOPO; } - break; - default: /* G5 */ + } else { /* G5 */ if (tf) { /* If topology failover set - pt is '0' or '1' */ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : @@ -8558,7 +9675,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) ? FLAGS_TOPOLOGY_MODE_PT_PT : FLAGS_TOPOLOGY_MODE_LOOP); } - break; } if (phba->hba_flag & HBA_PERSISTENT_TOPO) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -8683,6 +9799,52 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; phba->max_vports = phba->max_vpi; + + /* Next decide on FPIN or Signal E2E CGN support + * For congestion alarms and warnings valid combinations are: + * 1. FPIN alarms / FPIN warnings + * 2. Signal alarms / Signal warnings + * 3. FPIN alarms / Signal warnings + * 4. Signal alarms / FPIN warnings + * + * Initialize the adapter frequency to 100 mSecs + */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + + if (lpfc_use_cgn_signal) { + if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { + phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; + phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; + } + if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { + /* MUST support both alarm and warning + * because EDC does not support alarm alone. + */ + if (phba->cgn_reg_signal != + EDC_CG_SIG_WARN_ONLY) { + /* Must support both or none */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; + phba->cgn_reg_signal = + EDC_CG_SIG_NOTSUPPORTED; + } else { + phba->cgn_reg_signal = + EDC_CG_SIG_WARN_ALARM; + phba->cgn_reg_fpin = + LPFC_CGN_FPIN_NONE; + } + } + } + + /* Set the congestion initial signal and fpin values.
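+ * Given the wcs/acs decode above, the registrations work out to:
+ *   signal use off    -> FPIN warnings + FPIN alarms
+ *   wcs only          -> Signal warnings + FPIN alarms
+ *   wcs and acs       -> Signal warnings + Signal alarms, no FPIN
+ *   acs without wcs   -> fall back to FPIN warnings + FPIN alarms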
*/ + phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; + phba->cgn_init_reg_signal = phba->cgn_reg_signal; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", + phba->cgn_reg_signal, phba->cgn_reg_fpin); + lpfc_map_topology(phba, rd_config); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2003 cfg params Extents? %d " @@ -12063,6 +13225,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) struct pci_dev *pdev = phba->pcidev; lpfc_stop_hba_timers(phba); + hrtimer_cancel(&phba->cmf_timer); + if (phba->pport) phba->sli4_hba.intr_enable = 0; @@ -12133,6 +13297,240 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) phba->pport->work_port_events = 0; } +static uint32_t +lpfc_cgn_crc32(uint32_t crc, u8 byte) +{ + uint32_t msb = 0; + uint32_t bit; + + for (bit = 0; bit < 8; bit++) { + msb = (crc >> 31) & 1; + crc <<= 1; + + if (msb ^ (byte & 1)) { + crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; + crc |= 1; + } + byte >>= 1; + } + return crc; +} + +static uint32_t +lpfc_cgn_reverse_bits(uint32_t wd) +{ + uint32_t result = 0; + uint32_t i; + + for (i = 0; i < 32; i++) { + result <<= 1; + result |= (1 & (wd >> i)); + } + return result; +} + +/* + * The routine corresponds with the algorithm the HBA firmware + * uses to validate the data integrity. + */ +uint32_t +lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) +{ + uint32_t i; + uint32_t result; + uint8_t *data = (uint8_t *)ptr; + + for (i = 0; i < byteLen; ++i) + crc = lpfc_cgn_crc32(crc, data[i]); + + result = ~lpfc_cgn_reverse_bits(crc); + return result; +} + +void +lpfc_init_congestion_buf(struct lpfc_hba *phba) +{ + struct lpfc_cgn_info *cp; + struct timespec64 cmpl_time; + struct tm broken; + uint16_t size; + uint32_t crc; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6235 INIT Congestion Buffer %p\n", phba->cgn_i); + + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + + atomic64_set(&phba->cgn_acqe_stat.alarm, 0); + atomic64_set(&phba->cgn_acqe_stat.warn, 0); + atomic_set(&phba->cgn_driver_evt_cnt, 0); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + phba->cgn_evt_minute = 0; + phba->hba_flag &= ~HBA_CGN_DAY_WRAP; + + memset(cp, 0xff, LPFC_CGN_DATA_SIZE); + cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); + cp->cgn_info_version = LPFC_CGN_INFO_V3; + + /* cgn parameters */ + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; + cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; + cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; + + ktime_get_real_ts64(&cmpl_time); + time64_to_tm(cmpl_time.tv_sec, 0, &broken); + + cp->cgn_info_month = broken.tm_mon + 1; + cp->cgn_info_day = broken.tm_mday; + cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ + cp->cgn_info_hour = broken.tm_hour; + cp->cgn_info_minute = broken.tm_min; + cp->cgn_info_second = broken.tm_sec; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "2643 CGNInfo Init: Start Time " + "%d/%d/%d %d:%d:%d\n", + cp->cgn_info_day, cp->cgn_info_month, + cp->cgn_info_year, cp->cgn_info_hour, + cp->cgn_info_minute, cp->cgn_info_second); + + /* Fill in default LUN qdepth */ + if (phba->pport) { + size = (uint16_t)(phba->pport->cfg_lun_queue_depth); + cp->cgn_lunq = cpu_to_le16(size); + } + + /* last used Index initialized to 0xff 
already */ + + cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ; + cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ; + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + + phba->cgn_evt_timestamp = jiffies + + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); +} + +void +lpfc_init_congestion_stat(struct lpfc_hba *phba) +{ + struct lpfc_cgn_info *cp; + struct timespec64 cmpl_time; + struct tm broken; + uint32_t crc; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6236 INIT Congestion Stat %p\n", phba->cgn_i); + + if (!phba->cgn_i) + return; + + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE); + + ktime_get_real_ts64(&cmpl_time); + time64_to_tm(cmpl_time.tv_sec, 0, &broken); + + cp->cgn_stat_month = broken.tm_mon + 1; + cp->cgn_stat_day = broken.tm_mday; + cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */ + cp->cgn_stat_hour = broken.tm_hour; + cp->cgn_stat_minute = broken.tm_min; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "2647 CGNstat Init: Start Time " + "%d/%d/%d %d:%d\n", + cp->cgn_stat_day, cp->cgn_stat_month, + cp->cgn_stat_year, cp->cgn_stat_hour, + cp->cgn_stat_minute); + + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); +} + +/** + * __lpfc_reg_congestion_buf - register congestion info buffer with HBA + * @phba: Pointer to hba context object. + * @reg: flag to determine register or unregister. + */ +static int +__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) +{ + struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + LPFC_MBOXQ_t *mboxq; + int length, rc; + + if (!phba->cgn_i) + return -ENXIO; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2641 REG_CONGESTION_BUF mbox allocation fail: " + "HBA state x%x reg %d\n", + phba->pport->port_state, reg); + return -ENOMEM; + } + + length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, + LPFC_SLI4_MBX_EMBED); + reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; + bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); + if (reg > 0) + bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); + else + bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); + reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); + reg_congestion_buf->addr_lo = + putPaddrLow(phba->cgn_i->phys); + reg_congestion_buf->addr_hi = + putPaddrHigh(phba->cgn_i->phys); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &shdr->response); + mempool_free(mboxq, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2642 REG_CONGESTION_BUF mailbox " + "failed with status x%x add_status x%x," + " mbx status x%x reg %d\n", + shdr_status, shdr_add_status, rc, reg); + return -ENXIO; + } + return 0; +} + +int +lpfc_unreg_congestion_buf(struct lpfc_hba *phba) +{ + lpfc_cmf_stop(phba); + return __lpfc_reg_congestion_buf(phba, 0); +} + +int +lpfc_reg_congestion_buf(struct lpfc_hba *phba) +{ + return 
__lpfc_reg_congestion_buf(phba, 1); +} + /** * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. * @phba: Pointer to HBA context object. @@ -12241,7 +13639,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) bf_get(cfg_xib, mbx_sli4_parameters), phba->cfg_enable_fc4_type); fcponly: - phba->nvme_support = 0; phba->nvmet_support = 0; phba->cfg_nvmet_mrq = 0; phba->cfg_nvme_seg_cnt = 0; @@ -12259,9 +13656,10 @@ fcponly: if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; - /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ - if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) + /* Enable embedded Payload BDE if support is indicated */ + if (bf_get(cfg_pbde, mbx_sli4_parameters)) + phba->cfg_enable_pbde = 1; + else phba->cfg_enable_pbde = 0; /* @@ -12299,7 +13697,7 @@ fcponly: "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", bf_get(cfg_xib, mbx_sli4_parameters), phba->cfg_enable_pbde, - phba->fcp_embed_io, phba->nvme_support, + phba->fcp_embed_io, sli4_params->nvme, phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == @@ -12331,21 +13729,6 @@ fcponly: else phba->nsler = 0; - /* Save PB info for use during HBA setup */ - sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters); - sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters); - sli4_params->mib_size = mbx_sli4_parameters->mib_size; - sli4_params->mi_value = LPFC_DFLT_MIB_VAL; - - /* Next we check for Vendor MIB support */ - if (sli4_params->mi_ver && phba->cfg_enable_mi) - phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "6461 MIB attr %d enable %d FDMI %d buf %d:%d\n", - sli4_params->mi_ver, phba->cfg_enable_mi, - sli4_params->mi_value, sli4_params->mib_bde_cnt, - sli4_params->mib_size); return 0; } @@ -12978,7 +14361,9 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, const struct firmware *fw) { int rc; + u8 sli_family; + sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); /* Three cases: (1) FW was not supported on the detected adapter. * (2) FW update has been locked out administratively. * (3) Some other error during FW update. @@ -12986,10 +14371,12 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, * for admin diagnosis. */ if (offset == ADD_STATUS_FW_NOT_SUPPORTED || - (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && + (sli_family == LPFC_SLI_INTF_FAMILY_G6 && magic_number != MAGIC_NUMBER_G6) || - (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && - magic_number != MAGIC_NUMBER_G7)) { + (sli_family == LPFC_SLI_INTF_FAMILY_G7 && + magic_number != MAGIC_NUMBER_G7) || + (sli_family == LPFC_SLI_INTF_FAMILY_G7P && + magic_number != MAGIC_NUMBER_G7P)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3030 This firmware version is not supported on" " this HBA model. 
Device:%x Magic:%x Type:%x " @@ -13193,6 +14580,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) if (!phba) return -ENOMEM; + INIT_LIST_HEAD(&phba->poll_list); + /* Perform generic PCI device enabling operation */ error = lpfc_enable_pci_dev(phba); if (error) @@ -13327,7 +14716,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Enable RAS FW log support */ lpfc_sli4_ras_setup(phba); - INIT_LIST_HEAD(&phba->poll_list); timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); @@ -13376,6 +14764,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(&phba->hbalock); + if (phba->cgn_i) + lpfc_unreg_congestion_buf(phba); lpfc_free_sysfs_attr(vport); @@ -14040,17 +15430,18 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba) void lpfc_sli4_ras_init(struct lpfc_hba *phba) { - switch (phba->pcidev->device) { - case PCI_DEVICE_ID_LANCER_G6_FC: - case PCI_DEVICE_ID_LANCER_G7_FC: + /* if ASIC_GEN_NUM >= 0xC) */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_6) || + (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_FAMILY_G6)) { phba->ras_fwlog.ras_hwsupport = true; if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && phba->cfg_ras_fwlog_buffsize) phba->ras_fwlog.ras_enabled = true; else phba->ras_fwlog.ras_enabled = false; - break; - default: + } else { phba->ras_fwlog.ras_hwsupport = false; } } @@ -14163,8 +15554,9 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba) unsigned int temp_idx; int i; int j = 0; - unsigned long rem_nsec; - struct lpfc_vport **vports; + unsigned long rem_nsec, iflags; + bool log_verbose = false; + struct lpfc_vport *port_iterator; /* Don't dump messages if we explicitly set log_verbose for the * physical port or any vport. @@ -14172,16 +15564,24 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba) if (phba->cfg_log_verbose) return; - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) { - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { - if (vports[i]->cfg_log_verbose) { - lpfc_destroy_vport_work_array(phba, vports); + spin_lock_irqsave(&phba->port_list_lock, iflags); + list_for_each_entry(port_iterator, &phba->port_list, listentry) { + if (port_iterator->load_flag & FC_UNLOADING) + continue; + if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) { + if (port_iterator->cfg_log_verbose) + log_verbose = true; + + scsi_host_put(lpfc_shost_from_vport(port_iterator)); + + if (log_verbose) { + spin_unlock_irqrestore(&phba->port_list_lock, + iflags); return; } } } - lpfc_destroy_vport_work_array(phba, vports); + spin_unlock_irqrestore(&phba->port_list_lock, iflags); if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) return; diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 5660a8729462..7d480c798794 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2009 Emulex. All rights reserved. 
* * EMULEX and SLI are trademarks of Emulex. * @@ -44,6 +44,9 @@ #define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */ #define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */ #define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */ +#define LOG_RSVD1 0x01000000 /* Reserved */ +#define LOG_RSVD2 0x02000000 /* Reserved */ +#define LOG_CGN_MGMT 0x04000000 /* Congestion Mgmt events */ #define LOG_TRACE_EVENT 0x80000000 /* Dmp the DBG log on this err */ #define LOG_ALL_MSG 0x7fffffff /* LOG all messages */ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 84bc373190d8..6c754ee96bee 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -513,8 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba, break; } - if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || - phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + /* Topology handling for ASIC_GEN_NUM 0xC and later */ + if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 || + phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) && !(phba->sli4_hba.pc_sli4_params.pls) && mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) { mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index be54fbf5146f..870e53b8f81d 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -335,6 +335,19 @@ lpfc_mem_free_all(struct lpfc_hba *phba) dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); phba->lpfc_cmd_rsp_buf_pool = NULL; + /* Free Congestion Data buffer */ + if (phba->cgn_i) { + dma_free_coherent(&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + phba->cgn_i->virt, phba->cgn_i->phys); + kfree(phba->cgn_i); + phba->cgn_i = NULL; + } + + /* Free RX table */ + kfree(phba->rxtable); + phba->rxtable = NULL; + /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index e12f83fb795c..27263f02ab9f 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -736,9 +736,13 @@ out: * is already in MAPPED or UNMAPPED state. Catch this * condition and don't set the nlp_state again because * it causes an unnecessary transport unregister/register. 
+ * + * Nodes marked for ADISC will move MAPPED or UNMAPPED state + * after issuing ADISC */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { - if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) + if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && + !(ndlp->nlp_flag & NLP_NPR_ADISC)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } @@ -863,6 +867,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } out: + /* Unregister from backend, could have been skipped due to ADISC */ + lpfc_nlp_unreg_node(vport, ndlp); + ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -1677,9 +1684,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; - memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name)); - memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name)); - ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); lpfc_unreg_rpi(vport, ndlp); @@ -2597,13 +2601,14 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport, void *arg, uint32_t evt) { + lpfc_disc_set_adisc(vport, ndlp); + ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); - lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2645,14 +2650,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - if (ndlp->nlp_flag & NLP_NPR_ADISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - spin_unlock_irq(&ndlp->lock); - lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(vport, ndlp, 0); - } else { + /* + * ADISC nodes will be handled in regular discovery path after + * receiving response from NS. + * + * For other nodes, Send PLOGI to trigger an implicit LOGO. + */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2685,12 +2689,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { - if (ndlp->nlp_flag & NLP_NPR_ADISC) { - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(vport, ndlp, 0); - } else { + /* + * ADISC nodes will be handled in regular discovery path after + * receiving response from NS. + * + * For other nodes, Send PLOGI to trigger an implicit LOGO. + */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index bcc804cefd30..73a3568ff17e 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -216,8 +216,8 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) /* The register rebind might have occurred before the delete * downcall. Guard against this race. 
*/ - if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG) - ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD); + if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT) + ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD); spin_unlock_irq(&ndlp->lock); @@ -931,6 +931,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, uint32_t code, status, idx; uint16_t cid, sqhd, data; uint32_t *ptr; + uint32_t lat; + bool call_done = false; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS int cpu; #endif @@ -1135,10 +1137,21 @@ out_err: freqpriv = nCmd->private; freqpriv->nvme_buf = NULL; lpfc_ncmd->nvmeCmd = NULL; - spin_unlock(&lpfc_ncmd->buf_lock); + call_done = true; + } + spin_unlock(&lpfc_ncmd->buf_lock); + + /* Check if IO qualified for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + nCmd->io_dir == NVMEFC_FCP_READ && + nCmd->payload_length) { + /* Used when calculating average latency */ + lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start; + lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL); + } + + if (call_done) nCmd->done(nCmd); - } else - spin_unlock(&lpfc_ncmd->buf_lock); /* Call release with XB=1 to queue the IO into the abort list. */ lpfc_release_nvme_buf(phba, lpfc_ncmd); @@ -1212,6 +1225,10 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 5 */ wqe->fcp_iread.rsrvd5 = 0; + /* For a CMF Managed port, iod must be zero'ed */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_NONE); cstat->input_requests++; } } else { @@ -1562,6 +1579,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, expedite = 1; } + /* Check if IO qualifies for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + pnvme_fcreq->io_dir == NVMEFC_FCP_READ && + pnvme_fcreq->payload_length) { + ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length); + if (ret) { + ret = -EBUSY; + goto out_fail; + } + /* Get start time for IO latency */ + start = ktime_get_ns(); + } + /* The node is shared with FCP IO, make sure the IO pending count does * not exceed the programmed depth. */ @@ -1576,7 +1606,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, ndlp->cmd_qdepth); atomic_inc(&lport->xmt_fcp_qdepth); ret = -EBUSY; - goto out_fail; + goto out_fail1; } } @@ -1596,7 +1626,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, "idx %d DID %x\n", lpfc_queue_info->index, ndlp->nlp_DID); ret = -EBUSY; - goto out_fail; + goto out_fail1; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (start) { @@ -1606,6 +1636,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, lpfc_ncmd->ts_cmd_start = 0; } #endif + lpfc_ncmd->rx_cmd_start = start; /* * Store the data needed by the driver to issue, abort, and complete @@ -1687,6 +1718,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, } else cstat->control_requests--; lpfc_release_nvme_buf(phba, lpfc_ncmd); + out_fail1: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, + pnvme_fcreq->payload_length, NULL); out_fail: return ret; } @@ -2324,7 +2358,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * race that leaves the WAIT flag set. 
*/ spin_lock_irq(&ndlp->lock); - ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG; + ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; ndlp->fc4_xpt_flags |= NVME_XPT_REGD; spin_unlock_irq(&ndlp->lock); rport = remote_port->private; @@ -2336,7 +2370,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) */ spin_lock_irq(&ndlp->lock); ndlp->nrport = NULL; - ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG; + ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; spin_unlock_irq(&ndlp->lock); rport->ndlp = NULL; rport->remoteport = NULL; @@ -2488,7 +2522,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * The transport will update it. */ spin_lock_irq(&vport->phba->hbalock); - ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG; + ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT; spin_unlock_irq(&vport->phba->hbalock); /* Don't let the host nvme transport keep sending keep-alives diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index 69a5a844c69c..cc54ffb5c205 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -34,11 +34,8 @@ #define LPFC_NVME_FB_SHIFT 9 #define LPFC_NVME_MAX_FB (1 << 20) /* 1M */ -#define LPFC_MAX_NVME_INFO_TMP_LEN 100 -#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n" - -#define lpfc_ndlp_get_nrport(ndlp) \ - ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)) \ +#define lpfc_ndlp_get_nrport(ndlp) \ + ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\ ? NULL : ndlp->nrport) struct lpfc_nvme_qhandle { diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index f2d9a3580887..6e3dd0b9bcfa 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1797,19 +1797,22 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; - spin_lock(&ctxp->ctxlock); + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, + iflag); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); /* Check if we already received a free context call * and we have completed processing an abort situation. 
*/ if (ctxp->flag & LPFC_NVME_CTX_RLS && !(ctxp->flag & LPFC_NVME_ABORT_OP)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVME_XBUSY; - spin_unlock(&ctxp->ctxlock); - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, - iflag); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1b248c237be1..0fde1e874c7a 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -96,30 +96,6 @@ static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, struct lpfc_vmid *vmid); -static inline unsigned -lpfc_cmd_blksize(struct scsi_cmnd *sc) -{ - return sc->device->sector_size; -} - -#define LPFC_CHECK_PROTECT_GUARD 1 -#define LPFC_CHECK_PROTECT_REF 2 -static inline unsigned -lpfc_cmd_protect(struct scsi_cmnd *sc, int flag) -{ - return 1; -} - -static inline unsigned -lpfc_cmd_guard_csum(struct scsi_cmnd *sc) -{ - if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF) - return 0; - if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP) - return 1; - return 0; -} - /** * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. * @phba: Pointer to HBA object. @@ -683,7 +659,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, cpu = raw_smp_processor_id(); if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { - tag = blk_mq_unique_tag(cmnd->request); + tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); idx = blk_mq_unique_tag_to_hwq(tag); } else { idx = phba->sli4_hba.cpu_map[cpu].hdwq; @@ -1046,13 +1022,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, return 0; sgpe = scsi_prot_sglist(sc); - lba = t10_pi_ref_tag(sc->request); + lba = scsi_prot_ref_tag(sc); if (lba == LPFC_INVALID_REFTAG) return 0; /* First check if we need to match the LBA */ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { - blksize = lpfc_cmd_blksize(sc); + blksize = scsi_prot_interval(sc); numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; /* Make sure we have the right LBA if one is specified */ @@ -1441,7 +1417,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, { uint8_t ret = 0; - if (lpfc_cmd_guard_csum(sc)) { + if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: @@ -1521,7 +1497,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, { uint8_t ret = 0; - if (lpfc_cmd_guard_csum(sc)) { + if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: @@ -1629,7 +1605,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, goto out; /* extract some info from the scsi command for pde*/ - reftag = t10_pi_ref_tag(sc->request); + reftag = scsi_prot_ref_tag(sc); if (reftag == LPFC_INVALID_REFTAG) goto out; @@ -1668,12 +1644,12 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, * protection data is automatically generated, not checked. 
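The lpfc_sli4_nvmet_xri_aborted hunk above reworks the locking so the per-context lock is never acquired while the shared abort-list lock is held; the list lock is re-taken only around the brief list_del. A sketch of that ordering discipline, with pthread mutexes standing in for the spinlocks:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static int on_list = 1;

/* Fixed order: the per-context lock first, then the shared list lock,
 * held only for as long as the list manipulation itself takes. */
static void release_ctx(void)
{
	pthread_mutex_lock(&ctx_lock);
	if (on_list) {
		pthread_mutex_lock(&list_lock);
		on_list = 0;	/* stands in for list_del_init() */
		pthread_mutex_unlock(&list_lock);
	}
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	release_ctx();
	printf("on_list=%d\n", on_list);
	return 0;
}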
*/ if (datadir == DMA_FROM_DEVICE) { - if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) bf_set(pde6_ce, pde6, checking); else bf_set(pde6_ce, pde6, 0); - if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + if (sc->prot_flags & SCSI_PROT_REF_CHECK) bf_set(pde6_re, pde6, checking); else bf_set(pde6_re, pde6, 0); @@ -1791,8 +1767,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, goto out; /* extract some info from the scsi command */ - blksize = lpfc_cmd_blksize(sc); - reftag = t10_pi_ref_tag(sc->request); + blksize = scsi_prot_interval(sc); + reftag = scsi_prot_ref_tag(sc); if (reftag == LPFC_INVALID_REFTAG) goto out; @@ -1832,12 +1808,12 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); - if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) bf_set(pde6_ce, pde6, checking); else bf_set(pde6_ce, pde6, 0); - if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + if (sc->prot_flags & SCSI_PROT_REF_CHECK) bf_set(pde6_re, pde6, checking); else bf_set(pde6_re, pde6, 0); @@ -2023,7 +1999,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, goto out; /* extract some info from the scsi command for pde*/ - reftag = t10_pi_ref_tag(sc->request); + reftag = scsi_prot_ref_tag(sc); if (reftag == LPFC_INVALID_REFTAG) goto out; @@ -2051,12 +2027,12 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, * protection data is automatically generated, not checked. */ if (sc->sc_data_direction == DMA_FROM_DEVICE) { - if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); else bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); - if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + if (sc->prot_flags & SCSI_PROT_REF_CHECK) bf_set(lpfc_sli4_sge_dif_re, diseed, checking); else bf_set(lpfc_sli4_sge_dif_re, diseed, 0); @@ -2223,8 +2199,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, goto out; /* extract some info from the scsi command */ - blksize = lpfc_cmd_blksize(sc); - reftag = t10_pi_ref_tag(sc->request); + blksize = scsi_prot_interval(sc); + reftag = scsi_prot_ref_tag(sc); if (reftag == LPFC_INVALID_REFTAG) goto out; @@ -2281,9 +2257,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; - if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) { + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - } else { bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); /* @@ -2300,7 +2275,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, } - if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + if (sc->prot_flags & SCSI_PROT_REF_CHECK) bf_set(lpfc_sli4_sge_dif_re, diseed, checking); else bf_set(lpfc_sli4_sge_dif_re, diseed, 0); @@ -2557,7 +2532,7 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, * DIF (trailer) attached to it. Must ajust FCP data length * to account for the protection data. */ - fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; + fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8; return fcpdl; } @@ -2811,14 +2786,14 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) * data length is a multiple of the blksize. 
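The lpfc_bg_scsi_adjust_dl hunk above keeps the same arithmetic while switching the interval source to scsi_prot_interval(): every full protection interval contributes one 8-byte DIF tuple to the FCP data length. The calculation in isolation, assuming a 512-byte interval:

#include <stdio.h>
#include <stdint.h>

/* One 8-byte DIF tuple travels with every protection interval. */
static uint32_t adjust_dl(uint32_t fcpdl, uint32_t prot_interval)
{
	return fcpdl + (fcpdl / prot_interval) * 8;
}

int main(void)
{
	/* 64 KiB at a 512-byte interval: 128 tuples, 1024 extra bytes */
	printf("%u\n", adjust_dl(65536, 512));	/* prints 66560 */
	return 0;
}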
*/ sgde = scsi_sglist(cmd); - blksize = lpfc_cmd_blksize(cmd); + blksize = scsi_prot_interval(cmd); data_src = (uint8_t *)sg_virt(sgde); data_len = sgde->length; if ((data_len & (blksize - 1)) == 0) chk_guard = 1; src = (struct scsi_dif_tuple *)sg_virt(sgpe); - start_ref_tag = t10_pi_ref_tag(cmd->request); + start_ref_tag = scsi_prot_ref_tag(cmd); if (start_ref_tag == LPFC_INVALID_REFTAG) goto out; start_app_tag = src->app_tag; @@ -2839,7 +2814,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) /* First Guard Tag checking */ if (chk_guard) { guard_tag = src->guard_tag; - if (lpfc_cmd_guard_csum(cmd)) + if (cmd->prot_flags + & SCSI_PROT_IP_CHECKSUM) sum = lpfc_bg_csum(data_src, blksize); else @@ -2910,7 +2886,7 @@ out: phba->bg_guard_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", - t10_pi_ref_tag(cmd->request), + scsi_prot_ref_tag(cmd), sum, guard_tag); } else if (err_type == BGS_REFTAG_ERR_MASK) { @@ -2920,7 +2896,7 @@ out: phba->bg_reftag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", - t10_pi_ref_tag(cmd->request), + scsi_prot_ref_tag(cmd), ref_tag, start_ref_tag); } else if (err_type == BGS_APPTAG_ERR_MASK) { @@ -2930,7 +2906,7 @@ out: phba->bg_apptag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9041 BLKGRD: reftag %x app_tag err %x != %x\n", - t10_pi_ref_tag(cmd->request), + scsi_prot_ref_tag(cmd), app_tag, start_app_tag); } } @@ -2992,7 +2968,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -3007,7 +2983,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -3022,7 +2998,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -3066,9 +3042,9 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_logical_block_count(cmd), bgstat, bghm); - /* Calcuate what type of error it was */ + /* Calculate what type of error it was */ lpfc_calc_bg_err(phba, lpfc_cmd); } return ret; @@ -3103,8 +3079,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9072 BLKGRD: Invalid BG Profile in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); ret = (-1); goto out; } @@ -3115,8 +3091,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9073 BLKGRD: Invalid BG PDIF Block in cmd " "0x%x reftag 
0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); ret = (-1); goto out; } @@ -3131,8 +3107,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9055 BLKGRD: Guard Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -3146,8 +3122,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9056 BLKGRD: Ref Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -3161,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9061 BLKGRD: App Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -3205,10 +3181,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9057 BLKGRD: Unknown error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + scsi_prot_ref_tag(cmd), + scsi_logical_block_count(cmd), bgstat, bghm); - /* Calcuate what type of error it was */ + /* Calculate what type of error it was */ lpfc_calc_bg_err(phba, lpfc_cmd); } out: @@ -3854,6 +3830,143 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) } /** + * lpfc_unblock_requests - allow further commands to be queued. + * @phba: pointer to phba object + * + * For single vport, just call scsi_unblock_requests on physical port. + * For multiple vports, send scsi_unblock_requests for all the vports. + */ +void +lpfc_unblock_requests(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + int i; + + if (phba->sli_rev == LPFC_SLI_REV4 && + !phba->sli4_hba.max_cfg_param.vpi_used) { + shost = lpfc_shost_from_vport(phba->pport); + scsi_unblock_requests(shost); + return; + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + scsi_unblock_requests(shost); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_block_requests - prevent further commands from being queued. + * @phba: pointer to phba object + * + * For single vport, just call scsi_block_requests on physical port. + * For multiple vports, send scsi_block_requests for all the vports. 
+ */ +void +lpfc_block_requests(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + int i; + + if (atomic_read(&phba->cmf_stop_io)) + return; + + if (phba->sli_rev == LPFC_SLI_REV4 && + !phba->sli4_hba.max_cfg_param.vpi_used) { + shost = lpfc_shost_from_vport(phba->pport); + scsi_block_requests(shost); + return; + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + scsi_block_requests(shost); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion + * @phba: The HBA for which this call is being executed. + * @time: The latency of the IO that completed (in ns) + * @size: The size of the IO that completed + * @shost: SCSI host the IO completed on (NULL for a NVME IO) + * + * The routine adjusts the various Burst and Bandwidth counters used in + * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, + * that means the IO was never issued to the HBA, so this routine is + * just being called to cleanup the counter from a previous + * lpfc_update_cmf_cmd call. + */ +int +lpfc_update_cmf_cmpl(struct lpfc_hba *phba, + uint64_t time, uint32_t size, struct Scsi_Host *shost) +{ + struct lpfc_cgn_stat *cgs; + + if (time != LPFC_CGN_NOT_SENT) { + /* lat is ns coming in, save latency in us */ + if (time < 1000) + time = 1; + else + time = div_u64(time + 500, 1000); /* round it */ + + cgs = this_cpu_ptr(phba->cmf_stat); + atomic64_add(size, &cgs->rcv_bytes); + atomic64_add(time, &cgs->rx_latency); + atomic_inc(&cgs->rx_io_cnt); + } + return 0; +} + +/** + * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission + * @phba: The HBA for which this call is being executed. + * @size: The size of the IO that will be issued + * + * The routine adjusts the various Burst and Bandwidth counters used in + * Congestion management and E2E. + */ +int +lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) +{ + uint64_t total; + struct lpfc_cgn_stat *cgs; + int cpu; + + /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) { + total = 0; + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + total += atomic64_read(&cgs->total_bytes); + } + if (total >= phba->cmf_max_bytes_per_interval) { + if (!atomic_xchg(&phba->cmf_bw_wait, 1)) { + lpfc_block_requests(phba); + phba->cmf_last_ts = + lpfc_calc_cmf_latency(phba); + } + atomic_inc(&phba->cmf_busy); + return -EBUSY; + } + if (size > atomic_read(&phba->rx_max_read_cnt)) + atomic_set(&phba->rx_max_read_cnt, size); + } + + cgs = this_cpu_ptr(phba->cmf_stat); + atomic64_add(size, &cgs->total_bytes); + return 0; +} + +/** * lpfc_handle_fcp_err - FCP response handler * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 
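Taken together, lpfc_update_cmf_cmd and lpfc_update_cmf_cmpl above are the two halves of the CMF accounting: submission sums the per-CPU byte counters against the interval budget, completion banks the size plus a rounded microsecond latency. A userspace sketch of both calculations, with plain variables in place of the per-CPU atomics:

#include <stdio.h>
#include <stdint.h>

#define NCPU 4

/* Nanoseconds to microseconds, rounded, with sub-microsecond latencies
 * clamped to 1 -- the shape of the completion-side conversion. */
static uint64_t ns_to_us_rounded(uint64_t ns)
{
	return ns < 1000 ? 1 : (ns + 500) / 1000;
}

int main(void)
{
	uint64_t per_cpu_bytes[NCPU] = { 1 << 20, 2 << 20, 0, 512 };
	uint64_t limit = 3 << 20, total = 0;
	int cpu;

	/* Submission side: sum the per-CPU counters against the budget */
	for (cpu = 0; cpu < NCPU; cpu++)
		total += per_cpu_bytes[cpu];

	printf("1499 ns -> %llu us\n",
	       (unsigned long long)ns_to_us_rounded(1499));	/* 1 */
	printf("%s\n", total >= limit ? "throttle (-EBUSY)" : "admit");
	return 0;
}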
@@ -4063,6 +4176,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, u32 logit = LOG_FCP; u32 status, idx; unsigned long iflags = 0; + u32 lat; u8 wait_xb_clr = 0; /* Sanity check on return of outstanding command */ @@ -4351,10 +4465,21 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_io_ktime(phba, lpfc_cmd); } #endif + if (likely(!wait_xb_clr)) + lpfc_cmd->pCmd = NULL; + spin_unlock(&lpfc_cmd->buf_lock); + + /* Check if IO qualified for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + cmd->sc_data_direction == DMA_FROM_DEVICE && + (scsi_sg_count(cmd))) { + /* Used when calculating average latency */ + lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; + lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost); + } + if (wait_xb_clr) goto out; - lpfc_cmd->pCmd = NULL; - spin_unlock(&lpfc_cmd->buf_lock); /* The sdev is not guaranteed to be valid post scsi_done upcall. */ cmd->scsi_done(cmd); @@ -4367,8 +4492,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; if (lpfc_cmd->waitq) wake_up(lpfc_cmd->waitq); -out: spin_unlock(&lpfc_cmd->buf_lock); +out: lpfc_release_scsi_buf(phba, lpfc_cmd); } @@ -4775,6 +4900,11 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, fcp_cmnd->fcpCntl3 = READ_DATA; if (hdwq) hdwq->scsi_cstat.input_requests++; + + /* For a CMF Managed port, iod must be zero'ed */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_NONE); } } else { /* From the icmnd template, initialize words 4 - 11 */ @@ -5029,12 +5159,8 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba) } /* Check for valid Emulex Device ID */ - switch (ptr->device) { - case PCI_DEVICE_ID_LANCER_FC: - case PCI_DEVICE_ID_LANCER_G6_FC: - case PCI_DEVICE_ID_LANCER_G7_FC: - break; - default: + if (phba->sli_rev != LPFC_SLI_REV4 || + phba->hba_flag & HBA_FCOE_MODE) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8347 Incapable PCI reset device: " "0x%04x\n", ptr->device); @@ -5423,13 +5549,9 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct */ static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) { - char *uuid = NULL; + struct bio *bio = scsi_cmd_to_rq(cmd)->bio; - if (cmd->request) { - if (cmd->request->bio) - uuid = blkcg_get_fc_appid(cmd->request->bio); - } - return uuid; + return bio ? blkcg_get_fc_appid(bio) : NULL; } /** @@ -5462,7 +5584,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (phba->ktime_on) start = ktime_get_ns(); #endif - + start = ktime_get_ns(); rdata = lpfc_rport_data_from_scsi_device(cmnd->device); /* sanity check on references */ @@ -5493,7 +5615,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) * transport is still transitioning. 
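In the lpfc_queuecommand hunks below, a read is charged against the CMF budget before a command buffer is taken, which is why the old out_tgt_busy label splits into out_tgt_busy1 (nothing charged yet) and out_tgt_busy2 (charge made, cleanup owed). A sketch of that pairing; the driver's LPFC_CGN_NOT_SENT hook does its own bookkeeping rather than a plain subtraction, so treat this only as the shape of the pattern:

#include <stdio.h>
#include <stdint.h>

static uint64_t interval_bytes;

static int cmf_charge(uint32_t size, uint64_t budget)
{
	if (interval_bytes + size > budget)
		return -1;	/* over budget: refuse before charging */
	interval_bytes += size;
	return 0;
}

static void cmf_not_sent(uint32_t size)
{
	interval_bytes -= size;	/* IO never reached the hardware */
}

static int queue_io(uint32_t size, uint64_t budget, int hw_busy)
{
	if (cmf_charge(size, budget))
		return -1;	/* busy1: nothing charged, nothing to undo */
	if (hw_busy) {
		cmf_not_sent(size);
		return -1;	/* busy2: charge made, so back it out */
	}
	return 0;		/* issued; the completion side settles later */
}

int main(void)
{
	queue_io(4096, 8192, 1);
	printf("interval_bytes=%llu\n",
	       (unsigned long long)interval_bytes);	/* 0 */
	return 0;
}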
*/ if (!ndlp) - goto out_tgt_busy; + goto out_tgt_busy1; + + /* Check if IO qualifies for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + cmnd->sc_data_direction == DMA_FROM_DEVICE && + (scsi_sg_count(cmnd))) { + /* Latency start time saved in rx_cmd_start later in routine */ + err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd)); + if (err) + goto out_tgt_busy1; + } + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, @@ -5521,7 +5654,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) ndlp->nlp_portname.u.wwn[5], ndlp->nlp_portname.u.wwn[6], ndlp->nlp_portname.u.wwn[7]); - goto out_tgt_busy; + goto out_tgt_busy2; } } @@ -5534,6 +5667,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) "IO busied\n"); goto out_host_busy; } + lpfc_cmd->rx_cmd_start = start; /* * Store the midlayer's command structure for the completion phase @@ -5557,8 +5691,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) "reftag x%x cnt %u pt %x\n", dif_op_str[scsi_get_prot_op(cmnd)], cmnd->cmnd[0], - t10_pi_ref_tag(cmnd->request), - blk_rq_sectors(cmnd->request), + scsi_prot_ref_tag(cmnd), + scsi_logical_block_count(cmnd), (cmnd->cmnd[1]>>5)); } err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); @@ -5569,8 +5703,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) "9038 BLKGRD: rcvd PROT_NORMAL cmd: " "x%x reftag x%x cnt %u pt %x\n", cmnd->cmnd[0], - t10_pi_ref_tag(cmnd->request), - blk_rq_sectors(cmnd->request), + scsi_prot_ref_tag(cmnd), + scsi_logical_block_count(cmnd), (cmnd->cmnd[1]>>5)); } err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); @@ -5641,8 +5775,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) bf_get(wqe_tmo, &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) : lpfc_cmd->cur_iocbq.iocb.ulpTimeout, - (uint32_t) - (cmnd->request->timeout / 1000)); + (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); goto out_host_busy_free_buf; } @@ -5678,13 +5811,20 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) out_host_busy_release_buf: lpfc_release_scsi_buf(phba, lpfc_cmd); out_host_busy: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); return SCSI_MLQUEUE_HOST_BUSY; - out_tgt_busy: + out_tgt_busy2: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); + out_tgt_busy1: return SCSI_MLQUEUE_TARGET_BUSY; out_fail_command_release_buf: lpfc_release_scsi_buf(phba, lpfc_cmd); + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); out_fail_command: cmnd->scsi_done(cmnd); @@ -6273,6 +6413,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_scsi_event_header scsi_event; int status; u32 logit = LOG_FCP; + u32 dev_loss_tmo = vport->cfg_devloss_tmo; unsigned long flags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); @@ -6314,39 +6455,44 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, FCP_TARGET_RESET); - if (status != SUCCESS) - logit = LOG_TRACE_EVENT; - spin_lock_irqsave(&pnode->lock, flags); - if (status != SUCCESS && - (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) && - !pnode->logo_waitq) { - pnode->logo_waitq = &waitq; - pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - pnode->nlp_flag |= NLP_ISSUE_LOGO; - pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; - spin_unlock_irqrestore(&pnode->lock, flags); - lpfc_unreg_rpi(vport, pnode); - wait_event_timeout(waitq, - 
(!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)), - msecs_to_jiffies(vport->cfg_devloss_tmo * - 1000)); - - if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "0725 SCSI layer TGTRST failed & LOGO TMO " - " (%d, %llu) return x%x\n", tgt_id, - lun_id, status); - spin_lock_irqsave(&pnode->lock, flags); - pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; + if (status != SUCCESS) { + logit = LOG_TRACE_EVENT; + + /* Issue LOGO, if no LOGO is outstanding */ + spin_lock_irqsave(&pnode->lock, flags); + if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) && + !pnode->logo_waitq) { + pnode->logo_waitq = &waitq; + pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + pnode->nlp_flag |= NLP_ISSUE_LOGO; + pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; + spin_unlock_irqrestore(&pnode->lock, flags); + lpfc_unreg_rpi(vport, pnode); + wait_event_timeout(waitq, + (!(pnode->upcall_flags & + NLP_WAIT_FOR_LOGO)), + msecs_to_jiffies(dev_loss_tmo * + 1000)); + + if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { + lpfc_printf_vlog(vport, KERN_ERR, logit, + "0725 SCSI layer TGTRST " + "failed & LOGO TMO (%d, %llu) " + "return x%x\n", + tgt_id, lun_id, status); + spin_lock_irqsave(&pnode->lock, flags); + pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; + } else { + spin_lock_irqsave(&pnode->lock, flags); + } + pnode->logo_waitq = NULL; + spin_unlock_irqrestore(&pnode->lock, flags); + status = SUCCESS; + } else { - spin_lock_irqsave(&pnode->lock, flags); + spin_unlock_irqrestore(&pnode->lock, flags); + status = FAILED; } - pnode->logo_waitq = NULL; - spin_unlock_irqrestore(&pnode->lock, flags); - status = SUCCESS; - } else { - status = FAILED; - spin_unlock_irqrestore(&pnode->lock, flags); } lpfc_printf_vlog(vport, KERN_ERR, logit, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index f76667b7da7b..3836d7f6a575 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -142,6 +142,10 @@ struct lpfc_scsicmd_bkt { #define FC_PORTSPEED_128GBIT 0x2000 #endif +#ifndef FC_PORTSPEED_256GBIT +#define FC_PORTSPEED_256GBIT 0x4000 +#endif + #define TXRDY_PAYLOAD_LEN 12 /* For sysfs/debugfs tmp string max len */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index f530d8fe7a8c..ffd8a140638c 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1439,7 +1439,7 @@ out: memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); iocbq->sli4_lxritag = NO_XRI; iocbq->sli4_xritag = NO_XRI; - iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | + iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | LPFC_IO_NVME_LS); list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } @@ -1769,6 +1769,254 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) } /** + * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl + * @phba: Pointer to HBA context object. + * @cmdiocb: Pointer to driver command iocb object. + * @cmf_cmpl: Pointer to completed WCQE. 
+ * + * This routine will inform the driver of any BW adjustments we need + * to make. These changes will be picked up during the next CMF + * timer interrupt. In addition, any BW changes will be logged + * with LOG_CGN_MGMT. + **/ +static void +lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_wcqe_complete *cmf_cmpl) +{ + union lpfc_wqe128 *wqe; + uint32_t status, info; + uint64_t bw, bwdif, slop; + uint64_t pcent, bwpcent; + int asig, afpin, sigcnt, fpincnt; + int wsigmax, wfpinmax, cg, tdp; + char *s; + + /* First check for error */ + status = bf_get(lpfc_wcqe_c_status, cmf_cmpl); + if (status) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6211 CMF_SYNC_WQE Error " + "req_tag x%x status x%x hwstatus x%x " + "tdatap x%x parm x%x\n", + bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl), + bf_get(lpfc_wcqe_c_status, cmf_cmpl), + bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl), + cmf_cmpl->total_data_placed, + cmf_cmpl->parameter); + goto out; + } + + /* Gather congestion information on a successful cmpl */ + info = cmf_cmpl->parameter; + phba->cmf_active_info = info; + + /* See if firmware info count is valid or has changed */ + if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info) + info = 0; + else + phba->cmf_info_per_interval = info; + + tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl); + cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl); + + /* Get BW requirement from firmware */ + bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE; + if (!bw) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6212 CMF_SYNC_WQE x%x: NULL bw\n", + bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl)); + goto out; + } + + /* Gather information needed for logging if a BW change is required */ + wqe = &cmdiocb->wqe; + asig = bf_get(cmf_sync_asig, &wqe->cmf_sync); + afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync); + fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync); + sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync); + if (phba->cmf_max_bytes_per_interval != bw || + (asig || afpin || sigcnt || fpincnt)) { + /* Are we increasing or decreasing BW */ + if (phba->cmf_max_bytes_per_interval < bw) { + bwdif = bw - phba->cmf_max_bytes_per_interval; + s = "Increase"; + } else { + bwdif = phba->cmf_max_bytes_per_interval - bw; + s = "Decrease"; + } + + /* What is the change percentage */ + slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/ + pcent = div64_u64(bwdif * 100 + slop, + phba->cmf_link_byte_count); + bwpcent = div64_u64(bw * 100 + slop, + phba->cmf_link_byte_count); + if (asig) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6237 BW Threshold %lld%% (%lld): " + "%lld%% %s: Signal Alarm: cg:%d " + "Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } else if (afpin) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6238 BW Threshold %lld%% (%lld): " + "%lld%% %s: FPIN Alarm: cg:%d " + "Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } else if (sigcnt) { + wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6239 BW Threshold %lld%% (%lld): " + "%lld%% %s: Signal Warning: " + "Cnt %d Max %d: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, sigcnt, + wsigmax, cg, phba->cmf_active_info); + } else if (fpincnt) { + wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6240 BW Threshold %lld%% (%lld): " + "%lld%% %s: FPIN Warning: " + "Cnt %d Max %d: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, fpincnt, + wfpinmax, cg, phba->cmf_active_info); + } 
else { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6241 BW Threshold %lld%% (%lld): " + "CMF %lld%% %s: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } + } else if (info) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6246 Info Threshold %u\n", info); + } + + /* Save BW change to be picked up during next timer interrupt */ + phba->cmf_last_sync_bw = bw; +out: + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +/** + * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE + * @phba: Pointer to HBA context object. + * @ms: ms to set in WQE interval, 0 means use init op + * @total: Total rcv bytes for this interval + * + * This routine is called every CMF timer interrupt. Its purpose is + * to issue a CMF_SYNC_WQE to the firmware to inform it of any events + * that may indicate we have congestion (FPINs or Signals). Upon + * completion, the firmware will indicate any BW restrictions the + * driver may need to take. + **/ +int +lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) +{ + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *sync_buf; + unsigned long iflags; + u32 ret_val; + u32 atot, wtot, max; + + /* First address any alarm / warning activity */ + atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); + wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0); + + /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */ + if (phba->cmf_active_mode != LPFC_CFG_MANAGED || + phba->link_state == LPFC_LINK_DOWN) + return 0; + + spin_lock_irqsave(&phba->hbalock, iflags); + sync_buf = __lpfc_sli_get_iocbq(phba); + if (!sync_buf) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6213 No available WQEs for CMF_SYNC_WQE\n"); + ret_val = ENOMEM; + goto out_unlock; + } + + wqe = &sync_buf->wqe; + + /* WQEs are reused. Clear stale data and set key fields to zero */ + memset(wqe, 0, sizeof(*wqe)); + + /* If this is the very first CMF_SYNC_WQE, issue an init operation */ + if (!ms) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6441 CMF Init %d - CMF_SYNC_WQE\n", + phba->fc_eventTag); + bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */ + bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL); + goto initpath; + } + + bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */ + bf_set(cmf_sync_interval, &wqe->cmf_sync, ms); + + /* Check for alarms / warnings */ + if (atot) { + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + /* We hit a Signal alarm condition */ + bf_set(cmf_sync_asig, &wqe->cmf_sync, 1); + } else { + /* We hit an FPIN alarm condition */ + bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1); + } + } else if (wtot) { + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || + phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + /* We hit a Signal warning condition */ + max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency * + lpfc_acqe_cgn_frequency; + bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max); + bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot); + } else { + /* We hit an FPIN warning condition */ + bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1); + bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1); + } + } + + /* Update total read blocks during previous timer interval */ + wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE); + +initpath: + bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER); + wqe->cmf_sync.event_tag = phba->fc_eventTag; + bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE); + + /* Setup reqtag to match the wqe completion. 
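The bandwidth logging in lpfc_cmf_sync_cmpl above computes integer percentages with a slop of cmf_link_byte_count / 200 added before the divide (the code's own "For rounding" comment), which lifts quotients sitting a hair under a whole percent onto it. The arithmetic in isolation:

#include <stdio.h>
#include <stdint.h>

/* Percent of the link byte count, with link/200 added before the divide
 * so a quotient just below a whole percent is not truncated down. */
static uint64_t pct_with_slop(uint64_t part, uint64_t link_bytes)
{
	uint64_t slop = link_bytes / 200;	/* for rounding */

	return (part * 100 + slop) / link_bytes;
}

int main(void)
{
	/* 9999 of 20000 is 49.995%: 50 with the slop, 49 without it */
	printf("%llu\n", (unsigned long long)pct_with_slop(9999, 20000));
	printf("%llu\n", (unsigned long long)(9999ULL * 100 / 20000));
	return 0;
}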
*/ + bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag); + + bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1); + + bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND); + bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1); + bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT); + + sync_buf->vport = phba->pport; + sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl; + sync_buf->iocb_cmpl = NULL; + sync_buf->context1 = NULL; + sync_buf->context2 = NULL; + sync_buf->context3 = NULL; + sync_buf->sli4_xritag = NO_XRI; + + sync_buf->iocb_flag |= LPFC_IO_CMF; + ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf); + if (ret_val) + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6214 Cannot issue CMF_SYNC_WQE: x%x\n", + ret_val); +out_unlock: + spin_unlock_irqrestore(&phba->hbalock, iflags); + return ret_val; +} + +/** * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. @@ -4467,6 +4715,7 @@ lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) } else phba->sli4_hba.intr_enable = 0; + phba->hba_flag &= ~HBA_SETUP; return retval; } @@ -4787,6 +5036,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->link_events = 0; phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; + phba->hba_flag &= ~HBA_SETUP; spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~(LPFC_PROCESS_LA); @@ -5674,16 +5924,20 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); phba->sli4_hba.lnk_info.lnk_no = bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); + phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); + phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n", + "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " + "flash_id: x%02x, asic_rev: x%02x\n", phba->sli4_hba.lnk_info.lnk_tp, phba->sli4_hba.lnk_info.lnk_no, - phba->BIOSVersion); + phba->BIOSVersion, phba->sli4_hba.flash_id, + phba->sli4_hba.asic_rev); out_free_mboxq: if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) lpfc_sli4_mbox_cmd_free(phba, mboxq); @@ -6413,6 +6667,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, uint32_t feature) { uint32_t len; + u32 sig_freq = 0; len = sizeof(struct lpfc_mbx_set_feature) - sizeof(struct lpfc_sli4_cfg_mhdr); @@ -6435,6 +6690,35 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; mbox->u.mqe.un.set_feature.param_len = 8; break; + case LPFC_SET_CGN_SIGNAL: + if (phba->cmf_active_mode == LPFC_CFG_OFF) + sig_freq = 0; + else + sig_freq = phba->cgn_sig_freq; + + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { + bf_set(lpfc_mbx_set_feature_CGN_alarm_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + bf_set(lpfc_mbx_set_feature_CGN_warn_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + } + + if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) + bf_set(lpfc_mbx_set_feature_CGN_warn_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + + if (phba->cmf_active_mode == LPFC_CFG_OFF || + phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED) + sig_freq = 0; + else + sig_freq = lpfc_acqe_cgn_frequency; + + bf_set(lpfc_mbx_set_feature_CGN_acqe_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + + mbox->u.mqe.un.set_feature.feature = 
LPFC_SET_CGN_SIGNAL; + mbox->u.mqe.un.set_feature.param_len = 12; + break; case LPFC_SET_DUAL_DUMP: bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); @@ -6443,8 +6727,22 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; mbox->u.mqe.un.set_feature.param_len = 4; break; + case LPFC_SET_ENABLE_MI: + mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI; + mbox->u.mqe.un.set_feature.param_len = 4; + bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature, + phba->pport->cfg_lun_queue_depth); + bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature, + phba->sli4_hba.pc_sli4_params.mi_ver); + break; + case LPFC_SET_ENABLE_CMF: + bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF; + mbox->u.mqe.un.set_feature.param_len = 4; + bf_set(lpfc_mbx_set_feature_cmf, + &mbox->u.mqe.un.set_feature, 1); + break; } - return; } @@ -7365,7 +7663,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; mbox->u.mqe.un.set_host_data.param_len = LPFC_HOST_OS_DRIVER_VERSION_SIZE; - snprintf(mbox->u.mqe.un.set_host_data.data, + snprintf(mbox->u.mqe.un.set_host_data.un.data, LPFC_HOST_OS_DRIVER_VERSION_SIZE, "Linux %s v"LPFC_DRIVER_VERSION, (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); @@ -7433,6 +7731,91 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, return 1; } +static void +lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + union lpfc_sli4_cfg_shdr *shdr; + u32 shdr_status, shdr_add_status; + u32 sig, acqe; + + /* Two outcomes. (1) Set features was successful and EDC negotiation + * is done. (2) Mailbox failed, so send FPIN support only. + */ + shdr = (union lpfc_sli4_cfg_shdr *) + &pmb->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "2516 CGN SET_FEATURE mbox failed with " + "status x%x add_status x%x, mbx status x%x " + "Reset Congestion to FPINs only\n", + shdr_status, shdr_add_status, + pmb->u.mb.mbxStatus); + /* If there is an mbox error, move on to RDF */ + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + goto out; + } + + /* Zero out Congestion Signal ACQE counter */ + phba->cgn_acqe_cnt = 0; + atomic64_set(&phba->cgn_acqe_stat.warn, 0); + atomic64_set(&phba->cgn_acqe_stat.alarm, 0); + + acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq, + &pmb->u.mqe.un.set_feature); + sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq, + &pmb->u.mqe.un.set_feature); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4620 SET_FEATURES Success: Freq: %ds %dms " + " Reg: x%x x%x\n", acqe, sig, + phba->cgn_reg_signal, phba->cgn_reg_fpin); +out: + mempool_free(pmb, phba->mbox_mem_pool); + + /* Register for FPIN events from the fabric now that the + * EDC common_set_features has completed. 
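lpfc_mbx_cmpl_cgn_set_ftrs above degrades gracefully: on a mailbox error it clears congestion-signal support, re-arms FPIN-only registration, and still proceeds to the RDF step on both paths. A small sketch of that negotiate-or-fall-back shape; the flag values are illustrative:

#include <stdio.h>

#define CGN_FPIN_WARN		0x1
#define CGN_FPIN_ALARM		0x2
#define CGN_SIG_NOTSUPPORTED	0
#define CGN_SIG_WARN_ALARM	3

struct cgn_port {
	int reg_signal;
	int reg_fpin;
};

/* Mailbox failed: drop signal support and fall back to FPIN-only.
 * Either way the caller proceeds to the RDF registration step. */
static void cgn_set_features_done(struct cgn_port *p, int mbx_status)
{
	if (mbx_status) {
		p->reg_signal = CGN_SIG_NOTSUPPORTED;
		p->reg_fpin = CGN_FPIN_WARN | CGN_FPIN_ALARM;
	}
}

int main(void)
{
	struct cgn_port p = { CGN_SIG_WARN_ALARM, CGN_FPIN_WARN };

	cgn_set_features_done(&p, 1);	/* simulate a mailbox error */
	printf("signal=%d fpin=0x%x\n", p.reg_signal, p.reg_fpin);
	return 0;
}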
+ */ + lpfc_issue_els_rdf(vport, 0); +} + +int +lpfc_config_cgn_signal(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + u32 rc; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + goto out_rdf; + + lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4621 SET_FEATURES: FREQ sig x%x acqe x%x: " + "Reg: x%x x%x\n", + phba->cgn_sig_freq, lpfc_acqe_cgn_frequency, + phba->cgn_reg_signal, phba->cgn_reg_fpin); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto out; + return 0; + +out: + mempool_free(mboxq, phba->mbox_mem_pool); +out_rdf: + /* If there is an mbox error, move on to RDF */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; + lpfc_issue_els_rdf(phba->pport, 0); + return -EIO; +} + /** * lpfc_init_idle_stat_hb - Initialize idle_stat tracking * @phba: pointer to lpfc hba data structure. @@ -7464,7 +7847,8 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1); idle_stat->prev_wall = wall; - if (phba->nvmet_support) + if (phba->nvmet_support || + phba->cmf_active_mode != LPFC_CFG_OFF) cq->poll_mode = LPFC_QUEUE_WORK; else cq->poll_mode = LPFC_IRQ_POLL; @@ -7496,6 +7880,258 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba) } /** + * lpfc_cmf_setup - Initialize CMF and MI support + * @phba: Pointer to HBA context object. + * + * This is called from HBA setup during driver load or when the HBA + * comes online. This does all the initialization to support CMF and MI. + **/ +static int +lpfc_cmf_setup(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_mqe *mqe; + struct lpfc_dmabuf *mp; + struct lpfc_pc_sli4_params *sli4_params; + struct lpfc_sli4_parameters *mbx_sli4_parameters; + int length; + int rc, cmf, mi_ver; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + mqe = &mboxq->u.mqe; + + /* Read the port's SLI4 Config Parameters */ + length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, + length, LPFC_SLI4_MBX_EMBED); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (unlikely(rc)) { + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; + } + + /* Gather info on CMF and MI support */ + sli4_params = &phba->sli4_hba.pc_sli4_params; + mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters); + sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters); + + /* Are we forcing MI off via module parameter? 
*/ + if (!phba->cfg_enable_mi) + sli4_params->mi_ver = 0; + + /* Always try to enable MI feature if we can */ + if (sli4_params->mi_ver) { + lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mi_ver = bf_get(lpfc_mbx_set_feature_mi, + &mboxq->u.mqe.un.set_feature); + + if (rc == MBX_SUCCESS) { + if (mi_ver) { + lpfc_printf_log(phba, + KERN_WARNING, LOG_CGN_MGMT, + "6215 MI is enabled\n"); + sli4_params->mi_ver = mi_ver; + } else { + lpfc_printf_log(phba, + KERN_WARNING, LOG_CGN_MGMT, + "6338 MI is disabled\n"); + sli4_params->mi_ver = 0; + } + } else { + /* mi_ver is already set from GET_SLI4_PARAMETERS */ + lpfc_printf_log(phba, KERN_INFO, + LOG_CGN_MGMT | LOG_INIT, + "6245 Enable MI Mailbox x%x (x%x/x%x) " + "failed, rc:x%x mi:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get + (phba, mboxq), + lpfc_sli_config_mbox_opcode_get + (phba, mboxq), + rc, sli4_params->mi_ver); + } + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6217 MI is disabled\n"); + } + + /* Ensure FDMI is enabled for MI if enable_mi is set */ + if (sli4_params->mi_ver) + phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; + + /* Always try to enable CMF feature if we can */ + if (sli4_params->cmf) { + lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + cmf = bf_get(lpfc_mbx_set_feature_cmf, + &mboxq->u.mqe.un.set_feature); + if (rc == MBX_SUCCESS && cmf) { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6218 CMF is enabled: mode %d\n", + phba->cmf_active_mode); + } else { + lpfc_printf_log(phba, KERN_WARNING, + LOG_CGN_MGMT | LOG_INIT, + "6219 Enable CMF Mailbox x%x (x%x/x%x) " + "failed, rc:x%x dd:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get + (phba, mboxq), + lpfc_sli_config_mbox_opcode_get + (phba, mboxq), + rc, cmf); + sli4_params->cmf = 0; + phba->cmf_active_mode = LPFC_CFG_OFF; + goto no_cmf; + } + + /* Allocate Congestion Information Buffer */ + if (!phba->cgn_i) { + mp = kmalloc(sizeof(*mp), GFP_KERNEL); + if (mp) + mp->virt = dma_alloc_coherent + (&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + &mp->phys, GFP_KERNEL); + if (!mp || !mp->virt) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2640 Failed to alloc memory " + "for Congestion Info\n"); + kfree(mp); + sli4_params->cmf = 0; + phba->cmf_active_mode = LPFC_CFG_OFF; + goto no_cmf; + } + phba->cgn_i = mp; + + /* initialize congestion buffer info */ + lpfc_init_congestion_buf(phba); + lpfc_init_congestion_stat(phba); + } + + rc = lpfc_sli4_cgn_params_read(phba); + if (rc < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "6242 Error reading Cgn Params (%d)\n", + rc); + /* Ensure CGN Mode is off */ + sli4_params->cmf = 0; + } else if (!rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "6243 CGN Event empty object.\n"); + /* Ensure CGN Mode is off */ + sli4_params->cmf = 0; + } + } else { +no_cmf: + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6220 CMF is disabled\n"); + } + + /* Only register congestion buffer with firmware if BOTH + * CMF and E2E are enabled. 
+ */ + if (sli4_params->cmf && sli4_params->mi_ver) { + rc = lpfc_reg_congestion_buf(phba); + if (rc) { + dma_free_coherent(&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + phba->cgn_i->virt, phba->cgn_i->phys); + kfree(phba->cgn_i); + phba->cgn_i = NULL; + /* Ensure CGN Mode is off */ + phba->cmf_active_mode = LPFC_CFG_OFF; + return 0; + } + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6470 Setup MI version %d CMF %d mode %d\n", + sli4_params->mi_ver, sli4_params->cmf, + phba->cmf_active_mode); + + mempool_free(mboxq, phba->mbox_mem_pool); + + /* Initialize atomic counters */ + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + atomic_set(&phba->cgn_driver_evt_cnt, 0); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + + phba->cmf_interval_rate = LPFC_CMF_INTERVAL; + + /* Allocate RX Monitor Buffer */ + if (!phba->rxtable) { + phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY, + sizeof(struct rxtable_entry), + GFP_KERNEL); + if (!phba->rxtable) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2644 Failed to alloc memory " + "for RX Monitor Buffer\n"); + return -ENOMEM; + } + } + atomic_set(&phba->rxtable_idx_head, 0); + atomic_set(&phba->rxtable_idx_tail, 0); + return 0; +} + +static int +lpfc_set_host_tm(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + uint32_t len, rc; + struct timespec64 cur_time; + struct tm broken; + uint32_t month, day, year; + uint32_t hour, minute, second; + struct lpfc_mbx_set_host_date_time *tm; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + len = sizeof(struct lpfc_mbx_set_host_data) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_HOST_DATA, len, + LPFC_SLI4_MBX_EMBED); + + mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME; + mboxq->u.mqe.un.set_host_data.param_len = + sizeof(struct lpfc_mbx_set_host_date_time); + tm = &mboxq->u.mqe.un.set_host_data.un.tm; + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &broken); + month = broken.tm_mon + 1; + day = broken.tm_mday; + year = broken.tm_year - 100; + hour = broken.tm_hour; + minute = broken.tm_min; + second = broken.tm_sec; + bf_set(lpfc_mbx_set_host_month, tm, month); + bf_set(lpfc_mbx_set_host_day, tm, day); + bf_set(lpfc_mbx_set_host_year, tm, year); + bf_set(lpfc_mbx_set_host_hour, tm, hour); + bf_set(lpfc_mbx_set_host_min, tm, minute); + bf_set(lpfc_mbx_set_host_sec, tm, second); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** * lpfc_sli4_hba_setup - SLI4 device initialization PCI function * @phba: Pointer to HBA context object. 
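lpfc_set_host_tm above packs a broken-down UTC time into the mailbox and has to compensate for struct tm conventions: months are 0-based, years count from 1900, and the year field is reduced to two digits relative to 2000. The same packing in userspace:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm broken;

	gmtime_r(&now, &broken);
	/* tm_mon is 0-11 and tm_year counts from 1900; the mailbox wants
	 * a 1-based month and a two-digit year relative to 2000. */
	printf("month=%d day=%d year=%02d %02d:%02d:%02d\n",
	       broken.tm_mon + 1, broken.tm_mday,
	       broken.tm_year - 100,	/* e.g. 21 for 2021 */
	       broken.tm_hour, broken.tm_min, broken.tm_sec);
	return 0;
}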
* @@ -7584,6 +8220,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + rc = lpfc_set_host_tm(phba); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + "6468 Set host date / time: Status x%x:\n", rc); + /* * Continue initialization with default values even if driver failed * to read FCoE param config regions, only read parameters if the @@ -8111,6 +8751,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Indicate device interrupt mode */ phba->sli4_hba.intr_enable = 1; + /* Setup CMF after HBA is initialized */ + lpfc_cmf_setup(phba); + if (!(phba->hba_flag & HBA_FCOE_MODE) && (phba->hba_flag & LINK_DISABLED)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -8132,7 +8775,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) } } mempool_free(mboxq, phba->mbox_mem_pool); + + phba->hba_flag |= HBA_SETUP; return rc; + out_io_buff_free: /* Free allocated IO Buffers */ lpfc_io_free(phba); @@ -8790,8 +9436,11 @@ static int lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mboxq; int rc = 0; unsigned long timeout = 0; + u32 sli_flag; + u8 cmd, subsys, opcode; /* Mark the asynchronous mailbox command posting as blocked */ spin_lock_irq(&phba->hbalock); @@ -8809,12 +9458,37 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) if (timeout) lpfc_sli4_process_missed_mbox_completions(phba); - /* Wait for the outstnading mailbox command to complete */ + /* Wait for the outstanding mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ msleep(2); if (time_after(jiffies, timeout)) { - /* Timeout, marked the outstanding cmd not complete */ + /* Timeout, mark the outstanding cmd not complete */ + + /* Sanity check sli.mbox_active has not completed or + * cancelled from another context during last 2ms sleep, + * so take hbalock to be sure before logging. 
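The timeout path above re-checks sli.mbox_active under the hbalock before logging, because the command may complete or be cancelled during one of the 2 ms sleeps. A compact analogue of that re-check-under-lock pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int mbox_active = 1;	/* stands in for sli.mbox_active */

/* The timed wait expired; look again under the lock before reporting,
 * since the command may have completed during the sleep. */
static void timeout_path(void)
{
	pthread_mutex_lock(&lock);
	if (mbox_active)
		printf("mailbox command could not complete\n");
	else
		printf("completed while we slept; nothing to report\n");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	timeout_path();
	return 0;
}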
+ */ + spin_lock_irq(&phba->hbalock); + if (phba->sli.mbox_active) { + mboxq = phba->sli.mbox_active; + cmd = mboxq->u.mb.mbxCommand; + subsys = lpfc_sli_config_mbox_subsys_get(phba, + mboxq); + opcode = lpfc_sli_config_mbox_opcode_get(phba, + mboxq); + sli_flag = psli->sli_flag; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2352 Mailbox command x%x " + "(x%x/x%x) sli_flag x%x could " + "not complete\n", + cmd, subsys, opcode, + sli_flag); + } else { + spin_unlock_irq(&phba->hbalock); + } + rc = 1; break; } @@ -9763,6 +10437,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, if (pcmd && (*pcmd == ELS_CMD_FLOGI || *pcmd == ELS_CMD_SCR || *pcmd == ELS_CMD_RDF || + *pcmd == ELS_CMD_EDC || *pcmd == ELS_CMD_RSCN_XMT || *pcmd == ELS_CMD_FDISC || *pcmd == ELS_CMD_LOGO || @@ -10097,8 +10772,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); - pcmd = (uint32_t *) (((struct lpfc_dmabuf *) - iocbq->context2)->virt); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, @@ -11619,6 +12292,7 @@ void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; IOCB_t *irsp = &rspiocb->iocb; /* ELS cmd tag <ulpIoTag> completes */ @@ -11627,11 +12301,16 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "x%x x%x x%x\n", irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); - lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1); + /* + * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp + * if exchange is busy. + */ if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) lpfc_ct_free_iocb(phba, cmdiocb); else lpfc_els_free_iocb(phba, cmdiocb); + + lpfc_nlp_put(ndlp); } /** @@ -14626,8 +15305,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, switch (cq->poll_mode) { case LPFC_IRQ_POLL: - irq_poll_sched(&cq->iop); - break; + /* CGN mgmt is mutually exclusive from softirq processing */ + if (phba->cmf_active_mode == LPFC_CFG_OFF) { + irq_poll_sched(&cq->iop); + break; + } + fallthrough; case LPFC_QUEUE_WORK: default: if (is_kdump_kernel()) @@ -20021,6 +20704,91 @@ out: } /** + * lpfc_log_fw_write_cmpl - logs firmware write completion status + * @phba: pointer to lpfc hba data structure + * @shdr_status: wr_object rsp's status field + * @shdr_add_status: wr_object rsp's add_status field + * @shdr_add_status_2: wr_object rsp's add_status_2 field + * @shdr_change_status: wr_object rsp's change_status field + * @shdr_csf: wr_object rsp's csf bit + * + * This routine is intended to be called after a firmware write completes. + * It will log next action items to be performed by the user to instantiate + * the newly downloaded firmware or reason for incompatibility. 
+ **/ +static void +lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, + u32 shdr_add_status, u32 shdr_add_status_2, + u32 shdr_change_status, u32 shdr_csf) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "4198 %s: flash_id x%02x, asic_rev x%02x, " + "status x%02x, add_status x%02x, add_status_2 x%02x, " + "change_status x%02x, csf %01x\n", __func__, + phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev, + shdr_status, shdr_add_status, shdr_add_status_2, + shdr_change_status, shdr_csf); + + if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { + switch (shdr_add_status_2) { + case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4199 Firmware write failed: " + "image incompatible with flash x%02x\n", + phba->sli4_hba.flash_id); + break; + case LPFC_ADD_STATUS_2_INCORRECT_ASIC: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4200 Firmware write failed: " + "image incompatible with ASIC " + "architecture x%02x\n", + phba->sli4_hba.asic_rev); + break; + default: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4210 Firmware write failed: " + "add_status_2 x%02x\n", + shdr_add_status_2); + break; + } + } else if (!shdr_status && !shdr_add_status) { + if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || + shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { + if (shdr_csf) + shdr_change_status = + LPFC_CHANGE_STATUS_PCI_RESET; + } + + switch (shdr_change_status) { + case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3198 Firmware write complete: System " + "reboot required to instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_FW_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3199 Firmware write complete: " + "Firmware reset required to " + "instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_PORT_MIGRATION): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3200 Firmware write complete: Port " + "Migration or PCI Reset required to " + "instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_PCI_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3201 Firmware write complete: PCI " + "Reset required to instantiate\n"); + break; + default: + break; + } + } +} + +/** * lpfc_wr_object - write an object to the firmware * @phba: HBA structure that indicates port to create a queue on. * @dmabuf_list: list of dmabufs to write to the port. 
@@ -20046,7 +20814,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, struct lpfc_mbx_wr_object *wr_object; LPFC_MBOXQ_t *mbox; int rc = 0, i = 0; - uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf; + uint32_t shdr_status, shdr_add_status, shdr_add_status_2; + uint32_t shdr_change_status = 0, shdr_csf = 0; uint32_t mbox_tmo; struct lpfc_dmabuf *dmabuf; uint32_t written = 0; @@ -20100,58 +20869,36 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, &wr_object->header.cfg_shdr.response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &wr_object->header.cfg_shdr.response); + shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2, + &wr_object->header.cfg_shdr.response); if (check_change_status) { shdr_change_status = bf_get(lpfc_wr_object_change_status, &wr_object->u.response); - - if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || - shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { - shdr_csf = bf_get(lpfc_wr_object_csf, - &wr_object->u.response); - if (shdr_csf) - shdr_change_status = - LPFC_CHANGE_STATUS_PCI_RESET; - } - - switch (shdr_change_status) { - case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3198 Firmware write complete: System " - "reboot required to instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_FW_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3199 Firmware write complete: Firmware" - " reset required to instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_PORT_MIGRATION): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3200 Firmware write complete: Port " - "Migration or PCI Reset required to " - "instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_PCI_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3201 Firmware write complete: PCI " - "Reset required to instantiate\n"); - break; - default: - break; - } + shdr_csf = bf_get(lpfc_wr_object_csf, + &wr_object->u.response); } + if (!phba->sli4_hba.intr_enable) mempool_free(mbox, phba->mbox_mem_pool); else if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); - if (shdr_status || shdr_add_status || rc) { + if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3025 Write Object mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); + "status x%x add_status x%x, add_status_2 x%x, " + "mbx status x%x\n", + shdr_status, shdr_add_status, shdr_add_status_2, + rc); rc = -ENXIO; *offset = shdr_add_status; - } else + } else { *offset += wr_object->u.response.actual_write_length; + } + + if (rc || check_change_status) + lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status, + shdr_add_status_2, shdr_change_status, + shdr_csf); return rc; } @@ -20543,8 +21290,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, } /* NVME_FCREQ and NVME_ABTS requests */ - if (pwqe->iocb_flag & LPFC_IO_NVME || - pwqe->iocb_flag & LPFC_IO_FCP) { + if (pwqe->iocb_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) { /* Get the IO distribution (hba_wqidx) for WQ assignment. */ wq = qp->io_wq; pring = wq->pring; @@ -21323,6 +22069,116 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, } /** + * lpfc_read_object - Retrieve object data from HBA + * @phba: The HBA for which this call is being executed. + * @rdobject: Pathname of object data we want to read. + * @datap: Pointer to where data will be copied to. 
+ * @datasz: size of data area + * + * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less. + * The data will be truncated if datasz is not large enough. + * Version 1 is not supported with an embedded mbox cmd, so we must use version 0. + * Returns the actual bytes read from the object. + */ +int +lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap, + uint32_t datasz) +{ + struct lpfc_mbx_read_object *read_object; + LPFC_MBOXQ_t *mbox; + int rc, length, eof, j, byte_cnt = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_dmabuf *pcmd; + + /* sanity check on the caller's data buffer */ + if (!datap) + return -ENODEV; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_read_object) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_READ_OBJECT, + length, LPFC_SLI4_MBX_EMBED); + read_object = &mbox->u.mqe.un.read_object; + shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr; + + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0); + bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz); + read_object->u.request.rd_object_offset = 0; + read_object->u.request.rd_object_cnt = 1; + + memset((void *)read_object->u.request.rd_object_name, 0, + LPFC_OBJ_NAME_SZ); + sprintf((uint8_t *)read_object->u.request.rd_object_name, "%s", rdobject); + for (j = 0; j < strlen(rdobject); j++) + read_object->u.request.rd_object_name[j] = + cpu_to_le32(read_object->u.request.rd_object_name[j]); + + pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); + if (pcmd) + pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); + if (!pcmd || !pcmd->virt) { + kfree(pcmd); + mempool_free(mbox, phba->mbox_mem_pool); + return -ENOMEM; + } + memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE); + read_object->u.request.rd_object_hbuf[0].pa_lo = + putPaddrLow(pcmd->phys); + read_object->u.request.rd_object_hbuf[0].pa_hi = + putPaddrHigh(pcmd->phys); + read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE; + + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_buf = NULL; + mbox->ctx_ndlp = NULL; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + if (shdr_status == STATUS_FAILED && + shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "4674 No port cfg file in FW.\n"); + byte_cnt = -ENOENT; + } else if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "2625 READ_OBJECT mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + byte_cnt = -ENXIO; + } else { + /* Success */ + length = read_object->u.response.rd_object_actual_rlen; + eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response); + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT, + "2626 READ_OBJECT Success len %d:%d, EOF %d\n", + length, datasz, eof); + + /* Detect the case where the port config file exists but is empty */ + if (!length && eof) { + byte_cnt = 0; + goto exit; + } + + byte_cnt = length; + lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt); + } + + exit: + lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); + kfree(pcmd); + mempool_free(mbox, phba->mbox_mem_pool); + return byte_cnt; +} + +/** *
lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool * @phba: The HBA for which this call is being executed. * @lpfc_buf: IO buf structure to append the SGL chunk diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index dde8eb9d796d..5161ccacea3e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -107,6 +107,7 @@ struct lpfc_iocbq { #define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */ #define LPFC_IO_NVMET 0x800000 /* NVMET command */ #define LPFC_IO_VMID 0x1000000 /* VMID tagged IO */ +#define LPFC_IO_CMF 0x4000000 /* CMF command */ uint32_t drvrTimeout; /* driver timeout in seconds */ struct lpfc_vport *vport;/* virtual port pointer */ @@ -462,4 +463,5 @@ struct lpfc_io_buf { uint64_t ts_isr_cmpl; uint64_t ts_data_io; #endif + uint64_t rx_cmd_start; }; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 26f19c95380f..99c5d1e4da5e 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -557,6 +557,7 @@ struct lpfc_pc_sli4_params { uint16_t mi_value; #define LPFC_DFLT_MIB_VAL 2 uint8_t mib_bde_cnt; + uint8_t cmf; uint8_t cqv; uint8_t mqv; uint8_t wqv; @@ -978,6 +979,8 @@ struct lpfc_sli4_hba { #define lpfc_conf_trunk_port3_nd_WORD conf_trunk #define lpfc_conf_trunk_port3_nd_SHIFT 7 #define lpfc_conf_trunk_port3_nd_MASK 0x1 + uint8_t flash_id; + uint8_t asic_rev; }; enum lpfc_sge_type { diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 2d62fd2a9824..a7aba7833425 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.8.0.10" +#define LPFC_DRIVER_VERSION "14.0.0.1" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */
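A note on the mailbox timeout change in lpfc_sli4_async_mbox_block above: it follows the common poll-then-recheck pattern. The waiter polls sli.mbox_active every 2ms, and on timeout re-checks the flag under hbalock before logging, because the command may have completed or been cancelled during the final sleep. A minimal sketch of just that pattern (wait_mbox_idle is an illustrative name, not a driver function; only the lpfc fields shown in the hunk are assumed):

static int wait_mbox_idle(struct lpfc_hba *phba, unsigned long timeout)
{
	/* Poll the active-mailbox flag every 2ms */
	while (phba->sli.mbox_active) {
		msleep(2);
		if (!time_after(jiffies, timeout))
			continue;

		/* Timed out: re-check under hbalock so a completion
		 * racing with the last sleep is not misreported.
		 */
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli.mbox_active) {
			spin_unlock_irq(&phba->hbalock);
			return 0;	/* completed just in time */
		}
		spin_unlock_irq(&phba->hbalock);
		return 1;	/* genuinely stuck; caller logs details */
	}
	return 0;
}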
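The lpfc_ignore_els_cmpl hunk reorders teardown so the node reference is dropped only after the iocb is released, since the release path can still dereference the node while the exchange is busy. The shape of that ordering, as a sketch (example_els_teardown is illustrative only, covering just the ELS branch of the real completion handler):

static void example_els_teardown(struct lpfc_hba *phba,
				 struct lpfc_iocbq *cmdiocb)
{
	/* Cache the node pointer before the iocb is freed */
	struct lpfc_nodelist *ndlp = cmdiocb->context1;

	/* Releasing the iocb may still access ndlp internally */
	lpfc_els_free_iocb(phba, cmdiocb);

	/* Drop the node reference last */
	lpfc_nlp_put(ndlp);
}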
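For lpfc_read_object, note the return convention: the byte count on success, -ENOENT when the object is absent from firmware, and another negative errno on mailbox failure. A hypothetical caller might look like this (example_fetch_port_cfg and its error handling are illustrative; LPFC_PORT_CFG_NAME and LPFC_BPL_SIZE are the driver's existing defines):

static int example_fetch_port_cfg(struct lpfc_hba *phba)
{
	u32 *buf;
	int len;

	/* lpfc_read_object() reads at most LPFC_BPL_SIZE bytes */
	buf = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
			       buf, LPFC_BPL_SIZE);
	if (len >= 0) {
		/* len bytes of object data are now in buf; parse here */
	} else if (len == -ENOENT) {
		/* No config object in FW: treat as "no config present" */
		len = 0;
	}

	kfree(buf);
	return len < 0 ? len : 0;
}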